text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'Daniel'
from PySide.QtCore import *
from PySide.QtGui import *
from urllib.request import urlopen
import sys
class Form(QDialog):
    """Currency converter dialog.

    Downloads the Bank of Canada daily exchange-rate CSV, fills two combo
    boxes with the available currencies, and converts the spin-box amount
    whenever any input changes.
    """
    # Class-level default; getdata() rebinds an instance-level dict.
    rates = {}

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        # Fetch rates first: both combo boxes are populated from them.
        date = self.getdata()
        rates = sorted(self.rates.keys())
        dateLabel = QLabel(date)
        self.fromComboBox = QComboBox()
        self.fromComboBox.addItems(rates)
        # Handles floating point values
        self.fromSpinBox = QDoubleSpinBox()
        # always set range first before setting value.
        self.fromSpinBox.setRange(0.01, 10000000.00)
        self.fromSpinBox.setValue(1.00)
        self.toComboBox = QComboBox()
        self.toComboBox.addItems(rates)
        self.toLabel = QLabel("1.00")
        # Create the grid
        grid = QGridLayout()
        grid.addWidget(dateLabel, 0, 0)
        grid.addWidget(self.fromComboBox, 1, 0)
        grid.addWidget(self.fromSpinBox, 1, 1)
        grid.addWidget(self.toComboBox, 2, 0)
        grid.addWidget(self.toLabel, 2, 1)
        self.setLayout(grid)
        # Set behaviour of buttons (old-style signal/slot connections)
        self.connect(self.fromComboBox,
                     SIGNAL("currentIndexChanged(int)"), self.updateUi)
        self.connect(self.toComboBox,
                     SIGNAL("currentIndexChanged(int)"), self.updateUi)
        self.connect(self.fromSpinBox,
                     SIGNAL("valueChanged(double)"), self.updateUi)
        self.setWindowTitle("Currency")

    def updateUi(self):
        """Recompute and display the converted amount."""
        to = (self.toComboBox.currentText())
        from_ = (self.fromComboBox.currentText())
        # Ratio of the two quotes; presumably both are quoted against the
        # same base currency (CAD) — confirm against the CSV format.
        amount = (self.rates[from_] / self.rates[to]) * self.fromSpinBox.value()
        self.toLabel.setText("%0.2f" % amount)

    def getdata(self):  # Idea taken from the Python Cookbook
        """Download the rate CSV into self.rates.

        Returns a label string carrying the quote date, or the download
        error text when the fetch fails.
        """
        self.rates = {}
        try:
            date = "Unknown"
            fh = urlopen("http://www.bankofcanada.ca/en/markets/csv/exchange_eng.csv")
            for line in fh:
                # urlopen yields bytes; decode before string handling.
                line = line.decode()
                # Ignore comments
                if not line or line.startswith(("#", "Closing ")):
                    continue
                fields = line.split(",")
                print(fields)
                if line.startswith("Date "):
                    date = fields[-1]
                else:
                    # Try and convert it, otherwise skip
                    try:
                        value = float(fields[-1])
                        self.rates[(fields[0])] = value
                    except ValueError:
                        pass
            return "Exchange Rates Date: "+ date
        except Exception as e:
            # Any network/parse failure becomes the window's date label.
            return "Failed to download:\n%s" % e
# Application entry point: build the Qt app, show the dialog, run the loop.
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| {
"repo_name": "daniellowtw/Learning",
"path": "Python GUI and QT/Introduction/curreny_converter.py",
"copies": "1",
"size": "2817",
"license": "cc0-1.0",
"hash": -1857496831854491400,
"line_mean": 33.3536585366,
"line_max": 86,
"alpha_frac": 0.5505857295,
"autogenerated": false,
"ratio": 4.191964285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019406925199616334,
"num_lines": 82
} |
__author__ = 'Daniel'
from PySide.QtCore import *
from PySide.QtGui import *
from urllib.request import urlopen
import sys
class Form(QDialog):
    """Compound-interest calculator: amount = principle * (1 + rate/100) ** years."""

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        principle_label = QLabel("Principle:")
        rate_label = QLabel("Rate:")
        years_label = QLabel("Years:")
        amount_label = QLabel("Amount:")
        self.principle_spin_box = QDoubleSpinBox()
        self.principle_spin_box.setPrefix("$")
        # No practical upper limit on the invested amount or the rate.
        self.principle_spin_box.setMaximum(float("inf"))
        self.rate_spin_box = QDoubleSpinBox()
        self.rate_spin_box.setMaximum(float("inf"))
        self.rate_spin_box.setSuffix("%")
        self.year_combo_box = QComboBox()
        self.year_combo_box.addItems(["1 year", "2 years", "3 years"])
        self.amount_calculated = QLabel("$0.00")
        # Create the grid
        grid = QGridLayout()
        grid.addWidget(principle_label, 0, 0)
        grid.addWidget(rate_label, 1, 0)
        grid.addWidget(years_label, 2, 0)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.principle_spin_box, 0, 1)
        grid.addWidget(self.rate_spin_box, 1, 1)
        grid.addWidget(self.year_combo_box, 2, 1)
        grid.addWidget(self.amount_calculated, 3, 1)
        self.setLayout(grid)
        # Recalculate whenever any of the three inputs changes.
        self.connect(self.principle_spin_box, SIGNAL("valueChanged(double)"), self.update_ui)
        self.connect(self.rate_spin_box, SIGNAL("valueChanged(double)"), self.update_ui)
        self.connect(self.year_combo_box, SIGNAL("currentIndexChanged(int)"), self.update_ui)
        self.setWindowTitle("Interests")

    def update_ui(self):
        """Recompute the compound amount and show it in the result label."""
        principle = float(self.principle_spin_box.value())
        rate = float(self.rate_spin_box.value())
        # Combo index 0 means "1 year".
        year = int(self.year_combo_box.currentIndex()) + 1
        amount = principle * (1 + rate / 100) ** year
        # Bug fix: include the "$" prefix so the result matches the label's
        # initial "$0.00" and the principle spin box's "$" prefix.
        self.amount_calculated.setText("$%0.2f" % (amount,))
# Application entry point: build the Qt app, show the dialog, run the loop.
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_() | {
"repo_name": "daniellowtw/Learning",
"path": "Python GUI and QT/Introduction/interest.py",
"copies": "1",
"size": "2002",
"license": "cc0-1.0",
"hash": 3217860228984165400,
"line_mean": 36.7924528302,
"line_max": 93,
"alpha_frac": 0.6283716284,
"autogenerated": false,
"ratio": 3.3366666666666664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44650382950666667,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
from PySide.QtCore import *
import operator
class MyHistoryTableModel(QAbstractTableModel):
    """Read-only table model over a list of rows, sortable by any column."""

    def __init__(self, datain, headerdata, parent=None, *args):
        """
        :param datain: list of rows, each row a list of cell values
        :param headerdata: list of column header strings
        :param parent: Defaults None
        :param args: forwarded to QAbstractTableModel
        :return: None
        """
        QAbstractTableModel.__init__(self, parent, *args)
        self.arraydata = datain
        self.headerdata = headerdata

    def rowCount(self, parent):
        return len(self.arraydata)

    def columnCount(self, parent):
        # Bug fix: the original indexed arraydata[0] unconditionally, which
        # raised IndexError for a model constructed with no rows.
        return len(self.arraydata[0]) if self.arraydata else 0

    def data(self, index, role):
        # Only plain display text is provided; everything else is None.
        if not index.isValid():
            return None
        elif role != Qt.DisplayRole:
            return None
        return self.arraydata[index.row()][index.column()]

    def headerData(self, col, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self.headerdata[col]
        return None

    def sort(self, Ncol, order):
        """Sort table by given column number."""
        self.emit(SIGNAL("layoutAboutToBeChanged()"))
        self.arraydata = sorted(self.arraydata, key=operator.itemgetter(Ncol))
        if order == Qt.DescendingOrder:
            self.arraydata.reverse()
        self.emit(SIGNAL("layoutChanged()"))
| {
"repo_name": "daniellowtw/MentalMaths",
"path": "GUI/models.py",
"copies": "1",
"size": "1341",
"license": "mit",
"hash": -5822403598079444000,
"line_mean": 30.1860465116,
"line_max": 78,
"alpha_frac": 0.610738255,
"autogenerated": false,
"ratio": 4.027027027027027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137765282027027,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
from random import randint, randrange
from question import *
from time import clock
from UserData import *
class UnknownCommandException(Exception):
    """Raised when the user enters a command the game does not recognise."""

    def __init__(self, msg):
        # Forward the message to Exception so str(e) shows it; the original
        # swallowed it, making the exception print as an empty string.
        super(UnknownCommandException, self).__init__(msg)
        self._msg = msg
class NoQuestionException(Exception):
    """Raised when an answer is submitted but no question is active."""
class Game:
    """
    Represents a game instance
    Recommended training questions based on the following
    1) Squaring a number
    2) Multiplying two numbers having the same first n-1 digits and ones digit add up to ten
    3) Multiplying two numbers ending with 1
    4) Multiplication between a range
    """

    def __init__(self):
        self.lower = 0             # default lower bound for random operands
        self.upper = 100           # default upper bound for random operands
        self.score = 0             # questions answered correctly
        self.total_questions = 0   # questions attempted
        self.current_question = None
        self.history = {}          # hash(Question) -> Question with timing stats
        self.user_score = None     # UserData store, bound in set_history()
        self.saved_question = []
        self.start_time = 0
        self.end_time = 0
        self.set_history()
        self.question_generator = None

    def set_history(self):
        """
        Loads history from database
        :return:
        """
        self.user_score = UserData()
        self.user_score.load_or_create_db()
        self.history = self.user_score.db

    def start_timing(self):
        """Mark the instant the current question was shown."""
        # time.clock() (imported at module level) was removed in Python 3.8;
        # perf_counter() is its documented replacement for elapsed-time use.
        from time import perf_counter
        self.start_time = perf_counter()

    def end_timing(self):
        """Mark the instant the user answered."""
        from time import perf_counter
        self.end_time = perf_counter()

    def save_current_question(self):
        """Bookmark the current question for later review."""
        self.saved_question.append(self.current_question)

    def gen_next_question(self, lower=None, upper=None, op_type_value=None):
        """
        Generate a random question based on the given parameter (if any)
        :param lower:
        :param upper:
        :param op_type_value:int Give the operation value
        :return:
        """
        lower_bound = self.lower if lower is None else lower
        upper_bound = self.upper if upper is None else upper
        op1 = randint(lower_bound, upper_bound)
        op2 = randint(lower_bound, upper_bound)
        if op_type_value is None:
            op_type_value = randrange(len(Operation))
        self.current_question = Question(op1, op_type_value, op2)

    def solve_question(self, user_input):
        """
        Take a user input and check whether it solves the current question
        :param user_input:int
        :return:(is_user_correct, solution, time_taken)
        :raises NoQuestionException: when no question has been generated yet
        """
        if self.current_question is None:
            raise NoQuestionException
        time_taken = self.end_time - self.start_time
        # NOTE: eval() is only acceptable here because question.query strings
        # are generated internally by Question, never taken from user input.
        solution = eval(self.current_question.query)
        is_user_correct = solution == user_input
        self.total_questions += 1
        # Re-use the stored question object so its timing stats accumulate.
        if hash(self.current_question) in self.history:
            self.current_question = self.history[hash(self.current_question)]
        if is_user_correct:
            self.score += 1
            self.current_question.add_correct_time(time_taken)
        else:
            self.current_question.add_wrong_time(time_taken)
        self.history[hash(self.current_question)] = self.current_question
        # For displaying purposes
        return is_user_correct, solution, time_taken

    def gen_squares(self, lower, upper):
        """
        Generate a square question between range upper and lower inclusive
        :param lower:
        :param upper:
        :return:
        """
        x = randint(lower, upper)
        self.current_question = Question(x, Operation.MULTIPLICATION.value, x)

    def gen_ones_digit_sum_to_ten(self, upper_bound):
        """
        two numbers having the same first n-1 digits and ones digit add up to ten
        :param upper_bound:
        :return:
        """
        ones = randint(1, 9)
        ones_complement = 10 - ones
        tens1 = randint(1, upper_bound)
        self.current_question = Question(tens1 * 10 + ones, Operation.MULTIPLICATION.value,
                                         upper_bound * 10 + ones_complement)

    def gen_tens_digit_are_the_same(self, upper_bound):
        """
        Generate two numbers with the same first n-1 digits and is <= upperbound
        :param upper_bound:
        :return:
        """
        ones1 = randint(0, 9)
        ones2 = randint(0, 9)
        # Bug fix: randint() requires two bounds — the original randint(upper_bound)
        # raised TypeError. Mirrors gen_ones_digit_sum_to_ten's randint(1, upper_bound).
        tens = randint(1, upper_bound)
        self.current_question = Question(tens * 10 + ones1, Operation.MULTIPLICATION.value, tens * 10 + ones2)

    def gen_numbers_ending_with_one(self, upper_bound):
        """
        Generate two numbers ending with one with the first n-1 digit <= upper_bound
        :param upper_bound:
        :return:
        """
        # Bug fix: same single-argument randint() TypeError as above.
        tens1 = randint(1, upper_bound)
        tens2 = randint(1, upper_bound)
        self.current_question = Question(tens1 * 10 + 1, Operation.MULTIPLICATION.value, tens2 * 10 + 1)
| {
"repo_name": "daniellowtw/MentalMaths",
"path": "Game.py",
"copies": "1",
"size": "4672",
"license": "mit",
"hash": -1984995407290216000,
"line_mean": 31.6713286713,
"line_max": 110,
"alpha_frac": 0.6059503425,
"autogenerated": false,
"ratio": 3.9829497016197783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005346893620151817,
"num_lines": 143
} |
__author__ = 'daniel'
from sqlalchemy import Integer, Column, String, ForeignKey, Text, DateTime,Boolean
from sqlalchemy.orm import relationship, deferred
from lib.base import Base
from models.racer import Racer
from PyQt4 import QtCore
class Trial(Base):
    """Base row of the joined-table 'trials' hierarchy.

    Subclasses (EchoTrial, HTTPTrial) are distinguished by the polymorphic
    'type' discriminator column.
    """
    __tablename__ = "trials"
    # Discriminator: stores each subclass's polymorphic identity string.
    discriminator = Column('type', String(50))
    __mapper_args__ = {'polymorphic_on': discriminator}
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    description = Column(String(1000))
    core_id = Column(Integer)        # NOTE(review): presumably a CPU-core pin — confirm
    real_time = Column(Boolean)
    reps = Column(Integer)           # number of measurement repetitions
    status = Column(String(50))
    job = Column(Text)
    # deferred(): potentially large blob, loaded only when accessed.
    result = deferred(Column(Text))
    start_date = Column(DateTime)
    end_date = Column(DateTime)
    racer_id = Column(Integer, ForeignKey('racers.id'))
    racer = relationship("Racer", primaryjoin="Trial.racer_id==Racer.id")
    experiment_id = Column(Integer, ForeignKey('experiments.id'))
    experiment = relationship("Experiment", primaryjoin="Trial.experiment_id==Experiment.id")
class EchoTrial(Trial):
    """Trial variant configured against an echo endpoint (host/port/delay)."""
    __tablename__ = 'echo_trials'
    __mapper_args__ = {'polymorphic_identity': 'Echo Trial'}
    id = Column(Integer, ForeignKey('trials.id'), primary_key=True)
    host = Column(String(100))
    port = Column(Integer)
    delay = Column(Integer)

    def duplicate(self):
        """Return a new EchoTrial carrying only the configuration fields.

        Run state (status, job, result, dates) is deliberately not copied.
        """
        clone = EchoTrial()
        # Shared Trial configuration first, then echo-specific settings.
        for field in ("name", "description", "core_id", "real_time", "reps",
                      "racer", "experiment", "host", "port", "delay"):
            setattr(clone, field, getattr(self, field))
        return clone
class HTTPTrial(Trial):
    """Trial variant configured with a raw HTTP request against a URL."""
    __tablename__ = 'http_trials'
    __mapper_args__ = {'polymorphic_identity': 'HTTP Trial'}
    id = Column(Integer, ForeignKey('trials.id'), primary_key=True)
    request_url = Column(String(500))
    request = Column(String(50000))

    def duplicate(self):
        """Return a new HTTPTrial carrying only the configuration fields.

        Run state (status, job, result, dates) is deliberately not copied.
        """
        clone = HTTPTrial()
        # Shared Trial configuration first, then HTTP-specific settings.
        for field in ("name", "description", "core_id", "real_time", "reps",
                      "racer", "experiment", "request_url", "request"):
            setattr(clone, field, getattr(self, field))
        return clone
| {
"repo_name": "dmayer/time_trial",
"path": "time_trial_gui/models/trial.py",
"copies": "1",
"size": "2388",
"license": "mit",
"hash": -8633667052414398000,
"line_mean": 24.1368421053,
"line_max": 93,
"alpha_frac": 0.6293969849,
"autogenerated": false,
"ratio": 3.6236722306525038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47530692155525034,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
from test import *
from findandreplacedialog import *
from PySide.QtCore import *
from PySide.QtGui import *
import sys
class ControlMainWindow(QtGui.QMainWindow):
    """Main window wrapping the Designer-generated Ui_MainWindow."""
    # NOTE(review): this module only does `from PySide.QtGui import *`, which
    # does not bind the module name `QtGui`; presumably `from test import *`
    # re-exports it — confirm, or subclass QMainWindow directly.

    def __init__(self, parent=None):
        super(ControlMainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
class FindAndReplaceDialog(QDialog, Ui_FindAndReplaceDlg):
    """Find/replace dialog; the find button is only enabled while the
    search field contains text."""

    def __init__(self, parent=None):
        super(FindAndReplaceDialog, self).__init__(parent)
        self.__index = 0  # position of the next match within the document
        self.setupUi(self)
        self.updateUi()

    @Slot(str)
    def on_findLineEdit_textEdited(self, text):
        """Restart the search from the top whenever the term changes."""
        self.__index = 0
        self.updateUi()

    def updateUi(self):
        """Enable the find button only while there is a search term."""
        has_term = bool(self.findLineEdit.text())
        self.pushButton.setEnabled(has_term)
        # replaceButton / replaceAllButton enabling is still pending those
        # widgets being added to the .ui file.
if __name__ == "__main__":
    # Manual test harness: run this module directly to exercise the dialog.
    app = QtGui.QApplication(sys.argv)
    mySW = FindAndReplaceDialog()
    mySW.show()
    sys.exit(app.exec_())
| {
"repo_name": "daniellowtw/Learning",
"path": "Python GUI and QT/Qt Designer/testmain.py",
"copies": "1",
"size": "1153",
"license": "cc0-1.0",
"hash": 2876156663214979000,
"line_mean": 26.4523809524,
"line_max": 59,
"alpha_frac": 0.6366001735,
"autogenerated": false,
"ratio": 3.614420062695925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9617913373922395,
"avg_score": 0.026621372454705788,
"num_lines": 42
} |
__author__ = 'daniel'
import logging
from PyQt4 import QtGui
from gui.data_source_model import DataSourceModel
from gui.plotter_widget import PlotterWidget
from lib.timing_data import TimingData
from lib.plot import Plot
class PlotterTab(QtGui.QWidget):
    """Tab combining a table of timing-data sources with a plot of them."""

    def __init__(self, parent = None):
        super(PlotterTab, self).__init__(parent)
        self.layout = QtGui.QGridLayout()
        self.setLayout(self.layout)
        # data sources
        self.data_box = QtGui.QGroupBox(self, title="Data Sources")
        self.layout.addWidget(self.data_box,0,0)
        data_box_layout = QtGui.QGridLayout(self.data_box)
        self.data_box.setLayout(data_box_layout)
        self.data_source_model = DataSourceModel()
        self.data_source_table = QtGui.QTableView()
        self.data_source_table.setModel(self.data_source_model)
        self.data_source_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        # Double-click / Enter on a row opens the edit dialog.
        self.data_source_table.activated.connect(self.event_open_data_source_edit)
        data_box_layout.addWidget(self.data_source_table, 0, 0)
        self.plotter = PlotterWidget(self)
        self.plotter.set_data_source_model(self.data_source_model)
        self.layout.addWidget(self.plotter, 1,0,1,2)
        # Redraw automatically whenever a new source row is added.
        self.data_source_model.rowsInserted.connect(self.plotter.update_plot)
        # main buttons
        add_file_button = QtGui.QPushButton(self.data_box)
        add_file_button.setText("Add File")
        add_file_button.released.connect(self.event_show_select_file_dialog)
        self.layout.addWidget(add_file_button,0,1)

    def event_open_data_source_edit(self, index):
        """Open the edit dialog for the activated table row."""
        # NOTE(review): neither EditDataSourceDialog nor QtCore is imported in
        # this module, and self.main_widget is never assigned here — this
        # handler looks like it would raise NameError/AttributeError. Confirm.
        dialog = EditDataSourceDialog(index.data(QtCore.Qt.EditRole), self.main_widget)
        dialog.accepted.connect(self.event_data_source_edited)
        dialog.exec()

    def event_data_source_edited(self):
        """Refit columns and refresh the plot after an edit."""
        # NOTE(review): PlotterTab defines no update_plot method in this file;
        # presumably self.plotter.update_plot() was intended — confirm.
        self.data_source_table.resizeColumnsToContents()
        self.update_plot()

    def event_show_select_file_dialog(self):
        """Show a multi-select open dialog for timing-data files."""
        file_dialog = QtGui.QFileDialog()
        file_dialog.setAcceptMode(QtGui.QFileDialog.AcceptOpen)
        # NOTE(review): filters is built but never applied to the dialog.
        filters = [ "PEM Files (*.pem)", "Any files (*)" ]
        # file_dialog.fileSelected.connect(self.event_file_selected)
        file_dialog.filesSelected.connect(self.event_files_selected)
        file_dialog.setFileMode(QtGui.QFileDialog.ExistingFiles)
        file_dialog.exec()

    def event_files_selected(self, file_names):
        """Load every file the user selected as a data source."""
        print(file_names)
        for f in file_names:
            self.event_file_selected(f)

    def event_file_selected(self,file_name):
        """Load one CSV of timing data and add it to the model as a plot."""
        new_data = TimingData()
        new_data.load_from_csv(file_name)
        new_plot = Plot(new_data)
        self.data_source_model.add_data(new_plot)
        self.data_source_table.resizeColumnsToContents()
        #data = parse_csv(file_name)
        #self.plot_canvas.add_plot(data, 200, [min(data), 26*1000*1000], "100 micros", 'red')
        #self.plot_canvas.update_figure()

    def add_data_row(self, data):
        # Not implemented; placeholder kept for the table API.
        pass
| {
"repo_name": "dmayer/time_trial",
"path": "time_trial_gui/gui/plotter_tab.py",
"copies": "1",
"size": "3007",
"license": "mit",
"hash": 5148493041827399000,
"line_mean": 33.5632183908,
"line_max": 93,
"alpha_frac": 0.6704356501,
"autogenerated": false,
"ratio": 3.452353616532721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4622789266632721,
"avg_score": null,
"num_lines": null
} |
__author__ = 'daniel'
import logging
import requests
from bs4 import BeautifulSoup
from Transaction import Buy, Dividend, Sell, Split
def _get_fund_price(name):
    """Scrape the current price for fund *name* from di.se.

    Returns the price text with all whitespace removed, or "" when the
    page cannot be fetched.
    """
    url = 'https://www.di.se/fonder/%s/' % name
    logging.info("Getting price for {}: {}".format(name, url))
    response = requests.get(url)
    if response.status_code != 200:
        return ""
    soup = BeautifulSoup(response.text, 'html.parser')
    price_node = soup.find('div',
                           class_='js_instrument-details__price '
                                  'instrument-details__price-main')
    raw_text = price_node.getText(strip=True)
    # Collapse internal whitespace (thousands separators etc.).
    return "".join(raw_text.split())
class Stock:
    """Holdings, transactions and aggregates for one instrument.

    is_stock selects how the price lookup is built:
      1 -> share (Avanza IMPORTXML spreadsheet formula),
      0 -> fund (price scraped immediately via _get_fund_price),
      2 -> ETF (Avanza IMPORTXML spreadsheet formula).
    """

    def __init__(self, key, name, currency, kind="Aktie",
                 descriptions=None, dividend_per_year=1, dividend_forecast=0.0,
                 bloomberg_quote=None, avanza_id=None,
                 avanza_name=None, is_stock=1):
        if descriptions is None:
            descriptions = []
        self.key = key
        self.name = name
        self.currency = currency
        self.bloomberg_finance = bloomberg_quote
        self.transactions = []
        self.kind = kind
        self.descriptions = descriptions
        self.dividend_per_year = dividend_per_year
        self.dividend_forecast_per_stock = dividend_forecast
        if is_stock == 1:
            self.avanza_url = f"https://www.avanza.se/aktier/om-aktien.html/{avanza_id}/" \
                              f"{avanza_name}"
            self.avanza_price = f"IMPORTXML(\"{self.avanza_url}\"; \"// span [@class='pushBox " \
                                f"roundCorners3']\")"
        elif is_stock == 0:
            self.avanza_url = f"https://www.avanza.se/fonder/om-fonden.html/{avanza_id}/" \
                              f"{avanza_name}"
            self.avanza_price = _get_fund_price(self.bloomberg_finance)
            logging.info("Fund price %s (%s)", self.avanza_price,
                         self.avanza_price.replace(" ", ""))
        elif is_stock == 2:
            self.avanza_url = f"https://www.avanza.se/borshandlade-produkter/etf-torg/om-fonden" \
                              f".html/{avanza_id}/{avanza_name}"
            self.avanza_price = f"IMPORTXML(\"{self.avanza_url}\"; \"// span [@class='pushBox " \
                                f"roundCorners3']\")"
        # Running aggregates, maintained by add_transaction().
        self.total_amount = 0
        self.total_units = 0
        self.total_dividends = 0
        self.realized_gain = 0
        self.purchasing_sum = 0
        self.sum_of_units = 0
        self.sold_units = 0
        self.sold_sum = 0

    def get_total_price(self):
        """Average cost per held unit (0 for an empty position).

        NOTE(review): floor division — confirm truncation is intended.
        """
        if self.total_units == 0:
            return 0
        return self.total_amount // self.total_units

    def has_description(self, description):
        """True when *description* is the key or one of the descriptions."""
        return description in self.descriptions or self.key == description

    def gain_of_transaction(self, transaction):
        """Realized gain of a sell: (per-unit sell price minus average cost)
        scaled by the sold units.

        NOTE(review): uses floor division on the per-unit prices — confirm
        truncation is intended for these amounts.
        """
        return (-1 * (transaction.amount // transaction.units) -
                (self.total_amount // self.total_units)) * transaction.units * -1

    def add_transaction(self, transaction):
        """Apply one transaction (Split/Dividend/Buy/Sell) to the running
        aggregates and record it (duplicate splits are skipped)."""
        add_transaction = True
        if isinstance(transaction, Split):
            # Skip a split already recorded with the same date and factor.
            for trans in self.transactions:
                if trans.date == transaction.date and trans.units == transaction.units:
                    add_transaction = False
            if add_transaction and transaction.units > 0:
                logging.debug("%s", [self.name, self.total_amount, transaction.amount])
                self.total_units = self.total_units * transaction.units
        elif isinstance(transaction, Dividend):
            self.total_dividends += transaction.units * transaction.price
        elif isinstance(transaction, Buy):
            self.total_units += transaction.units
            self.total_amount -= transaction.amount
            self.purchasing_sum -= transaction.amount
            self.sum_of_units += transaction.units
            logging.debug("%s", [self.name, transaction.str_type, self.total_amount,
                                 transaction.amount,
                                 self.total_units, transaction.units])
        elif isinstance(transaction, Sell):
            logging.debug(transaction)
            self.realized_gain += self.gain_of_transaction(transaction)
            self.total_units += transaction.units
            self.total_amount -= transaction.amount
            self.sold_units += transaction.units
            self.sold_sum -= transaction.amount
            logging.debug("%s", [self.name, transaction.str_type, self.total_amount,
                                 transaction.amount, self.total_units,
                                 transaction.units, self.realized_gain])
        if add_transaction:
            # A fully closed position resets its cost basis.
            if self.total_units == 0:
                self.total_amount = 0
            self.transactions.append(transaction)

    def get_dividend_forecast(self):
        """Forecast dividend per share, as configured at construction."""
        return self.dividend_forecast_per_stock

    def get_latest_dividend(self):
        """Sum of per-share dividend prices dated after 2016-01-01.

        NOTE(review): `latest` is never updated inside the loop, so despite
        the name this accumulates every dividend after the fixed date rather
        than returning only the most recent one — confirm intent before use.
        """
        dividend = 0
        latest = "2016-01-01 00:00:00"
        for trans in self.transactions:
            if not isinstance(trans, Dividend):
                continue
            if trans.date > latest:
                dividend += trans.price
        return dividend

    def get_total_dividends(self, start_date=None, end_date=None):
        """Total dividend amount, optionally restricted to a date range."""
        if not start_date and not end_date:
            return self.total_dividends
        total_dividends = 0
        for trans in self.transactions:
            if isinstance(trans, Dividend) and start_date <= trans.date <= end_date:
                total_dividends += trans.amount
        return total_dividends

    def calculate_transaction_average(self, transaction):
        """Cash amount used when averaging this transaction.

        A disabled SEK-specific variant (units * price + fee) used to be
        short-circuited behind `if False and ...`; the dead branch has been
        removed — the method always returned transaction.amount.
        """
        return transaction.amount
| {
"repo_name": "dahuuhad/Stocks",
"path": "stock.py",
"copies": "1",
"size": "5838",
"license": "apache-2.0",
"hash": -6233075179217019000,
"line_mean": 40.7,
"line_max": 98,
"alpha_frac": 0.574169236,
"autogenerated": false,
"ratio": 3.9714285714285715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045597807428571,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
import pandas
from cassandra.cluster import Cluster
from cassandra.query import BatchStatement
def loadDataIntoDatabase(stockId, dbSession):
    """Save the sample CSV price history to the stockdata table for stockId."""
    # NOTE(review): "volumne" must match the actual column name in the
    # stockdata schema — confirm before renaming it here.
    query = dbSession.prepare("insert into stockdata (stock_id, time, open_price, high_price, low_price, close_price, volumne, adj_close) values(?, ?, ?, ?, ?, ?, ?, ?)")
    batch = BatchStatement()
    # skiprows=1 drops the CSV header row.
    csvData = pandas.io.parsers.read_csv('Data/sampleData.csv', sep=',', skiprows=1)
    for index, row in csvData.iterrows():
        batch.add(
            query, (stockId, pandas.to_datetime(row[0]), row[1], row[2], row[3], row[4], row[5], row[6])
        )
    # Single batch round-trip for all inserts.
    dbSession.execute(batch)
    print("save data for stock:" + stockId)
    return
# One-off loader script: connect to the local cluster, optionally bulk-load
# sample data for the tickers below, then sanity-check the row count.
cluster = Cluster()
session = cluster.connect("briskyprocess")
#loadDataIntoDatabase("GOOG", session)
#loadDataIntoDatabase("MSFT", session)
#loadDataIntoDatabase("ORCL", session)
#loadDataIntoDatabase("IBM", session)
#loadDataIntoDatabase("SAP", session)
#loadDataIntoDatabase("SNE", session)
#loadDataIntoDatabase("VOXX", session)
#loadDataIntoDatabase("ADBE", session)
#loadDataIntoDatabase("CSCO", session)
result = session.execute("Select count(*) from stockdata limit 200000")
print(result)
| {
"repo_name": "SoySauceClub/BriskyProcess",
"path": "playground/Cassandra/DataLoader.py",
"copies": "1",
"size": "1236",
"license": "apache-2.0",
"hash": 4196252790686091300,
"line_mean": 37.625,
"line_max": 170,
"alpha_frac": 0.7071197411,
"autogenerated": false,
"ratio": 3.3136729222520107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9468969363145053,
"avg_score": 0.010364660041391431,
"num_lines": 32
} |
__author__ = 'Daniel'
import platform
from PySide.QtCore import *
from PySide.QtGui import *
import GUI.ui_mainwindow
from GUI.component import *
from Game import Game
from GUI.models import MyHistoryTableModel
from UserData import config
from utility import is_debug_mode, __version__
class MainWindow(QMainWindow, GUI.ui_mainwindow.Ui_MainWindow):
    """Top-level window wiring the menu actions to the game/history dialogs."""

    def keyPressEvent(self, event):
        """Debug aid: dump key codes when debug mode is enabled."""
        if is_debug_mode():
            print('key: %s -' % hex(event.key()))
            print('modifiers:', hex(int(event.modifiers())))

    def checkAns(self, user_input):
        """Return True when user_input equals the current question's answer."""
        if is_debug_mode():
            print(self.game.current_question.answer)
        try:
            return float(user_input) == self.game.current_question.answer
        except (AttributeError, TypeError, ValueError):
            # float() fails on empty/partial input; AttributeError covers the
            # no-current-question case. Narrowed from a bare except, which
            # would also have hidden genuine programming errors.
            return False

    def start_quick_game(self):
        """Launch a game dialog immediately."""
        g = GameDialog(self)
        g.start()

    def start_training(self):
        # TODO: Create a new dialog asking for type
        self.start_quick_game()

    def display_settings(self):
        # TODO: Create interface to change settings
        QMessageBox.information(self, "Settings", "Coming soon")

    def display_history(self):
        """Create dumb dialog and display table"""
        data = []
        for q in self.game.history.values():
            data.append([q.query.strip(), len(q.correct_times), len(q.wrong_times)])
        header = ['Query', 'Correct', 'Wrong']
        model = MyHistoryTableModel(data, header, self)
        h = HistoryDlg(self)
        h.set_up_history(model)
        h.show()

    def display_about(self):
        """Show the about box with version and platform details."""
        QMessageBox.about(self, "About Mental Math",
                          """<b>Mental Math</b> v %s
                          <br>
                          Made by Daniel Low
                          <p>This application can be used to train your mental calculation.
                          <p>Running on Python %s on %s""" % (
                              __version__, platform.python_version(), platform.system()))

    def __init__(self, game):
        # Bind the game before Qt setup so action handlers can use it.
        self.game = game
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.setWindowTitle("Mental Math trainer")
        # self.statusBar.showMessage('Ready')
        self.setCentralWidget(StartWidget(self))
        self.actionQuit.triggered.connect(self.close)
        self.actionQuick_Game.triggered.connect(self.start_quick_game)
        self.actionTraining.triggered.connect(self.start_training)
        self.actionHistory.triggered.connect(self.display_history)
        self.actionAbout.triggered.connect(self.display_about)
def main():
    """Build the Qt application and show the main window."""
    # NOTE(review): `sys` is not imported in this module directly; it appears
    # to arrive via `from GUI.component import *` (component.py imports sys)
    # — confirm, or import it explicitly here.
    app = QApplication(sys.argv)
    g = Game()
    f = MainWindow(g)
    f.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| {
"repo_name": "daniellowtw/MentalMaths",
"path": "GUI/main.py",
"copies": "1",
"size": "2742",
"license": "mit",
"hash": 8040755267604540000,
"line_mean": 31.6428571429,
"line_max": 91,
"alpha_frac": 0.5991976659,
"autogenerated": false,
"ratio": 3.934002869440459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.503320053534046,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
import re
class TokenizeABC:
    """Split an ABC-notation score into header fields and body tokens."""

    def __init__(self, abc):
        self._abc = abc  # full score text

    def tokenize_header(self, header):
        """Split the header into lines and drop each two-char field tag (e.g. "X:")."""
        header = header.split("\n")
        for i, h in enumerate(header):
            header[i] = h[2:]
        return header

    def tokenize_body(self, body):
        # Body tokenization is not implemented yet; pass the text through.
        return body

    def tokenize(self):
        """Return (header_fields, score_tokens) for the stored score."""
        # print() is valid on Python 3 and prints identically on Python 2
        # (single parenthesised argument); the original used the py2-only
        # print statement, which is a syntax error on Python 3.
        print("tokenizing score...")
        header_i = 0  # an index to the end of the header
        header_re = re.compile(r"\d:")  # raw string: "\d" is an invalid escape in py3.12+
        # iterate through score until we find "V:", the end of the header section
        # V: is presumably the beginning of the last line of the header
        while header_re.search(self._abc[header_i:header_i + 2]) is not None:
            header_i += 1
        # NOTE(review): +2 steps one character PAST the newline, not just past
        # it as the original comment claimed — confirm the intended boundary.
        header_end = self._abc.find("\n", header_i) + 2  # +2 to include the newline
        header_fields = self.tokenize_header(self._abc[0:header_end])
        score_tokens = self.tokenize_body(self._abc[header_end:])
        tokens = (header_fields, score_tokens)
        return tokens
| {
"repo_name": "fatisar/guitar-gyro",
"path": "src/utils/TokenizeABC.py",
"copies": "1",
"size": "1034",
"license": "bsd-3-clause",
"hash": -8991544807225574000,
"line_mean": 30.3333333333,
"line_max": 141,
"alpha_frac": 0.5899419729,
"autogenerated": false,
"ratio": 3.7194244604316546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9752911563844329,
"avg_score": 0.011290973897465239,
"num_lines": 33
} |
__author__ = 'Daniel'
import sys
from PySide.QtCore import *
from PySide.QtGui import *
import GUI.ui_menu
import GUI.ui_startwidget
import GUI.ui_gamedialog
import GUI.ui_historywidget
class MenuWidget(QWidget, GUI.ui_menu.Ui_Menu):
    """Menu screen; its list view is filled with placeholder items "0".."99"."""

    def __init__(self):
        super(MenuWidget, self).__init__()
        self.setupUi(self)
        # Placeholder content until real menu entries exist.
        model = QStringListModel(list(map(str, range(100))))
        self.myListView.setModel(model)
class GameDialog(QDialog, GUI.ui_gamedialog.Ui_Dialog):
    """Dialog that runs a timed round of mental-math questions."""

    def __init__(self, parent=None):
        super(GameDialog, self).__init__(parent)
        self.parent_component = parent
        self.setupUi(self)
        self.game = parent.game
        self.t = QTimer()  # countdown timer shown before the round starts
        self.lag = 1       # seconds of countdown remaining

    def start(self):
        """Ask for the number of rounds, then begin the countdown."""
        x, ok = QInputDialog.getInt(self, "Rounds", "Rounds", 10)
        if not (ok and isinstance(x, int)):
            return
        self.questions_left = x
        self.lcdNumber.display(self.lag)
        self.t.timeout.connect(self.on_timeout)
        self.t.start(1000)
        self.userInputLineEdit.textEdited.connect(self.on_userInputLineEdit_textEdited)
        self.userInputLineEdit.returnPressed.connect(self.on_userInputLineEdit_returnPressed)
        self.show()

    def on_userInputLineEdit_returnPressed(self):
        """Submit whatever is typed when the user presses Enter."""
        txt = self.userInputLineEdit.text()
        self.submit_answer(txt)

    def on_userInputLineEdit_textEdited(self, txt):
        # Auto-submit as soon as the typed text matches the answer.
        if self.parent_component.checkAns(txt):
            self.submit_answer(txt)

    def submit_answer(self, txt):
        """Score the answer, then show the next question or finish the round."""
        self.game.end_timing()
        _, solution, time_taken = self.game.solve_question(int(txt))
        self.lcdNumber.display(time_taken)
        self.questions_left -= 1
        if self.questions_left == 0:
            self.stop_game()
        else:
            self.start_game()

    def on_timeout(self):
        """Countdown tick; start the round when it reaches zero."""
        self.lag -= 1
        self.lcdNumber.display(self.lag)
        if self.lag <= 0:
            self.t.stop()
            self.start_game()

    def start_game(self):
        """Generate and display the next question and start its timer."""
        self.game.gen_next_question()
        self.game.start_timing()
        self.userInputLineEdit.setText("")
        self.questionLabel.setText(self.game.current_question.query)

    def stop_game(self):
        self.done()

    def done(self, *args):
        # Cleanup as well
        try:
            self.game.user_score.save_db()
            QMessageBox.information(self, "Statistics", "blah\nblah\nblah")
            self.t.stop()
        except Exception:
            # Best-effort cleanup must never block closing the dialog.
            # Exception (not a bare except) lets KeyboardInterrupt and
            # SystemExit propagate instead of being silently swallowed.
            pass
        super(GameDialog, self).done(0)
class StartWidget(QWidget, GUI.ui_startwidget.Ui_Form):
    """Landing screen whose buttons delegate to the MainWindow actions."""

    def __init__(self, parent=None):
        super(StartWidget, self).__init__(parent)
        self.setupUi(self)
        # parent is the MainWindow; route each button to its handler there.
        self.quickGameBtn.clicked.connect(parent.start_quick_game)
        self.historyOrSavedBtn.clicked.connect(parent.display_history)
        self.trainingBtn.clicked.connect(parent.start_training)
        self.settingsBtn.clicked.connect(parent.display_settings)
class HistoryDlg(QDialog, GUI.ui_historywidget.Ui_Dialog):
    """Dialog showing the question history in a sortable table."""

    def __init__(self, parent=None):
        super(HistoryDlg, self).__init__(parent)
        self.parent_component = parent
        self.setupUi(self)
        # NOTE(review): this resize runs before any model is set, so it is a
        # no-op; consider resizing in set_up_history() after setModel().
        self.historyTableView.resizeColumnsToContents()
        self.historyTableView.setSortingEnabled(True)

    def set_up_history(self, model):
        """Attach the history table model to the view."""
        self.historyTableView.setModel(model) | {
"repo_name": "daniellowtw/MentalMaths",
"path": "GUI/component.py",
"copies": "1",
"size": "3365",
"license": "mit",
"hash": -5541070931071500000,
"line_mean": 31.3653846154,
"line_max": 93,
"alpha_frac": 0.6350668648,
"autogenerated": false,
"ratio": 3.637837837837838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4772904702637838,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
class LazyDataStore(object):
    """Attribute store that fabricates and caches values on first access.

    Instances start with only `existing_attr`. Reading any other attribute
    triggers __getattr__, which computes a value, stores it on the instance
    (so hasattr() succeeds and later reads bypass the hook), and returns it.
    """

    def __init__(self):
        self.existing_attr = 5

    def __getattr__(self, name):
        # Called only when `name` is absent from the instance dictionary.
        fabricated = 'Value for % s' % name
        setattr(self, name, fabricated)  # cache: next access is a plain lookup
        return fabricated
class LazyDataStore2(object):
    """Variant of LazyDataStore hooking __getattribute__ and __setattr__."""

    def __init__(self):
        self.existing_attr = 5

    def __getattribute__(self, name):
        """
        This special method is called EVERY time an attribute is accessed on
        an object, even in cases where it does exist in the attribute
        dictionary.
        """
        try:
            return super(LazyDataStore2, self).__getattribute__(name)
        except AttributeError:
            # Missing attribute: fabricate, cache and return it.
            value = 'Value for %s' % name
            setattr(self, name, value)
            return value

    def __setattr__(self, name, value):
        """
        This method is always called every time an attribute is assigned on
        an instance.
        """
        # print() is valid on Python 3 and prints identically on Python 2
        # (single parenthesised argument); the original used the py2-only
        # print statement, a syntax error on Python 3.
        print("Aspect: Save some data to the DB log")
        super(LazyDataStore2, self).__setattr__(name, value)
class DictionaryDB(object):
    """Expose the keys of a backing dict as attributes: db.foo == data['foo']."""
    def __init__(self, data):
        self._data = data
    def __getattribute__(self, name):
        """
        To avoid infinite recursion in __getattribute__ and __setattr__ by using
        methods from super() (i.e., the object class) to access instance
        attributes directly.
        """
        # Fetch the backing dict through object.__getattribute__, then treat
        # every attribute access as a dictionary lookup.
        backing = super(DictionaryDB, self).__getattribute__('_data')
        return backing[name]
| {
"repo_name": "idf/commons-util-py",
"path": "commons_util/fundamentals/dynamic_class.py",
"copies": "1",
"size": "1715",
"license": "apache-2.0",
"hash": 8868576759079808000,
"line_mean": 28.0677966102,
"line_max": 80,
"alpha_frac": 0.5743440233,
"autogenerated": false,
"ratio": 4.431524547803617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5505868571103617,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
class Stack:
    """A minimal LIFO stack backed by a Python list."""
    def __init__(self):
        self.items = []
    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)
    def is_empty(self):
        """Return True when the stack holds nothing."""
        return not self.items
    def size(self):
        """Return the number of stored items."""
        return len(self.items)
    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()
    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]
def check_parentheses(inp):
    """Return True when every (), [], {}, <> in *inp* is balanced and properly nested.

    Characters other than the eight bracket symbols are ignored.
    """
    closer_of = {'(': ')', '[': ']', '{': '}', '<': '>'}
    expected = []  # stack of closing brackets we still owe
    for ch in inp:
        if ch in ')]}>':
            # A closer must match the most recently opened bracket.
            if not expected or expected.pop() != ch:
                return False
        elif ch in closer_of:
            expected.append(closer_of[ch])
    return not expected
def to_base(num, base):
    """Convert non-negative *num* to its digit string in *base* (2..36).

    Returns "" for num == 0, matching the original loop-until-zero behaviour.
    """
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    out = []
    while num > 0:
        num, rem = divmod(num, base)
        out.append(digits[rem])
    # Remainders come out least-significant first, so reverse before joining.
    return "".join(reversed(out))
def to_binary(num):
    """Return the binary-digit string of *num* (empty string for 0, per to_base)."""
    return to_base(num, 2)
def to_postfix(string):
    """Shunting-yard: convert a space-separated infix expression to postfix.

    Supports + - * / and parentheses; operands are copied through verbatim.
    """
    prec = {"+": 1, "-": 1, "*": 2, "/": 2, "(": 0, ")": 0}
    pending = []   # operator stack
    output = []
    for tok in string.split():
        if tok == "(":
            pending.append("(")
        elif tok == ")":
            # Flush operators back to the matching open parenthesis.
            top = pending.pop()
            while top != "(":
                output.append(top)
                top = pending.pop()
        elif tok in "+-*/":
            # Pop anything of equal or higher precedence first.
            while pending and prec[tok] <= prec[pending[-1]]:
                output.append(pending.pop())
            pending.append(tok)
        else:
            output.append(tok)
    while pending:
        output.append(pending.pop())
    return " ".join(output)
def eval_postfix(string):
    """Evaluate a space-separated postfix expression over integers.

    '/' is floor division, matching the original use of operator.floordiv.
    """
    from operator import add, floordiv, mul, sub
    apply_op = {"+": add, "-": sub, "*": mul, "/": floordiv}
    values = []
    for tok in string.split():
        fn = apply_op.get(tok)
        if fn is None:
            values.append(int(tok))
        else:
            # Right operand is on top of the stack.
            rhs = values.pop()
            lhs = values.pop()
            values.append(fn(lhs, rhs))
    return values.pop()
| {
"repo_name": "DanielFabian/DataStructuresAndAlgorithms",
"path": "Python/stack.py",
"copies": "1",
"size": "2424",
"license": "apache-2.0",
"hash": -8038622576193381000,
"line_mean": 21.2385321101,
"line_max": 85,
"alpha_frac": 0.475660066,
"autogenerated": false,
"ratio": 3.769828926905132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9744955602676841,
"avg_score": 0.00010667804565820353,
"num_lines": 109
} |
__author__ = 'Daniel'
# Responsible for generating UI in terminal
import os
from UIController import AbstractController
from utility import *
from question import Operation
from UserData import config
class InterruptInputException(Exception):
    """Raised to abort question input; carries the triggering action in .msg."""
    def __init__(self, msg):
        # Forward to Exception so str(e) and e.args carry the payload too
        # (previously str(e) was always empty).
        super(InterruptInputException, self).__init__(msg)
        self.msg = msg
class TerminalController(AbstractController):
    """Text-mode front end for the maths game, driven by input()/print().

    Commands and question actions are compared against enum values
    (self.Action, TerminalController.QuestionAction) -- presumably declared
    on AbstractController; they are not defined in this file (TODO confirm).
    """
    # The game model this controller drives (set in __init__).
    game = None
    def get_user_input_for_question(self, query, allowed_operations=[]):
        """
        Converts the user input into a float to check whether the input is correct. If that is not possible, this
        will keep retrying.

        NOTE(review): `allowed_operations=[]` is a mutable default argument;
        it is only read here, but a None default would be safer.
        NOTE(review): after handling SAVE the function falls out of the try
        and implicitly returns None instead of re-prompting.
        """
        try:
            user_input = input(query)
            # Alphabetic input is a command; anything else is parsed as an answer.
            if str.isalpha(user_input):
                if user_input in map(lambda x: x.value, allowed_operations):
                    if TerminalController.QuestionAction.SAVE.value == user_input:
                        self.game.save_current_question()
                        print("Saved")
                    elif self.QuestionAction.QUIT.value == user_input:
                        # Bubble the action up so the round loop can stop cleanly.
                        raise InterruptInputException(TerminalController.QuestionAction(user_input))
            else:
                return float(user_input)
        except ValueError as e:
            # Not a float: report and re-prompt recursively.
            print(e)
            return self.get_user_input_for_question(query, allowed_operations)
    def __init__(self, game):
        self.game = game
    def play_round_wrapper(self, question_generator):
        """Ask for a round count, play the round, report and persist the score."""
        rounds = get_integer_input("Enter number of rounds (leave empty for 10): ", 10)
        score, total = self.play_round(rounds, question_generator)
        print("Round completed. %i / %i" % (score, total))
        self.game.user_score.save_db()
        return
    def play_round(self, number_of_rounds, question_generator):
        """Play up to number_of_rounds questions; return (score, total answered)."""
        score = 0
        total = 0
        # Clear the terminal (Windows vs POSIX).
        os.system('cls' if os.name == 'nt' else 'clear')
        for _ in range(number_of_rounds):
            question_generator()
            self.game.start_timing()
            try:
                user_input = self.get_user_input_for_question(self.game.current_question.query,
                                                              [TerminalController.QuestionAction.QUIT,
                                                               TerminalController.QuestionAction.SAVE])
            except InterruptInputException as e:
                if e.msg == TerminalController.QuestionAction.QUIT:
                    print("Returning to main menu.")
                    return score, total
                elif e.msg == TerminalController.QuestionAction.SAVE:
                    # NOTE(review): SAVE never raises in get_user_input_for_question,
                    # so this branch looks unreachable; if it did fire, user_input
                    # below would be unbound.
                    self.game.save_current_question()
            self.game.end_timing()
            outcome, correct_value, time_taken = self.game.solve_question(user_input)
            if config.getboolean("General", "SHOW_TIMER"):
                print("Time taken:" + str(time_taken))
            total += 1
            if outcome:
                score += 1
                print("Correct!")
            else:
                print("Wrong, correct value is %i" % correct_value)
            if config.getboolean("General", "PAUSE_AFTER_QUESTION"):
                user_input = input("Enter any key to continue. S to save the question, Q to quit")
                if str.isalpha(user_input):
                    if TerminalController.QuestionAction.SAVE.value == user_input:
                        self.game.save_current_question()
                    elif TerminalController.QuestionAction.QUIT.value == user_input:
                        return score, total
        return score, total
    def run(self):
        """Main entry point: loop the menu forever (menu_main exits on 'q')."""
        while True:
            self.menu_main()
    def menu_main(self):
        """Show the top-level menu once and dispatch the chosen command."""
        instruction = """
        q - Quit
        l - Set limit
        s - Start game
        h - History
        t - Training
        """
        print(instruction)
        input_string = input("Please enter command:")
        if input_string == self.Action.START.value:
            self.play_round_wrapper(self.game.gen_next_question)
        elif input_string == self.Action.QUIT.value:
            exit()
        elif input_string == self.Action.SET_LIMIT.value:
            lower = int(input("Enter lower limit (inclusive): "))
            upper = int(input("Enter upper limit (inclusive): "))
            self.game.lower = lower
            self.game.upper = upper
        elif input_string == self.Action.HISTORY.value:
            # Tabulate each past question with right/wrong answer counts.
            print("\n%s %s / %s" % ("Query".ljust(10), "R".ljust(4), "W".ljust(4)))
            for q in self.game.history.values():
                print("%s %s / %s" % (q.query.strip().ljust(10, ' '), str(len(q.correct_times)).ljust(4, ' '),
                                      str(len(q.wrong_times)).ljust(4, ' ')))
        elif input_string == self.Action.TRAINING.value:
            self.menu_training()
        else:
            print("unknown command")
    def menu_training(self):
        """Show the training-mode menu and start the chosen drill."""
        instruction = """
        1 - Squaring
        2 - Multiplying two numbers having the same first n-1 digits and ones digit add up to ten
        3 - Multiplying two numbers ending with 1
        4 - Multiplication within a range
        5 - Addition within a range
        6 - Subtraction within a range
        7 - Division within a range
        8 - Multiplying two numbers having the first n-1 digits
        0 - Go back
        """
        print(instruction)
        choice = get_integer_input("Enter a number: ")
        if choice == 0:
            return
        elif choice == 1:
            lower_bound = get_integer_input("Enter a lower bound (leave empty for 1): ", 1)
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 99): ", 99)
            self.play_round_wrapper(lambda: self.game.gen_squares(lower_bound, upper_bound))
        elif choice == 2:
            # NOTE(review): prompt says "leave empty for 9" but the default is 99;
            # choice 3 uses 9 for the same prompt -- one of the two looks wrong.
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 9): ", 99)
            self.play_round_wrapper(lambda: self.game.gen_ones_digit_sum_to_ten(upper_bound))
        elif choice == 3:
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 9): ", 9)
            self.play_round_wrapper(lambda: self.game.gen_numbers_ending_with_one(upper_bound))
        elif choice == 4:
            lower_bound = get_integer_input("Enter a lower bound (leave empty for 1): ", 1)
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 99): ", 99)
            self.play_round_wrapper(
                lambda: self.game.gen_next_question(lower_bound, upper_bound, Operation.MULTIPLICATION.value))
        elif choice == 5:
            lower_bound = get_integer_input("Enter a lower bound (leave empty for 100): ", 100)
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 999): ", 999)
            self.play_round_wrapper(
                lambda: self.game.gen_next_question(lower_bound, upper_bound, Operation.ADDITION.value))
        elif choice == 6:
            lower_bound = get_integer_input("Enter a lower bound (leave empty for 1): ", 1)
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 999): ", 999)
            self.play_round_wrapper(
                lambda: self.game.gen_next_question(lower_bound, upper_bound, Operation.SUBTRACTION.value))
        elif choice == 7:
            lower_bound = get_integer_input("Enter a lower bound (leave empty for 1): ", 1)
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 99): ", 99)
            self.play_round_wrapper(
                lambda: self.game.gen_next_question(lower_bound, upper_bound, Operation.DIVISION.value))
        elif choice == 8:
            upper_bound = get_integer_input("Enter an upper bound (leave empty for 9): ", 9)
            self.play_round_wrapper(lambda: self.game.gen_tens_digit_are_the_same(upper_bound))
        else:
            self.menu_training()
| {
"repo_name": "daniellowtw/MentalMaths",
"path": "TerminalController.py",
"copies": "1",
"size": "7745",
"license": "mit",
"hash": -463979837707339460,
"line_mean": 44.5588235294,
"line_max": 113,
"alpha_frac": 0.5842479019,
"autogenerated": false,
"ratio": 4.11968085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.520392875296383,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
# want to create using class
# goal is to have option for dropout, etc.
import numpy as np
from sklearn.metrics import log_loss
class ann_2:
"""
An artificial neural network (2 layer) object
"""
    def __init__(self, features, hl1_size, hl2_size, classes,
                 epochs=10, batch_size=100, rho=0.99, eta=1e-6,
                 reg_penalty=1, eta_init=0.1, p1=0.5, p2=0.5):
        """
        return a ann_2 object with these architectures
        :param features: number features input to network (input layer size)
        :param hl1_size: number of nodes in hidden layer 1
        :param hl2_size: number of nodes in hidden layer 2
        :param classes: number of nodes in output layer
        :param epochs: mini-batch iterations run by train()/train0()
        :param batch_size: samples drawn without replacement per iteration
        :param rho: AdaDelta decay rate
        :param eta: AdaDelta smoothing constant
        :param reg_penalty: L2 regularisation strength
        :param eta_init: std-dev for the (commented-out) normal initialisation
        :param p1: dropout mask probability for hidden layer 1
        :param p2: dropout mask probability for hidden layer 2
        :return:
        """
        self.features = features
        self.hl1_size = hl1_size
        self.hl2_size = hl2_size
        self.classes = classes
        self.epochs = epochs
        self.batch_size = batch_size
        self.rho = rho
        self.eta = eta
        self.reg_penalty = reg_penalty
        self.eta_init = eta_init
        self.p1 = p1
        self.p2 = p2
        # Scaled Gaussian init. NOTE(review): the sqrt(2/n) factor divides by the
        # layer's full weight count rather than its fan-in -- looks like an
        # He-init variant; confirm intended.
        self.w1 = np.random.randn(self.hl1_size * (self.features + 1)) * np.sqrt(2.0/(self.hl1_size * (self.features + 1)))
        self.w2 = np.random.randn(self.hl2_size * (self.hl1_size + 1)) * np.sqrt(2.0/(self.hl2_size * (self.hl1_size + 1)))
        self.w3 = np.random.randn(self.classes * (self.hl2_size + 1)) * np.sqrt(2.0/(self.classes * (self.hl2_size + 1)))
        # Reshape flat vectors into (out_nodes, in_nodes + bias) weight matrices.
        self.w1 = np.reshape(self.w1, (self.hl1_size, self.features + 1))
        self.w2 = np.reshape(self.w2, (self.hl2_size, self.hl1_size + 1))
        self.w3 = np.reshape(self.w3, (self.classes, self.hl2_size + 1))
        # do these need to have their matrix shapes
        # self.w1 = np.random.normal(0, self.eta_init, (self.hl1_size, self.features + 1))
        # self.w2 = np.random.normal(0, self.eta_init, (self.hl2_size, self.hl1_size + 1))
        # self.w3 = np.random.normal(0, self.eta_init, (self.classes, self.hl2_size + 1))
# initialise hyper parameters - method just to change the hyperparameters
def set_hyperparams(self, epochs=100, batch_size=100, rho=0.99, eta=1e-6, reg_penalty=1,
p1=0.5, p2=0.5):
self.epochs = epochs
self.batch_size = batch_size
self.rho = rho
self.eta = eta
self.reg_penalty = reg_penalty
self.p1 = p1
self.p2 = p2
# is there some way to do this using numpy or numba/numexpr to make it faster
@staticmethod
def f(z):
return z * (z > 0.)
@staticmethod
def f_prime(z):
return z > 0.
@staticmethod
def val_mat(labels):
m = len(labels)
k = len(np.unique(labels))
y_matrix = np.zeros((m, k))
for i in range(m):
y_matrix[i, labels[i]-1] = 1
return y_matrix
# won't include dropout at this stage
def forward_propagation(self, data):
# TODO make add_bias a function, pass in matrix to add bias to
bias1 = np.ones(data.T.shape[1]).reshape(1, data.T.shape[1])
data = np.concatenate((bias1, data.T), axis=0)
z2 = np.dot(self.w1, data)
# f will be our activation function - ReLU
a2 = self.f(z2)
# add the bias term
bias2 = np.ones(a2.shape[1]).reshape(1, a2.shape[1])
a2 = np.concatenate((bias2, a2), axis=0)
# to make up for dropout
a2 *= 0.5
z3 = np.dot(self.w2, a2)
a3 = self.f(z3)
bias3 = np.ones(a3.shape[1]).reshape(1, a3.shape[1])
a3 = np.concatenate((bias3, a3), axis=0)
# to make up for dropout
a3 *= 0.5
z4 = np.dot(self.w3, a3)
a4 = self.f(z4)
return {'z2': z2, 'a2': a2, 'z3': z3, 'a3': a3, 'z4': z4, 'a4': a4}
# should have some function for checking the data and labels are in the correct dimension
# this could be done at the train level
def forward_propagation_dropout(self, data):
bias1 = np.ones(data.T.shape[1]).reshape(1, data.T.shape[1])
data = np.concatenate((bias1, data.T), axis=0)
z2 = np.dot(self.w1, data)
# f will be our activation function - ReLU
a2 = self.f(z2)
# add the bias term
bias2 = np.ones(a2.shape[1]).reshape(1, a2.shape[1])
a2 = np.concatenate((bias2, a2), axis=0)
# first dropout mask here - first hidden layer
a2 *= np.random.binomial(n=1, p=self.p1, size=a2.shape)
z3 = np.dot(self.w2, a2)
a3 = self.f(z3)
bias3 = np.ones(a3.shape[1]).reshape(1, a3.shape[1])
a3 = np.concatenate((bias3, a3), axis=0)
# second dropout mask here - second hidden layer
a3 *= np.random.binomial(n=1, p=self.p2, size=a3.shape)
z4 = np.dot(self.w3, a3)
a4 = self.f(z4)
return {'z2': z2, 'a2': a2, 'z3': z3, 'a3': a3, 'z4': z4, 'a4': a4}
def j(self, data, label_matrix):
"""
:params labels should have shape (xxx,), i.e., vector with no additional shape info
:params data should have shape (features, length same as labels)
"""
m = label_matrix.shape[0]
f_pass = self.forward_propagation(data)
# f_pass['a4'] will be the predictions for given weights
cost = log_loss(label_matrix, f_pass['a4'].T)
regularisation = np.sum(self.w1[:, 1:]**2) + \
np.sum(self.w2[:, 1:]**2) + \
np.sum(self.w3[:, 1:]**2)
regularisation *= (self.reg_penalty / (2 * m))
return cost + regularisation
    def gradients(self, data, label_matrix):
        """Backpropagate one dropout forward pass; returns regularised gradients
        {'D1','D2','D3'} shaped like w1/w2/w3."""
        # should really create m and label matrix else where
        # is it possible to have a function to initialise data and labels
        m = label_matrix.shape[0]
        # call to this function is where dropout would be called
        f_pass = self.forward_propagation_dropout(data)
        # Rebuild the bias-augmented input, matching the forward pass.
        bias1 = np.ones(data.T.shape[1]).reshape(1, data.T.shape[1])
        data = np.concatenate((bias1, data.T), axis=0)
        # Output-layer error, then back through w3 and w2; the [:,1::] slices
        # drop the bias column after each step.
        d4 = f_pass['a4'].T - label_matrix
        d3 = np.dot(d4, self.w3) * self.f_prime(f_pass['a3']).T
        d3 = d3[:,1::]
        d2 = np.dot(d3, self.w2) * self.f_prime(f_pass['a2']).T
        d2 = d2[:,1::]
        # Average gradients over the mini-batch.
        D1 = (np.dot(data, d2).T) / m
        D2 = (np.dot(f_pass['a2'], d3).T) / m
        D3 = (np.dot(f_pass['a3'], d4).T) / m
        # Regularisation terms: everything except the bias column.
        D1reg = np.zeros_like(D1)
        D1reg[:,1::] = (self.reg_penalty/m)* self.w1[:, 1::]
        D2reg = np.zeros_like(D2)
        D2reg[:,1::] = (self.reg_penalty/m) * self.w2[:, 1::]
        D3reg = np.zeros_like(D3)
        D3reg[:,1::] = (self.reg_penalty/m) * self.w3[:, 1::]
        D1 += D1reg
        D2 += D2reg
        D3 += D3reg
        # should return all three and then update params separately
        # will have to test this with gradient checking
        # so turn off regularization for now
        # should also change regularization to be L2
        return {'D1': D1, 'D2': D2, 'D3': D3}
    def train(self, data, labels):
        """ train records cost and epoch

        AdaDelta mini-batch training; returns {'cost': array, 'epoch': array}
        of the full-dataset cost after each epoch.
        """
        m = len(labels)
        # initialise cost
        # need to create value matrix first
        # otherwise small batch sizes could not get correct sized matrix
        label_matrix = self.val_mat(labels)
        cost = self.j(data, label_matrix)
        # initialise accumulators
        grad_accum = 0
        update_accum = 0
        iteration = 0
        for i in range(self.epochs):
            # Random mini-batch without replacement.
            batch = np.random.choice(m, self.batch_size, replace=False)
            # compute gradient
            gt_sep = self.gradients(data[batch], label_matrix[batch])
            gt = self.unroll_params(gt_sep['D1'],gt_sep['D2'],gt_sep['D3'])
            # accumulate gradient for each Delta
            grad_accum = (self.rho * grad_accum) + ((1 - self.rho) * gt**2)
            # compute update for weights (AdaDelta step)
            update = -((np.sqrt(update_accum + self.eta) * gt) / (np.sqrt(grad_accum + self.eta)))
            # accumulate update
            update_accum = (self.rho * update_accum) + ((1 - self.rho)*update**2)
            # apply update
            nn_params = self.unroll_params(self.w1, self.w2, self.w3)
            nn_params += update
            # split nn_params back into w1, w2, w3
            params = self.roll_params(nn_params)
            self.w1 = params['w1']
            self.w2 = params['w2']
            self.w3 = params['w3']
            # record cost + iterations
            cost_iterate = self.j(data, label_matrix)
            cost = np.append(cost, cost_iterate)
            iteration = np.append(iteration, i)
        print("completed %d epochs" % (self.epochs))
        return{'cost': cost, 'epoch': iteration}
def train0(self, data, labels):
""" same function as train but without the recording of cost and iteration"""
m = len(labels)
label_matrix = self.val_mat(labels)
# initialise accumulators
grad_accum = 0
update_accum = 0
for i in range(self.epochs):
batch = np.random.choice(m, self.batch_size, replace=False)
# compute gradient
gt_sep = self.gradients(data[batch], label_matrix[batch])
gt = self.unroll_params(gt_sep['D1'],gt_sep['D2'],gt_sep['D3'])
# accumulate gradient for each Delta
grad_accum = (self.rho * grad_accum) + ((1 - self.rho) * gt**2)
# compute update for weights
update = -((np.sqrt(update_accum + self.eta) * gt) / (np.sqrt(grad_accum + self.eta)))
# accumulate update
update_accum = (self.rho * update_accum) + ((1 - self.rho)*update**2)
# apply update
nn_params = self.unroll_params(self.w1, self.w2, self.w3)
nn_params += update
# split nn_params back into w1, w2, w3
params = self.roll_params(nn_params)
self.w1 = params['w1']
self.w2 = params['w2']
self.w3 = params['w3']
# record cost + iterations
print("completed %d epochs" % (self.epochs))
return
@staticmethod
def unroll_params(w1, w2, w3):
w1_flat = w1.ravel(order='C')
w2_flat = w2.ravel(order='C')
w3_flat = w3.ravel(order='C')
return np.concatenate([w1_flat, w2_flat, w3_flat])
def roll_params(self, nn_params):
# separate weights w1, w2, w3
# TODO investigate way to save shapes of weights and use those
w2_start = (self.features + 1)*self.hl1_size
w2_end = w2_start + ((self.hl1_size + 1)*self.hl2_size)
w1 = nn_params[0:w2_start]
w2 = nn_params[w2_start:w2_end]
w3 = nn_params[w2_end:]
# reshape into matrices
w1 = w1.reshape((self.hl1_size, self.features + 1), order='C')
w2 = w2.reshape((self.hl2_size, self.hl1_size + 1), order='C')
w3 = w3.reshape((self.classes, self.hl2_size + 1), order='C')
return {'w1': w1, 'w2' : w2, 'w3': w3}
def predict(self, data):
f_pass = self.forward_propagation(data)
prediction = np.zeros(np.shape(f_pass['z4'])[1])
for i in range(len(prediction)):
prediction[i] = np.argmax(f_pass['a4'][:,i]) + 1
return prediction
def predict_prob(self, data):
f_pass = self.forward_propagation(data)
return f_pass['a4']
def accuracy(self, data, labels):
predictions = self.predict(data)
return np.mean(predictions == labels)
def cross_entropy(self, data, labels):
f_pass = self.forward_propagation(data)
label_matrix = self.val_mat(labels)
return log_loss(label_matrix, f_pass['a4'].T) | {
"repo_name": "dgea005/MLLearning",
"path": "ann/neuralnet.py",
"copies": "1",
"size": "11707",
"license": "mit",
"hash": -6721559817137364000,
"line_mean": 40.5177304965,
"line_max": 123,
"alpha_frac": 0.5609464423,
"autogenerated": false,
"ratio": 3.179521998913634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42404684412136334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel'
# PhraseGraph is a graph-representation of a currently-being-proccessed phrase
# Input is a single, pre-computed phrase of notes
# PhraseGraph builds a graph of all the possible combinations of finger-positions for the set of notes
from NoteMap import NoteMap
from LeftHand import LeftHand
lhand = LeftHand()
class Transition:
    """A candidate move between two fretted notes, with the left-hand combos
    that can physically perform it (cost lives at combo index 2)."""
    def __init__(self, start_note, end_note):
        self._start_note = start_note
        self._end_note = end_note
        # Physical distance between the two fretted positions.
        self._dist = start_note.distance(end_note)
        # Every finger/position combo the left hand can use to span it.
        self._combos = lhand.get_combo_by_distance(self._dist)
        self._best_combo = None  # lazily computed in get_best_combo()
    def set_combos_by_finger(self, finger):
        """Restrict combos to those keyed on *finger* and pick the cheapest."""
        self._combos = lhand.get_combo_by_finger(finger)
        self._best_combo = min(self._combos, key=lambda combo: combo[2])
    def get_best_combo(self):
        """Return (and cache) the combo with the lowest cost."""
        if self._best_combo is None:
            self._best_combo = min(self._combos, key=lambda combo: combo[2])
        return self._best_combo
    def __str__(self):
        # Removed a dead join over self._combos that only fed a commented-out
        # verbose format string.
        return "%s -> %s (%s)" % (self._start_note, self._end_note, self._best_combo)
class PhraseNode:
    """One candidate fretting of a note, plus the transitions to every candidate
    fretting of the next note in the phrase."""
    def __init__(self, fnote, transitions):
        self._fnote = fnote
        # Build one Transition per target, then keep only the physically
        # possible ones (those with at least one combo).
        candidates = [Transition(fnote, target) for target in transitions]
        self._transitions = [t for t in candidates if len(t._combos) > 0]
        self._next = []           # downstream PhraseNodes
        self._best_trans = None   # chosen successor, set via set_best_trans()
        self._finger = None
    def seed(self, finger):
        """Re-key every outgoing transition's combos on the given finger."""
        for outgoing in self._transitions:
            outgoing.set_combos_by_finger(finger)
    def set_best_trans(self, best_trans):
        """Record the successor node whose note matches the chosen transition's end note."""
        for candidate in self._next:
            if candidate._fnote == best_trans._end_note:
                self._best_trans = candidate
                break
    def __str__(self):
        return "%s" % self._fnote
class PhraseGraph:
    """Graph of every candidate finger-position sequence for one phrase of notes."""
    def __init__(self, phrase):
        self._graph = []
        self._note_map = NoteMap()._note_map
        self.build_phrase_graph(phrase)
    def build_phrase_graph(self, phrase):
        """Add one PhraseNode per candidate fretting of each note, linking every
        node of the previous layer to each node of the current one."""
        prev = []
        cnt = 0
        for n in range(len(phrase) - 1):
            fnotes = self._note_map[phrase[n]]      # candidate FrettedNotes for this note
            trans = self._note_map[phrase[n+1]]     # candidates for the next note
            tmp = []
            for fn in fnotes:
                pn = PhraseNode(fn, trans)
                for i in prev:
                    self._graph[i]._next.append(pn)
                    # Parenthesised print: same output on Python 2, valid on 3.
                    print("i:%s" % pn._next)
                self._graph.append(pn)
                tmp.append(cnt)
                cnt += 1
            print("T:%s" % tmp)
            prev = tmp
    def find_best_path(self):
        """Greedily walk the graph, taking the cheapest transition at each node
        and re-seeding the next node on the finger that transition ends with."""
        pnode = self._graph[0]
        while pnode:
            best_trans = min(pnode._transitions,
                             key=lambda trans: min(trans._combos, key=lambda combo: combo[2]))
            best_combo = best_trans.get_best_combo()
            pnode.set_best_trans(best_trans)
            print("B:%s, %s" % (pnode, best_combo))
            pnode = pnode._best_trans
            if pnode:
                pnode.seed(int(best_combo[1]))
        return
    def __str__(self):
        parts = []  # renamed from `str`, which shadowed the builtin
        for node in self._graph:
            parts.append("{ %s } \n" % node)
        return "".join(parts)
"repo_name": "fatisar/guitar-gyro",
"path": "src/utils/PhraseGraph.py",
"copies": "1",
"size": "3783",
"license": "bsd-3-clause",
"hash": -4958319502079536000,
"line_mean": 32.7857142857,
"line_max": 135,
"alpha_frac": 0.5580227333,
"autogenerated": false,
"ratio": 3.5190697674418603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.457709250074186,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Puschmann'
from virtualisation.aggregation.genericaggregation import GenericAggregator
from virtualisation.misc.jsonobject import JSONObject
from virtualisation.misc.log import Log
from virtualisation.aggregation.paa.paacontrol import PaaControl
class PaaAggregator(GenericAggregator):
    """Aggregates numeric sensor fields with PAA controllers, one per field."""
    def __init__(self):
        # Per-sensor-uuid dict of {field name: PaaControl}.
        self.paaobjects = {}
    def aggregate(self, data, sensordescription):
        """Feed each field's reading into its PaaControl; collect any graphs produced.

        Returns a list of JSONObject results, or None when the sensor (or a
        field lookup) is unknown.
        """
        result = []
        try:
            paaobjs = self.paaobjects[sensordescription.uuid]
            for f in paaobjs:
                g = paaobjs[f].control(data[f])
                if g:
                    r = JSONObject()
                    r.graph = g
                    r.sensorID = sensordescription.sensorID
                    # Fix: report this field's property name, as DftAggregator
                    # does -- previously the whole field container was assigned.
                    r.propertyType = sensordescription.field[f].propertyName
                    r.category = sensordescription.sensorType
                    result.append(r)
            return result
        except KeyError:
            Log.e("Paa aggregation failed")
            return None
    def wrapper_added(self, sensordescription):
        """Create a PaaControl for every numeric field of a newly added sensor."""
        paaobjs = {}
        for f in sensordescription.fields:
            field = sensordescription.field[f]
            if field.dataType in ['int', 'float']:
                paaobjs[f] = PaaControl()
        self.paaobjects[sensordescription.uuid] = paaobjs
"repo_name": "CityPulse/CP_Resourcemanagement",
"path": "virtualisation/aggregation/paa/paaaggregator.py",
"copies": "1",
"size": "1325",
"license": "mit",
"hash": 8105223370992445000,
"line_mean": 36.8857142857,
"line_max": 75,
"alpha_frac": 0.601509434,
"autogenerated": false,
"ratio": 4.476351351351352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5577860785351352,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Puschmann'
from virtualisation.events.genericeventwrapper import GenericEventWrapper
from virtualisation.events.eventdescription import EventDescription
from virtualisation.misc.jsonobject import JSONObject
"""
private String ceID = UUID.randomUUID().toString();
private String ceType = "";
private String ceName = "";
private long ceTime;
private Coordinate ceCoordinate;
private int ceLevel;
"""
class AarhusTrafficEventWrapper():
    """Provides the static EventDescription for Aarhus road-traffic events."""
    def __init__(self):
        # Build the static description once, at construction time.
        desc = EventDescription()
        desc.namespace = "http://ict-citypulse.eu/"
        desc.graphName = "aarhus_road_traffic_event#"
        desc.author = "cityofaarhus"
        desc.timestamp.inField = "ceTime"
        desc.timestamp.format = "UNIX5"
        # added after telco with Dan
        desc.uuid = "0815-0815-0815"
        desc.location = "POINT (56.45 10.11)"
        # optional?
        desc.source = "TODO (Possibly messsage bus grounding?)"
        desc.eventType = "Aarhus_Road_Traffic_Event"
        desc.sourceType = "message_bus"
        desc.sourceFormat = "application/json"
        desc.information = "Traffic event for the City of Aarhus"
        # NOTE (kept from the original): this is an example EventDescription for
        # testing annotation; later the ResourceManagement API will receive new
        # EventDescriptions and instantiate (or reuse) EventWrappers -- there is
        # no need for a metadata file or one wrapper per REPORT_ID.
        self.eventDescription = desc
    def getEventDescription(self):
        """Return the static EventDescription built at construction time."""
        return self.eventDescription
# Manual smoke test: annotate one hand-built traffic event and print the graph.
# NOTE: Python 2 only (print statements and the 1438591234000L long literal).
if __name__ == "__main__":
    atew = AarhusTrafficEventWrapper()
    from virtualisation.annotation.genericannotation import GenericAnnotation
    annotator = GenericAnnotation()
    # Fake incoming event mirroring the Java fields quoted at the top of the file.
    eventData = JSONObject()
    eventData.ceID = 123456
    eventData.ceType = "traffic"
    eventData.ceName = "traffic jam"
    eventData.ceTime = 1438591234000L
    eventData.ceCoordinate = "(56.12 10.13)"
    eventData.ceLevel = 1
    print "Incoming event data", eventData.dumps()
    print
    g = annotator.annotateEvent(eventData, atew.getEventDescription())
    print "Resulting graph", g.serialize(format='n3')
    print
    print atew.getEventDescription().dumps()
"repo_name": "CityPulse/CP_Resourcemanagement",
"path": "wrapper_dev/aarhus_traffic/aarhustrafficeventwrapper.py",
"copies": "1",
"size": "2630",
"license": "mit",
"hash": -2114672280528803600,
"line_mean": 37.1304347826,
"line_max": 114,
"alpha_frac": 0.7148288973,
"autogenerated": false,
"ratio": 4.039938556067589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00564302511472443,
"num_lines": 69
} |
__author__ = 'Daniel Puschmann'
from virtualisation.aggregation.genericaggregation import GenericAggregator
from virtualisation.aggregation.dft.dftcontrol import DftControl
from virtualisation.misc.jsonobject import JSONObject
from virtualisation.misc.log import Log
class DftAggregator(GenericAggregator):
    """Aggregates numeric sensor fields with DFT controllers, one per field."""
    def __init__(self, aggregation_configuration):
        # Per-sensor-uuid dict of {field name: DftControl}. The configuration
        # argument is accepted for interface compatibility but not used.
        self.dftobjects = {}
    def aggregate(self, data, sensordescription):
        """Feed each field's reading into its DftControl; collect any graphs produced.

        Returns a list of JSONObject results, or None when the sensor (or a
        field lookup) is unknown.
        """
        collected = []
        try:
            controllers = self.dftobjects[sensordescription.uuid]
            for name in controllers:
                graph = controllers[name].control(data[name])
                if graph:
                    entry = JSONObject()
                    entry.graph = graph
                    entry.sensorID = sensordescription.sensorID
                    entry.propertyType = sensordescription.field[name].propertyName
                    entry.category = sensordescription.sensorType
                    collected.append(entry)
            return collected
        except KeyError:
            Log.e("Dft aggregation failed")
            return None
    def wrapper_added(self, sensordescription):
        """Create a DftControl for every numeric field of a newly added sensor."""
        controllers = {}
        for name in sensordescription.fields:
            meta = sensordescription.field[name]
            if meta.dataType == "int" or meta.dataType == "float":
                controllers[name] = DftControl()
        self.dftobjects[sensordescription.uuid] = controllers
"repo_name": "CityPulse/CP_Resourcemanagement",
"path": "virtualisation/aggregation/dft/dftaggregator.py",
"copies": "1",
"size": "1388",
"license": "mit",
"hash": 1551611465630846700,
"line_mean": 36.5405405405,
"line_max": 76,
"alpha_frac": 0.6102305476,
"autogenerated": false,
"ratio": 4.32398753894081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011498340445708867,
"num_lines": 37
} |
__author__ = 'Daniel Sanchez Quiros'
from GitUtils import *
from PathUtils import *
from ImageUtils import *
def test1(repodir, repoout, inpath, outpath):
    """Smoke test: clone the repo, then render every file under *inpath* into one PNG.

    Returns the list of files that were rendered.
    """
    cloneRepo(repodir,repoout)
    # Parenthesised prints: identical output on Python 2, valid on Python 3.
    print("getting files")
    files = getFilesFromPath(inpath)
    print("building image")
    ImgUtils.reduceAndSave2Png(outpath, *files)
    return files
def doIt(repodir, repoout, inpath, outpath):
    """Render one PNG frame per commit of the repository, then encode them into a video.

    Clones the repo only when *inpath* does not exist yet; returns *outpath*.
    """
    if not os.path.exists(inpath):
        cloneRepo(repodir, repoout)
    basename = os.path.basename(inpath)
    os.chdir(repoout)
    # Walk the history: check out each commit and render a numbered frame.
    for frame, commit in enumerate(getCommits(inpath)):
        checkout(commit)
        snapshot = getFilesFromPath(inpath)
        frame_png = os.path.join(outpath, "{name}{num:0>3}.png".format(name=basename, num=frame))
        ImgUtils.reduceAndSave2Png(frame_png, *snapshot)
    # Stitch the numbered frames into an AVI via ffmpeg.
    ImgUtils.renderVideo(outpath, "{name}".format(name=basename) + "%03d.png",
                         os.path.join(outpath, basename + ".avi"))
    return outpath
# Merged the two duplicated `if __name__ == "__main__":` guards into one entry
# point; also parenthesised the print (same output on Python 2, valid on 3).
if __name__ == "__main__":
    a = doIt("https://github.com/nikolaypavlov/MLPNeuralNet.git", r"c:\tests", r"c:\tests\MLPNeuralNet", r"c:\tests")
    print("Done")
| {
"repo_name": "danimanimal/GitVid",
"path": "src/DoIt.py",
"copies": "1",
"size": "1271",
"license": "mit",
"hash": 623110138732735400,
"line_mean": 27.2444444444,
"line_max": 132,
"alpha_frac": 0.6774193548,
"autogenerated": false,
"ratio": 3.070048309178744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9231911170267479,
"avg_score": 0.003111298742252807,
"num_lines": 45
} |
__author__ = 'Daniel Sanchez Quiros'
import hashlib
import cv2
import numpy as np
import math
import os.path as op
import os
from itertools import repeat
import subprocess
import png
class ImgUtils:
hexmin, hexmax = 0, int("ffffffff", 16)
rgbmin, rgbmax = 0, 255
    def __init__(self, file):
        # `file` is the path of the text file whose contents get hashed into
        # pixels (parameter name kept for API compatibility; it shadows the
        # builtin).
        self.file = file
@staticmethod
def hexlify(data):
return hashlib.md5(data).hexdigest()
    @staticmethod
    def hexlifylines(fname):
        """MD5-digest each of the first 100 lines of *fname*.

        NOTE(review): the file handle is never closed, and lines are hashed as
        read -- on Python 2 they are str; on Python 3 md5 would need bytes.
        """
        return [ImgUtils.hexlify(x) for x in open(fname).readlines()[:100]]
@staticmethod
def split8bits(data):
return [data[a:b] for a, b in [(0,8), (8,16), (16,24), (24, 32)]] if len(data) == 32 else None
@staticmethod
def parsenum(data):
return int(data, 16)
@classmethod
def reduceAlpha(cls, r, g, b, a):
return map(lambda x: int(math.floor(x*a/float(cls.rgbmax))), [r, g, b])
@classmethod
def normalize(cls, data):
return math.floor(((data/float(cls.hexmax))+cls.hexmin)*cls.rgbmax)+cls.rgbmin
@staticmethod
def repeatLst(elem, size):
ret = []
data = [x for x in repeat(elem, size)]
for e in data:
ret.extend(e)
return ret
    @staticmethod
    def toNumPyArray(lst):
        # View the nested pixel list as a uint8 numpy array.
        return np.asarray(lst, dtype=np.uint8)
    @staticmethod
    def npArrayReplicated(lst):
        # Replicate each pixel row, then view the result as uint8.
        # NOTE(review): relies on Python 2 map() returning a list.
        return ImgUtils.toNumPyArray(ImgUtils.replicatedDataLst(lst))
    @staticmethod
    def toImage(array):
        # Decode a byte buffer into an OpenCV image; flag 0 requests grayscale
        # decoding (cv2.IMREAD_GRAYSCALE).
        return cv2.imdecode(array, 0)
    @staticmethod
    def replicatedDataLst(lst, size=10):
        # Repeat every row's values *size* times.
        # NOTE(review): relies on Python 2 map() returning a list.
        return map(lambda x: ImgUtils.repeatLst(x, size), lst)
    @staticmethod
    def calcData(filename):
        """Hash each line of *filename* and turn the digest into an [r, g, b] row.

        Pipeline: line -> md5 hex -> four 8-char chunks -> 0..255 channel values
        (first three) scaled by the fourth as alpha. Python 2 map()-as-list
        semantics throughout.
        """
        # Normalise one 32-bit hex chunk into the 0..255 range.
        f = lambda x : int(ImgUtils.normalize(ImgUtils.parsenum(x)))
        return map(lambda x: ImgUtils.reduceAlpha(*x), map(lambda x: map(f,x), map(ImgUtils.split8bits ,ImgUtils.hexlifylines(filename))))
@staticmethod
def refill(lst, sizeY, sizeX=10):
lst.extend([0,0,0]*sizeX for x in xrange(sizeY))
return lst
@staticmethod
def calcBunchFiles(replicatedSize=10, *args):
replicated = map(lambda x: ImgUtils.replicatedDataLst(ImgUtils.calcData(x), replicatedSize), args)
return map(lambda x: ImgUtils.refill(x, max(map(lambda y: len(y), replicated))- len(x), replicatedSize), replicated)
def doIt(self):
return self.toImage(self.npArrayReplicated(self.calcData(self.file)))
def doAndSave(self ,outdir=None):
filename, _ = op.splitext(op.basename(self.file))
extension = '.jpg'
if outdir:
outdir = op.join(outdir, filename+extension)
else:
outdir = op.basename(filename+extension)
cv2.imwrite(outdir, self.doIt())
@staticmethod
def data2Png(data, outdir):
with open(outdir, 'wb') as f:
#data = ImgUtils.replicatedDataLst(data,100)
if data:
w = png.Writer(len(data[0])/3, len(data), greyscale=False)
w.write(f, data)
return outdir
@staticmethod
def reduceAndSave2Png(outdir, *args):
return ImgUtils.data2Png(reduce(lambda x, y: [a+b for a,b in zip(x, y)] , ImgUtils.calcBunchFiles(1, *args)), outdir)
#@staticmethod
#def renderVideo(path, basename, framenum, filepath):
# img = cv2.imread(op.join(path, basename.format(1)))
# vw = cv2.VideoWriter(filepath,cv2.VideoWriter_fourcc(*'FMP4'), 20, (len(img[0]), len(img)), 1)
# for i in xrange(framenum):
# img = cv2.imread(op.join(path, basename.format(i+1)))
# vw.write(img)
# return vw
@staticmethod
def renderVideo(sourcepath, basename, filepath):
os.chdir(sourcepath)
command = 'ffmpeg.exe -f image2 -pattern_type sequence -start_number 0 -i "{bname}" '.format(bname=basename)+ ' -c:v libx264 -r 30 {}'.format(filepath)
#command = 'ffmpeg.exe -f image2 -framerate 12 -pattern_type sequence -start_number 1 -vcodec mpeg4 -i "{bname}" '.format(bname=basename) + filepath
print command
return subprocess.check_output(command)
def toPNG(self, outdir=None):
filename, _ = op.splitext(op.basename(self.file))
extension = '.png'
if outdir:
outdir = op.join(outdir, filename+extension)
else:
outdir = op.basename(filename+extension)
with open(outdir, 'wb') as f:
data = self.replicatedDataLst(self.calcData(),100)
if data:
w = png.Writer(len(data[0])/3, len(data), greyscale=False)
w.write(f, data)
return outdir if data else None
if __name__ == "__main__":
    # Ad-hoc manual run: collapse every file under C:\tests\django into one
    # reduced PNG. Requires the third-party ``png`` module and the
    # project-local PathUtils helper; the paths are developer-machine specific.
    from pprint import pprint
    import png
    import PathUtils
    ImgUtils.reduceAndSave2Png(r"C:\tests\test_django.png", *PathUtils.getFilesFromPath(r"C:\tests\django"))
| {
"repo_name": "danimanimal/GitVid",
"path": "src/ImageUtils.py",
"copies": "1",
"size": "4837",
"license": "mit",
"hash": 9146731679963581000,
"line_mean": 30.614379085,
"line_max": 159,
"alpha_frac": 0.6233202398,
"autogenerated": false,
"ratio": 3.4207920792079207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.454411231900792,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniel Sanchez Quiros'
import os
from cStringIO import StringIO
import subprocess
def redirect_output(f):
    """Decorator: capture everything ``f`` writes to stdout.

    The wrapped callable returns a ``(captured_stdout, result)`` tuple,
    where ``result`` is whatever ``f`` returned.

    Fixes over the original: stdout is restored in a ``finally`` block even
    when ``f`` raises, and keyword arguments are forwarded (backward
    compatible -- positional-only calls behave exactly as before).
    """
    def wrapper(*args, **kwargs):
        old_stdout = os.sys.stdout
        buf = StringIO()
        os.sys.stdout = buf
        try:
            result = f(*args, **kwargs)
            captured = buf.getvalue()
        finally:
            # Always restore stdout and release the buffer.
            os.sys.stdout = old_stdout
            buf.close()
        return captured, result
    return wrapper
def getVersion():
    """Return the local ``git --version`` banner without the trailing newline."""
    banner = subprocess.check_output("git --version")
    return banner.strip("\n")
def cloneRepo(dir, path = None, username = None, passwd = None):
    """Clone the repository ``dir`` into ``path`` (or the current directory).

    ``username`` and ``passwd`` are currently unused placeholders.
    """
    if path:
        os.chdir(path)
    command = "git clone {dir}".format(dir=dir)
    return subprocess.check_output(command)
def getCommits(path= None):
    """Return the full commit hashes of the repository at ``path`` (or cwd)."""
    if path:
        os.chdir(path)
    hashes = subprocess.check_output("git log --pretty=%H").split("\n")
    # git's output ends with a newline, so drop the trailing empty entry.
    return hashes[:-1]
def checkout(checkout_code, path = None):
    """Check out revision ``checkout_code`` in the repository at ``path`` (or cwd)."""
    if path:
        os.chdir(path)
    # Return to master first so the target revision is reached from a known state.
    subprocess.check_output("git checkout master")
    command = "git checkout {}".format(checkout_code)
    subprocess.check_output(command)
#def getcheckout(revision):
# subprocess.check_output("git checkout ")
def countRevisions(path=None):
    """Number of commits reachable from HEAD in the repository at ``path`` (or cwd)."""
    if path:
        os.chdir(path)
    raw = subprocess.check_output("git rev-list HEAD --count")
    return int(raw.strip("\n"))
if __name__ == "__main__":
    # Manual smoke test against a local clone; the path is developer-specific.
    from pprint import pprint
    # Earlier experiment kept for reference:
    #p = "https://github.com/fourthbit/spheres.git"
    #os.chdir("c:/tests")
    #cloneRepo(p)
    #os.chdir("c:/tests/spheres")
    print(countRevisions(r"C:\tests\MLPNeuralNet"))
pprint(getCommits(r"C:\tests\MLPNeuralNet")) | {
"repo_name": "danimanimal/GitVid",
"path": "src/GitUtils.py",
"copies": "1",
"size": "1562",
"license": "mit",
"hash": 4924536412456714000,
"line_mean": 26.9107142857,
"line_max": 81,
"alpha_frac": 0.6395646607,
"autogenerated": false,
"ratio": 3.3956521739130436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476490752869932,
"avg_score": 0.011745216348622344,
"num_lines": 56
} |
__author__ = 'Daniel Sanchez Quiros'
import os.path as osp
import os
def getFilesFromPath(path, *args):
    """Recursively collect the file paths under ``path``.

    :param path: directory to scan.
    :param args: base names to exclude from the final result.
    :return: list of file paths; entries whose basename is in ``args`` are dropped.
    :raises ValueError: when ``path`` does not exist.
    """
    if not osp.exists(path):
        # FIX: grammar of the original message ("does not exists").
        raise ValueError("Path does not exist!")
    # NOTE: kept for backward compatibility -- the original implementation
    # changed the process working directory as a side effect.
    os.chdir(path)
    collected = []
    subdirs = []
    # Join with ``path`` so the isfile/isdir checks no longer depend on the
    # current working directory (the original used bare names after chdir).
    for entry in os.listdir(path):
        full = osp.join(path, entry)
        if osp.isfile(full):
            collected.append(full)
        elif osp.isdir(full):
            subdirs.append(full)
    for subdir in subdirs:
        collected.extend(getFilesFromPath(subdir))
    return [f for f in collected if osp.basename(f) not in args]
def getFromPathAndExtension(path, extension):
    """Return the entry names directly under ``path`` with the given extension.

    :param path: directory to scan (non-recursive).
    :param extension: extension including the dot, e.g. ``".png"``.
    :return: list of matching base names (not joined with ``path``).
    :raises ValueError: when ``path`` does not exist.
    """
    if not osp.exists(path):
        # FIX: grammar of the original message ("does not exists").
        raise ValueError("Path does not exist!")
    return [name for name in os.listdir(path)
            if osp.splitext(name)[1] == extension]
if __name__ == "__main__":
    # Manual smoke test; the paths are developer-machine specific.
    from pprint import pprint
    file_list = getFilesFromPath(r"C:/tests/spheres")
    pprint(file_list)
    print(all(map(osp.isfile, file_list)))
    print(getFromPathAndExtension("c:/tests", ".png"))
| {
"repo_name": "danimanimal/GitVid",
"path": "src/PathUtils.py",
"copies": "1",
"size": "1061",
"license": "mit",
"hash": 3258317772794642400,
"line_mean": 22.5777777778,
"line_max": 92,
"alpha_frac": 0.6182846371,
"autogenerated": false,
"ratio": 3.3364779874213837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44547626245213834,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
from googleapiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
from c_warning import show_warning
from c_gawrapper.c_api import GoogleAnalyticApi
def check_the_data(info_obj=None):
    """Validate report input data.

    Currently a stub: ``info_obj`` is ignored and True is always returned.
    """
    return True
def make_report(credentials, params, all_metrics):
    """Run a Google Analytics query built from the GUI selections.

    :param credentials: {"installed": {"client_id", "client_secret"}} dict.
    :param params: query window/filters ('start-date', 'end-date', 'filters').
    :param all_metrics: selections under the "/" key; entries with a truthy
        "value" are requested as metrics or dimensions depending on "view".
    :return: the API response, or None when one of the handled errors occurs
        (the error is shown as a warning and printed).
    """
    try:
        client_id = credentials["installed"]["client_id"]
        client_secret = credentials["installed"]["client_secret"]
        # Query window and filter expression.
        start_date = params['start-date']
        end_date = params['end-date']
        filters = params['filters']
        # Collect the selected properties; ga:sessions is always requested.
        selected_metrics = ['ga:sessions']
        selected_dimensions = []
        for p_property in all_metrics['/']:
            entry = all_metrics["/"][p_property]
            if not entry["value"]:
                continue
            if entry["view"] in ["Metrics"]:
                selected_metrics.append(p_property)
            else:
                selected_dimensions.append(p_property)
        # join() yields the same comma-separated strings the original built
        # by concatenation plus trailing-comma stripping.
        metrics = ','.join(selected_metrics)
        dimensions = ','.join(selected_dimensions)
        api = GoogleAnalyticApi(client_id, client_secret)
        return api.callAPI(start_date, end_date, metrics=metrics, dimensions=dimensions, filters=filters)
    except TypeError as error:
        show_warning('There was an error in constructing your query : %s' % error)
        print('There was an error in constructing your query : %s' % error)
    except HttpError as error:
        show_warning('Arg, there was an API error : %s : %s' % (error.resp.status, error._get_reason()))
        print('Arg, there was an API error : %s : %s' % (error.resp.status, error._get_reason()))
    except AccessTokenRefreshError:
        show_warning('The credentials have been revoked or expired, please re-run the application to re-authorize')
        print('The credentials have been revoked or expired, please re-run the application to re-authorize')
| {
"repo_name": "DaniilLeksin/gc",
"path": "c_gawrapper/c_wrapper.py",
"copies": "1",
"size": "2147",
"license": "apache-2.0",
"hash": 8869938000949228000,
"line_mean": 39.5094339623,
"line_max": 115,
"alpha_frac": 0.6199347927,
"autogenerated": false,
"ratio": 4.043314500941619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023391519505563496,
"num_lines": 53
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
import sys
import json
import pprint
from googleapiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
from c_gawrapper.c_api import GoogleAnalyticApi
def main(argv):
    """Command-line entry: run a GA query described by two JSON files.

    :param argv: sys.argv-style list; argv[1] is the credentials JSON file
        (Google "installed app" format), argv[2] the query-properties JSON.
    Handled API errors are printed; unexpected exceptions propagate.
    """
    # TODO: check the valid input data
    # TODO: handle IOError
    # TODO: handle KeyError
    try:
        # Load the credentials.
        # json format is got from: https://developers.google.com/analytics/solutions/articles/hello-analytics-api
        # FIX: use ``with`` so both file handles are closed deterministically
        # (the original leaked them).
        with open(argv[1], 'rb') as credentials_file:
            credentials = json.loads(credentials_file.read())
        client_id = credentials["installed"]["client_id"]
        client_secret = credentials["installed"]["client_secret"]
        # Load other properties
        with open(argv[2], 'rb') as properties_file:
            properties = json.loads(properties_file.read())
        start_date = properties['start-date']
        end_date = properties['end-date']
        dimensions = properties['dimensions']
        metrics = properties['metrics']
        filters = properties['filters']
        # Make instance of the google api class
        api = GoogleAnalyticApi(client_id, client_secret)
        # Go go go
        res = api.callAPI(start_date, end_date, metrics=metrics, dimensions=dimensions, filters=filters)
        pprint.pprint(res)
    except TypeError as error:
        print('There was an error in constructing your query : %s' % error)
    except HttpError as error:
        print('Arg, there was an API error : %s : %s' % (error.resp.status, error._get_reason()))
    except AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the application to re-authorize')
if __name__ == "__main__":
    # CLI entry point: argv[1] = credentials JSON, argv[2] = query-properties JSON.
    main(sys.argv)
| {
"repo_name": "DaniilLeksin/gc",
"path": "cGAwrapper.py",
"copies": "1",
"size": "1692",
"license": "apache-2.0",
"hash": 7522279961953859000,
"line_mean": 33.5306122449,
"line_max": 113,
"alpha_frac": 0.6607565012,
"autogenerated": false,
"ratio": 4.057553956834532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5218310458034532,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
def on_change_value(event, dict_credentials, dict_params, dict_api_properties):
    """Propagate a property-grid edit into the matching settings dict.

    :param event: grid change event exposing GetProperty() (name, value, parent).
    :param dict_credentials: OAuth client settings ({"installed": {...}}).
    :param dict_params: required query parameters (dates, filters, ...).
    :param dict_api_properties: metric/dimension selections under the "/" key.
    :return: the three dicts, updated in place.
    """
    prop_name = event.GetProperty().GetName()
    new_value = event.GetProperty().GetValue()
    category = event.GetProperty().GetParent().GetName()
    if category in ["01 - Client Parameters"]:
        # client_id / client_secret live under the "installed" key.
        dict_credentials["installed"][prop_name] = new_value
    elif category in ["02 - Required Parameters"]:
        if prop_name in ["start-date", "end-date"]:
            # The grid hands back a wx date object; flatten it to a
            # dash-separated string before storing.
            new_value = new_value.FormatDate().replace('/', '-')
        dict_params[prop_name] = new_value
    else:
        # Everything else is a metric/dimension toggle.
        dict_api_properties["/"][prop_name]["value"] = new_value
    return dict_credentials, dict_params, dict_api_properties
def on_select_property(event, dict_api_properties):
    """Handle selection of a grid property; currently only logs.

    The HTML help-page loading that used to live here is disabled.
    """
    selected = event.GetProperty()
    if not selected:
        print('Nothing selected\n')  # TODO: to log
        return
    try:
        # TODO: normal handler of the event "PGEVT_SELECTED"
        pass
    except KeyError as error:
        print("No key found: %s" % str(error))  # TODO: to log
"repo_name": "DaniilLeksin/gc",
"path": "c_gawrapper/c_properties.py",
"copies": "1",
"size": "1644",
"license": "apache-2.0",
"hash": -5253905766664349000,
"line_mean": 39.1219512195,
"line_max": 88,
"alpha_frac": 0.6289537713,
"autogenerated": false,
"ratio": 3.914285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022949432354168802,
"num_lines": 41
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
###########################################################################
##
##
##
##
###########################################################################
# NOTE: the original lines here were ``global user_metrics`` and
# ``global session_metrics``. At module scope ``global`` is a no-op (the
# keyword only has an effect inside a function body), so the statements
# were removed.
# OAuth2 client credentials in the Google "installed application" JSON
# layout; both values are populated from the GUI at runtime.
dict_credentials = {
    "installed": {
        "client_id": None,
        "client_secret": None,
    }
}
# Query-level parameters for the GA reporting API; filled in from the GUI.
# NOTE(review): elsewhere in this codebase only start-date, end-date and
# filters appear to be consumed -- confirm before relying on the rest.
dict_params = {
    "start-date": None,
    "end-date": None,
    "filters": None,
    "sort": None,
    "segment": None,
    "sampling_level": None,
    "start_index": None,
    "max_results": None,
    "output": None,
    "fields": None
}
# "User" group of GA dimensions and metrics. Each entry: "type" (GUI widget
# type), "view" ("Dimensions" or "Metrics" pane), "html" (local help page +
# online reference URL), "value" (whether the user selected it).
user_metrics = {
    "ga:userType": {
        "type": "bool",
        "view": "Dimensions",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_usertype"],
        "value": False},
    "ga:sessionCount": {
        "type": "bool",
        "view": "Dimensions",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_sessioncount"],
        "value": False},
    "ga:daysSinceLastSession": {
        "type": "bool",
        "view": "Dimensions",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_dayssincelastsession"],
        "value": False},
    "ga:userDefinedValue": {
        "type": "bool",
        "view": "Dimensions",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_userdefinedvalue"],
        "value": False},
    "ga:users": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_users"],
        "value": False},
    "ga:newUsers": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=user&jump=ga_newusers"],
        "value": False},
    # NOTE(review): the online reference URL is missing for this entry.
    "ga:percentNewSessions": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", ""],
        "value": False},
    }
#
# "Session" group of GA dimensions and metrics; same entry shape as
# user_metrics above it in this file.
session_metrics = {
    "ga:sessionDurationBucket": {
        "type": "bool",
        "view": "Dimensions",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_sessiondurationbucket"],
        "value": False},
    "ga:sessions": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_sessions"],
        "value": False},
    "ga:bounces": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_bounces"],
        "value": False},
    "ga:sessionDuration": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_sessionduration"],
        "value": False},
    "ga:hits": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_hits"],
        "value": False},
    # NOTE(review): the URL below jumps to ga_bounces, not ga_bouncerate --
    # looks like a copy-paste slip; preserved as-is pending confirmation.
    "ga:bounceRate": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_bounces"],
        "value": False},
    "ga:avgSessionDuration": {
        "type": "bool",
        "view": "Metrics",
        "html": ["Default/default.html", "https://developers.google.com/analytics/devguides/reporting/core/dimsmets#view=detail&group=session&jump=ga_avgsessionduration"],
        "value": False},
    }
# "Traffic sources" GA properties. Every entry has the identical shape
# {'html': ..., 'value': False, 'view': ...} and shares the default help
# page, so the table is generated from the bare property names instead of
# being written out entry by entry (removes ~40 duplicated lines; contents
# are identical to the original literal).
traffic_source_metrics = dict(
    ('ga:%s' % _n,
     {'html': 'Default/default.html', 'value': False, 'view': 'Dimensions'})
    for _n in ('adContent', 'medium', 'referralPath', 'socialNetwork',
               'keyword', 'campaign', 'source', 'hasSocialSourceReferral',
               'sourceMedium', 'fullReferrer'))
# The single metric of the group.
traffic_source_metrics['ga:organicSearches'] = {
    'html': 'Default/default.html', 'value': False, 'view': 'Metrics'}
# AdWords-related GA dimensions and metrics. The table is generated instead
# of hand-written: every entry has the shape
# {'html': <help page>, 'value': False, 'view': <pane>}, where the help page
# is either the shared default page or '<name>Page/<name>Page.html'.
# Contents are identical to the original 130-line literal.
adwords_metrics = {}
# Dimensions still pointing at the shared default help page.
for _name in ('adTargetingType', 'adSlot', 'adwordsCustomerID',
              'adKeywordMatchType', 'adMatchType', 'adDestinationUrl',
              'adMatchedQuery', 'adFormat'):
    adwords_metrics['ga:%s' % _name] = {
        'html': 'Default/default.html', 'value': False, 'view': 'Dimensions'}
# Dimensions with a dedicated help page.
for _name in ('isTrueViewVideoAd', 'adGroup', 'adwordsCampaignID',
              'adDistributionNetwork', 'adwordsCriteriaID',
              'adwordsCreativeID', 'adDisplayUrl', 'adSlotPosition',
              'adPlacementDomain', 'adPlacementUrl', 'adTargetingOption',
              'adwordsAdGroupID'):
    adwords_metrics['ga:%s' % _name] = {
        'html': '%sPage/%sPage.html' % (_name, _name),
        'value': False, 'view': 'Dimensions'}
# Metrics, each with a dedicated help page.
for _name in ('adClicks', 'impressions', 'adCost', 'margin', 'CTR', 'CPM',
              'CPC', 'costPerConversion', 'costPerTransaction', 'RPC',
              'costPerGoalConversion', 'ROI'):
    adwords_metrics['ga:%s' % _name] = {
        'html': '%sPage/%sPage.html' % (_name, _name),
        'value': False, 'view': 'Metrics'}
del _name  # keep the module namespace clean
# "Goal conversions" GA properties, generated from the property names: every
# help page follows the '<name>Page/<name>Page.html' pattern. Contents are
# identical to the original 74-line literal.
goal_conversions_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('goalCompletionLocation', 'goalPreviousStep1',
               'goalPreviousStep2', 'goalPreviousStep3'))
goal_conversions_metrics.update(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Metrics'})
    for _n in ('goalCompletionsAll', 'goalXXValue', 'goalValuePerVisit',
               'goalConversionRateAll', 'goalXXAbandonRate', 'goalValueAll',
               'goalXXCompletions', 'goalXXStarts', 'goalXXAbandons',
               'goalValuePerSession', 'goalAbandonRateAll', 'goalAbandonsAll',
               'goalXXConversionRate', 'goalStartsAll'))
# "Platform / device" GA dimensions, generated from the property names:
# every entry is a Dimension with a '<name>Page/<name>Page.html' help page.
# Contents are identical to the original literal.
platform_device_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('mobileDeviceBranding', 'mobileDeviceInfo', 'deviceCategory',
               'operatingSystem', 'mobileInputSelector', 'browser',
               'operatingSystemVersion', 'mobileDeviceMarketingName',
               'mobileDeviceModel', 'browserVersion'))
# "Geo / network" GA dimensions, generated from the property names: every
# entry is a Dimension with a '<name>Page/<name>Page.html' help page.
# Contents are identical to the original literal.
geo_network_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('city', 'longitude', 'country', 'region', 'networkDomain',
               'metro', 'subContinent', 'latitude', 'continent',
               'networkLocation'))
# "System" GA dimensions, generated from the property names: every entry is
# a Dimension with a '<name>Page/<name>Page.html' help page. Contents are
# identical to the original literal.
system_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('sourcePropertyDisplayName', 'flashVersion', 'language',
               'sourcePropertyTrackingId', 'javaEnabled', 'screenColors',
               'screenResolution'))
# "Social activities" GA properties, generated from the property names with
# the '<name>Page/<name>Page.html' help-page pattern. Contents are identical
# to the original literal.
social_activities_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('socialActivityTimestamp', 'socialActivityAction',
               'socialActivityTagsSummary', 'socialActivityUserProfileUrl',
               'socialActivityEndorsingUrl', 'socialActivityNetworkAction',
               'socialActivityPost', 'socialActivityUserHandle',
               'socialActivityUserPhotoUrl', 'socialActivityContentUrl',
               'socialActivityDisplayName'))
# The single metric of the group.
social_activities_metrics['ga:socialActivities'] = {
    'html': 'socialActivitiesPage/socialActivitiesPage.html',
    'value': False, 'view': 'Metrics'}
# "Page tracking" GA properties, generated from the property names with the
# '<name>Page/<name>Page.html' help-page pattern. Contents are identical to
# the original 93-line literal.
page_tracking_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('pagePathLevel1', 'pagePathLevel2', 'pagePathLevel3',
               'pagePathLevel4', 'hostname', 'pageTitle', 'pagePath',
               'nextPagePath', 'exitPagePath', 'secondPagePath',
               'landingPagePath', 'pageDepth', 'previousPagePath'))
page_tracking_metrics.update(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Metrics'})
    for _n in ('exits', 'avgTimeOnPage', 'uniquePageviews',
               'pageviewsPerSession', 'exitRate', 'entranceRate',
               'timeOnPage', 'pageviews', 'pageValue', 'entrances'))
# "Content grouping" GA properties; same entry shape as the other tables.
# NOTE(review): 'ga:contentGroupUniqueViewsXX' is tagged as a Dimension and
# 'ga:landingContentGroupXX' as a Metric -- these look swapped; preserved
# as-is pending confirmation against the GA reference.
content_grouping_metrics = {
    'ga:previousContentGroupXX': {
        'html': 'previousContentGroupXXPage/previousContentGroupXXPage.html',
        'value': False,
        'view': 'Dimensions'},
    'ga:contentGroupXX': {
        'html': 'contentGroupXXPage/contentGroupXXPage.html',
        'value': False,
        'view': 'Dimensions'},
    'ga:nextContentGroupXX': {
        'html': 'nextContentGroupXXPage/nextContentGroupXXPage.html',
        'value': False,
        'view': 'Dimensions'},
    'ga:contentGroupUniqueViewsXX': {
        'html': 'contentGroupUniqueViewsXXPage/contentGroupUniqueViewsXXPage.html',
        'value': False,
        'view': 'Dimensions'},
    'ga:landingContentGroupXX': {
        'html': 'landingContentGroupXXPage/landingContentGroupXXPage.html',
        'value': False,
        'view': 'Metrics'}
}
# "Internal site search" GA properties, generated from the property names
# with the '<name>Page/<name>Page.html' help-page pattern. Contents are
# identical to the original 98-line literal.
internal_search_metrics = dict(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Dimensions'})
    for _n in ('searchStartPage', 'searchKeywordRefinement', 'searchUsed',
               'searchCategory', 'searchKeyword', 'searchDestinationPage'))
internal_search_metrics.update(
    ('ga:%s' % _n,
     {'html': '%sPage/%sPage.html' % (_n, _n), 'value': False,
      'view': 'Metrics'})
    for _n in ('searchExitRate', 'percentSearchRefinements',
               'avgSearchDuration', 'avgSearchResultViews', 'avgSearchDepth',
               'searchVisits', 'percentSessionsWithSearch', 'searchDepth',
               'searchResultViews', 'searchDuration', 'searchRefinements',
               'searchUniques', 'searchGoalXXConversionRate',
               'goalValueAllPerSearch', 'percentVisitsWithSearch',
               'searchSessions', 'searchGoalConversionRateAll',
               'searchExits'))
# GA site-speed columns (all metrics), keyed by API name; same record shape
# as the other column tables in this module.
site_speed_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Metrics',
    }
    for name in (
        'domInteractiveTime',
        'avgDomainLookupTime',
        'avgPageLoadTime',
        'pageLoadTime',
        'avgDomInteractiveTime',
        'avgServerConnectionTime',
        'avgPageDownloadTime',
        'serverConnectionTime',
        'redirectionTime',
        'avgRedirectionTime',
        'avgDomContentLoadedTime',
        'domContentLoadedTime',
        'serverResponseTime',
        'avgServerResponseTime',
        'domainLookupTime',
        'speedMetricsSample',
        'domLatencyMetricsSample',
        'pageLoadSample',
        'pageDownloadTime',
    )
}
# GA app-tracking columns, keyed by API name; view marks whether the column
# is a dimension or a metric in the PropertyGrid.
app_tracking_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('screenName', 'Dimensions'),
        ('screenDepth', 'Dimensions'),
        ('appName', 'Dimensions'),
        ('exitScreenName', 'Dimensions'),
        ('appId', 'Dimensions'),
        ('appVersion', 'Dimensions'),
        ('appInstallerId', 'Dimensions'),
        ('landingScreenName', 'Dimensions'),
        ('screenviews', 'Metrics'),
        ('uniqueScreenviews', 'Metrics'),
        ('timeOnScreen', 'Metrics'),
        ('uniqueAppviews', 'Metrics'),
        ('avgScreenviewDuration', 'Metrics'),
        ('screenviewsPerSession', 'Metrics'),
    )
}
# GA event-tracking columns, keyed by API name; same record shape as the
# other column tables in this module.
event_tracking_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('eventLabel', 'Dimensions'),
        ('eventCategory', 'Dimensions'),
        ('eventAction', 'Dimensions'),
        ('avgEventValue', 'Metrics'),
        ('sessionsWithEvent', 'Metrics'),
        ('eventValue', 'Metrics'),
        ('totalEvents', 'Metrics'),
        ('eventsPerSessionWithEvent', 'Metrics'),
        ('uniqueEvents', 'Metrics'),
    )
}
# GA (enhanced) ecommerce columns, keyed by API name.  Dimensions first,
# then metrics, preserving the original insertion order within each group.
ecommerce_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for view, names in (
        ('Dimensions', (
            'productName',
            'transactionId',
            'affiliation',
            'productBrand',
            'productCategoryHierarchy',
            'productListPosition',
            'productCategoryLevelXX',
            'productSku',
            'shoppingStage',
            'orderCouponCode',
            'daysToTransaction',
            'productCouponCode',
            'productVariant',
            'productListName',
            'sessionsToTransaction',
            'internalPromotionName',
            'internalPromotionPosition',
            'internalPromotionId',
            'currencyCode',
            'checkoutOptions',
            'internalPromotionCreative',
            'productCategory',
        )),
        ('Metrics', (
            'productRevenuePerPurchase',
            'productRemovesFromCart',
            'itemRevenue',
            'transactionTax',
            'internalPromotionClicks',
            'productRefunds',
            'totalRefunds',
            'internalPromotionViews',
            'productListClicks',
            'transactions',
            'quantityRefunded',
            'productCheckouts',
            'localTransactionTax',
            'transactionsPerSession',
            'productListCTR',
            'transactionRevenuePerSession',
            'itemQuantity',
            'buyToDetailRate',
            'localTransactionRevenue',
            'quantityRemovedFromCart',
            'productListViews',
            'transactionShipping',
            'productRefundAmount',
            'revenuePerTransaction',
            'internalPromotionCTR',
            'revenuePerItem',
            'localProductRefundAmount',
            'uniquePurchases',
            'localItemRevenue',
            'refundAmount',
            'quantityCheckedOut',
            'localTransactionShipping',
            'localRefundAmount',
            'productAddsToCart',
            'quantityAddedToCart',
            'productDetailViews',
            'cartToDetailRate',
            'totalValue',
            'itemsPerPurchase',
            'transactionRevenue',
        )),
    )
    for name in names
}
# GA social-interaction columns, keyed by API name; same record shape as
# the other column tables in this module.
social_interaction_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('socialEngagementType', 'Dimensions'),
        ('socialInteractionNetworkAction', 'Dimensions'),
        ('socialInteractionTarget', 'Dimensions'),
        ('socialInteractionAction', 'Dimensions'),
        ('socialInteractionNetwork', 'Dimensions'),
        ('socialInteractionsPerSession', 'Metrics'),
        ('socialInteractions', 'Metrics'),
        ('uniqueSocialInteractions', 'Metrics'),
    )
}
# GA user-timing columns, keyed by API name; same record shape as the
# other column tables in this module.
user_timing_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('userTimingVariable', 'Dimensions'),
        ('userTimingCategory', 'Dimensions'),
        ('userTimingLabel', 'Dimensions'),
        ('userTimingSample', 'Metrics'),
        ('avgUserTimingValue', 'Metrics'),
        ('userTimingValue', 'Metrics'),
    )
}
# GA exception-tracking columns, keyed by API name.  Each 'ga:<name>' key
# maps to its report page ('<name>Page/<name>Page.html'), an initial
# selection flag, and the PropertyGrid view it belongs to.
#
# FIX: the original table had the 'view' values for ga:exceptionDescription
# and ga:exceptionsPerScreenview swapped.  Per the GA Core Reporting API,
# exceptionDescription is a Dimension and exceptionsPerScreenview is a
# Metric (exceptions thrown / screenviews).
exception_metrics = {
    'ga:exceptionsPerScreenview': {
        'html': 'exceptionsPerScreenviewPage/exceptionsPerScreenviewPage.html',
        'value': False,
        'view': 'Metrics'},
    'ga:exceptionDescription': {
        'html': 'exceptionDescriptionPage/exceptionDescriptionPage.html',
        'value': False,
        'view': 'Dimensions'},
    'ga:fatalExceptions': {
        'html': 'fatalExceptionsPage/fatalExceptionsPage.html',
        'value': False,
        'view': 'Metrics'},
    'ga:exceptions': {
        'html': 'exceptionsPage/exceptionsPage.html',
        'value': False,
        'view': 'Metrics'},
    'ga:fatalExceptionsPerScreenview': {
        'html': 'fatalExceptionsPerScreenviewPage/fatalExceptionsPerScreenviewPage.html',
        'value': False,
        'view': 'Metrics'}
}
# GA content-experiments columns (both dimensions), keyed by API name.
content_experiments_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Dimensions',
    }
    for name in ('experimentVariant', 'experimentId')
}
# GA custom variables/columns, keyed by templated API name (the XX suffix
# is the user's custom index placeholder).
custom_variables_columns_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('dimensionXX', 'Dimensions'),
        ('customVarNameXX', 'Dimensions'),
        ('customVarValueXX', 'Dimensions'),
        ('metricXX', 'Metrics'),
    )
}
# GA date/time columns (all dimensions), keyed by API name.
time_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Dimensions',
    }
    for name in (
        'isoYear',
        'nthHour',
        'nthWeek',
        'dayOfWeek',
        'nthMonth',
        'dateHour',
        'dayOfWeekName',
        'nthDay',
        'yearWeek',
        'year',
        'isoWeek',
        'month',
        'date',
        'day',
        'hour',
        'isoYearIsoWeek',
        'minute',
        'nthMinute',
        'week',
        'yearMonth',
    )
}
# DoubleClick Campaign Manager (DCM) columns, keyed by API name.
# Dimensions first, then metrics, preserving the original insertion order
# within each group.
double_click_campaign_manager_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for view, names in (
        ('Dimensions', (
            'dcmClickRenderingId',
            'dcmClickAdId',
            'dcmClickSitePlacementId',
            'dcmFloodlightActivityId',
            'dcmLastEventSiteId',
            'dcmLastEventAdId',
            'dcmLastEventSitePlacement',
            'dcmLastEventAdvertiserId',
            'dcmClickSite',
            'dcmClickCreativeVersion',
            'dcmLastEventCampaignId',
            'dcmLastEventAdvertiser',
            'dcmLastEventAdTypeId',
            'dcmFloodlightActivityAndGroup',
            'dcmLastEventCreativeType',
            'dcmClickAdType',
            'dcmLastEventCampaign',
            'dcmLastEventSite',
            'dcmFloodlightSpotId',
            'dcmLastEventSitePlacementId',
            'dcmLastEventCreativeTypeId',
            'dcmClickSpotId',
            'dcmClickCreative',
            'dcmClickCampaign',
            'dcmClickAdvertiser',
            'dcmFloodlightActivity',
            'dcmLastEventAd',
            'dcmClickSiteId',
            'dcmClickAd',
            'dcmLastEventCreativeVersion',
            'dcmClickAdTypeId',
            'dcmFloodlightActivityGroupId',
            'dcmFloodlightAdvertiserId',
            'dcmLastEventAttributionType',
            'dcmLastEventAdType',
            'dcmLastEventCreativeId',
            'dcmFloodlightActivityGroup',
            'dcmClickCreativeType',
            'dcmClickCampaignId',
            'dcmClickCreativeTypeId',
            'dcmClickAdvertiserId',
            'dcmClickCreativeId',
            'dcmLastEventSpotId',
            'dcmClickSitePlacement',
            'dcmLastEventCreative',
            'dcmLastEventRenderingId',
        )),
        ('Metrics', (
            'dcmFloodlightQuantity',
            'dcmRPC',
            'dcmCost',
            'dcmClicks',
            'dcmCTR',
            'dcmMargin',
            'dcmFloodlightRevenue',
            'dcmROI',
            'dcmCPC',
            'dcmImpressions',
        )),
    )
    for name in names
}
# GA audience (demographics & interests) columns — all dimensions.
audience_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Dimensions',
    }
    for name in (
        'userAgeBracket',
        'interestInMarketCategory',
        'userGender',
        'interestOtherCategory',
        'interestAffinityCategory',
    )
}
# GA AdSense columns — all metrics.
adsense_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Metrics',
    }
    for name in (
        'adsenseAdUnitsViewed',
        'adsenseAdsClicks',
        'adsensePageImpressions',
        'adsenseCTR',
        'adsenseRevenue',
        'adsenseExits',
        'adsenseAdsViewed',
        'adsenseECPM',
    )
}
# GA channel-grouping column (single dimension), same record shape as the
# other column tables in this module.
channel_grouping_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': 'Dimensions',
    }
    for name in ('channelGrouping',)
}
# GA related-products columns, keyed by API name; same record shape as the
# other column tables in this module.
related_products_metrics = {
    'ga:' + name: {
        'html': '{0}Page/{0}Page.html'.format(name),
        'value': False,
        'view': view,
    }
    for name, view in (
        ('queryProductId', 'Dimensions'),
        ('queryProductVariation', 'Dimensions'),
        ('correlationModelId', 'Dimensions'),
        ('relatedProductName', 'Dimensions'),
        ('queryProductName', 'Dimensions'),
        ('relatedProductId', 'Dimensions'),
        ('relatedProductVariation', 'Dimensions'),
        ('relatedProductQuantity', 'Metrics'),
        ('correlationScore', 'Metrics'),
        ('queryProductQuantity', 'Metrics'),
    )
}
# Templates for Google Visualization dataTable cell/row/column records.
# NOTE(review): dict_dataTable_rows_c embeds a REFERENCE to (not a copy of)
# dict_dataTable_rows_v, so mutating either template mutates both —
# presumably callers copy these before filling them in; confirm at use sites.
dict_dataTable_rows_v = {"v": ""}
dict_dataTable_rows_c = {"c": [dict_dataTable_rows_v]}
dict_dataTable_cols = {"id": "", "label": "", "type": ""}
# Skeleton of a Google Analytics Core Reporting API response, used as a
# template for response state.  Placeholder conventions: "" for string
# fields, None for values of unknown type/absent values, [] for lists.
dict_response = {
    "kind": "",
    "id": "",
    "selfLink": "",
    "containsSampledData": None,
    # Echo of the query parameters the report was generated from.
    "query": {
        "start-date": "",
        "end-date": "",
        "ids": "",
        "dimensions": [],
        "metrics": [],
        "samplingLevel": "",
        "sort": [],
        "filters": "",
        "segment": "",
        "start-index": None,
        "max-results": None
    },
    "itemsPerPage": None,
    "totalResults": None,
    "previousLink": None,
    "nextLink": None,
    # The Analytics profile (view) the data was queried from.
    "profileInfo": {
        "profileId": "",
        "accountId": "",
        "webPropertyId": "",
        "internalWebPropertyId": "",
        "profileName": "",
        "tableId": ""
    },
    "columnHeaders": [],
    "rows": [[]],
    # Google Visualization dataTable rendering of the same result rows.
    "dataTable": {
        "cols": [],
        "rows": []
    },
    "sampleSize": "",
    "sampleSpace": "",
    "totalsForAllResults": []
}
| {
"repo_name": "DaniilLeksin/gc",
"path": "m_gawrapper/m_dicts.py",
"copies": "1",
"size": "62132",
"license": "apache-2.0",
"hash": -5560640459287790000,
"line_mean": 34.4432401597,
"line_max": 174,
"alpha_frac": 0.5907101011,
"autogenerated": false,
"ratio": 3.6285697599719677,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9717408561217991,
"avg_score": 0.00037425997079534694,
"num_lines": 1753
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
# ##########################################################################
##
##
##
##
###########################################################################
import sys
import datetime
import wx
_ = wx.GetTranslation
import wx.propgrid as grid
from m_gawrapper.m_dicts import *
from c_gawrapper.c_properties import *
from ui_gawrapper.ui_filters import FiltersAdds
###########################################################################
## Class main
###########################################################################
class Properties(wx.Panel):
    """Property grid holding every Google Analytics request setting.

    Exposes three dictionaries that the controller
    (c_gawrapper.c_properties) reads and updates:

      * ``dict_credentials``    -- client_id / client_secret
      * ``dict_params``         -- dates and the other query parameters
      * ``dict_api_properties`` -- all selectable dimensions & metrics,
                                   stored under the "/" key
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.panel = panel = wx.Panel(self, wx.ID_ANY)
        sz_main = wx.BoxSizer(wx.VERTICAL)
        self.property_grid = property_grid = \
            grid.PropertyGridManager(panel, style=grid.PG_SPLITTER_AUTO_CENTER | grid.PG_AUTO_SORT)
        # Register the custom editor only once per process: wxPython raises
        # an error if the same editor class is registered twice.
        if not getattr(sys, '_PropGridEditorsRegistered', False):
            property_grid.RegisterEditor(FiltersAdds)
            sys._PropGridEditorsRegistered = True
        # Make the value column twice as wide as the name column.
        self.property_grid.SetColumnProportion(1, 2)
        property_grid.AddPage("Properties")
        # Credentials taken from the API console.
        self.dict_credentials = dict_credentials
        property_grid.Append(grid.PropertyCategory("01 - Client Parameters"))
        property_grid.Append(grid.StringProperty("client_id", value=""))
        # Keep the client secret in secret (:
        property_grid.Append(grid.StringProperty("client_secret", value=""))
        # Query parameters (dates, sort, filters, ...).  Metrics and ids live
        # in their own categories below.
        self.dict_params = dict_params
        property_grid.Append(grid.PropertyCategory("02 - Required Parameters"))
        # Start/end date for fetching Analytics data, defaulting to today;
        # mirror the defaults into dict_params so an untouched grid still
        # produces a valid request.
        today = datetime.date.today().strftime("%Y-%m-%d")
        property_grid.Append(grid.StringProperty("start-date", value=today))
        property_grid.Append(grid.StringProperty("end-date", value=today))
        self.dict_params["start-date"] = today
        self.dict_params["end-date"] = today
        property_grid.Append(grid.PropertyCategory("03 - Not Required Parameters"))
        # Comma-separated dimensions/metrics giving sort order and direction.
        property_grid.Append(grid.StringProperty("sort", value=wx.EmptyString))
        # Dimension/metric filters, edited through the custom FiltersAdds editor.
        property_grid.Append(grid.LongStringProperty("filters"))
        property_grid.SetPropertyEditor("filters", "FiltersAdds")
        # Segments the data returned for the request.
        property_grid.Append(grid.StringProperty("segment", value=wx.EmptyString))
        # Sampling level: DEFAULT balances speed and accuracy, FASTER uses a
        # smaller sample, HIGHER_PRECISION a larger (slower) one.
        property_grid.Append(grid.EnumProperty("samplingLevel", "samplingLevel",
                                               ['DEFAULT', 'FASTER', 'HIGHER_PRECISION'], [10, 11, 12], 0))
        # First row to retrieve / maximum number of rows in the response.
        property_grid.Append(grid.StringProperty("start-index", value=wx.EmptyString))
        property_grid.Append(grid.StringProperty("max-results", value=wx.EmptyString))
        # Output format of the report.
        property_grid.Append(grid.EnumProperty("output", "output",
                                               ['json', 'dataTable'], [10, 11], 0))
        # Partial-response field selector.
        property_grid.Append(grid.StringProperty("fields", value=wx.EmptyString))
        # Human-readable response formatting.
        property_grid.Append(grid.BoolProperty("prettyPrint", value=True))
        property_grid.SetPropertyAttribute("prettyPrint", "UseCheckbox", True)
        # Per-user quota enforcement for server-side applications where the
        # end user's IP address is unknown.
        property_grid.Append(grid.StringProperty("quotaUser", value=wx.EmptyString))
        # All selectable dimensions & metrics live under the "/" key.  Each
        # metrics dict maps "ga:name" -> {"type", "view", "html", "value"}.
        self.dict_api_properties = {"/": {}}
        # (category caption, metrics dict) in display order.  NOTE(review):
        # the caption number "25" appears twice in the original UI (Time and
        # DoubleClick) and is kept as-is so the visible labels do not change.
        metric_categories = (
            ("04 - User", user_metrics),
            ("05 - Session", session_metrics),
            ("06 - Traffic Sources", traffic_source_metrics),
            ("07 - Adwords", adwords_metrics),
            ("08 - Goal Conversion", goal_conversions_metrics),
            ("09 - Platform & device", platform_device_metrics),
            ("10 - Geo Network", geo_network_metrics),
            ("11 - System", system_metrics),
            ("12 - Social activities", social_activities_metrics),
            ("13 - Page tracking", page_tracking_metrics),
            ("14 - Content grouping", content_grouping_metrics),
            ("15 - Internal search", internal_search_metrics),
            ("16 - Site speed", site_speed_metrics),
            ("17 - App tracking", app_tracking_metrics),
            ("18 - Event tracking", event_tracking_metrics),
            ("19 - Ecommerce", ecommerce_metrics),
            ("20 - Social interaction", social_interaction_metrics),
            ("21 - User timing", user_timing_metrics),
            ("22 - Exceptions", exception_metrics),
            ("23 - Content experiments", content_experiments_metrics),
            ("24 - Custom variables", custom_variables_columns_metrics),
            ("25 - Time", time_metrics),
            ("25 - DoubleClick campaign manager", double_click_campaign_manager_metrics),
            ("26 - Audience", audience_metrics),
            ("27 - Adsense", adsense_metrics),
            ("28 - Channel grouping", channel_grouping_metrics),
            ("29 - Related products", related_products_metrics),
        )
        for caption, metrics in metric_categories:
            property_grid.Append(grid.PropertyCategory(caption))
            self._create_property(metrics)
            # BUGFIX: the original registered social_interaction_metrics
            # under "12 - Social activities" instead of
            # social_activities_metrics; every category now stores the same
            # dict it displays.
            self.dict_api_properties["/"].update(metrics)
        # Embed the grid panel into the main sizer.
        sz_main.Add(property_grid, 1, wx.EXPAND)
        panel.SetSizer(sz_main)
        sz_main.SetSizeHints(panel)
        # Fit the sizer.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)
        # Collapse everything, then expand only the must-fill categories.
        property_grid.CollapseAll()
        property_grid.Expand("01 - Client Parameters")
        property_grid.Expand("02 - Required Parameters")
        # Connect events and handlers.
        property_grid.Bind(grid.EVT_PG_CHANGED, self.on_property_grid_changed)
        property_grid.Bind(grid.EVT_PG_SELECTED, self.on_property_grid_select)

    def _create_property(self, metrics):
        """Append one checkbox BoolProperty per dimension/metric.

        :param metrics: dict of "ga:name" -> property description
        """
        for ga_property in metrics:
            self.property_grid.Append(grid.BoolProperty(ga_property, value=False))
            self.property_grid.SetPropertyAttribute(ga_property, "UseCheckbox", True)
            if metrics[ga_property]["view"] == "Metrics":
                # TODO: background colour separation between dimensions and metrics
                pass
            else:
                # TODO: background colour separation between dimensions and metrics
                pass

    def on_property_grid_select(self, event):
        """Show the help page for the property selected in the grid.

        :param event: wx.propgrid selection event
        """
        on_select_property(event, self.dict_api_properties)

    def on_property_grid_changed(self, event):
        """Propagate an edited value into the credential/param/metric dicts.

        :param event: wx.propgrid change event
        """
        self.dict_credentials, self.dict_params, self.dict_api_properties = \
            on_change_value(event, self.dict_credentials, self.dict_params, self.dict_api_properties)
        # TODO: change status bar
        # self.GetTopLevelParent().sb_status.SetValue("changed:%s" % event.GetProperty().GetName(), 0)
        # self.GetTopLevelParent().sb_status.refresh()
| {
"repo_name": "DaniilLeksin/gc",
"path": "ui_gawrapper/ui_properties.py",
"copies": "1",
"size": "16111",
"license": "apache-2.0",
"hash": 9005729983777780000,
"line_mean": 47.5090361446,
"line_max": 120,
"alpha_frac": 0.6566283763,
"autogenerated": false,
"ratio": 4.4885730211817165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015901848827211751,
"num_lines": 332
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
###########################################################################
##
##
##
##
###########################################################################
import wx
import wx.html
from ui_gawrapper.ui_browser import Browser
from ui_gawrapper.ui_response import Response
from ui_properties import Properties
from c_gawrapper.c_wrapper import *
import json
from m_gawrapper.m_dicts import dict_response
###########################################################################
## Class main
###########################################################################
class Main(wx.Frame):
    """Top-level frame: properties/response notebook, help browser, buttons."""

    def __init__(self, parent):
        wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=u"GA: wrapper", pos=wx.DefaultPosition,
                          size=wx.Size(964, 750),
                          style=wx.DEFAULT_FRAME_STYLE | wx.MAXIMIZE | wx.TAB_TRAVERSAL)
        self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
        sz_main = wx.BoxSizer(wx.VERTICAL)
        sz_top = wx.BoxSizer(wx.HORIZONTAL)
        sz_properties = wx.BoxSizer(wx.HORIZONTAL)
        # Left: notebook with the request properties and the response view.
        self.nb_properties = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
        self.p_properties = Properties(self.nb_properties)
        self.nb_properties.AddPage(self.p_properties, u"Properties", True)
        self.p_response = Response(self.nb_properties)
        self.nb_properties.AddPage(self.p_response, u"Response", False)
        sz_properties.Add(self.nb_properties, 1, wx.ALL | wx.EXPAND, 5)
        # Right: embedded help browser.
        self.hw_info = Browser(self)
        sz_properties.Add(self.hw_info, 1, wx.ALL | wx.EXPAND, 5)
        sz_top.Add(sz_properties, 1, wx.EXPAND, 5)
        sz_main.Add(sz_top, 1, wx.EXPAND, 5)
        # Bottom: action buttons.
        sz_bottom = wx.BoxSizer(wx.VERTICAL)
        sz_buttons = wx.BoxSizer(wx.HORIZONTAL)
        self.bt_getData = wx.Button(self, wx.ID_ANY, u"Make report", wx.DefaultPosition, wx.DefaultSize, 0)
        sz_buttons.Add(self.bt_getData, 1, wx.ALL, 5)
        self.bt_clearSelection = wx.Button(self, wx.ID_ANY, u"Clear selections", wx.DefaultPosition, wx.DefaultSize, 0)
        sz_buttons.Add(self.bt_clearSelection, 1, wx.ALL, 5)
        self.bt_clearSelection.Disable()  # handler not implemented yet
        self.bt_save2json = wx.Button(self, wx.ID_ANY, u"Save to json format", wx.DefaultPosition, wx.DefaultSize, 0)
        sz_buttons.Add(self.bt_save2json, 1, wx.ALL, 5)
        self.bt_save2wrapper = wx.Button(self, wx.ID_ANY, u"Save to wrapper format", wx.DefaultPosition, wx.DefaultSize, 0)
        sz_buttons.Add(self.bt_save2wrapper, 1, wx.ALL, 5)
        self.bt_save2wrapper.Disable()  # handler not implemented yet
        self.bt_exit = wx.Button(self, wx.ID_ANY, u"Exit", wx.DefaultPosition, wx.DefaultSize, 0)
        self.bt_exit.SetToolTipString(u"Go out from my wrapper")
        sz_buttons.Add(self.bt_exit, 0, wx.ALL, 5)
        sz_bottom.Add(sz_buttons, 0, wx.EXPAND, 5)
        sz_logs = wx.BoxSizer(wx.HORIZONTAL)
        # self.tc_logs = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE)
        # self.tc_logs.SetMinSize(wx.Size(-1, 150))
        #
        # sz_logs.Add(self.tc_logs, 1, wx.ALL, 5)
        sz_bottom.Add(sz_logs, 0, wx.EXPAND, 5)
        sz_main.Add(sz_bottom, 0, wx.EXPAND, 5)
        self.SetSizer(sz_main)
        self.Layout()
        self.sb_status = self.CreateStatusBar(3, wx.ST_SIZEGRIP, wx.ID_ANY)
        self.Centre(wx.BOTH)
        # Connect events.
        self.bt_getData.Bind(wx.EVT_BUTTON, self.on_bt_get_data)
        self.bt_clearSelection.Bind(wx.EVT_BUTTON, self.on_bt_clear_selection)
        self.bt_save2json.Bind(wx.EVT_BUTTON, self.on_bt_save2json)
        self.bt_save2wrapper.Bind(wx.EVT_BUTTON, self.on_bt_save2wrapper)
        self.bt_exit.Bind(wx.EVT_BUTTON, self.on_bt_exit)
        # Last API response (set by add_response, dumped by on_bt_save2json).
        self.response = None

    def __del__(self):
        pass

    # Virtual event handlers, override them in your derived class

    def on_bt_get_data(self, event):
        """Run the report with the current credentials/params/metrics."""
        if check_the_data():
            response = make_report(self.p_properties.dict_credentials,    # credential dictionary
                                   self.p_properties.dict_params,         # other params (start date etc.)
                                   self.p_properties.dict_api_properties)  # dimensions & metrics
            # Work with response
            self.p_response.SetFocus()
            self.add_response(response)
        else:
            # TODO: handle the error actions
            pass

    def on_bt_clear_selection(self, event):
        event.Skip()

    def on_bt_save2json(self, event):
        """Dump the last response to "<unix-time>_response.json"."""
        if self.response:
            import time
            # BUGFIX: use a context manager -- the original leaked the
            # file handle returned by open().
            with open("%d_response.json" % round(time.time()), 'w') as out:
                out.write(json.dumps(self.response))
            show_warning(message="File xxxx_response.json - saved", title="Save file:")

    def on_bt_save2wrapper(self, event):
        event.Skip()

    def on_bt_exit(self, event):
        import sys
        sys.exit()

    def set_html_page(self, html_path):
        """Show help for a property.

        :param html_path: pair of (local html file, online help URL)
        """
        try:
            if self.hw_info.cb_online_help.IsChecked():
                # BUGFIX: the original rebound the browser's "location"
                # ComboBox attribute to a plain string, which made the
                # subsequent on_location_enter() fail; set its value instead.
                self.hw_info.location.SetValue(html_path[1])
                self.hw_info.on_location_enter()
            else:
                # BUGFIX: context manager so the file handle is closed.
                with open(html_path[0], 'rb') as page:
                    self.hw_info.web_view.SetPage(page.read())
        except Exception as error:
            print(str(error))  # TODO: put in log

    def save_response(self, response):
        self.response = response

    def add_response(self, response):
        """Remember *response* and render it in the Response tab."""
        self.save_response(response)
        self.p_response.add(response)
| {
"repo_name": "DaniilLeksin/gc",
"path": "ui_gawrapper/ui_wrapper.py",
"copies": "1",
"size": "5653",
"license": "apache-2.0",
"hash": 8158354156452454000,
"line_mean": 34.5534591195,
"line_max": 123,
"alpha_frac": 0.5784539183,
"autogenerated": false,
"ratio": 3.487353485502776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4565807403802776,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniil Leksin'
# -*- coding: utf-8 -*-
# ##########################################################################
##
##
##
##
###########################################################################
import wx
_ = wx.GetTranslation
import wx.propgrid as grid
###########################################################################
## Class main
###########################################################################
class Response(wx.Panel):
    """Read-only property grid presenting a Google Analytics API response."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        self.panel = panel = wx.Panel(self, wx.ID_ANY)
        sz_main = wx.BoxSizer(wx.VERTICAL)
        self.property_grid = property_grid = \
            grid.PropertyGridManager(panel, style=grid.PG_SPLITTER_AUTO_CENTER)
        # Make the value column twice as wide as the name column.
        self.property_grid.SetColumnProportion(1, 2)
        self.property_grid.AddPage("Response")
        property_grid.Append(grid.PropertyCategory("Response"))
        # Embed the grid panel into the main sizer.
        sz_main.Add(self.property_grid, 1, wx.EXPAND)
        panel.SetSizer(sz_main)
        sz_main.SetSizeHints(panel)
        # Fit the sizer.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(panel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.SetAutoLayout(True)

    def _append_string(self, name, value, label=None):
        """Helper: append one StringProperty to the grid."""
        if label is None:
            self.property_grid.Append(grid.StringProperty(name, value=value))
        else:
            self.property_grid.Append(
                grid.StringProperty(name=name, label=label, value=value))

    def add(self, response):
        """Fill the grid from *response*, a parsed GA API reply.

        Missing keys are skipped; a KeyError from an unexpected response
        shape is printed instead of raised.  Uses ``in`` instead of the
        Python-2-only ``dict.has_key``.
        """
        try:
            # Top-level scalars (second element: convert value via str()).
            for key, to_str in (("kind", False), ("id", False),
                                ("selfLink", False), ("containsSampledData", True)):
                if key in response:
                    self._append_string(key, str(response[key]) if to_str else response[key])
            # Echo of the query parameters.
            self.property_grid.Append(grid.PropertyCategory("query"))
            query = response['query']
            for key, conv in (("start-date", None), ("end-date", None),
                              ("ids", None), ("dimensions", None),
                              ("metrics", "join"), ("samplingLevel", None),
                              ("sort", "join"), ("filters", None),
                              ("segment", None), ("start-index", "str"),
                              ("max-results", "str")):
                if key in query:
                    value = query[key]
                    if conv == "join":
                        value = ",".join(value)
                    elif conv == "str":
                        value = str(value)
                    self._append_string(key, value)
            self.property_grid.Append(grid.PropertyCategory("response fields"))
            for key in ("itemsPerPage", "totalResults", "previousLink", "nextLink"):
                if key in response:
                    self._append_string(key, str(response[key]))
            # NOTE(review): the profile fields are looked up on the response
            # top level, as in the original code -- the API usually nests
            # them under "profileInfo"; confirm against the caller.
            for key in ("profileId", "accountId", "webPropertyId",
                        "internalWebPropertyId", "profileName", "tableId"):
                if key in response:
                    self._append_string(key, response[key])
            for index, header in enumerate(response["columnHeaders"]):
                self.property_grid.Append(grid.PropertyCategory("columnHeaders_%s" % index))
                self._append_string("columnHeaders_%s_name" % index, header["name"], label="columnHeaders")
                self._append_string("columnHeaders_%s_columnType" % index, header["columnType"], label="columnType")
                self._append_string("columnHeaders_%s_dataType" % index, header["dataType"], label="dataType")
            if "dataTable" in response:
                # Only present when output=dataTable was requested.
                self.property_grid.Append(grid.PropertyCategory("dataTable"))
                for index, column in enumerate(response["dataTable"]["cols"]):
                    self.property_grid.Append(grid.PropertyCategory("column_%s" % index))
                    # BUGFIX: the original read the stale loop variable
                    # "header" (from the columnHeaders loop) instead of
                    # "column" here.
                    if "id" in column:
                        self._append_string("column_%s_id" % index, column["id"], label="id")
                    if "label" in column:
                        self._append_string("column_%s_label" % index, column["label"], label="label")
                    if "type" in column:
                        self._append_string("column_%s_type" % index, column["type"], label="type")
                for index, row in enumerate(response["dataTable"]["rows"]):
                    self.property_grid.Append(grid.PropertyCategory("row_%s" % index))
                    for index_c, c in enumerate(row):
                        self.property_grid.Append(grid.PropertyCategory("c_%s" % index_c))
                        if "v" in c:
                            self._append_string("c_%s_v" % index_c, c["v"])
            if "sampleSize" in response:
                self._append_string("sampleSize", response["sampleSize"])
            if "sampleSpace" in response:
                self._append_string("sampleSpace", response["sampleSpace"])
            self.property_grid.Append(grid.PropertyCategory("totalsForAllResults"))
            totals = response["totalsForAllResults"]
            if type(totals) is not dict:
                # A sequence of per-result-set dicts.
                for index, result in enumerate(totals):
                    self.property_grid.Append(grid.PropertyCategory("result_%s" % index))
                    # BUGFIX: the original iterated totals.keys() (a list has
                    # no .keys()) and indexed totals[result]; iterate the
                    # per-result dict instead.
                    for metric in result:
                        self._append_string("%s_%s" % (metric, index), result[metric], label=metric)
            else:
                # Single dict of metric -> aggregated value.
                for index, metric in enumerate(totals):
                    self._append_string("%s_%s" % (metric, index), totals[metric], label=metric)
        except KeyError as error:
            print(str(error))  # TODO: log instead of printing
| {
"repo_name": "DaniilLeksin/gc",
"path": "ui_gawrapper/ui_response.py",
"copies": "1",
"size": "8327",
"license": "apache-2.0",
"hash": -6188704758296320000,
"line_mean": 74.7,
"line_max": 169,
"alpha_frac": 0.6382850967,
"autogenerated": false,
"ratio": 4.169754631947922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009968188487307775,
"num_lines": 110
} |
__author__ = 'Daniil Leksin'
import wx
import wx.html2
class Browser(wx.Panel):
    """Minimal embedded web browser used to display the GA help pages."""

    def __init__(self, parent, frame=None):
        wx.Panel.__init__(self, parent, -1)
        # Last successfully loaded URL; also the start page.
        self.current = 'https://developers.google.com/analytics//'
        self.frame = frame
        if frame:
            self.titleBase = frame.GetTitle()
        root_sizer = wx.BoxSizer(wx.VERTICAL)
        toolbar = wx.BoxSizer(wx.HORIZONTAL)
        self.web_view = wx.html2.WebView.New(self)
        self.Bind(wx.html2.EVT_WEBVIEW_LOADED, self.on_web_view_loaded, self.web_view)
        # Online-help toggle; read by the main frame, currently forced on.
        self.cb_online_help = wx.CheckBox(self, wx.ID_ANY, u"Use online help!",
                                          wx.DefaultPosition, wx.DefaultSize, 0)
        self.cb_online_help.Value = True
        self.cb_online_help.Disable()
        toolbar.Add(self.cb_online_help, 0, wx.EXPAND | wx.ALL, 2)
        # Navigation buttons: back / forward / stop / refresh.
        back_btn = wx.Button(self, -1, "<--", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.on_bt_back, back_btn)
        toolbar.Add(back_btn, 0, wx.EXPAND | wx.ALL, 2)
        self.Bind(wx.EVT_UPDATE_UI, self.on_check_valid_back, back_btn)
        fwd_btn = wx.Button(self, -1, "-->", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.on_bt_forward, fwd_btn)
        toolbar.Add(fwd_btn, 0, wx.EXPAND | wx.ALL, 2)
        self.Bind(wx.EVT_UPDATE_UI, self.on_check_valid_forward, fwd_btn)
        stop_btn = wx.Button(self, -1, "Stop", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.on_bt_stop, stop_btn)
        toolbar.Add(stop_btn, 0, wx.EXPAND | wx.ALL, 2)
        refresh_btn = wx.Button(self, -1, "Refresh", style=wx.BU_EXACTFIT)
        self.Bind(wx.EVT_BUTTON, self.on_bt_refresh, refresh_btn)
        toolbar.Add(refresh_btn, 0, wx.EXPAND | wx.ALL, 2)
        url_label = wx.StaticText(self, -1, "URL:")
        toolbar.Add(url_label, 0, wx.CENTER | wx.ALL, 2)
        # Editable address bar with a drop-down history of visited URLs.
        self.location = wx.ComboBox(
            self, -1, "", style=wx.CB_DROPDOWN | wx.TE_PROCESS_ENTER)
        self.location.AppendItems(['https://developers.google.com/analytics/'])
        self.Bind(wx.EVT_COMBOBOX, self.on_select_location, self.location)
        self.location.Bind(wx.EVT_TEXT_ENTER, self.on_location_enter)
        toolbar.Add(self.location, 1, wx.EXPAND | wx.ALL, 2)
        root_sizer.Add(toolbar, 0, wx.EXPAND)
        root_sizer.Add(self.web_view, 1, wx.EXPAND)
        self.SetSizer(root_sizer)
        self.web_view.LoadURL(self.current)

    def on_web_view_loaded(self, event):
        """The full document has loaded: remember and display its URL."""
        self.current = event.GetURL()
        self.location.SetValue(self.current)

    # --- control-bar event handlers ---------------------------------------

    def on_select_location(self, event):
        """A history entry was picked from the drop-down list."""
        self.web_view.LoadURL(self.location.GetStringSelection())

    def on_location_enter(self, event=None):
        """Enter pressed in the address bar (also called programmatically)."""
        url = self.location.GetValue()
        self.location.Append(url)
        self.web_view.LoadURL(url)

    def on_bt_back(self, event):
        self.web_view.GoBack()

    def on_bt_forward(self, event):
        self.web_view.GoForward()

    def on_check_valid_back(self, event):
        event.Enable(self.web_view.CanGoBack())

    def on_check_valid_forward(self, event):
        event.Enable(self.web_view.CanGoForward())

    def on_bt_stop(self, event):
        self.web_view.Stop()

    def on_bt_refresh(self, event):
        self.web_view.Reload()
| {
"repo_name": "DaniilLeksin/gc",
"path": "ui_gawrapper/ui_browser.py",
"copies": "1",
"size": "3343",
"license": "apache-2.0",
"hash": -3674067132449909000,
"line_mean": 36.1444444444,
"line_max": 118,
"alpha_frac": 0.6201017051,
"autogenerated": false,
"ratio": 3.030825022665458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4150926727765458,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniil Nikulin'
__copyright__ = "Copyright 2017,VK File Bot"
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Daniil Nikulin"
__email__ = "danil.nikulin@gmail.com"
__status__ = "Production"
from telebot import types
from config import emoji
# Bot command -> description, shown by the "help" command.
commands = {
    'start': 'Начало работы со мной',
    'help': 'Вся необходимая информация'
}
# Tip title -> HTML-formatted tip text, shown by the "tips" command.
tips = {
    'Совет №1': "Если ищешь книгу " + emoji.emoji_codes_dict[
        ":blue_book:"] + ", то часто легче найти в pdf или djvu с помощью "
                         "кнопки" + "<b>" + "\n" + "[ Текст(pdf,doc) ]" + emoji.emoji_codes_dict[
        ":page_facing_up:"] + "</b>" + " (кнопки появятся внизу вместо клавиатуры)",
    'Совет №2': "Если ты из Украины " + emoji.emoji_codes_dict[":U:"] + emoji.emoji_codes_dict[":A:"] +
                " выбирай способ загрузки " + "<b>" + "Файл" + "</b>"
                + " и тебе не понадобится скачивать по ссылке на VK (заблокрованый ресурс).",
    'Совет №3': "Музыку трудно найти тут, все борятся с пиратством. Скорее всего ты найдешь "
                "рингтон по своему запросу. Но любимая песня на звонке, разве не подарок? " + emoji.emoji_codes_dict[
        ":grinning:"],
    'Совет №4': "Гифки ищются легко и непринужденно (особенно на русском), но всё же в Телеграме лучшие гифки тут " + "@gif. ",
    'Совет №5': "Современную музыку " + emoji.emoji_codes_dict[":musical_score:"] + " в " + "<b>" + "mp3" + "</b>" +
                " найти тут сложновато, но почти всегда есть в формате "
                + "<b>" + "flac" + "</b>" + "."
}
# File-type button caption -> description of the extensions it covers,
# shown by the "types" command.
# NOTE(review): the emoji key ":compression :" contains a trailing space and
# ":frame_photo" lacks its closing colon -- both spellings are used
# consistently below, so presumably they match emoji_codes_dict; confirm.
types_dict = {
    "Все": "Все файлы найденные по запросу",
    emoji.emoji_codes_dict[
        ":page_facing_up:"] + "Текст(pdf,doc)": 'pdf, doc, docx, txt, odt...',  # BUGFIX: was 'dpf'
    emoji.emoji_codes_dict[":open_book:"] + "Книги": "epub, fb2...",
    emoji.emoji_codes_dict[":compression :"] + "Архивы": 'zip, rar, 7z...',
    "Gif": 'Анимации: gif',
    emoji.emoji_codes_dict[":frame_photo"] + "Изображения": 'jpg, jpeg, bmp, png, m3d ,tif...',
    emoji.emoji_codes_dict[":musical_note:"] + "Аудио": 'flac, mp3, m4r, mp2, wav',
    emoji.emoji_codes_dict[":video_camera:"] + "Видео": 'mp4, webm, mkv, 3gp',
}
# Reply keyboard shown when the user chooses a file type; one_time_keyboard
# hides it again after a choice is made.
type_select = types.ReplyKeyboardMarkup(one_time_keyboard=True)
type_select.row("Все", "Текст(pdf,doc)" + emoji.emoji_codes_dict[":page_facing_up:"])
type_select.row("Книги" + emoji.emoji_codes_dict[":open_book:"], "Архивы" + emoji.emoji_codes_dict[":compression :"])
type_select.row("Gif", "Изображения" + emoji.emoji_codes_dict[":frame_photo"])
type_select.row("Аудио" + emoji.emoji_codes_dict[":musical_note:"], "Видео" + emoji.emoji_codes_dict[":video_camera:"])
hide_board = types.ReplyKeyboardRemove()  # if sent as reply_markup, will hide the keyboard
"repo_name": "ddci/vkfilebot",
"path": "config/config.py",
"copies": "1",
"size": "3566",
"license": "apache-2.0",
"hash": 3598530778816045000,
"line_mean": 51.375,
"line_max": 127,
"alpha_frac": 0.610845839,
"autogenerated": false,
"ratio": 1.986449864498645,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8083994391686846,
"avg_score": 0.0026602623623597052,
"num_lines": 56
} |
__author__ = 'Daniil Nikulin'
__copyright__ = "Copyright 2017,VK File Bot"
__license__ = "Apache License 2.0"
__version__ = "1.0"
__maintainer__ = "Daniil Nikulin"
__email__ = "danil.nikulin@gmail.com"
__status__ = "Production"
# ---------
# Imports
# ---------
import os
import telebot
import constants
from bot import bot as vk_bot
from flask import Flask, request
server = Flask(__name__)
# Telegram API getUpdate method
# bot.remove_webhook()
# bot.polling(none_stop=True, interval=0)
@server.route('/' + constants.TOKEN_TELEGRAM, methods=['POST'])
def get_message():
    """Telegram webhook endpoint: decode the raw update and feed it to the bot."""
    raw_body = request.stream.read().decode("utf-8")
    update = telebot.types.Update.de_json(raw_body)
    vk_bot.process_new_updates([update])
    return "POST", 200
@server.route("/")
def web_hook():
vk_bot.remove_webhook()
vk_bot.set_webhook(url='https://vkfilebot.herokuapp.com/' + constants.TOKEN_TELEGRAM)
return "CONNECTED" + "\n Contact " + "<a href=" + "https://t.me/daniel_nikulin" + ">" + "Daniel" + "<a>", 200
# Heroku supplies the listening port via $PORT as a *string*; werkzeug's
# run_simple requires an int, so convert explicitly. Falls back to 5000 locally.
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000)))
| {
"repo_name": "ddci/vkfilebot",
"path": "main.py",
"copies": "1",
"size": "1071",
"license": "apache-2.0",
"hash": 5406285853073919000,
"line_mean": 26.9459459459,
"line_max": 113,
"alpha_frac": 0.629318394,
"autogenerated": false,
"ratio": 2.803664921465969,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3932983315465969,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dani'
import re
class Deal(object):
    """Plain data holder for a single Land Matrix land-deal record."""

    # Negotiation status
    INTENDED = "Intended"
    CONCLUDED = "Concluded"
    FAILED = "Failed"

    # Implementation status
    IN_OPERATION = "In operation (production)"
    STARTUP_PHASE = "Startup phase (no production)"
    PROJECT_NOT_STARTED = "Project not started"
    PROJECT_ABANDONED = "Project abandoned"
    # This must match the intention value, removing the blank spaces"

    # Sectors
    AGRICULTURE = "Agriculture"
    BIOFUELS = "Biofuels"
    FOOD_CROPS = "Foodcrops"
    LIVESTOCK = "Livestock"
    NON_FOOD_AGRICULTURAL_COMMODITIES = "Nonfoodagriculturalcommodities"
    AGRIUNSPECIFIED = "Agricultureunspecified"
    FOODER = "Fodder"
    CONSERVATION = "Conservation"
    FORESTRY = "Forestry"
    TIMBER_FOR_WOOD_AND_FIBRE = "Timberplantation(forwoodandfibre)"
    LOGGING_FOR_WOOD_AND_FIBRE = "Forestlogging/management(forwoodandfibre)"
    FOR_CARBON_SEQUESTRATION_REDD = "Forcarbonsequestration/REDD"
    FORESTUNSPECIFIED = "Forestryunspecified"
    INDUSTRY = "Industry"
    RENEWABLE_ENERGY = "RenewableEnergy"
    TOURISM = "Tourism"
    OTHER = "Other(pleasespecify)"
    UNKNOWN = "Unknown"
    MINING = "Mining"
    LANDSPECULATION = "Landspeculation"
    OIL_GAS = "Oil/Gasextraction"

    def __init__(self, target_country=None, production_hectares=None, contract_hectares=None,
                 intended_hectares=None, date=None, sectors=None, negotiation_status=None, implementation_status=None):
        self.target_country = target_country
        self.production_hectares = production_hectares
        self.contract_hectares = contract_hectares
        self.intended_hectares = intended_hectares
        self.date = date
        self.sectors = sectors
        self.negotiation_status = negotiation_status
        self.implementation_status = implementation_status

    def __str__(self):
        """Render every attribute as key='value', comma separated.

        Bug fixes: the bare `isinstance(value, unicode)` check raised a
        NameError on Python 3 — it is now guarded by `str is bytes`, which is
        True only on Python 2, so `unicode` is never evaluated on Python 3.
        A dead `if key == "date": pass` branch was removed.
        """
        sb = []
        for key, value in self.__dict__.items():
            # Python 2 only: encode unicode values to utf-8 byte strings.
            if str is bytes and isinstance(value, unicode):
                value = value.encode('utf-8')
            sb.append("{key}='{value}'".format(key=key, value=value))
        return ', '.join(sb)

    def __repr__(self):
        return self.__str__()
| {
"repo_name": "landportal/landbook-importers",
"path": "LandMatrix_Importer/es/weso/landmatrix/entities/deal.py",
"copies": "1",
"size": "2377",
"license": "mit",
"hash": 3716478101710389000,
"line_mean": 33.4492753623,
"line_max": 148,
"alpha_frac": 0.6760622634,
"autogenerated": false,
"ratio": 3.26510989010989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9225631144138211,
"avg_score": 0.0431082018743357,
"num_lines": 69
} |
__author__ = 'Dani'
from ..entities.deal import Deal
import re
class DealsBuilder(object):
    """Namespace object that converts an ElementTree node into a Deal entity."""

    def __init__(self):
        pass

    @staticmethod
    def turn_node_into_deal_object(info_node):
        """
        It receives a node (Element of ElementTree) and returns a deal object containing the needed data
        """
        deal = Deal()
        deal.target_country = _extract_target_country(info_node)
        deal.date = _extract_date(info_node)
        deal.production_hectares = _extract_production_hectares(info_node)
        deal.intended_hectares = _extract_intended_hectares(info_node)
        deal.contract_hectares = _extract_contract_hectares(info_node)
        deal.sectors = _extract_sectors(info_node)
        deal.negotiation_status = _extract_negotiation_status(info_node)
        deal.implementation_status = _extract_implementation_status(info_node)
        return deal
#############################################################################################
#                                        FUNCTIONS                                          #
#############################################################################################
### Constants
PROPERTY = "name"  # XML attribute naming each data subnode
TARGET_COUNTRY = "Location 1: Target country"  # field: country the deal targets
SECTORS = "Intention of investment"  # field: declared sectors/intentions
NEGOTIATION_STATUS = "Negotiation status"  # field: negotiation stage (also carries years)
IMPLEMENTATION_STATUS = "Implementation status"  # field: implementation stage
NO_VALUE = "None"  # sentinel returned when a field is missing or empty
INTENDED_SIZE = "Intended size (in ha)"  # field: announced hectares
CONTRACT_SIZE = "Size under contract (leased or purchased area, in ha)"  # field: contracted hectares
PRODUCTION_SIZE = "Size in operation (production, in ha)"  # field: productive hectares
# Functions
def _extract_hectares(info_node, hectares_type):
    """Return the hectare figure stored under *hectares_type*, or None when absent.

    The raw field may look like "...|...##2020#current#500"; the numeric value
    is carried by the last "#"-separated token of the last segment.
    """
    raw = _get_node_data(info_node, hectares_type)
    if raw == NO_VALUE:
        return None
    candidate = raw.split("|")[-1]
    candidate = candidate.split("##")[-1]
    if "current" in candidate:  # e.g. "2020#current#500"
        candidate = candidate.split("#")[-1]
    return int(round(float(candidate)))


def _extract_intended_hectares(info_node):
    """Hectares announced as intended for the deal."""
    return _extract_hectares(info_node, INTENDED_SIZE)


def _extract_contract_hectares(info_node):
    """Hectares actually under contract."""
    return _extract_hectares(info_node, CONTRACT_SIZE)


def _extract_production_hectares(info_node):
    """Hectares currently in production."""
    return _extract_hectares(info_node, PRODUCTION_SIZE)
def _extract_negotiation_status(info_node):
    """Map the raw negotiation-status text onto one of the Deal constants.

    Returns None when the field is missing; raises RuntimeError for an
    unrecognized status so the caller can skip the node.
    Idiom fix: use the `in` operator instead of calling `__contains__`.
    """
    status_container = _get_node_data(info_node, NEGOTIATION_STATUS)
    if status_container == NO_VALUE:
        return None
    # Same precedence as before: Failed, then Concluded, then Intended.
    for status in (Deal.FAILED, Deal.CONCLUDED, Deal.INTENDED):
        if status in status_container:
            return status
    raise RuntimeError("Unrecognized negotiation status in node: " + status_container)


def _extract_implementation_status(info_node):
    """Map the raw implementation-status text onto one of the Deal constants.

    Returns None when the field is missing; raises RuntimeError for an
    unrecognized status.
    """
    implementation_status_container = _get_node_data(info_node, IMPLEMENTATION_STATUS)
    if implementation_status_container == NO_VALUE:
        return None
    for status in (Deal.IN_OPERATION, Deal.STARTUP_PHASE,
                   Deal.PROJECT_NOT_STARTED, Deal.PROJECT_ABANDONED):
        if status in implementation_status_container:
            return status
    raise RuntimeError("Unrecognized implementation status in node: " + implementation_status_container)
def _extract_target_country(info_node):
    """Return the stripped text of the target-country subnode.

    Raises (via _raise_error) when the node has no such field.
    Fix: iterate the Element directly — Element.getchildren() was deprecated
    and removed in Python 3.9.
    """
    for subnode in info_node:
        if subnode.attrib[PROPERTY] == TARGET_COUNTRY:
            return subnode.text.strip()
    _raise_error("country", "not found")
def _extract_date(info_node):
    """Return the most recent 4-digit year found in the negotiation-status field.

    2016/03/29: pattern of negotiation_status node was "[YYYY] STATUS (...)"
    or "STATUS (...)". 2020/03/19: pattern changed — entries are now
    "|"-separated and each may start with "YYYY#...".

    Returns the year as a string, or None when the field is absent, no year
    can be parsed, or the best candidate lies in the future (> 2020).
    Robustness fix: non-numeric prefixes (e.g. a status word) used to reach
    int() below and crash — they are now filtered out with isdigit().
    """
    date_container = _get_node_data(info_node, NEGOTIATION_STATUS)
    # return None if there is no negotiation_status value
    if date_container == NO_VALUE:
        return None
    candidate_years = []
    for negotiation in date_container.split("|"):
        potential_year = negotiation.split("#")[0][:4]
        if potential_year.isdigit():
            candidate_years.append(potential_year)
    if not candidate_years:
        return None
    # Equal-length digit strings sort numerically, so max() picks the newest.
    negotiation_status_year = max(candidate_years)
    if int(negotiation_status_year) > 2020:
        return None
    return negotiation_status_year
def _lower_lenght(elem1, elem2):
if len(elem1) <= len(elem2):
return len(elem1)
return len(elem2)
def _find_index_all_occurrences_of_a_sequence(string, sequence):
result = []
last_found_pos = 0
while last_found_pos != -1:
last_found_pos = string.find(sequence, last_found_pos+1)
if last_found_pos != -1:
result.append(last_found_pos)
return result
def _get_node_data(info_node, tag):
for subnode in info_node.getchildren():
if subnode.attrib[PROPERTY] == tag:
return _remove_blanks(subnode.text)
return NO_VALUE
def _remove_blanks(text):
if text is None:
return NO_VALUE
result = text.replace("\t", "")
result = result.replace("\n", "")
result = result.replace("\r", "")
return result
def _extract_sectors(info_node):
    """Return the list of sector names declared for the deal.

    Returns ['Unknown'] when the intention field is absent; raises (via
    _raise_error) when the field exists but yields no usable sector.
    Fix: the regex pattern is now a raw string — '\\d' in a normal string is
    an invalid escape sequence (DeprecationWarning, SyntaxError in future).
    """
    text = _get_node_data(info_node, SECTORS)
    if text is None:
        _raise_error("sectors", "not found")
        return  # _raise_error always raises; kept as a safety net
    elif text == NO_VALUE:
        return ['Unknown']  # Unknown sector
    # Strip year digits and markup noise, then normalise separators to commas.
    text = re.sub(r'\d', '', text)
    text = text.replace("#", "").replace("-", "").replace("current", "")
    text = text.replace("|", ",")
    result = []
    for candidate in text.split(","):
        if candidate:  # skip empty fragments left by the cleanup
            result.append(candidate.replace(" ", ""))  # Should we replace blank space?
    if len(result) == 0:
        _raise_error("sectors", "not found")
    return result
def _raise_error(concrete_filed, cause):
raise RuntimeError("Error while parsing {0} in a node. Cause: {1}. Node will be ignored".format(concrete_filed,
cause))
| {
"repo_name": "landportal/landbook-importers",
"path": "LandMatrix_Importer/es/weso/landmatrix/translator/deals_builder.py",
"copies": "1",
"size": "6727",
"license": "mit",
"hash": -6652325931910377000,
"line_mean": 34.219895288,
"line_max": 115,
"alpha_frac": 0.6276200387,
"autogenerated": false,
"ratio": 3.743461324429605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9846811690653035,
"avg_score": 0.004853934495314122,
"num_lines": 191
} |
__author__ = 'Dani'
from ..entities.deal import Deal
class DealsBuilder(object):
    """Namespace object that converts an ElementTree node into a Deal entity."""

    def __init__(self):
        pass

    @staticmethod
    def turn_node_into_deal_object(info_node):
        """
        It receives a node (Element of ElementTree) and returns a deal object containing the needed data
        """
        deal = Deal()
        deal.target_country = _extract_target_country(info_node)
        deal.date = _extract_date(info_node)
        deal.production_hectares = _extract_production_hectares(info_node)
        deal.intended_hectares = _extract_intended_hectares(info_node)
        deal.contract_hectares = _extract_contract_hectares(info_node)
        deal.sectors = _extract_sectors(info_node)
        deal.negotiation_status = _extract_negotiation_status(info_node)
        return deal
#############################################################################################
#                                        FUNCTIONS                                          #
#############################################################################################
### Constants
PROPERTY = "name"  # XML attribute naming each data subnode
TARGET_COUNTRY = "target_country"  # field: country the deal targets
SECTORS = "intention"  # field: declared sectors/intentions
NEGOTIATION_STATUS = "negotiation_status"  # field: negotiation stage (also carries bracketed years)
NO_VALUE = "None"  # sentinel returned when a field is missing
INTENDED_SIZE = "intended_size"  # field: announced hectares
CONTRACT_SIZE = "contract_size"  # field: contracted hectares
PRODUCTION_SIZE = "production_size"  # field: productive hectares
# Functions
def _extract_hectares(info_node, hectares_type):
    """Return the hectare field named *hectares_type* as an int, or None when
    the field is absent or non-numeric."""
    raw = _get_node_data(info_node, hectares_type)
    if raw != NO_VALUE and raw.isdigit():
        return int(raw)
    return None


def _extract_intended_hectares(info_node):
    """Hectares announced as intended for the deal."""
    return _extract_hectares(info_node, INTENDED_SIZE)


def _extract_contract_hectares(info_node):
    """Hectares actually under contract."""
    return _extract_hectares(info_node, CONTRACT_SIZE)


def _extract_production_hectares(info_node):
    """Hectares currently in production."""
    return _extract_hectares(info_node, PRODUCTION_SIZE)
def _extract_negotiation_status(info_node):
    """Map the raw negotiation-status text onto one of the Deal constants.

    Returns None when the field is missing; raises RuntimeError for an
    unrecognized status. Idiom fix: use the `in` operator instead of calling
    `__contains__` directly.
    """
    status_container = _get_node_data(info_node, NEGOTIATION_STATUS)
    if status_container == NO_VALUE:
        return None
    # Same precedence as before: Failed, then Concluded, then Intended.
    for status in (Deal.FAILED, Deal.CONCLUDED, Deal.INTENDED):
        if status in status_container:
            return status
    raise RuntimeError("Unrecognized negotiation status in node: " + status_container)
def _extract_target_country(info_node):
    """Return the raw text of the target-country subnode.

    NOTE: the text is returned unstripped, unlike the LandMatrix_Importer
    variant of this module; preserved to avoid changing downstream matching.
    Fix: iterate the Element directly — Element.getchildren() was deprecated
    and removed in Python 3.9.
    """
    for subnode in info_node:
        if subnode.attrib[PROPERTY] == TARGET_COUNTRY:
            return subnode.text
    _raise_error("country", "not found")
def _extract_date(info_node):
    """Return the most recent bracketed 4-digit year in the negotiation-status
    text, as an int.

    The field is expected to contain fragments such as "[2012] Concluded ...";
    every fully closed "[...]" span is collected and the highest value that is
    exactly four digits wins. Returns None when the field is missing or holds
    no valid year.
    """
    #Looking for the target text
    date_container = _get_node_data(info_node, NEGOTIATION_STATUS)
    if date_container == NO_VALUE:
        return None
    # Obtaining possible dates: pair up "[" and "]" positions; only the
    # first `complete_pairs` bracket pairs are considered.
    aperture_sign_list = _find_index_all_occurrences_of_a_sequence(date_container, "[")
    closure_sign_list = _find_index_all_occurrences_of_a_sequence(date_container, "]")
    complete_pairs = _lower_lenght(aperture_sign_list, closure_sign_list)
    candidate_dates = []
    for i in range(0, complete_pairs):
        candidate_dates.append(date_container[aperture_sign_list[i] + 1:closure_sign_list[i]])
    # Checking if they are valid dates and returning the highest
    result = -1
    for a_date in candidate_dates:
        if len(a_date) == 4 and a_date.isdigit():
            int_date = int(a_date)
            if int_date > result:
                result = int_date
    if result == -1:
        return None
    else:
        return result
def _lower_lenght(elem1, elem2):
if len(elem1) <= len(elem2):
return len(elem1)
return len(elem2)
def _find_index_all_occurrences_of_a_sequence(string, sequence):
result = []
last_found_pos = 0
while last_found_pos != -1:
last_found_pos = string.find(sequence, last_found_pos+1)
if last_found_pos != -1:
result.append(last_found_pos)
return result
def _get_node_data(info_node, tag):
for subnode in info_node.getchildren():
if subnode.attrib[PROPERTY] == tag:
return _remove_blanks(subnode.text)
return NO_VALUE
def _remove_blanks(text):
result = text.replace("\t", "")
result = result.replace("\n", "")
result = result.replace("\r", "")
return result
def _extract_sectors(info_node):
    """Return the list of sector names for the deal, or ['Unknown'] when the
    intention field is absent. Raises (via _raise_error) when the field
    exists but yields no usable sector."""
    #Looking for text
    text = _get_node_data(info_node, SECTORS)
    if text is None:
        _raise_error("sectors", "not found")
        return  # unreachable: _raise_error always raises
    if text == NO_VALUE:
        return ['Unknown']  # Unknown sector
    sectors = [fragment.replace(" ", "")
               for fragment in text.split(",")
               if fragment]
    if not sectors:
        _raise_error("sectors", "not found")
    return sectors
def _raise_error(concrete_filed, cause):
raise RuntimeError("Error while parsing {0} in a node. Cause: {1}. Node will be ignored".format(concrete_filed,
cause))
| {
"repo_name": "weso/landportal-importers",
"path": "LandMatrixExtractor/es/weso/landmatrix/translator/deals_builder.py",
"copies": "1",
"size": "5362",
"license": "unlicense",
"hash": 1197367024005783600,
"line_mean": 30.3567251462,
"line_max": 115,
"alpha_frac": 0.6083550914,
"autogenerated": false,
"ratio": 3.622972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4731328064372973,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dani'
from es.weso.oecdextractor.translator.path_object_pair import PathObjectPair
import json
import os
import codecs
class JsonLoader(object):
    """Loads every *.json dataset file found under the configured base dir."""

    def __init__(self, log, config):
        self._log = log
        self._config = config

    def run(self):
        """
        It must return as many json objects as datasets we have

        Each entry is a PathObjectPair of (absolute path, parsed JSON object).
        """
        result = []
        base_dir = self._config.get("DATASETS", "base_dir")
        for candidate_file in os.listdir(base_dir):
            if os.path.splitext(candidate_file)[1] == ".json":
                full_path = base_dir + "/" + candidate_file
                result.append(PathObjectPair(os.path.abspath(full_path),
                                             self._turn_file_into_json_object(full_path)))
        return result

    @staticmethod
    def _turn_file_into_json_object(path_to_file):
        """Parse the file at *path_to_file* and return the resulting JSON object.

        Fixes: the file handle is now closed even when parsing raises
        (context manager) and a stray trailing `pass` statement was removed.
        """
        with codecs.open(path_to_file) as json_file:
            return json.loads(json_file.read())
| {
"repo_name": "weso/landportal-importers",
"path": "OECD_Importer/es/weso/oecdextractor/translator/json_loader.py",
"copies": "2",
"size": "1064",
"license": "unlicense",
"hash": 8301913604565635000,
"line_mean": 27,
"line_max": 110,
"alpha_frac": 0.5845864662,
"autogenerated": false,
"ratio": 3.786476868327402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027066956152828732,
"num_lines": 38
} |
__author__ = 'Dani'
from es.weso.util.excell_utils import is_empty_cell, content_starts_in_second_column
from es.weso.translator.parser.parsed_entities import ParsedDate
class DatesParser(object):
    """Locates the dates header row of a spreadsheet sheet and parses it into
    ParsedDate objects (string value plus the begin/end column span each
    date covers).

    NOTE(review): `sheet` is assumed to behave like an xlrd Sheet (row(),
    nrows, ncols) — confirm against the caller.
    """

    def __init__(self, sheet):
        self.sheet = sheet
        self.row = None    # filled by run(): the raw dates row
        self.dates = []    # filled by run(): the ParsedDate results

    def run(self):
        """Locate the dates row, parse it, and return the list of ParsedDate."""
        self.row = self._look_for_dates_row()
        self._parse_dates()
        # for date in self.dates:
        #     print date.string_date, date.beg_col, date.end_col
        return self.dates

    def _parse_dates(self):
        """Split self.row into spans: each span starts at a non-empty cell and
        extends over the empty cells that follow it."""
        # Skip leading empty cells; cursor indexes cells within the row.
        cursor = 0
        while is_empty_cell(self.row[cursor]):
            cursor += 1
        # First content found: remember where the current date span starts.
        index_begin = cursor
        cursor += 1
        while cursor <= self.sheet.ncols:
            # Advance while cells are empty. A non-empty cell means we reached
            # the next date; for the last date there is no such cell, so the
            # first half of the condition (end of row) stops the scan instead.
            while cursor < self.sheet.ncols and is_empty_cell(self.row[cursor]):
                cursor += 1
            new_date = self._build_parsed_date(index_begin, cursor - 1)
            index_begin = cursor
            cursor += 1
            self.dates.append(new_date)

    def _look_for_dates_row(self):
        """
        We should look for the second row with content starting in the second
        col. The first row that starts with content in the second col is the
        indicators one
        """
        appropiate_rows_counter = 0
        for i in range(0, self.sheet.nrows):
            candidate_row = self.sheet.row(i)
            if content_starts_in_second_column(candidate_row):
                appropiate_rows_counter += 1
                if appropiate_rows_counter == 2:
                    return candidate_row
        raise RuntimeError("No dates row found. Impoissible to parse file")

    def _build_parsed_date(self, index_begin, index_end):
        """Create a ParsedDate covering columns [index_begin, index_end]."""
        new_date = ParsedDate()
        new_date.beg_col = index_begin
        new_date.end_col = index_end
        new_date.string_date = self.row[index_begin].value
        return new_date
| {
"repo_name": "weso/landportal-importers",
"path": "IpfriExtractor/es/weso/translator/parser/dates_parser.py",
"copies": "2",
"size": "2597",
"license": "unlicense",
"hash": -2040164765738665000,
"line_mean": 38.9538461538,
"line_max": 120,
"alpha_frac": 0.5506353485,
"autogenerated": false,
"ratio": 4.243464052287582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019073471674251184,
"num_lines": 65
} |
__author__ = 'Dani'
from es.weso.util.excell_utils import is_empty_cell, content_starts_in_second_column
from es.weso.translator.parser.parsed_entities import ParsedIndicator
class IndicatorsParser(object):
    """Locates the indicators header row of a spreadsheet sheet and parses it
    into ParsedIndicator objects (name plus begin/end column span).

    NOTE(review): `sheet` is assumed to behave like an xlrd Sheet (row(),
    nrows, ncols) — confirm against the caller.
    """

    def __init__(self, sheet):
        self.sheet = sheet
        self.row = None  # Complete when running
        self.indicators = []  # Complete when running

    def run(self):
        """Locate the indicators row, parse it, and return the ParsedIndicator list."""
        self.row = self._look_for_indicators_row()
        self.parse_indicators()
        # for indicator in self.indicators:
        #     print indicator.name, indicator.beg_col, indicator.end_col
        return self.indicators

    def parse_indicators(self):
        """Split self.row into spans: each span starts at a non-empty cell and
        extends over the empty cells that follow it."""
        # Skip leading empty cells; cursor indexes cells within the row.
        cursor = 0
        while is_empty_cell(self.row[cursor]):
            cursor += 1
        # First content found: remember where the current span starts.
        index_begin = cursor
        cursor += 1
        while cursor <= self.sheet.ncols:
            # Advance while cells are empty. A non-empty cell means we reached
            # the next indicator; for the last one the end of the row (first
            # half of the condition) stops the scan instead.
            while cursor < self.sheet.ncols and is_empty_cell(self.row[cursor]):
                cursor += 1
            new_indicator = self._build_parsed_indicator(index_begin, cursor - 1)
            index_begin = cursor
            cursor += 1
            self.indicators.append(new_indicator)

    def _build_parsed_indicator(self, index_beg, index_end):
        """Create a ParsedIndicator covering columns [index_beg, index_end].

        NOTE(review): .encode("utf-8") yields bytes on Python 3 — this looks
        like Python 2 era code; confirm the consumer expects a byte string.
        """
        new_indicator = ParsedIndicator()
        new_indicator.name = self.row[index_beg].value.encode("utf-8")
        new_indicator.beg_col = index_beg
        new_indicator.end_col = index_end
        return new_indicator

    def _look_for_indicators_row(self):
        """
        It looks like the indicator row is the first one with data in the second col.
        It makes sense, because, as main header, should be over date and, for sure,
        over concrete data observations. It also makes sense to start in the second
        col, not in the first, because first is reserved to country_names.
        We should trust in that theory to locate the indicators row.
        """
        for i in range(0, self.sheet.nrows):
            candidate_row = self.sheet.row(i)
            if content_starts_in_second_column(candidate_row):
                return candidate_row
        raise RuntimeError("No indicators row found. Impossible to parse file")
| {
"repo_name": "landportal/landbook-importers",
"path": "old-importers/IpfriExtractor/es/weso/translator/parser/indicators_parser.py",
"copies": "2",
"size": "2855",
"license": "mit",
"hash": 860681951718048300,
"line_mean": 39.7857142857,
"line_max": 120,
"alpha_frac": 0.5859894921,
"autogenerated": false,
"ratio": 4.399075500770416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018699175027484145,
"num_lines": 70
} |
__author__ = 'Dani'
from lpentities.year_interval import YearInterval
from lpentities.interval import Interval
def get_model_object_time_from_parsed_string(original_time):
    """Turn a parsed time cell into a model time object.

    Strings containing "-" (e.g. "1998-2002" or "98-02") become an Interval;
    anything else is treated as a single year and becomes a YearInterval.
    """
    str_time = str(original_time).replace(" ", "")  # we could already receive a str, but we need to ensure it
    if "-" in str_time:
        return _get_model_object_interval(str_time)
    else:
        return _get_model_object_year_interval(str_time)


def _get_model_object_interval(str_time):
    """Build an Interval from a "start-end" string.

    Consistency fix: the two-digit-year expansion is now applied to the start
    year as well as the end year, so "98-02" yields 1998-2002 instead of
    98-2002. Four-digit start years are unaffected (the transform returns
    them unchanged), so existing well-formed inputs behave identically.
    """
    date_array = str_time.split("-")
    start_time = _transform_twodigited_date_into_forudigited_if_needed(date_array[0])
    end_time = _transform_twodigited_date_into_forudigited_if_needed(date_array[1])
    return Interval(start_time=start_time, end_time=end_time)


def _get_model_object_year_interval(str_time):
    """Build a YearInterval from a single-year string (tolerates "2005.0")."""
    return YearInterval(year=int(float(str_time)))
def _transform_twodigited_date_into_forudigited_if_needed(indef_digited_str):
"""
We are turning the string into an int. Depending on its value, we add a quantity to that int, obtaining the year.
Examples:
receiving 10:
10 + 2000 = 2010. return 2010 as year
receiving 98:
98 + 1900 = 1998. return 1998 as a year
"""
if len(indef_digited_str) == 4:
return int(indef_digited_str)
d2f = int(indef_digited_str)
if d2f < 50:
d2f += 2000
elif d2f >= 50:
d2f += 1900
return d2f
# The function will stop working in 2050... | {
"repo_name": "landportal/landbook-importers",
"path": "old-importers/IpfriExtractor/es/weso/translator/object_builder/dates_builder.py",
"copies": "2",
"size": "1407",
"license": "mit",
"hash": -1075512433132458900,
"line_mean": 30.2888888889,
"line_max": 117,
"alpha_frac": 0.6609808102,
"autogenerated": false,
"ratio": 3.227064220183486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9882464843659232,
"avg_score": 0.001116037344850904,
"num_lines": 45
} |
__author__ = 'Dani'
import requests
class CountriesXmlExtractor(object):
    """Fetches, for every country known to the reconciler, the XML document
    exposed by the configured API endpoint."""

    def __init__(self, log, config, reconciler):
        self._log = log
        self._config = config
        self._reconciler = reconciler
        self._query_pattern = self._config.get("API", "request_pattern")
        self._replace_by_iso = self._config.get("API", "text_to_replace")

    def run(self):
        """
        It returns a list of strings containing xml trees that represents the available info of every country
        available in the reconciler
        """
        # For a quick manual test, slice get_all_countries() to e.g. 15 items
        # instead of requesting every country from the API.
        return [self._get_xml_of_a_country(country)
                for country in self._reconciler.get_all_countries()]

    def _get_xml_of_a_country(self, country):
        """Fetch and return the raw XML body for a single country (ISO3 code)."""
        string_request = self._query_pattern.replace(self._replace_by_iso, country.iso3)
        self._log.info("Tracking data from " + country.iso3 + "...")
        return requests.get(string_request).content
| {
"repo_name": "weso/landportal-importers",
"path": "FAOGender_Importer/es/weso/faogenderextractor/extractor/xml_management/countries_xml_extractor.py",
"copies": "2",
"size": "1254",
"license": "unlicense",
"hash": -9043915704906011000,
"line_mean": 27.5,
"line_max": 109,
"alpha_frac": 0.6108452951,
"autogenerated": false,
"ratio": 3.8,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02534809386886979,
"num_lines": 44
} |
__author__ = 'Dani'
class KeyDicts(object):
    """Canonical indicator key constants for the Land Matrix importer.

    "N*" keys count deals; "HA*" keys count hectares. The numeric suffix of
    an N/HA pair matches (e.g. TOTAL_DEALS = "N1" pairs with
    HECTARES_TOTAL_DEALS = "HA1").
    """
    #INDICATOR KEYS
    # Deal counts by negotiation/implementation status
    TOTAL_DEALS = "N1"
    HECTARES_TOTAL_DEALS = "HA1"
    CONCLUDED_DEALS = "N2"
    HECTARES_CONTRACT_DEALS = "HA2"
    INTENDED_DEALS = "N3"
    HECTARES_INTENDED_DEALS = "HA3"
    FAILED_DEALS = "N4"
    HECTARES_FAILED_DEALS = "HA4"
    IN_PRODUCTION_DEALS = "N5"
    HECTARES_PRODUCTION_DEALS = "HA5"
    # Deal counts by sector
    AGRICULTURE_DEALS = "N6"
    CONSERVATION_DEALS = "N7"
    FORESTRY_DEALS = "N8"
    INDUSTRY_DEALS = "N9"
    RENEWABLE_ENERGY_DEALS = "N10"
    TOURISM_DEALS = "N11"
    OTHER_DEALS = "N12"
    UNKNOWN_DEALS = "N13"
    MINING_DEALS = "N23"
    LANDSPECULATION_DEALS = "N24"
    OIL_GAS_DEALS = "N25"
    # Deal counts by agriculture / forestry sub-sector
    BIOFUELS_DEALS = "N14"
    FOOD_CROPS_DEALS = "N15"
    LIVESTOCK_DEALS = "N16"
    NON_FOOD_AGRICULTURAL_COMMODITIES_DEALS = "N17"
    AGRIUNSPECIFIED_DEALS = "N18"
    FOODER_DEALS = "N22"
    FOR_WOOD_AND_FIBRE_DEALS = "N19"
    FOR_CARBON_SEQUESTRATION_REDD_DEALS = "N20"
    FORESTUNSPECIFIED_DEALS = "N21"
    # Hectare totals mirroring the sector/sub-sector keys above
    HECTARES_AGRICULTURE_DEALS = "HA6"
    HECTARES_CONSERVATION_DEALS = "HA7"
    HECTARES_FORESTRY_DEALS = "HA8"
    HECTARES_INDUSTRY_DEALS = "HA9"
    HECTARES_RENEWABLE_ENERGY_DEALS = "HA10"
    HECTARES_TOURISM_DEALS = "HA11"
    HECTARES_OTHER_DEALS = "HA12"
    HECTARES_UNKNOWN_DEALS = "HA13"
    HECTARES_MINING_DEALS = "HA23"
    HECTARES_LANDSPECULATION_DEALS = "HA24"
    HECTARES_OIL_GAS_DEALS = "HA25"
    HECTARES_BIOFUELS_DEALS = "HA14"
    HECTARES_FOOD_CROPS_DEALS = "HA15"
    HECTARES_LIVESTOCK_DEALS = "HA16"
    HECTARES_NON_FOOD_AGRICULTURAL_COMMODITIES_DEALS = "HA17"
    HECTARES_AGRIUNSPECIFIED_DEALS = "HA18"
    HECTARES_FOODER_DEALS = "HA22"
    HECTARES_FOR_WOOD_AND_FIBRE_DEALS = "HA19"
    HECTARES_FOR_CARBON_SEQUESTRATION_REDD_DEALS = "HA20"
    HECTARES_FORESTUNSPECIFIED_DEALS = "HA21"
| {
"repo_name": "landportal/landbook-importers",
"path": "LandMatrix_Importer/es/weso/landmatrix/translator/keys_dicts.py",
"copies": "1",
"size": "1835",
"license": "mit",
"hash": 2972909535001847300,
"line_mean": 25.2142857143,
"line_max": 61,
"alpha_frac": 0.6517711172,
"autogenerated": false,
"ratio": 2.0711060948081266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8143301207432122,
"avg_score": 0.015915200915200916,
"num_lines": 70
} |
__author__ = 'Dani'
class KeyMapper(object):
    """Translates dataset-specific column keys into the canonical indicator
    keys used by the importer, for the GID2 and GIDDB2012 OECD datasets."""

    # Canonical indicator keys
    SIGI_KEY = "S"
    SIGI_RANK_KEY = "SR"
    FAMILY_CODE_KEY = "FC"
    FAMILY_CODE_RANK_KEY = "FCR"
    CIVIL_KEY = "C"
    CIVIL_RANK_KEY = "CR"
    ENTITLEMENTS_KEY = "E"
    ENTITLEMENTS_RANK_KEY = "ER"
    LAND_KEY = "L"
    INHERITANCE_GENERAL_KEY = "IG"
    INHERITANCE_DAUGHTERS_KEY = "ID"
    INHERITANCE_WIDOWS_KEY = "IW"
    #################
    # Raw keys of the GID2 dataset
    _GID2_ID = "GID2"
    _GID2_SIGI_KEY = "SIGI_VAL"
    _GID2_SIGI_RANK_KEY = "SIGI_RANK"
    _GID2_FAMILY_CODE_KEY = "FC_VALUE"
    _GID2_FAMILY_CODE_RANK_KEY = "FC_RANK"
    _GID2_CIVIL_KEY = "CIVIL_VALUE"
    _GID2_CIVIL_RANK_KEY = "CIVIL_RANK"
    _GID2_ENTITLEMENTS_KEY = "OWR_VALUE"
    _GID2_ENTITLEMENTS_RANK_KEY = "OWR_RANK"
    _GID2_LAND_KEY = "OWR_1"
    _GID2_INHERITANCE_GENERAL_KEY = "FC_2"
    # _GID2_INHERITANCE_DAUGHTERS_KEY = "ID"  Does not exist in GID2
    # _GID2_INHERITANCE_WIDOWS_KEY = "IW"
    ##########
    # Raw keys of the GIDDB2012 dataset
    _GIDDB_2012_ID = "GIDDB2012"
    _GIDDB2012_SIGI_KEY = "SIGI_VALUE"
    _GIDDB2012_SIGI_RANK_KEY = "SIGI_RANK"
    _GIDDB2012_FAMILY_CODE_KEY = "FC_VALUE"
    _GIDDB2012_FAMILY_CODE_RANK_KEY = "FC_RANK"
    _GIDDB2012_CIVIL_KEY = "CIVL_VALUE"
    _GIDDB2012_CIVIL_RANK_KEY = "CIVL_RANK"
    _GIDDB2012_ENTITLEMENTS_KEY = "RR_VALUE"
    _GIDDB2012_ENTITLEMENTS_RANK_KEY = "RR_RANK"
    _GIDDB2012_LAND_KEY = "RR_1"
    _GIDDB2012_INHERITANCE_GENERAL_KEY = "FC_4"
    _GIDDB2012_INHERITANCE_DAUGHTERS_KEY = "FC_4_1"
    _GIDDB2012_INHERITANCE_WIDOWS_KEY = "FC_4_2"

    _key_dict = {}  # lazily built: dataset id -> {raw key -> canonical key}

    @staticmethod
    def map_key(key, dataset_id):
        """Return the canonical key for raw *key* inside dataset *dataset_id*."""
        if not KeyMapper._key_dict:
            KeyMapper._build_key_dict()
        return KeyMapper._key_dict[dataset_id][key]

    @staticmethod
    def identify_dataset(dataset_url):
        """Return the dataset id embedded in *dataset_url*; raise when unknown."""
        for dataset_id in (KeyMapper._GID2_ID, KeyMapper._GIDDB_2012_ID):
            if dataset_id in dataset_url:
                return dataset_id
        raise RuntimeError("Dataset unrecognized. Imposibble to map indicator keys")

    @staticmethod
    def _build_key_dict():
        """Populate the lazily built raw-key translation tables."""
        KeyMapper._key_dict[KeyMapper._GID2_ID] = {
            KeyMapper._GID2_SIGI_KEY: KeyMapper.SIGI_KEY,
            KeyMapper._GID2_SIGI_RANK_KEY: KeyMapper.SIGI_RANK_KEY,
            KeyMapper._GID2_FAMILY_CODE_KEY: KeyMapper.FAMILY_CODE_KEY,
            KeyMapper._GID2_FAMILY_CODE_RANK_KEY: KeyMapper.FAMILY_CODE_RANK_KEY,
            KeyMapper._GID2_CIVIL_KEY: KeyMapper.CIVIL_KEY,
            KeyMapper._GID2_CIVIL_RANK_KEY: KeyMapper.CIVIL_RANK_KEY,
            KeyMapper._GID2_ENTITLEMENTS_KEY: KeyMapper.ENTITLEMENTS_KEY,
            KeyMapper._GID2_ENTITLEMENTS_RANK_KEY: KeyMapper.ENTITLEMENTS_RANK_KEY,
            KeyMapper._GID2_LAND_KEY: KeyMapper.LAND_KEY,
            KeyMapper._GID2_INHERITANCE_GENERAL_KEY: KeyMapper.INHERITANCE_GENERAL_KEY,
        }
        KeyMapper._key_dict[KeyMapper._GIDDB_2012_ID] = {
            KeyMapper._GIDDB2012_SIGI_KEY: KeyMapper.SIGI_KEY,
            KeyMapper._GIDDB2012_SIGI_RANK_KEY: KeyMapper.SIGI_RANK_KEY,
            KeyMapper._GIDDB2012_FAMILY_CODE_KEY: KeyMapper.FAMILY_CODE_KEY,
            KeyMapper._GIDDB2012_FAMILY_CODE_RANK_KEY: KeyMapper.FAMILY_CODE_RANK_KEY,
            KeyMapper._GIDDB2012_CIVIL_KEY: KeyMapper.CIVIL_KEY,
            KeyMapper._GIDDB2012_CIVIL_RANK_KEY: KeyMapper.CIVIL_RANK_KEY,
            KeyMapper._GIDDB2012_ENTITLEMENTS_KEY: KeyMapper.ENTITLEMENTS_KEY,
            KeyMapper._GIDDB2012_ENTITLEMENTS_RANK_KEY: KeyMapper.ENTITLEMENTS_RANK_KEY,
            KeyMapper._GIDDB2012_LAND_KEY: KeyMapper.LAND_KEY,
            KeyMapper._GIDDB2012_INHERITANCE_GENERAL_KEY: KeyMapper.INHERITANCE_GENERAL_KEY,
            KeyMapper._GIDDB2012_INHERITANCE_DAUGHTERS_KEY: KeyMapper.INHERITANCE_DAUGHTERS_KEY,
            KeyMapper._GIDDB2012_INHERITANCE_WIDOWS_KEY: KeyMapper.INHERITANCE_WIDOWS_KEY,
        }
| {
"repo_name": "landportal/landbook-importers",
"path": "old-importers/OECD_Importer/es/weso/oecdextractor/translator/indicator_key_mapper.py",
"copies": "2",
"size": "4120",
"license": "mit",
"hash": -2225165671274550000,
"line_mean": 39,
"line_max": 105,
"alpha_frac": 0.6165048544,
"autogenerated": false,
"ratio": 2.63764404609475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.425414890049475,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dani'
from .interval import Interval
class MonthInterval(Interval):
    """An Interval spanning exactly one month, rendered as "MM/YYYY"."""

    def __init__(self, year, month):
        self._year = year
        self._month = month
        arg_for_super = self.get_time_string()  # We do not have to call the method two times
        super(MonthInterval, self).__init__(Interval.MONTHLY, arg_for_super, arg_for_super)

    def __get_year(self):
        return self._year

    def __set_year(self, year):
        # Bug fix: a bare `except:` used to swallow the "yyyy format"
        # ValueError raised below and re-raise it as "must be an integer",
        # making the more precise message unreachable. The format check is
        # now separate and only conversion failures are caught.
        if len(str(year)) != 4:
            raise ValueError("Year must have yyyy format")
        try:
            self._year = int(year)
        except (TypeError, ValueError):
            raise ValueError("Year must be an integer")

    year = property(fget=__get_year,
                    fset=__set_year,
                    doc="The year of the given month")

    def __get_month(self):
        return self._month

    def __set_month(self, month):
        try:
            self._month = int(month)
        except (TypeError, ValueError):  # narrowed from a bare except
            raise ValueError("Month must be an integer")

    month = property(fget=__get_month,
                     fset=__set_month,
                     doc="The month of the observation")
def get_time_string(self):
str_month = str(self.month)
if len(str_month) == 1:
str_month = "0" + str_month
return str_month + "/" + str(self.year) | {
"repo_name": "landportal/landbook-importers",
"path": "LandPortalEntities/lpentities/month_interval.py",
"copies": "2",
"size": "1389",
"license": "mit",
"hash": 154360859136430660,
"line_mean": 27.3673469388,
"line_max": 93,
"alpha_frac": 0.5190784737,
"autogenerated": false,
"ratio": 4.0852941176470585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5604372591347059,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dani'
# Prefer the C-accelerated cElementTree implementation; fall back to the
# pure-Python ElementTree when the C extension is unavailable.
try:
    import xml.etree.cElementTree as ETree
except ImportError:
    # Catch only ImportError: the original bare `except:` would also mask
    # unrelated errors (and SystemExit/KeyboardInterrupt) raised at import time.
    import xml.etree.ElementTree as ETree
from ..entities.xml_register import XmlRegister
class XmlContentParser(object):
    """Parses a monthly XML report into an XmlRegister.

    The class-level constants are the XPath expressions of the indicator
    nodes tracked by the importer; each matched node is expected to hold a
    <value> child whose text is the indicator's value.
    """
    # (A stale commented-out copy of turn_xml_into_register's argument list
    # used to live here; removed as dead code.)

    BORNAGES = ".//iTopBornagesEff"
    CSJ = ".//iDomCvjDel"
    MUTATIONS = ".//iDomMutationsEff"
    TITRES_CREES = ".//iDomTitreCrees"
    REPERAGES = ".//iTopReperagesEff"
    REP_DES_PLANS = ".//iTopReproductionPlans"

    VALUE = "value"

    def __init__(self, log):
        """:param log: logger kept for use by callers/subclasses."""
        self._log = log

    def turn_xml_into_register(self, year, month, xml_content):
        """Build and return an XmlRegister for (year, month) from raw XML text."""
        tree = ETree.fromstring(xml_content)
        result = XmlRegister(year=year,
                             month=month,
                             bornages=self._look_for_field(tree, self.BORNAGES),
                             csj=self._look_for_field(tree, self.CSJ),
                             mutations=self._look_for_field(tree, self.MUTATIONS),
                             titres_crees=self._look_for_field(tree, self.TITRES_CREES),
                             reperages=self._look_for_field(tree, self.REPERAGES),
                             reproduction_des_plans=self._look_for_field(tree, self.REP_DES_PLANS)
                             )
        return result

    def _look_for_field(self, tree, field_to_look_for):
        """Return the text of the <value> child of the node matched by the XPath."""
        base_node = tree.find(field_to_look_for)
        return base_node.find(self.VALUE).text
| {
"repo_name": "landportal/landbook-importers",
"path": "old-importers/FoncierImporter/es/weso/foncier/importer/xml_management/xml_content_parser.py",
"copies": "2",
"size": "1900",
"license": "mit",
"hash": -7309660090854521000,
"line_mean": 34.1851851852,
"line_max": 98,
"alpha_frac": 0.5663157895,
"autogenerated": false,
"ratio": 3.3450704225352115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49113862120352114,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dani'
# Prefer the C-accelerated cElementTree implementation; fall back to the
# pure-Python ElementTree when the C extension is unavailable.
try:
    import xml.etree.cElementTree as ETree
except ImportError:
    # Catch only ImportError: the original bare `except:` would also mask
    # unrelated errors (and SystemExit/KeyboardInterrupt) raised at import time.
    import xml.etree.ElementTree as ETree
from ...entities.xml_entities import XmlRegister, IndicatorData
from lpentities.year_interval import YearInterval
from lpentities.interval import Interval
from ..keys_dict import KeysDict
from datetime import datetime
class XmlContentParser(object):
    """Parses FAO Gender XML responses into XmlRegister objects.

    Each response is expected to be the XML report of one country (identified
    by an "iso3" attribute on the root node). Indicator values are read from
    topic child nodes; the observation date(s) appear between brackets inside
    the node text.
    """

    ISO3_ATTR = "iso3"  # attribute of the root node that carries the country code

    def __init__(self, log, config, reconciler, responses, look_for_historical):
        """
        :param log: logger used for warnings about unparsable content.
        :param config: config object; read for HISTORICAL/first_valid_year.
        :param reconciler: resolves iso3 codes into country objects.
        :param responses: iterable of raw XML strings to parse.
        :param look_for_historical: when True, no date filter is applied.
        """
        self._log = log
        self._config = config
        self._reconciler = reconciler
        self._responses = responses
        self._look_for_historical = look_for_historical

    def run(self):
        """
        Return a list of XmlRegister objects containing all the info parsed
        from the responses; responses without usable data are skipped.
        """
        result = []
        for response in self._responses:
            parsed_response = self._process_response(response)
            if parsed_response is not None:
                result.append(parsed_response)
        return result

    def _process_response(self, response):
        """
        Return an XmlRegister object, or None if the response cannot be
        parsed or does not contain useful info.
        """
        tree = None
        try:
            tree = ETree.fromstring(response)
        except BaseException as e:
            # NOTE(review): e.message is Python 2 only; under Python 3 this
            # logging line would itself raise AttributeError.
            self._log.warning("Error while parsing a response. The importer will try to process the rest "
                              "of the data. Cause: " + e.message)
            return None
        return self._turn_tree_into_xml_register(tree)

    def _turn_tree_into_xml_register(self, tree):
        """Extract the tracked indicators of one country tree into an XmlRegister."""
        if self._is_empty_tree(tree):
            return None
        country = self._get_country_of_tree(tree)
        result = XmlRegister(country=country)
        try:
            # The next two local variables will be needed in the for loop.
            base_node_in_wich_to_look_for = tree.find("lang").find("topics").find("topic")  # Exploring the tree
            codes_to_look_for = [KeysDict.TOTAL_HOLDERS_CODE,
                                 KeysDict.WOMEN_HOLDERS_CODE,
                                 KeysDict.HOLDINGS_CO_OWNERSHIP_CODE,
                                 KeysDict.RURAL_HOUSEHOLDS_WOMEN_CODE]
            for code in codes_to_look_for:
                data_of_an_indicator = self._look_for_indicator_data(base_node_in_wich_to_look_for, code)
                if (not data_of_an_indicator is None) and self._pass_filters(data_of_an_indicator):
                    result.add_indicator_data(data_of_an_indicator)
            return result
        except BaseException as e:
            # NOTE(review): same Python-2-only e.message caveat as above.
            self._log.warning("Unable to track data from country {0}. Country will be ignored. Cause: {1}"
                              .format(country.iso3, e.message))
            return None

    def _pass_filters(self, data_of_an_indicator):
        """Return True if the indicator data passes the configured date filter."""
        if self._look_for_historical:
            return True
        # NOTE(review): because of the if/elif below, the call that lazily
        # initialises _target_date skips the date comparison entirely, so the
        # first indicator checked is always accepted — confirm this is intended.
        if not "_target_date" in self.__dict__:
            self._target_date = self._get_current_date()
        elif self._get_year_of_data_indicator(data_of_an_indicator) < self._target_date:
            return False
        return True

    def _get_current_date(self):
        # Despite the name, this reads the configured first valid year,
        # not today's date.
        return int(self._config.get("HISTORICAL", "first_valid_year"))

    @staticmethod
    def _get_year_of_data_indicator(data_of_an_indicator):
        """Return the (end) year of the data's date object as an int."""
        date_obj = data_of_an_indicator.date
        if type(date_obj) == YearInterval:
            return int(date_obj.year)
        elif type(date_obj) == Interval:
            return int(date_obj.end_time)
        else:
            # NOTE(review): concatenating a str with a type object raises
            # TypeError before this message can be built (needs str(type(...))).
            raise RuntimeError("Unexpected object date. Impossible to build observation from it: " + type(date_obj))

    def _look_for_indicator_data(self, base_node, code):
        """
        Return an IndicatorData object if it finds useful info. If not, return None
        """
        for node in base_node.getchildren():
            if node.attrib["code"] == code:
                return self._get_data_from_a_given_indicator_node(node, code)
        return None

    def _get_data_from_a_given_indicator_node(self, node, code):
        """Build an IndicatorData from a node's text, or None if value/date is missing."""
        node_value = self._look_for_node_value(node.text)
        node_date = self._look_for_node_date(node.text)
        if not (node_value is None or node_date is None):
            return IndicatorData(indicator_code=code,
                                 value=node_value,
                                 date=node_date)
        else:
            return None

    def _look_for_node_value(self, text):
        """Strip bracketed annotations and whitespace from the raw node text."""
        result = self._remove_text_between_certain_sequences(text, "(", ")")
        result = self._remove_text_between_certain_sequences(result, "[", "]")
        result = self._remove_text_between_certain_sequences(result, "<", ">")
        result = self._remove_text_between_certain_sequences(result, "&", ";")
        # NOTE(review): this pass duplicates the "<", ">" removal above — one
        # of the two likely targeted the encoded forms originally; confirm.
        result = self._remove_text_between_certain_sequences(result, "<", ">")  # They should be encoded...
        result = result.replace(" ", "")
        result = result.replace(",", "")
        result = result.replace("\n", "")
        result = result.replace("\t", "")
        result = result.replace("\r", "")
        return result

    @staticmethod
    def _remove_text_between_certain_sequences(text, beg, end):
        """Return text with every beg...end span (inclusive) removed."""
        result = text
        pos_beg = text.find(beg)
        pos_end = text.find(end)
        # NOTE(review): if `end` occurs before `beg`, the slice below removes
        # an unexpected span — presumably inputs are well-formed; verify.
        while not (pos_beg == -1 or pos_end == -1):
            result = result[:result.index(beg)] + result[result.index(end) + len(end):]
            pos_beg = result.find(beg)
            pos_end = result.find(end)
        return result

    def _look_for_node_date(self, text):
        """
        It returns an lpentities Interval if it finds an intervalic date, or a
        YearInterval object if it finds a single value. In case of not finding
        any valid date, returns None
        Assumptions:
            - Dates appear between "[]"
            - Sometimes, more things than dates appear between "[]"
            - We have got several date formats: XXXX, XX, XXXX-XXXX, XXXX-XX, XX-XX, XXXX/XXXX, XXXX/XX, XX/XX
        """
        beg_pos = text.find('[')
        end_pos = text.find(']')
        while not (beg_pos == -1 or end_pos == -1):
            string_date = text[beg_pos + 1:end_pos]
            # Trying to return a valid date
            if "-" in string_date:  # Interval (two values), ASCII hyphen
                years = string_date.split("-")
                return Interval(start_time=self._return_4_digits_year(years[0]),
                                end_time=self._return_4_digits_year(years[1]))
            elif "/" in string_date:
                years = string_date.split("/")
                return Interval(start_time=self._return_4_digits_year(years[0]),
                                end_time=self._return_4_digits_year(years[1]))
            elif "−" in string_date:  # Unicode minus (U+2212) separator
                years = string_date.split("−")
                return Interval(start_time=self._return_4_digits_year(years[0]),
                                end_time=self._return_4_digits_year(years[1]))
            elif self._is_single_year(string_date):  # YearInterval (single value)
                return YearInterval(year=int(string_date))
            # Preparing end_pos and beg_pos for potential next iteration
            beg_pos = text.find('[', beg_pos + 1)
            end_pos = text.find(']', end_pos + 1)
        # TODO: maybe make some noise in the log... but probably not. There are so many cases like this
        return None  # We reach this if no date was found

    @staticmethod
    def _is_single_year(year):
        """Return True when `year` is a plausible 2- or 4-digit year string."""
        if year.isdigit():
            try:
                int_year = int(year)
                if 0 < int_year < 100:  # Year of the form 01.02,...,98,99
                    return True
                elif 999 < int_year < 2150:  # Year of 4 digits. In 2150 it will stop working :)
                    return True
                else:  # Unknown year format
                    return False
            except ValueError:
                # Practically unreachable after isdigit(), kept defensively.
                return False
        else:
            return False

    def _return_4_digits_year(self, year_string):
        """Normalize a 2- or 4-digit year string to a 4-digit int (pivot at 30)."""
        if len(year_string) == 4:
            return int(year_string)
        elif len(year_string) == 2:
            final_digits = int(year_string)
            if final_digits < 30:  # So, in 2030, this will stop working.
                return 2000 + final_digits
            else:
                return 1900 + final_digits
        else:
            raise RuntimeError("Date with a number of digits different tahn 2 or 4: " + year_string)

    def _get_country_of_tree(self, tree):
        """Resolve the tree's iso3 root attribute into a country object."""
        iso3 = tree.attrib[self.ISO3_ATTR]
        return self._reconciler.get_country_by_iso3(iso3)

    @staticmethod
    def _is_empty_tree(tree):
        """
        Empty means that it hasn't got available data
        """
        if len(tree.getchildren()) == 0:
            return True
        else:
            return False

    pass
| {
"repo_name": "landportal/landbook-importers",
"path": "old-importers/FAOGender_Importer/es/weso/faogenderextractor/extractor/xml_management/xml_content_parser.py",
"copies": "2",
"size": "9000",
"license": "mit",
"hash": 75948351298875070,
"line_mean": 35.8852459016,
"line_max": 116,
"alpha_frac": 0.5671111111,
"autogenerated": false,
"ratio": 3.982300884955752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5549411996055753,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daniyar'
import itertools
from localsys.storage import db
import math
class score_model:
    """Ranking and score summaries computed over the 'scores' table.

    Rows in 'scores' carry (userid, score_type, score_value, date);
    score_type 1 holds risk scores and score_type 2 holds cost scores.
    """

    def check_closest_competitor(self, usrid, your_score):
        """Compute the user's ranked risk/cost scores and the closest competitor
        on each dimension.

        :param usrid: id of the user being ranked.
        :param your_score: iterable of score rows; assumed ordered by
            score_value ascending (see get_scores) — TODO confirm.
        :return: 16-tuple of the user's risk/cost values, companion scores,
            dates and ranks, plus the closest competitor's score, companion
            score, rank and date for both risk and cost.
        """
        value_risk = 0.0
        value_cost = 0.0
        value_risk_cost_contender = 2.0
        value_cost_risk_contender = 1.0
        prev_value_risk = 2.0
        prev_value_cost = 1.0
        next_value_risk = 2.0
        next_value_cost = 1.0
        prev_risk_rank = 0
        next_risk_rank = 0
        prev_cost_rank = 0
        next_cost_rank = 0
        next_value_risk_date = "2014-01-06"
        prev_value_risk_date = "2014-01-06"
        prev_value_cost_date = "2014-01-06"
        next_value_cost_date = "2014-01-06"
        date_risk = "2014-01-06"
        date_cost = "2014-01-06"
        checked = False
        u_rank_risk = 1
        u_rank_cost = 1
        users_risk = []
        users_cost = []
        risk_values = []
        cost_values = []
        contender_id_prev_risk = 1
        contender_id_next_risk = 1
        contender_id_prev_cost = 1
        contender_id_next_cost = 1
        # Two independent passes over the same rows: one for risk, one for cost.
        scores_1, scores_2 = itertools.tee(your_score)
        # First pass: risk rows. Count distinct better values until the user's
        # own row is seen; the first later row from another user is the "next"
        # (worse-ranked) neighbour.
        for row in scores_1:
            if row.score_type == 1:
                if row.userid == usrid:
                    if not checked:
                        value_risk = row.score_value
                        checked = True
                        date_risk = row.date
                else:
                    if not checked:
                        if not row.userid in users_risk:
                            if not float(row.score_value) in risk_values:
                                risk_values.append(float(row.score_value))
                                u_rank_risk += 1
                            prev_value_risk = row.score_value
                            prev_value_risk_date = row.date
                            contender_id_prev_risk = row.userid
                            users_risk.append(row.userid)
                    else:
                        if not row.userid in users_risk:
                            next_value_risk = row.score_value
                            next_value_risk_date = row.date
                            contender_id_next_risk = row.userid
                            break
        checked = False
        # Second pass: cost rows, same bookkeeping.
        for row in scores_2:
            if row.score_type == 2:
                if row.userid == usrid:
                    if not checked:
                        value_cost = row.score_value
                        checked = True
                        date_cost = row.date
                else:
                    if not checked:
                        if not row.userid in users_cost:
                            if not float(row.score_value) in cost_values:
                                users_cost.append(row.userid)
                                cost_values.append(float(row.score_value))
                                u_rank_cost += 1
                            prev_value_cost = row.score_value
                            prev_value_cost_date = row.date
                            contender_id_prev_cost = row.userid
                    else:
                        if not row.userid in users_cost:
                            next_value_cost = row.score_value
                            next_value_cost_date = row.date
                            contender_id_next_cost = row.userid
                            break
        # Ties with the user's own value were counted above; back them out.
        u_rank_risk -= risk_values.count(float(value_risk))
        u_rank_cost -= cost_values.count(float(value_cost))
        prev_risk_rank = u_rank_risk - 1
        if prev_risk_rank == 0:
            prev_value_risk = 9  # sentinel: nobody ranks above the user on risk
        prev_cost_rank = u_rank_cost - 1
        if prev_cost_rank == 0:
            prev_value_cost = 9  # sentinel: nobody ranks above the user on cost
        if next_value_risk == value_risk:
            next_risk_rank = u_rank_risk
        else:
            next_risk_rank = u_rank_risk + 1
        if next_value_cost == value_cost:
            next_cost_rank = u_rank_cost
        else:
            next_cost_rank = u_rank_cost + 1
        if prev_value_risk == value_risk:
            prev_risk_rank = u_rank_risk
        if prev_value_cost == value_cost:
            prev_cost_rank = u_rank_cost
        # Pick whichever neighbour (above or below) is numerically closer.
        if math.fabs(float(value_risk) - float(prev_value_risk)) <= math.fabs(
                float(next_value_risk) - float(value_risk)):
            closest_score_risk = prev_value_risk
            closest_ranking_risk = prev_risk_rank
            closest_date_risk = prev_value_risk_date
            contender_id_risk = contender_id_prev_risk
        else:
            closest_score_risk = next_value_risk
            closest_ranking_risk = next_risk_rank
            closest_date_risk = next_value_risk_date
            contender_id_risk = contender_id_next_risk
        if math.fabs(float(value_cost) - float(prev_value_cost)) <= math.fabs(
                float(next_value_cost) - float(value_cost)):
            closest_score_cost = prev_value_cost
            closest_ranking_cost = prev_cost_rank
            closest_date_cost = prev_value_cost_date
            contender_id_cost = contender_id_prev_cost
        else:
            closest_score_cost = next_value_cost
            closest_ranking_cost = next_cost_rank
            closest_date_cost = next_value_cost_date
            contender_id_cost = contender_id_next_cost
        # Companion scores: the matching cost on the best-risk date and vice
        # versa. NOTE(review): '&&' in these where-clauses relies on the SQL
        # dialect accepting '&&' as AND (MySQL does) — confirm for other DBs.
        value_risk_cost = db.select('scores', where="date=$date_risk&&score_type=2&&userid=$usrid", vars=locals())[0].score_value
        value_cost_risk = db.select('scores', where="date=$date_cost&&score_type=1&&userid=$usrid", vars=locals())[0].score_value
        res1 = db.select('scores', where="date=$closest_date_risk&&score_type=2&&userid=$contender_id_risk", vars=locals())
        if len(res1) > 0:
            value_risk_cost_contender = res1[0].score_value
        res2 = db.select('scores', where="date=$closest_date_cost&&score_type=1&&userid=$contender_id_cost", vars=locals())
        if len(res2) > 0:
            value_cost_risk_contender = res2[0].score_value
        return value_risk, value_risk_cost, date_risk, value_cost, value_cost_risk, date_cost, u_rank_risk, u_rank_cost, closest_score_risk, value_risk_cost_contender, \
            closest_ranking_risk, closest_date_risk, closest_score_cost, value_cost_risk_contender, closest_ranking_cost, closest_date_cost

    def find_best(self, scores):
        """Return the overall best risk and cost rows with companion scores.

        :param scores: iterable of score rows; assumed sorted so the best row
            of each type comes first — TODO confirm with caller's ordering.
        """
        date_risk = "N/A"
        value_risk = 0.0
        date_cost = "N/A"
        value_cost = 0.0
        id_risk = 1
        id_cost = 1
        scores_1, scores_2 = itertools.tee(scores)
        for row in scores_1:
            if row.score_type == 1:
                date_risk = row.date
                value_risk = row.score_value
                id_risk = row.userid
                break
        for row in scores_2:
            if row.score_type == 2:
                date_cost = row.date
                value_cost = row.score_value
                id_cost = row.userid
                break
        value_risk_cost = db.select('scores', where="date=$date_risk&&userid=$id_risk&&score_type=2", vars=locals())[0].score_value
        value_cost_risk = db.select('scores', where="date=$date_cost&&userid=$id_cost&&score_type=1", vars=locals())[0].score_value
        return value_risk, value_risk_cost, date_risk, value_cost, value_cost_risk, date_cost

    def find_avg(self):
        """Return (average risk score, average cost score) across all rows."""
        average_risk = db.query("SELECT AVG(score_value)as avg FROM scores WHERE score_type =1;")[0]
        average_cost = db.query("SELECT AVG(score_value)as avg FROM scores WHERE score_type =2;")[0]
        return average_risk.avg, average_cost.avg

    @classmethod
    def get_scores(cls, id_user):
        """Assemble the score-page payload for a user.

        Returns a dict of stringified best/closest/average figures, or
        implicitly None when the scores table is empty.
        """
        all_scores = db.select('scores', order="score_value ASC")
        # NOTE(review): `length` is never used, and len() on web.py's lazy
        # result may realise it — verify against the web.py version in use.
        length = len(all_scores)
        # scores_1 and scores_4 are currently unused tee halves.
        scores_1, scores_2, scores_3, scores_4 = itertools.tee(all_scores, 4)
        if len(all_scores) > 0:
            b_u_risk, b_u_risk_cost, b_u_risk_date, b_u_cost, b_u_cost_risk, b_u_cost_date, b_u_risk_rank, b_u_cost_rank,\
                c_risk, c_risk_cost, c_risk_rank, c_risk_when, c_pc, c_pc_risk, c_pc_rank, c_pc_when = \
                score_model().check_closest_competitor(id_user, scores_2)
            b_risk, b_risk_cost, b_risk_when, b_pc, b_pc_risk, b_pc_when = score_model().find_best(scores_3)
            avg_risk, avg_pc = score_model().find_avg()
            msg = {
                "b_u_risk": str(b_u_risk),
                "b_u_risk_cost": str(b_u_risk_cost),
                "b_u_risk_date": str(b_u_risk_date),
                "b_u_risk_rank": b_u_risk_rank,
                "b_u_cost": str(b_u_cost),
                "b_u_cost_risk": str(b_u_cost_risk),
                "b_u_cost_date": str(b_u_cost_date),
                "b_u_cost_rank": b_u_cost_rank,
                "c_risk": str(c_risk),
                "c_risk_cost": str(c_risk_cost),
                "c_risk_when": str(c_risk_when),
                "c_risk_rank": c_risk_rank,
                "c_pc": str(c_pc),
                "c_pc_risk": str(c_pc_risk),
                "c_pc_when": str(c_pc_when),
                "c_pc_rank": c_pc_rank,
                "b_risk": str(b_risk),
                "b_risk_cost": str(b_risk_cost),
                "b_risk_when": str(b_risk_when),
                "b_pc": str(b_pc),
                "b_pc_risk": str(b_pc_risk),
                "b_pc_when": str(b_pc_when),
                "avg_risk": str(avg_risk),
                "avg_pc": str(avg_pc)
            }
            return msg

    def multiple_score(self, policies):
        """Score each candidate policy entry with simulated risk and cost.

        NOTE(review): `simulation` is neither imported nor defined in this
        module, so this method raises NameError as written — confirm the
        intended import.
        """
        policy_costs_risks = []
        sim = simulation()
        for policy_entry in policies:
            result_entry = {}
            for key in policy_entry:
                if key == "data":
                    tmp_value = policy_entry[key]
                    #sim.set_multi_policy(tmp_value)
                    result_entry["risk"] = sim.calc_risk_prob(tmp_value)
                    result_entry["cost"] = sim.calc_prod_cost(tmp_value)
                else:
                    result_entry["id"] = policy_entry[key]
            policy_costs_risks.append(result_entry)
        # print('return cost '+ policy_costs_risks)
        return policy_costs_risks

    @classmethod
    def insert_score(self, user_id, score_type, score_value, date):
        # NOTE(review): declared @classmethod, yet the first parameter is
        # named `self`; it actually receives the class object.
        """Persist one score row for a user."""
        db.insert('scores', userid=user_id, score_type=score_type, score_value=score_value, date=date)
| {
"repo_name": "mapto/sprks",
"path": "models/score.py",
"copies": "1",
"size": "10424",
"license": "mit",
"hash": -3461555122772116500,
"line_mean": 43.547008547,
"line_max": 169,
"alpha_frac": 0.5090176516,
"autogenerated": false,
"ratio": 3.57598627787307,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9578904274320219,
"avg_score": 0.001219931030570185,
"num_lines": 234
} |
__author__ = 'Daniyar'
import numpy
class company:
    """Workforce model: employee-type mix plus location/device distributions.

    The location and device distributions are loaded from CSV matrices whose
    rows are locations/devices and whose columns are employee types.
    """

    employee_types = ['executives', 'desk', 'road']  # 'desk' == 'white-collar', 'road' == 'blue-collar'
    location_types = ['office', 'public', 'home']
    device_types = ['desktop', 'laptop', 'phone']

    employees_count = 2 * pow(10, 5)
    max_incident_cost = employees_count * pow(10, 3)

    def __init__(self, distribution=None, support=5):
        """
        :param distribution: share of each employee type, aligned with
            employee_types; defaults to [.1, .5, .4].
        :param support: number of support staff per one staff unit (1000 employees)
        """
        # Build the default per instance: the original `distribution=[.1, .5, .4]`
        # signature shared one mutable list across every company() call.
        self.distribution = [.1, .5, .4] if distribution is None else distribution
        self.support = support
        # 'desk' used for office worker, but changed in order to distinguish from office as location
        self.employees2locations = numpy.genfromtxt('static/data/locations.csv', delimiter=',')  # rows - locations, columns - employees
        self.employees2devices = numpy.genfromtxt('static/data/devices.csv', delimiter=',')  # rows - devices, columns - employees

    def get_size(self):
        # NOTE(review): self.size is only defined once set_size() has been
        # called; calling get_size() first raises AttributeError.
        return self.size

    def set_size(self, size):
        self.size = size

    def get_user_distribution(self):
        """Return the employee-type distribution (aligned with employee_types)."""
        return self.distribution

    def get_location_distribution(self, employee="any"):
        """Return the location mix for one employee type, or the company-wide
        mix (matrix dot distribution) when employee == "any"."""
        if employee == "any":
            return self.employees2locations.dot(self.distribution)
        else:
            index = self.employee_types.index(employee)
            return self.employees2locations[:, index]

    def get_device_distribution(self, employee="any"):
        """Return the device mix for one employee type, or the company-wide
        mix (matrix dot distribution) when employee == "any"."""
        if employee == "any":
            return self.employees2devices.dot(self.distribution)
        else:
            index = self.employee_types.index(employee)
            return self.employees2devices[:, index]

    def set_support(self, support):
        # Intentionally a no-op in the original; kept for interface compatibility.
        pass

    def get_support(self):
        return self.support
# Smoke test: print the default company's distributions (Python 2 script usage).
if __name__ == "__main__":
    co = company()
    print str(co.employee_types) + " " + str(co.get_user_distribution())
    print str(co.location_types) + " " + str(co.get_location_distribution())
    print str(co.device_types) + " " + str(co.get_device_distribution())
    print co.get_location_distribution("executives")
    print co.get_location_distribution("desk")
| {
"repo_name": "mapto/sprks",
"path": "models/company.py",
"copies": "1",
"size": "2117",
"license": "mit",
"hash": 3479118479493015000,
"line_mean": 36.1403508772,
"line_max": 135,
"alpha_frac": 0.6381672178,
"autogenerated": false,
"ratio": 3.8007181328545783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938885350654578,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dankle'
import subprocess
import datetime
from localq.Status import Status
class Job:
    """A command line job to run with a specified number of cores.

    Wraps a subprocess.Popen and tracks its lifecycle through the Status
    values PENDING -> RUNNING -> COMPLETED / FAILED / CANCELLED.
    """

    def __init__(self, job_id, cmd, num_cores=1, stdout=None, stderr=None, priority_method="fifo",
                 rundir=".", name=None, use_shell=False, dependencies=None):
        """
        :param job_id: unique integer id; also used as hash and for fifo priority.
        :param cmd: command to run (argument list, or string with use_shell=True).
        :param num_cores: number of cores the job requests.
        :param stdout: stdout file path; generated in rundir when falsy.
        :param stderr: stderr file path; generated in rundir when falsy.
        :param priority_method: only "fifo" is currently supported.
        :param rundir: working directory for the subprocess.
        :param name: display name; defaults to "localq-<jobid>".
        :param use_shell: passed through to subprocess.Popen(shell=...).
        :param dependencies: ids of jobs this job depends on.
        """
        self.jobid = job_id
        self.cmd = cmd
        self.num_cores = int(num_cores)
        self.stdout = stdout
        self.stderr = stderr
        self.priority_method = priority_method
        self.rundir = rundir
        self.proc = None
        self._failed_to_start = False
        self.start_time = None
        self.end_time = None
        self._status = Status.PENDING
        self.use_shell = use_shell
        # Fresh list per job: the original `dependencies=[]` default was one
        # mutable list shared by every Job instance.
        self.dependencies = dependencies if dependencies is not None else []
        if name is None:
            self.name = "localq-" + str(self.jobid)
        else:
            self.name = name

    def run(self):
        """Launch the job's process, appending its output to the stdout/stderr files."""
        now = self._get_formatted_now()
        if not self.stdout:
            self.stdout = str(self.rundir) + "/localq-" + str(self.jobid) + "-" + now + ".out"
        if not self.stderr:
            # NOTE(review): the default stderr path also ends in ".out", so both
            # streams append to the same file — confirm that is intended.
            self.stderr = str(self.rundir) + "/localq-" + str(self.jobid) + "-" + now + ".out"
        try:
            self.start_time = now
            self.proc = subprocess.Popen(self.cmd,
                                         shell=self.use_shell,
                                         stdout=open(self.stdout, 'a'),
                                         stderr=open(self.stderr, 'a'),
                                         cwd=self.rundir)
        except OSError:
            # An OSError is thrown if the executable file in 'cmd' is not found.
            # This needs to be captured "manually" and handled in self.status()
            # see https://docs.python.org/2/library/subprocess.html#exceptions
            self.proc = None
            self._failed_to_start = True

    def _get_formatted_now(self):
        """Return the current local time as a compact 'YYYYmmddTHHMMSS' string."""
        return datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%dT%H%M%S')

    def kill(self):
        """Send the job's process SIGTERM and mark the job CANCELLED."""
        if self.proc:
            try:
                self.proc.terminate()  # sends the SIGTERM signal
            except OSError:  # if job is finished or has been cancelled before, an OSError will be thrown
                pass
        self._status = Status.CANCELLED

    def update_status(self):
        """
        Update the job's status from the underlying process state. Returns nothing.
        """
        if self._status == Status.CANCELLED:
            pass
        elif self.proc:
            # update the process' returncode
            self.proc.poll()
            if self.proc.returncode is None:  # Running
                self._status = Status.RUNNING
            else:  # None < 0 evaluates as True on some systems, so need to make sure its not None
                if self.proc.returncode == 0:  # Completed successfully
                    if not self.end_time:
                        self.end_time = self._get_formatted_now()
                    self._status = Status.COMPLETED
                elif self.proc.returncode > 0:  # Failed
                    if not self.end_time:
                        self.end_time = self._get_formatted_now()
                    self._status = Status.FAILED
                elif self.proc.returncode < 0:  # Cancelled
                    if not self.end_time:
                        self.end_time = self._get_formatted_now()
                    # if job was cancelled, returncode will be -N if it received signal N (SIGKILL = 9)
                    self._status = Status.CANCELLED
        else:
            if self._failed_to_start:  # Failed to start (self.proc will equal None if this happens)
                if not self.end_time:
                    self.end_time = self._get_formatted_now()
                self._status = Status.FAILED
            else:
                self._status = Status.PENDING

    def status(self):
        """
        Update and return the job's status
        :return: One of Status.PENDING, CANCELLED, COMPLETED, FAILED or RUNNING
        """
        self.update_status()
        return self._status

    def priority(self):
        """Return the scheduling priority; fifo makes older (lower) ids run first."""
        if self.priority_method == "fifo":
            return -1 * self.jobid
        else:
            ## default to fifo
            return -1 * self.jobid

    def info(self):
        """Return a tab-separated one-line summary of the job."""
        return "\t".join(
            [str(self.jobid),
             str(self.priority()),
             str(self.status()),
             str(self.num_cores),
             str(self.start_time),
             str(self.end_time),
             str(self.name),
             str(" ".join(self.cmd))]
        )

    def __hash__(self):
        return self.jobid

    def __str__(self):
        return str(self.jobid) + "-" + str(self.name)
"repo_name": "johandahlberg/localq",
"path": "localq/Job.py",
"copies": "1",
"size": "4840",
"license": "mit",
"hash": -3762102176296180000,
"line_mean": 34.8518518519,
"line_max": 106,
"alpha_frac": 0.5169421488,
"autogenerated": false,
"ratio": 4.253075571177504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5270017719977504,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dan'
from usedMsgs import continueMsg1A , continueMsg1B , continueMsg2 , inputRequest
from askForVars import askForVars
from verifyGo import verifyGo
from math import fmod
def verifyData( dirList , varList , allData ) :
    """Interactive (Python 2) loop letting the user review and adjust the
    selected directories and variables until verifyGo confirms readiness.

    :param dirList: currently selected directories (displayed each round).
    :param varList: currently selected variables (askForVars may mutate it).
    :param allData: all available data folders.
        NOTE(review): allData is accepted but never read in this function.
    """
    done = False # verification loop not done
    printBeg = True # print first part of continue message
    printEnd = True # print second part of continue message
    curIncTry = 0 # the current number of incorrect tries is 0
    while not done :
        if printBeg :
            # -------------------------------------
            # print beginning of continue message
            # -------------------------------------
            print continueMsg1A
            # ----------------------------
            # print selected directories
            # ----------------------------
            curMsg = ' ' + 'curDirs' + ' : '
            for ( j , curVar ) in enumerate( dirList ) :
                if j != 0 :
                    curMsg += ' , '
                if int( fmod( j , 2 ) ) == 0 :
                    curMsg += '\n '
                curMsg += str(curVar).ljust( 14 )
            print curMsg + '\n'
            # --------------------------
            # print selected variables
            # --------------------------
            curMsg = ' ' + 'curVars' + ' : '
            for ( j , curVar ) in enumerate( varList ) :
                if j != 0 :
                    curMsg += ' , '
                if int( fmod( j , 2 ) ) == 0 :
                    curMsg += '\n '
                curMsg += str(curVar).ljust( 14 )
            print curMsg + '\n'
            # -----------------------------------
            # don't repeat this text every time
            # the user types in something wrong
            # -----------------------------------
            printBeg = False
        # ---------------------------------------
        # print second part of continue message
        # ---------------------------------------
        if printEnd :
            if curIncTry == 0 :
                print continueMsg1B
            else :
                print continueMsg2
        else :
            printEnd = True
        # ---------------------
        # get input from user
        # ---------------------
        verifyInput = raw_input( inputRequest )
        verifyInput = verifyInput.strip()
        if not verifyInput :
            printEnd = False
            continue
        # ---------------------------------------------------
        # do desired protocol (dispatch on the first letter)
        # ---------------------------------------------------
        incorrectInput = False
        if verifyInput[0] == 'y' :
            if verifyGo( dirList , varList ) :
                done = True
            else :
                # ------------------------------------
                # this is equivalent to setting both
                # printBeg and printEnd to False
                # ------------------------------------
                printEnd = False
                incorrectInput = True
                curIncTry = 0
        elif verifyInput[0] == 'a' :
            print 'addDir'
        elif verifyInput[0] == 's' :
            print 'removeDir'
        elif verifyInput[0] == 'd' :
            askForVars( varList )
        elif verifyInput[0] == 'f' :
            print 'removeVar'
        else :
            incorrectInput = True
        # ------------------------------------------
        # adjust the current incorrect try counter
        # ------------------------------------------
        if incorrectInput :
            curIncTry += 1
        else :
            curIncTry = 0
        # ------------------------------------
        # check if the first part of the
        # continue message should be printed
        # ------------------------------------
        if fmod( curIncTry , 3 ) == 0 :
            printBeg = True
    # ---------------------------------------
    # this function doesn't return anything
    # it just manipulates data and checks
    # when the user is ready to search
    # ---------------------------------------
    return
"repo_name": "djsegal/ahab_legacy_",
"path": "pythonToMatlab/toMatlabAids/verifyData.py",
"copies": "1",
"size": "4270",
"license": "apache-2.0",
"hash": 6784421264613680000,
"line_mean": 22.7277777778,
"line_max": 82,
"alpha_frac": 0.3613583138,
"autogenerated": false,
"ratio": 5.11377245508982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.597513076888982,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dan'
from usedMsgs import preDirMsg
from math import fmod
def displayDirs( allData , header=None ) :
    """Build a human-readable, two-column listing of the data folders.

    :param allData: iterable of directory names.
    :param header: optional text to prepend; defaults to the shared
        preDirMsg prompt, so existing callers keep their output.
    :return: the formatted listing as a single string.
    """
    if header is None :
        header = preDirMsg
    curDirMsg = header + ' '
    # ------------------------------
    # start index at 1 because
    # humans are using this number
    # ------------------------------
    for ( curInd , curFolder ) in enumerate( allData , 1 ) :
        # zero-pad so listings with fewer than 100 folders line up
        curDirMsg += str(curInd).zfill( 2 )
        curDirMsg += ' : ' + curFolder + ' '
        # ------------------------
        # for cosmetic purposes:
        # add a space
        # ------------------------
        if len( curFolder ) < 12 :
            curDirMsg += ' '
        # -------------------------------
        # put two directories on a line
        # -------------------------------
        if curInd % 2 == 0 :
            curDirMsg += ' \n'
            curDirMsg += ' '
    return curDirMsg
"repo_name": "djsegal/ahab_legacy_",
"path": "pythonToMatlab/toMatlabAids/displayDirs.py",
"copies": "1",
"size": "1204",
"license": "apache-2.0",
"hash": -2020366529422209000,
"line_mean": 21.7358490566,
"line_max": 51,
"alpha_frac": 0.3504983389,
"autogenerated": false,
"ratio": 4.36231884057971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0184278122215453,
"num_lines": 53
} |
__author__ = 'Dan'
import json
import web
from models.policies import policies_model
from localsys.environment import context
from localsys.storage import db
class history_rest:
    """Serves a user's policy history and score time series as JSON
    (backs the table and the graphs on the Profile page)."""

    def GET(self):
        web.header('Content-Type', 'application/json')
        # Policy history drives the table display on the Profile page.
        policy_history = policies_model.get_policy_history(context.user_id())
        # Risks and costs for every month the user played drive the graphs.
        userid = context.user_id()
        score_rows = db.select('scores', where='userid=$userid', order="date ASC", vars=locals())
        scores_result = [
            dict((col, str(val)) for col, val in entry.iteritems())
            for entry in score_rows
        ]
        history = json.dumps(
            {
                'policy_history': json.dumps(policy_history),
                'graph_data': json.dumps(scores_result)
            }
        )
        if history:
            return history
class score_frame:
    """
    Provides data for a risk and cost scores in the bottom left corner
    (different from the score page as the latter takes only best values)
    Is similar to graph_data for the profile page
    """

    def GET(self):
        # Fetch the two most recent score rows for the current user.
        user_id = context.user_id()
        web.header('Content-Type', 'application/json')
        rows = db.select('scores', where='userid=$user_id', order="date DESC", limit=2, vars=locals())
        stringified = []
        for record in rows:
            stringified.append(dict((field, str(val)) for field, val in record.iteritems()))
        return json.dumps(stringified)
"repo_name": "mapto/sprks",
"path": "controllers/policy_history.py",
"copies": "1",
"size": "1786",
"license": "mit",
"hash": -5691688441002371000,
"line_mean": 29.8103448276,
"line_max": 104,
"alpha_frac": 0.5951847704,
"autogenerated": false,
"ratio": 4.163170163170163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019952060417413463,
"num_lines": 58
} |
__author__ = 'dan'
import networkx as nx
from datetime import datetime
import numpy as np
import pandas as pd
import itertools
import scipy.stats as stats
import os
import mlalgorithms.transfer_entropy
# (net label, discretized fluorescence filename) pairs to process.
nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.desc.csv')]
out_dir = '/Users/dan/dev/datasci/kaggle/connectomix/out/'
#nets = {'valid':'fluorescence_valid.txt.desc.csv', 'test':'fluorescence_test.txt.desc.csv'}
#out_dir = '/Users/dan/dev/datasci/kaggle/connectomix/out/'
in_dir = '/Users/dan/dev/datasci/kaggle/connectomix/'
def gte_analyze(in_dir, out_dir, nets):
    """Compute Generalized Transfer Entropy between every neuron pair of each
    net and write the connection-strength predictions to a CSV file.

    :param in_dir: base directory holding '<net>/out/<file>' discretized data.
    :param out_dir: output directory for the predictions CSV (created if missing).
    :param nets: iterable of (net label, discretized fluorescence filename) pairs.
    """
    print('starting gte analyze')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    res= []
    for t in nets:
        k = t[0]
        v = t[1]
        print('reading discretized file ' + in_dir + k + '/out/' + v )
        n_act = pd.read_table(in_dir + k + '/out/' + v, sep=',', header=None)
        # set the columns headers to 1-based series
        neuron_ct = len(n_act.columns)
        n_act.columns = range(1, neuron_ct+1)
        # call GTE
        entropy = mlalgorithms.transfer_entropy.GTE(n_act.values, 3)
        # reformat
        # NOTE(review): keys here use 0-based neuron indices, while the
        # pearson script in this project emits 1-based ones — confirm which
        # the submission format expects.
        for i in range(0, neuron_ct):
            if i % 10 == 0:
                print('on neuron ' + str(i))
            for j in range(0, neuron_ct):
                key = k + '_' + str(i) + '_' + str(j)
                res.append([key, entropy[i][j]])
    df = pd.DataFrame(res,columns=['NET_neuronI_neuronJ','Strength'])
    df.to_csv(out_dir + '/predictions-gte.csv', index=False)
# Runs the analysis at import/execution time (script-style module).
gte_analyze(in_dir, out_dir, nets)
"repo_name": "ecodan/kaggle-connectomix",
"path": "cnct_gte.py",
"copies": "1",
"size": "1564",
"license": "apache-2.0",
"hash": 2388798853302309400,
"line_mean": 30.9387755102,
"line_max": 92,
"alpha_frac": 0.6042199488,
"autogenerated": false,
"ratio": 3.0076923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4111912256492308,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dan'
import networkx as nx
from datetime import datetime
import numpy as np
import pandas as pd
import itertools
import scipy.stats as stats
import os
nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.diff.csv')]
out_dir = '/Users/dan/dev/datasci/kaggle/connectomix/out/'
#nets = {'valid':'fluorescence_valid.txt.desc.csv', 'test':'fluorescence_test.txt.desc.csv'}
#out_dir = '/Users/dan/dev/datasci/kaggle/connectomix/out/'
in_dir = '/Users/dan/dev/datasci/kaggle/connectomix/'
start = datetime.now()
last = datetime.now()
def pearson_analyze(in_dir, out_dir, nets):
    """Score all directed neuron pairs by Pearson correlation.

    For each (net_name, file_name) tuple in ``nets``, reads the discretized
    fluorescence CSV from ``in_dir/<net>/out/<file>`` and writes one combined
    ``predictions-pearson.csv`` (columns ``NET_neuronI_neuronJ`` and
    ``Strength``) into ``out_dir``. Self-pairs (i == j) are scored 0.0.

    :param in_dir: base input directory; must end with a path separator
    :param out_dir: output directory for the predictions CSV (created if missing)
    :param nets: iterable of (net_name, discretized_file_name) tuples
    """
    print('starting pearson analyze')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    res = []
    for k, v in nets:
        print('reading discretized file ' + in_dir + k + '/out/' + v)
        n_act = pd.read_table(in_dir + k + '/out/' + v, sep=',', header=None)
        # set the columns headers to 1-based series
        neuron_ct = len(n_act.columns)
        n_act.columns = range(1, neuron_ct + 1)
        # loop through all neuron combinations and calculate pearson coeff
        for i in range(1, neuron_ct + 1):
            if i % 10 == 0:
                print('on neuron ' + str(i))
            for j in range(1, neuron_ct + 1):
                key = k + '_' + str(i) + '_' + str(j)
                if i == j:
                    res.append([key, 0.0])
                else:
                    # BUG FIX: the value was wrapped in str(), producing a
                    # mixed-type Strength column (floats on the diagonal,
                    # strings elsewhere); keep it numeric.
                    corr = stats.pearsonr(n_act[i], n_act[j])[0]
                    res.append([key, corr])
    df = pd.DataFrame(res, columns=['NET_neuronI_neuronJ', 'Strength'])
    df.to_csv(out_dir + '/predictions-pearson.csv', index=False)
pearson_analyze(in_dir, out_dir, nets) | {
"repo_name": "ecodan/kaggle-connectomix",
"path": "cnct_pearson.py",
"copies": "1",
"size": "1717",
"license": "apache-2.0",
"hash": -6174207283983943000,
"line_mean": 32.0384615385,
"line_max": 92,
"alpha_frac": 0.5835760047,
"autogenerated": false,
"ratio": 3.071556350626118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41551323553261177,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dan'
"""
Obsolete, but used for comparison.
"""
from collections import deque, defaultdict
import time
from redis import Redis
from colored_bitcoins.util import hashEncode
r = Redis(db=2)
def genesis(tx):
    """Seed the coloring walk at transaction *tx*: fetch its outputs from
    redis, total every value field (hash keys containing ":v"), and return
    the (tx, color_id, total) triple with color id 1."""
    outputs = r.hgetall(tx)
    total = 0
    for field, raw in outputs.items():
        if ":v" in field:
            total += int(raw)
    return tx, 1, total
def load_txs(queue, cache):
    """Batch-fetch, through a single redis pipeline, the output hashes of
    every queued transaction (first tuple element) missing from *cache*,
    and store them into *cache* keyed by transaction id."""
    missing = [item[0] for item in queue if item[0] not in cache]
    pipe = r.pipeline()
    for txid in missing:
        pipe.hgetall(txid)
    for txid, outputs in zip(missing, pipe.execute()):
        cache[txid] = outputs
def color(tx):
    """Propagate colored value forward from *tx* with a level-by-level BFS.

    A ``None`` entry in the queue marks the end of a BFS level; when it is
    popped, the whole next level is batch-loaded from redis via load_txs().
    Each work item is a (tx_hash, color_id, value) triple; value is moved
    greedily into the transaction's ":v0" outputs (lowest output index
    first) and forwarded along each output's ":s" successor link.
    Returns the cache of all transaction hashes visited.
    """
    begin = time.clock()
    cache = {}
    visitedtimes = defaultdict(int)
    # NOTE(review): q.append(None) sits OUTSIDE the `if q:` guard, so once
    # the queue drains to just the marker the loop re-appends it forever —
    # looks like a non-termination hazard; confirm the intended indentation.
    q = deque([None, genesis(hashEncode(tx))])
    while q:
        item = q.popleft()
        if item is None:
            if q:
                load_txs(q, cache)
            q.append(None)
            continue
        t, colorid, value = item
        # print "Processing tx", hashDecode(t)
        outs = cache[t]
        # uncolored outputs (":v0") that still hold a nonzero amount
        validkeys = [k for k in outs if ":v0" in k and outs[k] != 0 and outs[k] != "0"]
        # process outputs in ascending output-index order
        validkeys.sort(key=lambda x: int(x.partition(":")[0]))
        for key in validkeys:
            if value == 0:
                break
            capacity = outs[key]
            capacity = int(capacity)
            # move as much colored value as this output can absorb
            moving = min(capacity, value)
            prefix = key.partition(":")[0]
            newkey = prefix + ":v" + str(colorid)
            outs[key] = int(outs[key]) - moving
            outs[newkey] = int(outs.get(newkey, 0)) + moving
            # print "Moving", moving,
            value -= moving
            # follow the successor link, if this output was spent
            successor = prefix + ":s"
            if successor in outs:
                q.append((outs[successor], colorid, moving))
                # print "to", hashDecode(q[-1][0])
            else:
                pass# print
        if value:
            pass #print "Colored bitcoins lost! Amount:", value
    print "visited", len(cache), "transactions. It took", time.clock()-begin
    return cache
def test():
    """Smoke test: pipeline an hgetall for every key in the redis db and
    execute the batch (results are discarded)."""
    pipe = r.pipeline()
    for key in r.keys():
        pipe.hgetall(key)
    pipe.execute()
# test()
# color("f96a743f808d7dd68c8b84ab139a6f26b882af8587e3e21ce53785e59fd8e87b")
# walkthrough("5468451affd4814bbaa4e986ed85413d1daf39f3a8312ba244fcc1ae7c4a682a")
# color("f3710095e59239927c96f4479aedb97fa9568c084932c20238cb19e2bb6d777e")
# color("95a7de176997631e96dfa882b36202c21c343b61c3b8b58b54ca668db10c31a1")
""""
i = 50
for k in r.scan_iter():
if not i:
break
print hashDecode(k)
color(hashDecode(k))
i -= 1"""
# 62daaf3e5bff38a065e794cbe3214dfe6a63799064be326b34706808778ea755 U
| {
"repo_name": "Danstahr/colored-bitcoins-project",
"path": "colored_bitcoins/processor.py",
"copies": "1",
"size": "2734",
"license": "mit",
"hash": -5796308567659321000,
"line_mean": 27.4791666667,
"line_max": 87,
"alpha_frac": 0.5877834674,
"autogenerated": false,
"ratio": 3.1174458380843784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9184532662717423,
"avg_score": 0.004139328553391054,
"num_lines": 96
} |
__author__ = 'dan'
'''
Step 1 in pipeline
Input: flourescence files
Output:
1) diff'd time series (delta between each two time frames)
2) descretized time series (deltas converted to binary with threshold N)
'''
import pandas as pd
import numpy as np
import os
from datetime import datetime
# nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt'),
# ('small','fluorescence_iNet1_Size100_CC02inh.txt'),
# ('small','fluorescence_iNet1_Size100_CC03inh.txt'),
# ('small','fluorescence_iNet1_Size100_CC04inh.txt'),
# ('small','fluorescence_iNet1_Size100_CC05inh.txt'),
# ('small','fluorescence_iNet1_Size100_CC06inh.txt')]
# nets = [('valid','fluorescence_valid.txt'),
# ('test','fluorescence_test.txt')]
nets = [('normal-1','fluorescence_normal-1.txt'),
('normal-2','fluorescence_normal-2.txt'),
('normal-3-highrate','fluorescence_normal-3-highrate.txt'),
('normal-3','fluorescence_normal-3.txt'),
('normal-4','fluorescence_normal-4.txt'),
('normal-4-lownoise','fluorescence_normal-4-lownoise.txt'),
('highcc','fluorescence_highcc.txt'),
('lowcc','fluorescence_lowcc.txt'),
('highcon','fluorescence_highcon.txt'),
('lowcon','fluorescence_lowcon.txt')
]
in_dir = '/Users/dan/dev/datasci/kaggle/connectomix/'
threshold = 0.12
def munge(in_dir, nets, threshold=0.12):
    """Preprocess raw fluorescence traces for each network.

    For every (net_name, file_name) pair in *nets*, writes two CSVs under
    ``in_dir/<net>/out/``: the frame-to-frame delta series (``.diff.csv``)
    and a 4-level discretization of those deltas (``.desc.csv``), where a
    delta d maps to 3 if d > 3*threshold, 2 if d > 2*threshold, 1 if
    d > threshold, else 0.
    """
    def discretize(delta):
        # NaN compares False against everything, so the first (NaN) diff
        # row maps to 0 — same as the original chained conditional.
        if delta > 3 * threshold:
            return 3
        if delta > 2 * threshold:
            return 2
        if delta > threshold:
            return 1
        return 0

    for name, fname in nets:
        out_dir = in_dir + name + '/out/'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # read flourescence file
        in_file = in_dir + name + '/' + fname
        print(name + ': reading file ' + in_file + ' at ' + str(datetime.now()))
        frames = pd.read_table(in_file, sep=',', header=None)
        frames.columns = range(1, len(frames.columns) + 1)
        # diff and output
        print(name + ': calclating diff...')
        deltas = frames.diff()
        print(name + ': writing diff file ' + out_dir + fname + '.diff.csv')
        deltas.to_csv(out_dir + fname + '.diff.csv', index=False, header=False)
        # discretize and output
        print(name + ': discretizing...')
        levels = deltas.applymap(discretize)
        print(name + ': writing desc file ' + out_dir + fname + '.desc.csv')
        levels.to_csv(out_dir + fname + '.desc.csv', index=False, header=False)
munge(in_dir, nets, threshold) | {
"repo_name": "ecodan/kaggle-connectomix",
"path": "cnct_munge.py",
"copies": "1",
"size": "2441",
"license": "apache-2.0",
"hash": 2677448739148627000,
"line_mean": 34.3913043478,
"line_max": 123,
"alpha_frac": 0.5944285129,
"autogenerated": false,
"ratio": 2.965978128797084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9039557234683439,
"avg_score": 0.004169881402728954,
"num_lines": 69
} |
__author__ = 'dan'
'''
Step 2 in pipeline
This is the first approach I took. Basically it counts single occurances of potential connectivity
between neurons in the current time frame and up to N frames back.
Input: the descretized file
Output: a graphml file with info about each directed edge
'''
import networkx as nx
import numpy as np
import pandas as pd
import itertools
from datetime import datetime
import time
# nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.desc.csv')]
# nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC02inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC03inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC04inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC05inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC06inh.txt.desc.csv')]
nets = [('valid','fluorescence_valid.txt.desc.csv'),
('test','fluorescence_test.txt.desc.csv')]
nets = [
# ('normal-1','fluorescence_normal-1.txt.desc.csv'),
('normal-2','fluorescence_normal-2.txt.desc.csv'),
('normal-3-highrate','fluorescence_normal-3-highrate.txt.desc.csv'),
('normal-3','fluorescence_normal-3.txt.desc.csv'),
('normal-4','fluorescence_normal-4.txt.desc.csv'),
('normal-4-lownoise','fluorescence_normal-4-lownoise.txt.desc.csv'),
('highcc','fluorescence_highcc.txt.desc.csv'),
('lowcc','fluorescence_lowcc.txt.desc.csv'),
('highcon','fluorescence_highcon.txt.desc.csv'),
('lowcon','fluorescence_lowcon.txt.desc.csv')
]
in_dir = '/Users/dan/dev/datasci/kaggle/connectomix/'
def array_to_int(a):
    """Collapse *a* into an integer bitmask: each nonzero entry becomes a
    1-bit, with a[0] as the most significant bit (e.g. [1, 0, 1] -> 5)."""
    result = 0
    for value in a:
        result = (result << 1) | (1 if value != 0 else 0)
    return result
def time_remaining(tot_iters, cur_iter, total_dur):
    """Given *total_dur* milliseconds spent over *cur_iter* iterations,
    return (average per iteration, projected time left), both in seconds.
    A zero *cur_iter* is treated as one to avoid dividing by zero."""
    per_iter = total_dur / (float(cur_iter) if cur_iter != 0 else 1.0)
    projected = (tot_iters - cur_iter) * per_iter
    return per_iter / 1000, projected / 1000
def nw_graph(in_dir, nets, lookforward_pers=3):
    """Build a fully-connected directed graph per network and count
    co-activation evidence on every edge.

    For each activity threshold 1-3, edge attribute '<thr>-0' counts
    same-frame co-firing (bi-directional), '<thr>-<d>' counts firing d
    frames apart (uni-directional, up to *lookforward_pers* frames back),
    and '<thr>-B' counts frames flagged as network-wide bursts. The graph
    is written to ``<net>/out/<file>-graph.graphml``.

    NOTE(review): the trailing ``return`` sits inside the nets loop, so
    only the FIRST net in *nets* is processed — confirm this is intended.
    """
    for t in nets:
        k = t[0]
        v = t[1]
        print(k + ': starting network analyze at ' + str(datetime.now()))
        G = nx.DiGraph()
        out_dir = in_dir + k + '/out/'
        print('reading discretized file')
        n_act = pd.read_table(in_dir + k + '/out/' + v, sep=',', header=None)
        # set the columns headers to 1-based series
        neuron_ct = len(n_act.columns)
        n_act.columns = range(1, neuron_ct+1)
        # add nodes and all possible edges
        G.add_nodes_from(range(1,neuron_ct+1))
        p = itertools.product(G.nodes(), G.nodes())
        G.add_edges_from(p)
        # per-threshold history of recently activated neuron id arrays
        activated_lasts = [[],[],[]]
        totals = [0l,0l,0l]
        print(k + ': starting at ' + str(time.time()))
        start = time.time() * 1000
        # loop through all time periods
        for t in range(0,len(n_act)):
            if t % 1000 == 0:
                cur_time = time.time() * 1000
                avg, rem = time_remaining(len(n_act), t, cur_time - start )
                print(k + ': on time period ' + str(t) + ' | avg=' + str(avg) + ' | rem=' + str(rem))
            # this section is for discreet historical connectivity tracking
            for thr in [1,2,3]:
                # get all of the values above threshold
                activated_current = n_act.columns.values[n_act.iloc[t].values >= thr]
                totals[thr-1] += len(activated_current)
                # get the historical list for this threshold
                activated_last = activated_lasts[thr-1]
                tstr = str(thr)
                # connect all activated (bi-directional)
                p = itertools.product(activated_current, activated_current)
                for tup in p:
                    i = tup[0]
                    j = tup[1]
                    if i == j: continue
                    edge = G[i][j]
                    redge = G[j][i]
                    # if > 20% of neurons fired assume a blast event and clear the history
                    # NOTE(review): `activated_last = []` only rebinds the
                    # local alias; the list stored in activated_lasts is not
                    # cleared, and later appends go to the orphan list —
                    # confirm the intended reset semantics.
                    if len(activated_current) > len(G.nodes())*.2:
                        f = tstr + '-B'
                        edge[f] = edge[f] + 1 if f in edge else 1
                        redge[f] = redge[f] + 1 if f in redge else 1
                        activated_last = []
                    else:
                        f = tstr + '-0'
                        edge[f] = edge[f] + 1 if f in edge else 1
                        redge[f] = redge[f] + 1 if f in redge else 1
                # connect with previous periods (uni-directional)
                if (len(activated_last) > 0):
                    degrees = 0
                    # walk history newest-first; `degrees` is the frame gap
                    for i,e in reversed(list(enumerate(activated_last))):
                        degrees += 1
                        p = itertools.product(e, activated_current)
                        for tup in p:
                            i = tup[0]
                            j = tup[1]
                            if i == j: continue
                            edge = G[i][j]
                            # G.add_edge(i,j)
                            f = tstr + '-' + str(degrees)
                            edge[f] = edge[f] + 1 if f in edge else 1
                # drop the current activated list back one cycle
                activated_last.append(activated_current)
                if (len(activated_last) > lookforward_pers):
                    activated_last.pop(0)
        activated_current = []
        print(k + ': DIAG nodes=' + str(len(G.nodes())))
        print(k + ': DIAG edges=' + str(len(G.edges())))
        print(k + ': DIAG thr cts=' + str(totals))
        nx.write_graphml(G, out_dir + v + '-graph.graphml')
        return
nw_graph(in_dir, nets) | {
"repo_name": "ecodan/kaggle-connectomix",
"path": "cnct_graph.py",
"copies": "1",
"size": "5773",
"license": "apache-2.0",
"hash": 8339368895633037000,
"line_mean": 35.0875,
"line_max": 101,
"alpha_frac": 0.5257231942,
"autogenerated": false,
"ratio": 3.463107378524295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9467110866953263,
"avg_score": 0.004343941154206342,
"num_lines": 160
} |
__author__ = 'dan'
'''
Step 2 in pipeline
This is the second approach. Basically it counts complex potential connectivity
between neurons in the current time frame and up to 3 frames back and tracks that in a 2**4 matrix
(flattened to a 4 bit binary number). For example, in I -> J if I fired current and n-2 frames and J fired current,
this would increment I(0101) -> J or I(5) -> J.
Input: the descretized file
Output: a graphml file with info about each directed edge
'''
import networkx as nx
import numpy as np
import pandas as pd
import itertools
from datetime import datetime
import time
# nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.desc.csv')]
# nets = [('small','fluorescence_iNet1_Size100_CC01inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC02inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC03inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC04inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC05inh.txt.desc.csv'),
# ('small','fluorescence_iNet1_Size100_CC06inh.txt.desc.csv')]
# nets = [('valid','fluorescence_valid.txt.desc.csv'),
# ('test','fluorescence_test.txt.desc.csv')]
nets = [
# ('normal-1','fluorescence_normal-1.txt.desc.csv'),
('normal-2','fluorescence_normal-2.txt.desc.csv'),
('normal-3-highrate','fluorescence_normal-3-highrate.txt.desc.csv'),
('normal-3','fluorescence_normal-3.txt.desc.csv'),
('normal-4','fluorescence_normal-4.txt.desc.csv'),
('normal-4-lownoise','fluorescence_normal-4-lownoise.txt.desc.csv'),
('highcc','fluorescence_highcc.txt.desc.csv'),
('lowcc','fluorescence_lowcc.txt.desc.csv'),
('highcon','fluorescence_highcon.txt.desc.csv'),
('lowcon','fluorescence_lowcon.txt.desc.csv')
]
in_dir = '/Users/dan/dev/datasci/kaggle/connectomix/'
def array_to_int(a):
    """Collapse *a* into an integer bitmask: each nonzero entry becomes a
    1-bit, with a[0] as the most significant bit (e.g. [1, 0, 1] -> 5)."""
    result = 0
    for value in a:
        result = (result << 1) | (1 if value != 0 else 0)
    return result
def time_remaining(tot_iters, cur_iter, total_dur):
    """Given *total_dur* milliseconds spent over *cur_iter* iterations,
    return (average per iteration, projected time left), both in seconds.
    A zero *cur_iter* is treated as one to avoid dividing by zero."""
    per_iter = total_dur / (float(cur_iter) if cur_iter != 0 else 1.0)
    projected = (tot_iters - cur_iter) * per_iter
    return per_iter / 1000, projected / 1000
def nw_graph(in_dir, nets, lookforward_pers=3):
    """Second-generation connectivity counting (see the module docstring).

    For each frame t >= 3 a 4-frame window [t-3, t] is examined; for every
    neuron i active at frame t and every neuron j active anywhere in the
    window, edge j+1 -> i+1 gets its 'e<pattern>' counter bumped, where
    <pattern> is j's 4-bit firing history packed by array_to_int(). Burst
    windows (> 20% of neurons active) and empty windows are skipped. The
    graph is written to ``<net>/out/<file>-graph2.graphml``.

    NOTE(review): the trailing ``return`` sits inside the nets loop, so
    only the FIRST net in *nets* is processed — confirm this is intended.
    ``lookforward_pers``, ``activated_lasts`` and ``totals`` are unused
    leftovers from the first-generation version.
    """
    for t in nets:
        k = t[0]
        v = t[1]
        print(k + ': starting network analyze at ' + str(datetime.now()))
        G = nx.DiGraph()
        out_dir = in_dir + k + '/out/'
        print('reading discretized file')
        n_act = pd.read_table(in_dir + k + '/out/' + v, sep=',', header=None)
        # set the columns headers to 1-based series
        neuron_ct = len(n_act.columns)
        n_act.columns = range(1, neuron_ct+1)
        # add nodes and all possible edges
        G.add_nodes_from(range(1,neuron_ct+1))
        p = itertools.product(G.nodes(), G.nodes())
        G.add_edges_from(p)
        activated_lasts = [[],[],[]]
        totals = [0l,0l,0l]
        print(k + ': starting at ' + str(time.time()))
        start = time.time() * 1000
        # loop through all time periods
        for t in range(0,len(n_act)):
            if t % 1000 == 0:
                cur_time = time.time() * 1000
                avg, rem = time_remaining(len(n_act), t, cur_time - start )
                print(k + ': on time period ' + str(t) + ' | avg=' + str(avg) + ' | rem=' + str(rem))
            # look back holistically over last N frames and if there is a possible impact track as a unidirectional effect
            if t >= 3:
                # get a slice of 4 time frames starting at the current
                window = n_act.iloc[t-3:t+1].values
                # get a list of columns with values other than zero
                wsum = np.sum(window, axis=0)
                act_window = np.array(range(0,neuron_ct))
                act_window = act_window[wsum > 0]
                if len(act_window) == 0:
                    # nothing here
                    continue
                if len(act_window) > (neuron_ct*.2):
                    # ignore burst period windows
                    continue
                # neurons active in the CURRENT frame (last row of the window)
                activated_current = np.array(range(0,neuron_ct))
                activated_current = activated_current[window[3] > 0]
                for i in activated_current:
                    for j in act_window:
                        if i == j: continue
                        # print('process edge ' + str(i+1) + ',' + str(j+1))
                        edge = G[j+1][i+1] # process entropy from J to I
                        # lazily initialize all 16 pattern counters on first touch
                        if 'e1' not in edge:
                            for e in range(0,16):
                                edge['e'+str(e)] = 0
                        _ent = array_to_int(window[::,j]) # convert the vertical array to an int
                        edge['e' + str(_ent)] += 1
        print(k + ': DIAG nodes=' + str(len(G.nodes())))
        print(k + ': DIAG edges=' + str(len(G.edges())))
        print(k + ': DIAG thr cts=' + str(totals))
        nx.write_graphml(G, out_dir + v + '-graph2.graphml')
        return
nw_graph(in_dir, nets) | {
"repo_name": "ecodan/kaggle-connectomix",
"path": "cnct_graph2.py",
"copies": "1",
"size": "5050",
"license": "apache-2.0",
"hash": -4022575771481922600,
"line_mean": 35.8686131387,
"line_max": 122,
"alpha_frac": 0.5594059406,
"autogenerated": false,
"ratio": 3.2664941785252264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43259001191252267,
"avg_score": null,
"num_lines": null
} |
__author__ = 'danny'
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class UserCreationEmailForm(UserCreationForm):
    """Sign-up form that also collects an e-mail address.

    BUG FIX: the attribute was ``forms.EmailInput()``, which is a *widget*,
    not a form field, so Django's form metaclass ignored it and the email
    was never validated or collected. ``forms.EmailField()`` is the field
    class (matching EmailAuthenticateForm in this module).
    """
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ('username', 'email')
class EmailAuthenticateForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(label='password', widget=forms.PasswordInput())
    def __init__(self, *args, **kwargs):
        # Cache for the authenticated User: populated by clean(), read back
        # by get_user(); None until a successful authentication.
        self.user_cache = None
        super(EmailAuthenticateForm, self).__init__(*args, **kwargs)
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
self.user_cache = authenticate(email=email, password=password)
if self.user_cache is None:
raise forms.ValidationError('Usuario Incorrecto')
elif not self.user_cache.is_active:
raise forms.ValidationError("Usuario inactive")
return self.cleaned_data
def get_user(self):
return self.user_cache | {
"repo_name": "daatrujillopu/Sfotipy",
"path": "userprofiles/forms.py",
"copies": "1",
"size": "1125",
"license": "mit",
"hash": -8312996375755603000,
"line_mean": 31.1714285714,
"line_max": 78,
"alpha_frac": 0.6764444444,
"autogenerated": false,
"ratio": 4.1208791208791204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00225714779944152,
"num_lines": 35
} |
__author__ = 'danoday'
import string
from nltk.corpus import stopwords
from nltk.tokenize import WordPunctTokenizer
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
"""
# this extracted unigrams and bigrams but created too large of a sample file with bigrams
def extract_words_as_features(text):
# extract words
text = text.replace('\n', ' ').replace('\r', '')
text = text.translate(string.maketrans("",""), string.punctuation) # eliminates punctuation
tokenizer = WordPunctTokenizer()
tokens = tokenizer.tokenize(text)
# extract bigrams
bigram_finder = BigramCollocationFinder.from_words(tokens)
bigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq, 500)
for bigram_tuple in bigrams:
x = "%s %s" % bigram_tuple
tokens.append(x)
unigram_and_bigram_set = set([x.lower() for x in tokens if x not in stopwords.words('english') and len(x) > 2])
return list(unigram_and_bigram_set)
"""
def extract_words_as_features(text):
# extract ONLY unigrams
text = text.replace('\n', ' ').replace('\r', '')
text = text.translate(string.maketrans("",""), string.punctuation) # eliminates punctuation
tokenizer = WordPunctTokenizer()
tokens = tokenizer.tokenize(text)
unigrams = set([x.lower() for x in tokens if x not in stopwords.words('english') and len(x) > 3])
return list(unigrams) | {
"repo_name": "danzek/email-formality-detection",
"path": "features/bagofwords.py",
"copies": "1",
"size": "1422",
"license": "mit",
"hash": -4837631634303933000,
"line_mean": 34.575,
"line_max": 115,
"alpha_frac": 0.7004219409,
"autogenerated": false,
"ratio": 3.4347826086956523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46352045495956523,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dante'
import sys
import os
from optparse import OptionParser
import numpy
import openbabel as ob
from pybel import Outputfile
from pybel import readfile
def get_Molfiles(molFileLocation, startCompFile):
    '''Grab all molfiles from a folder and separate them from the cofactors.
       Arguments: molFileLocation: folder in question
                  startCompFile: file listing start compounds.
       Returns:   genMols: generated molfiles
                  startComp: start compounds, sans cofactors
    '''
    compList = os.listdir(molFileLocation)
    # BUG FIX: list.sort() sorts in place and returns None, so the old
    # `[...].sort()` left intList as None and the next line crashed.
    # (str.strip('.mol') removes those *characters* from both ends, which
    # is safe here only because the basenames are purely numeric.)
    intList = sorted(int(x.strip('.mol')) for x in compList)
    # BUG FIX: the entries are ints and must be str()'d before being
    # concatenated with the '.mol' suffix.
    allMolList = [os.path.join(molFileLocation, str(x) + '.mol') for x in intList]
    #Read start compounds file for cofactors and non-cofactors.
    f = open(startCompFile, 'r')
    startComp = []
    cof = []
    for x in f:
        if 'Cofactor' in x:
            cof.append(x.strip('\n'))
        elif '.mol' in x:
            startComp.append(x.strip('\n'))
        else:
            pass
    f.close()
    m = len(allMolList)
    n = len(cof)
    #The `new` molfiles are the ones not in the start compounds file, so we segregate them here.
    # NOTE(review): allMolList[m:n] with m == len(allMolList) is always
    # empty, and m can never be < 0, so the sys.exit branch is unreachable;
    # preserved as-is — confirm the intended slicing.
    if m > 0:
        genMols = allMolList[m:n]
    elif m == 0:
        genMols = allMolList[n:]
    else:
        sys.exit('Invalid input compounds file.')
    return genMols, startComp
def make_LibrariesFp(genMols, startComp, target, outName, fpForm):
    '''Build sdf libraries and a dictionary of similarity (TC) values.
       Writes the generated and start compounds into two sdf files, then
       fingerprints everything and compares against the target compound.
       Arguments: genMols: list of generated molfiles
                  startComp: list of starting molfiles
                  target: molfile of target compound
                  outName: name prefix of the output libraries
                  fpForm: fingerprint type (FP2/FP3/FP4)
       Returns:   baseTc: scalar TC value that is the basis for elimination
                  molTcs: TC value of each generated compound vs the target.
    '''
    genMolLibrary = Outputfile("sdf", "%s_MolLibrary.sdf" % outName, True)
    startLibrary = Outputfile("sdf", "%s_StartLibrary.sdf" % outName, True)
    #Compress into sdf file.
    for x in genMols:
        for y in readfile('mol', x):
            genMolLibrary.write(y)
    genMolLibrary.close()
    # start compounds are resolved relative to a hard-coded 'Molfiles' folder
    startCompFiles = [os.path.join('Molfiles', x.rstrip('\n')) for x in startComp]
    for x in startCompFiles:
        for y in readfile('mol', x):
            startLibrary.write(y)
    startLibrary.close()
    # .next() is the Python-2 iterator protocol (py3 would use next(...))
    targetFile = readfile('mol', target).next()
    #Suppress error messages for neatness.
    error_log_check = hasattr(ob, "obErrorLog")
    if error_log_check:
        ob.obErrorLog.ClearLog()
        ob.obErrorLog.SetOutputLevel(-1)
    #Generate fingerprints and calculate TCs...then make them into a dictionary.
    molFps = [x.calcfp(fpForm) for x in readfile("sdf", "%s_MolLibrary.sdf" % outName)]
    startFps = [x.calcfp(fpForm) for x in readfile("sdf", "%s_StartLibrary.sdf" % outName)]
    targetFp = targetFile.calcfp(fpForm)
    # `fp | fp` is pybel's fingerprint comparison operator (Tanimoto-style TC)
    startTcs = [x | targetFp for x in startFps]
    # baseline is the mean similarity of the start compounds to the target
    baseTc = numpy.average(startTcs)
    molTcs = {genMols[i]: targetFp | x for i, x in enumerate(molFps)}
    return baseTc, molTcs
def remove_LowTc(tcBasis, tcDict, t):
    '''Elimination function: delete every molfile whose TC value falls at
       or below t * tcBasis.
       Arguments: tcBasis: base TC
                  tcDict: dictionary of {molfile path: TC value}
                  t: tolerance
       Returns:   number of files removed.
    '''
    removed = 0
    # dict.items() works on both Python 2 and 3 (the old iteritems() was
    # Python-2 only); we never mutate tcDict itself, only the filesystem.
    for path, tc in tcDict.items():
        if tc <= t * tcBasis:
            os.remove(path)
            removed += 1
    return removed
def sim_index_main(location, startCompFile, target, outName, fpForm, t):
    '''Run the whole similarity-index pipeline in one call: collect the
       molfiles, fingerprint them against the target, then prune the
       low-scoring ones. See the individual functions for argument details.
    '''
    mols, starts = get_Molfiles(location, startCompFile)
    base_tc, tc_by_mol = make_LibrariesFp(mols, starts, target, outName, fpForm)
    remove_LowTc(base_tc, tc_by_mol, t)
if __name__ == '__main__':
    # Command-line entry point (Python 2: note the print statement below).
    usage = "Usage: %prog [options] location/ startcompfile.dat targetmoleculepath.mol"
    parser = OptionParser(usage=usage)
    parser.add_option('-f', '--fingerprint', dest='finger', default='FP4', help='Fingerprint format, choose FP2, FP3, or FP4')
    parser.add_option('-n', '--nameoutput', dest='name', default='Net_Gen', help='Name of output sdf libraries')
    parser.add_option('-t', '--tolerance', dest='tol', default=1, help='Tolerance for compound TC removal')
    (options, args) = parser.parse_args()
    if options.finger not in ['FP4', 'FP2', 'fp2', 'fp4', 'FP3', 'fp3']:
        sys.exit('Fingerprint type is invalid\nValid types are FP2, FP3, or FP4')
    # NOTE(review): when -t is given on the command line, options.tol is a
    # *string*, so these numeric comparisons are string-vs-number (silently
    # "works" on Python 2 only) — consider type='float' on the option.
    if options.tol < 0:
        sys.exit('Tolerance is negative\nChoose a value greater than 0')
    if options.tol > 1:
        print 'Warning! Tolerance given could result in a loss of a generation.'
    # NOTE(review): options.loc is never defined by any add_option above and
    # will raise AttributeError; the usage string suggests the location is
    # the first positional argument (args[0], shifting the others).
    sim_index_main(options.loc, args[0], args[1], options.name, options.finger, float(options.tol))
| {
"repo_name": "tyo-lab-nu/SimScripts",
"path": "SimIndex.py",
"copies": "1",
"size": "4635",
"license": "mit",
"hash": -7708693258381587000,
"line_mean": 27.1509433962,
"line_max": 123,
"alpha_frac": 0.6714131607,
"autogenerated": false,
"ratio": 3.0058365758754864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4177249736575486,
"avg_score": null,
"num_lines": null
} |
import xml.etree.ElementTree as ET
import sys
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# A class for parsing PDML files from Wireshark
class PDMLParse:
    """Parser for Wireshark PDML (XML packet-detail) exports of USB captures."""

    def __init__(self, filename):
        self.filename = filename

    def get_root(self):
        """Parse the PDML file and return the XML root element."""
        tree = ET.parse(self.filename)
        return tree.getroot()

    # @return an array of packet dictionaries with
    # ['type'] = 'control' for USB control packet or 'bulk' for bulk transfer
    # ['direction'] = 'in' or 'out' depending on USB direction WRT host
    # ['data'] = hex data in ASCII format
    def get_packets(self):
        root = self.get_root()
        packets = []
        for packet in root.iter('packet'):
            usb_pkt = {}
            for child in packet:
                if child.attrib['name'] == 'usb':
                    proto = child
                    for child in proto:
                        if child.attrib['name'] == 'usb.transfer_type':
                            if child.attrib['value'] == '02':
                                usb_pkt['type'] = 'control'
                            else:
                                usb_pkt['type'] = 'bulk'
                        # Newer versions of wireshark use endpoint_addresss instead of endpoint_number
                        if child.attrib['name'] == 'usb.endpoint_number' or child.attrib['name'] == 'usb.endpoint_address':
                            # FIX: was "is not -1" — an identity test against an
                            # int literal (SyntaxWarning on Python >= 3.8 and
                            # only correct by CPython int caching); use != -1.
                            if child.attrib['showname'].find('Direction: IN') != -1:
                                usb_pkt['direction'] = 'in'
                            else:
                                usb_pkt['direction'] = 'out'
                elif child.attrib['name'] == 'fake-field-wrapper' and 'type' in usb_pkt:
                    proto = child
                    for child in proto:
                        if child.attrib['name'] == 'usb.capdata':
                            usb_pkt['data'] = child.attrib['value']
            # FIX: usb_pkt is always a dict, so the old "is not None" check
            # always appended — including empty dicts for non-USB packets,
            # which later raised KeyError in get_bulk_packet_sequence();
            # keep only packets we actually extracted fields from.
            if usb_pkt:
                packets.append(usb_pkt)
        return packets

    # @return a reordered version of @param sequence with @param move_transfer
    # moved to just after @param after_transfer.
    # I originally thought the bulk transfer sequences might be out of order in
    # the pcap trace; on further investigation I found they were not. Example:
    # self.reorder_transfer(get_bulk_packet_sequence(self), {'type': 'bulk',
    #   'direction': 'in', 'data': '...'}, {'type': 'bulk', 'direction': 'out',
    #   'data': '00000000'})
    def reorder_transfer(self, sequence, move_transfer, after_transfer):
        newsequence = sequence
        try:
            move_index = newsequence.index(move_transfer)
            beforesequence = newsequence[0:move_index]
            aftersequence = newsequence[move_index + 1:]
            after_index = aftersequence.index(after_transfer)
            aftersequence.insert(after_index + 1, move_transfer)
            newsequence = beforesequence + aftersequence
        except ValueError:
            # FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning("Did not find move transfer " + str(move_transfer) + " and after transfer " + str(after_transfer) + " in trace")
        return newsequence

    # @return an array of packet dictionaries with
    # ['type'] = 'enumeration' for (re)enumeration sequences or 'bulk' for bulk transfer
    # ['direction'] = 'in' or 'out' depending on USB direction WRT host
    # ['data'] = hex data in ASCII format
    def get_bulk_packet_sequence(self):
        packets = self.get_packets()
        bulksequence = []
        in_enumeration = False
        for packet in packets:
            # FIX: string comparisons used "is", which only worked through
            # CPython's string interning; compare by value instead.
            if packet['type'] == 'bulk':
                bulksequence.append(packet)
                in_enumeration = False
            elif packet['type'] == 'control':
                # collapse each run of control packets into one marker
                if not in_enumeration:
                    bulksequence.append({'type': 'enumeration'})
                    in_enumeration = True
        return bulksequence

    def dump_usb_data(self, packets):
        """Print each packet dictionary on its own line."""
        for packet in packets:
            print(packet)
if __name__ == '__main__':
    # CLI entry point: expects exactly one argument, the PDML file path.
    if len(sys.argv) != 2:
        print("usage: pdml_parse <path_to_pdml_file>")
    else:
        pdml = PDMLParse(sys.argv[1])
        print("USB Data:")
        pdml.dump_usb_data(pdml.get_packets())
        print("Bulk packet sequence:")
        pdml.dump_usb_data(pdml.get_bulk_packet_sequence())
| {
"repo_name": "Trellis-Logic/pyusb_pcap_replay",
"path": "scripts/pdml_parse.py",
"copies": "1",
"size": "4876",
"license": "bsd-2-clause",
"hash": 5690656827328366000,
"line_mean": 43.1481481481,
"line_max": 136,
"alpha_frac": 0.5549630845,
"autogenerated": false,
"ratio": 4.273444347063979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5328407431563978,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
def memoize(func):
    """
    Cache func's results keyed on its positional arguments.
    the function must not modify or rely on external state -
    the function should be stateless, and its positional arguments hashable.
    usage: @memoize as function annotation
    :param func: the function, whose result you would like to cached based on input arguments
    """
    # local import keeps this decorator self-contained
    import functools
    cache = {}

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def ret(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return ret
def memoize_force(func):
    """
    Similar to memoize, but force the hash by using its string value
    But caching performance may be a issue
    :param func: the function, whose result you would like to cached based on input arguments
    """
    cache = {}

    def ret(*args):
        # stringify the argument tuple so unhashable args still cache
        key = str(args)
        try:
            return cache[key]
        except KeyError:
            result = func(*args)
            cache[key] = result
            return result
    return ret
def memoize_iterable(func):
"""
Similar to memoize, but force the hash by using its tuple value
The arguments for the function must be iterable
"""
cache = {}
def ret(*args):
k = tuple(args)
if k not in cache:
cache[k] = func(*args)
return cache[k]
return ret | {
"repo_name": "idf/FaceReader",
"path": "util/commons_util/decorators/algorithms.py",
"copies": "2",
"size": "1173",
"license": "mit",
"hash": -2477212203891147300,
"line_mean": 25.0888888889,
"line_max": 93,
"alpha_frac": 0.6035805627,
"autogenerated": false,
"ratio": 4.144876325088339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5748456887788339,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
from cross_validation import CrossValidator
import os
import re
class TotalVerifier(CrossValidator):
    """Tag every corpus file with a Stanford NER model trained on ALL data.

    Trains (via all.prop) and then runs the classifier over each file in
    self.file_names (inherited from CrossValidator), writing inline-XML
    tagged output. Paths are rewritten data/ -> auto/ -> auto-tagged-model-all/.
    """
    def verify(self):
        RAW_FOLDER = "data"
        INPUT_FOLDER = "auto"
        OUTPUT_FOLDER = "auto-tagged-model-all"
        # train the model over the full corpus as configured in all.prop
        os.system("java -cp ../lib/ner/stanford-ner.jar edu.stanford.nlp.ie.crf.CRFClassifier -prop all.prop")
        for non_auto_file in self.file_names:
            # data/foo.tsv -> auto/foo.txt (raw text input for the classifier)
            auto_file = re.sub(r'%s'%(RAW_FOLDER), INPUT_FOLDER, non_auto_file)
            auto_file = re.sub(r'\.tsv', '.txt', auto_file)
            # auto/foo.txt -> auto-tagged-model-all/foo-tagged.xml
            auto_file_tagged = re.sub(r'%s'%INPUT_FOLDER, OUTPUT_FOLDER, auto_file)
            auto_file_tagged = re.sub(r'\.txt', '-tagged.xml', auto_file_tagged)
            os.system("java -mx500m -cp ../lib/ner/stanford-ner.jar edu.stanford.nlp.ie.crf.CRFClassifier\
             -loadClassifier ner-model-all.ser.gz -textFile %s -outputFormat inlineXML > %s"%(auto_file, auto_file_tagged))
    def do(self):
        # single entry point expected by the runner below
        self.verify()
if __name__=="__main__":
    # Run the full-corpus train-and-tag verification end to end.
    TotalVerifier().do()
| {
"repo_name": "idf/RecipeIngredients",
"path": "ner/recipe/all_verfication.py",
"copies": "1",
"size": "1042",
"license": "apache-2.0",
"hash": -4156152102768195600,
"line_mean": 36.2142857143,
"line_max": 123,
"alpha_frac": 0.6218809981,
"autogenerated": false,
"ratio": 3.1011904761904763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9010541819735388,
"avg_score": 0.04250593091101762,
"num_lines": 28
} |
__author__ = 'Danyang'
import logging
import sys
class Solution(object):
    @property
    def logger(self):
        """Lazily configured stdout logger (CRITICAL level, so the debug
        calls below are normally silent); the handler is added only once."""
        lgr = logging.getLogger(__name__)
        lgr.setLevel(logging.CRITICAL)
        if not lgr.handlers:
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
            lgr.addHandler(ch)
        return lgr
    def solve(self, cipher):
        """
        BFS from 'M' (start) to '*' (target) over '.'/'*' cells, recording
        parents in pi; then walk the recovered path and count the cells
        where the wand must be waved (more than one unvisited way forward).
        Returns "Impressed" iff that count equals K exactly, "Oops!" if it
        differs, and "not found" when the target is unreachable.
        :param cipher: tuple (M, N, matrix, K)
        """
        dirs = ((1, 0), (-1, 0), (0, 1), (0, -1))
        M, N, matrix, K = cipher
        start = None
        end = None
        # locate the start ('M') and target ('*') cells
        for i in xrange(M):
            for j in xrange(N):
                if matrix[i][j] == "M":
                    start = (i, j)
                elif matrix[i][j] == "*":
                    end = (i, j)
        # pi[r][c] is the BFS predecessor of cell (r, c)
        pi = [[None for _ in xrange(N)] for _ in xrange(M)]
        visited = [[False for _ in xrange(N)] for _ in xrange(M)]
        visited[start[0]][start[1]] = True
        q = [start]
        ended = False
        # level-by-level BFS; stop as soon as the target is reached
        while q and not ended:
            l = len(q)
            for i in xrange(l):
                cur = q[i]
                for dir in dirs:
                    r = cur[0] + dir[0]
                    c = cur[1] + dir[1]
                    if 0 <= r < M and 0 <= c < N and not visited[r][c]:
                        visited[r][c] = True
                        if matrix[r][c] in (".", "*"):
                            pi[r][c] = cur
                            q.append((r, c))
                            if matrix[r][c] == "*":
                                ended = True
            q = q[l:]
        if not ended:
            return "not found"
        # reconstruct the start->end path by following predecessors
        path = [end]
        cur = end
        while cur != start:
            cur = pi[cur[0]][cur[1]]
            path.append(cur)
        path.reverse()
        self.logger.debug(str(path))
        # count decision points along the path (excluding the target cell):
        # a wand wave is needed wherever more than one unvisited passable
        # neighbor exists
        cnt = 0
        visited = [[False for _ in xrange(N)] for _ in xrange(M)]
        for cur in path[:-1]:
            dir_cnt = 0
            visited[cur[0]][cur[1]] = True
            for dir in dirs:
                r = cur[0] + dir[0]
                c = cur[1] + dir[1]
                if 0 <= r < M and 0 <= c < N:
                    if matrix[r][c] in (".", "*") and not visited[r][c]:
                        dir_cnt += 1
            if dir_cnt > 1:
                cnt += 1
                self.logger.debug("Wand@" + str(cur))
        if cnt > K:
            return "Oops!"
        self.logger.debug("cnt: %d, K: %d" % (cnt, K))
        if cnt == K: # exactly K times
            return "Impressed"
        else:
            return "Oops!"
if __name__ == "__main__":
    # read test cases from a file; switch to stdin by uncommenting below
    f = open("1.in", "r")
    # f = sys.stdin
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher: grid dimensions, grid rows, expected wand count
        M, N = map(int, f.readline().strip().split(' '))
        matrix = []
        for _ in xrange(M):
            matrix.append(list(f.readline().strip()))
        K = int(f.readline().strip())
        cipher = M, N, matrix, K
        # solve
        s = "%s\n" % (Solution().solve(cipher))
        print s,
| {
"repo_name": "algorhythms/HackerRankAlgorithms",
"path": "Count Luck.py",
"copies": "1",
"size": "3238",
"license": "apache-2.0",
"hash": 7425214293827431000,
"line_mean": 28.4363636364,
"line_max": 77,
"alpha_frac": 0.4107473749,
"autogenerated": false,
"ratio": 3.778296382730455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9687067473045474,
"avg_score": 0.0003952569169960474,
"num_lines": 110
} |
__author__ = 'Danyang'
class Solution:
    def addBinary_builtin(self, a, b):
        """
        Built-in function: parse both strings base 2, add, format back.
        :param a: binary string
        :param b: binary string
        :return: binary string of a+b
        """
        a = int(a, 2)
        b = int(b, 2)
        return bin(a+b)[2:]
    def addBinary(self, a, b):
        """
        Manual digit-by-digit addition with carry propagation
        (previous docstring said "Built-in function" - copy/paste slip).
        :param a: binary string
        :param b: binary string
        :return: binary string of a+b
        """
        # make a the shorter operand; b accumulates the result in place
        if len(a)>len(b):
            a, b = b, a
        a, b = list(a), list(b)
        # from LSB to MSB
        a.reverse()
        b.reverse()
        # b as the base number
        for i in xrange(len(a)):
            if a[i]=="0": # 0 contributes nothing at this position
                continue
            elif b[i]=="0": # 0+1 -> 1, no carry
                b[i] = "1"
                continue
            else: # 1+1 -> 0 with carry
                b[i] = "0"
                # carry forward
                if i==len(b)-1:
                    b.append("1")
                else:
                    # ripple the carry until a 0 absorbs it
                    for j in range(i+1, len(b)):
                        if b[j]=="0":
                            b[j] = "1"
                            break
                        else:
                            b[j] = "0" # carry forward
                            if j==len(b)-1:
                                b.append("1")
                                break
        b.reverse()
        return "".join(b) # reversed back
if __name__=="__main__":
    # 3 + 1 -> "100"
    print Solution().addBinary("11", "1")
| {
"repo_name": "ee08b397/LeetCode-4",
"path": "067 Add Binary.py",
"copies": "3",
"size": "1525",
"license": "mit",
"hash": 7767312968250361000,
"line_mean": 24.2931034483,
"line_max": 55,
"alpha_frac": 0.3226229508,
"autogenerated": false,
"ratio": 4.066666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5889289617466666,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
class Solution:
    def evalRPN(self, tokens):
        """Evaluate an expression given in Reverse Polish Notation.

        Classic operand-stack evaluation - the same scheme bytecode
        interpreters and compiler back ends use.

        :param tokens: list of operand/operator strings
        :return: integer value of the expression
        """
        # Division must truncate toward zero; Python floors instead
        # (6 / -132 would give -1), hence the float round-trip.
        dispatch = {
            "+": lambda x, y: x + y,
            "-": lambda x, y: x - y,
            "*": lambda x, y: x * y,
            "/": lambda x, y: int(float(x) / y),
        }
        stack = []
        for token in tokens:
            if token in dispatch:
                rhs = stack.pop()
                lhs = stack.pop()
                stack.append(dispatch[token](lhs, rhs))
            else:
                stack.append(int(token))
        return stack.pop()
if __name__ == "__main__":
    # smoke-run on the classic LeetCode sample expression
    sample = ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
    Solution().evalRPN(sample)
"repo_name": "ee08b397/LeetCode-4",
"path": "150 Evaluate Reverse Polish Notation.py",
"copies": "2",
"size": "1467",
"license": "mit",
"hash": -5044113127129926000,
"line_mean": 27.38,
"line_max": 93,
"alpha_frac": 0.3810497614,
"autogenerated": false,
"ratio": 4.086350974930362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467400736330361,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
class Solution:
    def solve(self, cipher, lst):
        """
        dp
        Interval DP: dp[i][j] appears to be the minimum number of pieces
        needed for the slice lst[i..j], with 0 marking a slice that can be
        removed as a whole. NOTE(review): semantics inferred from structure;
        confirm against the contest statement.
        :param cipher: (N, K) as strings; N is overridden by len(lst) below
        :param lst: list of ints
        """
        N, K= cipher
        N = int(N)
        K = int(K)
        N = len(lst)  # deliberately shadows the N parsed from cipher
        # 1<<32 acts as +infinity
        dp = [[1<<32 for _ in xrange(N+1)] for _ in xrange(N+1)]
        for i in xrange(N):
            dp[i][i] = 1  # single element costs one piece
            for j in xrange(i+1, N):
                dp[j][i] = 0  # empty interval (start past end) costs nothing
            dp[i][i+1] = 2
        for w in xrange(2, N+1): # breadth (interval width)
            for i in xrange(0, N-w):
                j = i+w
                # slice whose endpoints differ by 2K may vanish entirely if a
                # middle element sits exactly halfway and both gaps vanish
                if lst[j]-lst[i]==2*K:
                    for p in xrange(i+1, j):
                        if 2*lst[p]==lst[i]+lst[j] and dp[i+1][p-1]==0 and dp[p+1][j-1]==0:
                            dp[i][j] = 0
                # otherwise take the best split point within [i, j)
                if dp[i][j]!=0:
                    dp[i][j] = min(dp[i][j], min(dp[i][p]+dp[p+1][j] for p in xrange(i, j)))
        return dp[0][N-1]
if __name__=="__main__":
    # batch driver: read cases from 2.in, echo results and write out.out
    f = open("2.in", "r")
    o = open("out.out", "w")
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        cipher = f.readline().strip().split(' ')
        lst = [int(item) for item in f.readline().strip().split(' ')]
        # solve
        s = "Case #%d: %s\n"%(t+1, Solution().solve(cipher, lst))
        print s,
        o.write(s)
| {
"repo_name": "algorhythms/GoogleApacCodeJamRoundB",
"path": "C/main2.py",
"copies": "2",
"size": "1350",
"license": "mit",
"hash": -8708761391686430000,
"line_mean": 25.5510204082,
"line_max": 92,
"alpha_frac": 0.382962963,
"autogenerated": false,
"ratio": 3.0821917808219177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44651547438219175,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
class Solution:
    # NOTE(review): class attribute - shared across instances until an
    # instance assignment in dfs() shadows it
    smallest = 1<<32
    def dfs(self, seq, K):
        """Exhaustive search over triple removals (not used by solve())."""
        if self.smallest==0:
            return  # cannot beat removing everything
        length = len(seq)
        self.smallest = min(self.smallest, length)
        if length<3:
            return
        # remove any arithmetic triple with common difference K, recurse
        for i in xrange(length-2):
            if seq[i+2]-seq[i+1]==K and seq[i+1]-seq[i]==K:
                self.dfs(seq[:i]+seq[i+3:], K)
    def solve(self, cipher, lst):
        """
        K = 0, greedy
        Repeatedly delete the leftmost arithmetic triple with common
        difference K until none remains; return the residual length.
        :param cipher: (N, K) as strings
        :param lst: list of ints
        """
        N, K= cipher
        N = int(N)
        K = int(K)
        while True:
            flag = False  # did this pass remove a triple?
            for i in xrange(len(lst)-2):
                if lst[i+2]-lst[i+1]==K and lst[i+1]-lst[i]==K:
                    lst = lst[:i]+lst[i+3:]
                    flag = True
                    break
            if flag:
                continue
            else:
                break
        return len(lst)
if __name__=="__main__":
    # batch driver: read cases from 1.in, echo results and write out.out
    f = open("1.in", "r")
    o = open("out.out", "w")
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        cipher = f.readline().strip().split(' ')
        lst = [int(item) for item in f.readline().strip().split(' ')]
        # solve
        s = "Case #%d: %s\n"%(t+1, Solution().solve(cipher, lst))
        print s,
        o.write(s)
| {
"repo_name": "ee08b397/GoogleApacCodeJamRoundB",
"path": "C/main.py",
"copies": "2",
"size": "1389",
"license": "mit",
"hash": 6678039202124685000,
"line_mean": 21.15,
"line_max": 69,
"alpha_frac": 0.4103671706,
"autogenerated": false,
"ratio": 3.5891472868217056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49995144574217054,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
# Definition for singly-linked list.
class ListNode:
    """Node of a singly-linked list."""
    def __init__(self, x):
        """Create a detached node holding payload *x* (next is None)."""
        self.val, self.next = x, None
class Solution:
    # ascending
    def insertionSortList_TLE(self, head):
        """
        Time Limit Exceeded version: scans for the insertion point from the
        head for EVERY node, even when the node is already in order.
        Kept for comparison with insertionSortList below.
        """
        # cmp is Python 2 only
        comparator = lambda x, y: cmp(x.val, y.val)
        # open set & closed set
        dummy_head = ListNode(0)
        dummy_head.next = head
        closed_tail = dummy_head.next
        while(closed_tail and closed_tail.next):
            open_head = closed_tail.next
            # open_head_next = closed_tail.next.next
            # find position
            ptr_before = dummy_head
            ptr = dummy_head.next # error using ptr = head
            # WHILE OUTSIDE IF THUS INCREASING TIME COMPLEXITY
            while(ptr_before):
                if comparator(ptr, open_head)>0:
                    # splice open_head in before ptr
                    ptr_before.next = open_head
                    closed_tail.next = open_head.next
                    open_head.next = ptr
                    # closed_tail.next = open_head_next
                    break
                if ptr==open_head:
                    # reached the node itself: it is already in order
                    closed_tail = closed_tail.next
                    break
                ptr_before = ptr_before.next
                ptr = ptr.next
        return dummy_head.next
    def insertionSortList(self, head):
        """
        O(n^2), but with better performance since while loop inside a if
        [ [closed_lst], [open_lst] ], insert the first item of open_lst into the closed_lst:
        1. compare the first item the last item of the closed_lst
        2. if in order, continue to next
        3. if not, find the insertion point and insert
        :param head: ListNode
        :return: ListNode
        """
        # cmp is Python 2 only
        comparator = lambda x, y: cmp(x.val, y.val)
        # open set & closed set
        # iterate through all the nodes
        dummy = ListNode(0) # Singly-linked list, thus need dummy_head
        dummy.next = head
        closed_tail = head
        while (closed_tail and closed_tail.next):
            open_head = closed_tail.next
            open_head_next = closed_tail.next.next
            if not comparator(closed_tail, open_head)<=0: # Step 1: only compare the closed set tail and open set head
                pre = dummy
                while comparator(pre.next, open_head)<0: # Step 2: find position
                    pre = pre.next
                # swap nodes
                open_head.next = pre.next
                pre.next = open_head
                closed_tail.next = open_head_next
            else:
                closed_tail = closed_tail.next
        return dummy.next
if __name__=="__main__":
    # ad-hoc check: sort 1000 distinct random values and print them;
    # output should be strictly ascending
    import random
    lst = [ListNode(i) for i in random.sample(xrange(-1000, 1000), 1000)]
    # lst = [ListNode(1), ListNode(3), ListNode(2)]
    # lst = [ListNode(i) for i in range(10, -1, -1)]
    # chain the nodes into a singly-linked list
    for i in range(len(lst)):
        try:
            lst[i].next = lst[i+1]
        except IndexError: # last
            lst[i].next = None
    head = Solution().insertionSortList(lst[0])
    current = head
    for i in range(len(lst)):
        print current.val
        current = current.next
| {
"repo_name": "ee08b397/LeetCode-4",
"path": "147 Insertion Sort List.py",
"copies": "3",
"size": "3323",
"license": "mit",
"hash": 7524987850643847000,
"line_mean": 28.2090909091,
"line_max": 119,
"alpha_frac": 0.5167017755,
"autogenerated": false,
"ratio": 4.102469135802469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.611917091130247,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
def sum_sum(n):
    """Return the sum of the first n triangular numbers: n*(n+1)*(n+2)/6.

    Equivalent to the original (n**3 + 3*n**2 + 2*n) / 6, but uses floor
    division on the factored product: the product is always divisible by 6,
    so the result is exact, and it stays an int on Python 3 (where plain /
    would return a float).

    :param n: non-negative integer
    :return: integer sum 1 + 3 + 6 + ... + T(n)
    """
    return n * (n + 1) * (n + 2) // 6
class Solution:
    def solve(self, cipher):
        """
        Large Problem Set Not solved
        Champagne-pyramid style problem: find the lowest level that the pour
        reaches, then classify glass N on level L as corner / border / inner.
        NOTE(review): geometry constants inferred from structure - confirm
        against the contest statement.
        :param cipher: (B, L, N) as strings
        :return: millilitres in the queried glass (float or int)
        """
        B, L, N= cipher
        B = int(B)
        L = int(L)
        N = int(N)
        lowest_level = 0
        # 1 is 250, 3 is 750 ml
        n = 1
        while True:
            temp = sum_sum(n)    # NOTE(review): unused debug leftover
            temp2 = 3*B          # NOTE(review): unused debug leftover
            # first n+1 whose cumulative capacity covers the poured volume
            if sum_sum(n)<3*B<=sum_sum(n+1):
                lowest_level = n+1
                break
            n += 1
        if L<lowest_level:
            return 250  # levels above the lowest reached are full
        if L>lowest_level:
            return 0    # levels below it stay empty
        else:
            # partially filled level: split the remainder by glass position
            remain = 3*B - sum_sum(lowest_level-1)
            l_1 = 3
            l_2 = (lowest_level-2)*3
            # NOTE(review): Python 2 integer division here; on Python 3 this
            # would yield a float
            l_3 = (lowest_level*(lowest_level+1)/2)-l_1-l_2
            eff = float(remain)/(l_1+2*l_2+3*l_3)
            eff *= 250.0
            # cor3/cor2: corner glass indices on this triangular level
            cor3 = sum(range(1, lowest_level+1))
            cor2 = cor3 - lowest_level+1
            if N in (1, cor2, cor3):
                return eff
            # border: edge glasses between the corners
            border = []
            border += [sum(range(1, i+1)) for i in range(2, lowest_level)]
            border += [sum(range(1, i+1))-i-1 for i in range(2, lowest_level)]
            border += range(cor2+1, cor3)
            if N in border:
                return 2*eff
            return 3*eff  # interior glass
if __name__=="__main__":
    # batch driver: read cases from 0.in, print with 6 decimal places
    f = open("0.in", "r")
    o = open("out.out", "w")
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        cipher = f.readline().strip().split(' ')
        # solve
        s = "Case #%d: %.6f\n"%(t+1, Solution().solve(cipher))
        print s,
        o.write(s)
| {
"repo_name": "algorhythms/GoogleApacCodeJamRoundB",
"path": "B/main.py",
"copies": "2",
"size": "1773",
"license": "mit",
"hash": -2764430957644004000,
"line_mean": 21.9594594595,
"line_max": 78,
"alpha_frac": 0.4139875917,
"autogenerated": false,
"ratio": 3.3389830508474576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4752970642547458,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
import logging
import sys
class Solution(object):
    @property
    def logger(self):
        """Lazily configured logger writing to stdout.

        The level is CRITICAL, so the .debug() traces below are normally
        silent; lower it to watch the search.
        """
        lgr = logging.getLogger(__name__)
        lgr.setLevel(logging.CRITICAL)
        # attach the handler only once, even if the property is read repeatedly
        if not lgr.handlers:
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
            lgr.addHandler(ch)
        return lgr
    def solve(self, cipher):
        """
        bfs
        attention to the usage of unvisited array
        BFS from 'M' to '*' recording parent links (pi), reconstruct the
        path, then count its decision points (cells with more than one
        onward choice) and compare against K.
        :param cipher: (M, N, matrix, K) - grid size, char grid, expected count
        :return: "Impressed" if the count equals K, else "Oops!"
        """
        dirs = ((1, 0), (-1, 0), (0, 1), (0, -1))
        M, N, matrix, K = cipher
        start = None
        end = None
        # locate the start ('M') and target ('*') cells
        for i in xrange(M):
            for j in xrange(N):
                if matrix[i][j]=="M":
                    start = (i, j)
                elif matrix[i][j]=="*":
                    end = (i, j)
        pi = [[None for _ in xrange(N)] for _ in xrange(M)]  # BFS parent links
        visited = [[False for _ in xrange(N)] for _ in xrange(M)]
        visited[start[0]][start[1]] = True
        q = [start]
        ended = False
        # level-by-level BFS; stops as soon as the target is enqueued
        while q and not ended:
            l = len(q)
            for i in xrange(l):
                cur = q[i]
                for dir in dirs:
                    r = cur[0]+dir[0]
                    c = cur[1]+dir[1]
                    if 0<=r<M and 0<=c<N and not visited[r][c]:
                        visited[r][c] = True
                        # only open cells '.' and the target '*' are walkable
                        if matrix[r][c] in (".", "*"):
                            pi[r][c] = cur
                            q.append((r, c))
                            if matrix[r][c]=="*":
                                ended = True
            q = q[l:]  # drop the processed level
        if not ended:
            return "not found"
        # walk parent links back from target, then flip into start->end order
        path = [end]
        cur = end
        while cur!=start:
            cur = pi[cur[0]][cur[1]]
            path.append(cur)
        path.reverse()
        self.logger.debug(str(path))
        # count decision points: cells with >1 walkable neighbours that are
        # not already behind us on the path (visited is reused for the path)
        cnt = 0
        visited = [[False for _ in xrange(N)] for _ in xrange(M)]
        for cur in path[:-1]:
            dir_cnt = 0
            visited[cur[0]][cur[1]] = True
            for dir in dirs:
                r = cur[0]+dir[0]
                c = cur[1]+dir[1]
                if 0<=r<M and 0<=c<N:
                    if matrix[r][c] in (".", "*") and not visited[r][c]:
                        dir_cnt += 1
            if dir_cnt>1:
                cnt += 1
                self.logger.debug("Wand@"+str(cur))
            if cnt>K:  # early exit: already used the wand too often
                return "Oops!"
        self.logger.debug("cnt: %d, K: %d"%(cnt, K))
        if cnt==K: # exactly K times
            return "Impressed"
        else:
            return "Oops!"
if __name__=="__main__":
    # read test cases from a file; switch to stdin by uncommenting below
    f = open("1.in", "r")
    # f = sys.stdin
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher: grid dimensions, grid rows, expected wand count
        M, N = map(int, f.readline().strip().split(' '))
        matrix = []
        for _ in xrange(M):
            matrix.append(list(f.readline().strip()))
        K = int(f.readline().strip())
        cipher = M, N, matrix, K
        # solve
        s = "%s\n"%(Solution().solve(cipher))
        print s,
"repo_name": "ee08b397/HackerRankAlgorithms",
"path": "Count Luck.py",
"copies": "1",
"size": "3300",
"license": "apache-2.0",
"hash": -5842274116500028000,
"line_mean": 28.0181818182,
"line_max": 77,
"alpha_frac": 0.403030303,
"autogenerated": false,
"ratio": 3.891509433962264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794539736962264,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Danyang'
class Solution(object):
    def evalRPN(self, tokens):
        """Evaluate a Reverse Polish Notation expression with an operand stack.

        The same scheme underlies bytecode interpreters and compiler
        back ends.

        :param tokens: sequence of operand/operator strings
        :return: integer result
        """
        operators = ("+", "-", "*", "/")
        def combine(lhs, rhs, symbol):
            # division truncates toward zero; plain integer division floors
            # (6/-132 would be -1), hence the float round-trip
            if symbol == "+":
                return lhs + rhs
            if symbol == "-":
                return lhs - rhs
            if symbol == "*":
                return lhs * rhs
            return int(float(lhs) / rhs)  # symbol == "/"
        operands = []
        for token in tokens:
            if token in operators:
                rhs = operands.pop()
                lhs = operands.pop()
                operands.append(combine(lhs, rhs, token))
            else:
                operands.append(int(token))
        return operands.pop()
if __name__ == "__main__":
    # regression check on the classic LeetCode sample expression
    tokens = ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
    assert Solution().evalRPN(tokens) == 22
"repo_name": "algorhythms/LeetCode",
"path": "150 Evaluate Reverse Polish Notation.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": -447761745875300800,
"line_mean": 26.2075471698,
"line_max": 106,
"alpha_frac": 0.3837910248,
"autogenerated": false,
"ratio": 4.112947658402204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993822068362639,
"avg_score": 0.0005833229679129477,
"num_lines": 53
} |
__author__ = 'Danyang'
class Solution:
    def generateParenthesisDfs(self, result, cur, left, right):
        """
        DFS
        Catalan Number: enumerates every balanced string in lexicographic
        order with '(' before ')'.
        :param result: result list (output parameter)
        :param cur: currently processing string
        :param left: number of left parenthesis remaining
        :param right: number of right parenthesis remaining
        """
        # trivial: nothing left to place - cur is complete
        if left == 0 and right == 0:
            result.append(cur)
            return
        # add left parenthesis
        if left > 0:
            self.generateParenthesisDfs(result, cur + "(", left - 1, right)
        # add right parenthesis (only while it keeps the prefix balanced)
        if right > left:
            self.generateParenthesisDfs(result, cur + ")", left, right - 1)
    def catalan(self, n):
        """
        number of unique binary search tree
        Catalan Number
        C_n = {2n\choose n} - {2n\choose n+1}
        Proof: http://en.wikipedia.org/wiki/Catalan_number#Second_proof
        Uses floor division: both quotients are exact integers, so // matches
        the original Python 2 behaviour of / on ints while keeping the result
        an exact int on Python 3 (plain / would return a lossy float).
        :param n: integer
        :return: integer
        """
        return (self.factorial(2 * n) // (self.factorial(n) * self.factorial(n))
                - self.factorial(2 * n) // (self.factorial(n + 1) * self.factorial(n - 1)))
    def factorial(self, n):
        """Return n! computed iteratively (0! == 1)."""
        factorial = 1
        for i in range(n):
            factorial *= i + 1
        return factorial
    def solve_small(self, cipher):
        """
        Small data set: enumerate all sequences, return the k-th (1-based).
        :param cipher: (n, k) as strings
        """
        n, k = cipher
        n = int(n)
        k = int(k)
        result = []
        self.generateParenthesisDfs(result, "", n, n)
        try:
            return result[k - 1]
        except IndexError:
            return "Doesn't Exist!"
    def solve(self, cipher):
        """
        Large data set - unfinished: only detects the out-of-range case and
        otherwise returns None.
        :param cipher: (n, k) as strings
        """
        n, k = cipher
        n = int(n)
        k = int(k)
        if k > self.catalan(n):
            return "Doesn't Exist!"
if __name__=="__main__":
    # batch driver: read cases from 1.in, answer with the brute-force
    # small-set solver, write results to out.out
    f = open("1.in", "r")
    o = open("out.out", "w")
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        cipher = f.readline().strip().split(' ')
        # solve
        s = "Case #%d: %s\n"%(t+1, Solution().solve_small(cipher))
        print s,
        o.write(s)
| {
"repo_name": "algorhythms/GoogleApacCodeJamRoundB",
"path": "D/main.py",
"copies": "2",
"size": "2234",
"license": "mit",
"hash": -847702129362540400,
"line_mean": 21.7659574468,
"line_max": 94,
"alpha_frac": 0.4923903312,
"autogenerated": false,
"ratio": 3.885217391304348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004174568793812843,
"num_lines": 94
} |
# Loads one of the Zemax sample files "Cooke Triplet".
# Performs an exercise from the Short Course, optimising the lens.
# Requires a running Zemax instance; all calls below go through the
# DDE connection and mutate the lens held in Zemax server memory.
from __future__ import print_function
from zemaxclient import Connection
from libzmx import *
import surface
# Establish a connection to the running Zemax application
z = Connection()
# Load a lens file into the Zemax server memory.
# It won't yet appear in the zemax window. For that we need to do z.PushLens()
z.LoadFile("C:\\Program Files\\ZEMAX\\Samples\\Short course\\sc_cooke1.zmx")
# Get a SurfaceSequence instance. This behaves like a list of surfaces
# (the same sequence as viewed in the Zemax lens data editor).
model = SurfaceSequence(z)
# Get a SystemConfig object. This allows us to get/set certain system
# parameters (eg. the stop surface, ray aiming, etc.)
systemconfig = SystemConfig(z)
# Show the number of surfaces in the lens
print("Number of surfaces in model : %d " % len(model))
# Display some information about each surface
print("Surface number, radius, thickness....")
for surf in model:
    curvature = surf.curvature.value
    # radius of curvature is 1/curvature; zero curvature means a flat surface
    if curvature:
        radius = str(1.0/curvature)
    else:
        radius = "Infinity"
    print((surf.get_surf_num(), radius, surf.thickness))
# Add some comments. These will appear in the editor.
model[1].comment = "Front surface"
model[-2].comment = "Back surface"
print("Setting variables...")
# mark curvature and thickness of surfaces 1-6 as optimisation variables
surfaces_to_optimise = range(1, 7)
for i in surfaces_to_optimise:
    surf = model[i]
    surf.curvature.vary()
    surf.thickness.vary()
# Insert an f/# solve on the curvature of surface #6
z.SetSolve(
    6, # surface number
    0, # solve code for curvature
    11, # solve type code for f/#
    3.5 # desired f/#
)
# Let's add an extra constraint. We'll make the curvatures on the
# faces of the central element equal. We can insert a pickup solve
# like this (negated: the two faces mirror each other):
central_front_face = model[3]
central_rear_face = model[4]
central_rear_face.curvature = -central_front_face.curvature.linked()
# Load a merit function from another zemax file
z.LoadMerit("C:\\Program Files\\ZEMAX\\Samples\\Short course\\sc_cooke2.zmx")
# Insert a flat, glass window in front of the lens
model.insert_new(1, surface.Standard, "Window", thickness=1.0, glass="BK7")
model.insert_new(2, surface.Standard, thickness=10.0)
print("Optimising ....")
# Optimize(-1) reports the current merit value; Optimize() runs the optimiser
print("Initial merit func = %g" % z.Optimize(-1))
print("Final merit func = %g" % z.Optimize())
# Push the lens from the Zemax server into the display.
# The option "allow extensions to push lenses" should be enabled in
# Zemax preferences.
z.GetUpdate()
z.PushLens()
| {
"repo_name": "dariussullivan/libzmx",
"path": "examples/cooke_triplet.py",
"copies": "1",
"size": "2760",
"license": "bsd-3-clause",
"hash": -4454346693875920400,
"line_mean": 33.0740740741,
"line_max": 78,
"alpha_frac": 0.7173913043,
"autogenerated": false,
"ratio": 3.1399317406143346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9357323044914334,
"avg_score": 0,
"num_lines": 81
} |
# These unit tests require the Zemax application to be running.
# Run the tests with the command:
# $ python -m libzmx.tests
from __future__ import print_function
import zemaxclient
from zemaxclient import Connection, SurfaceLabelError
from libzmx import (SurfaceSequence, return_to_coordinate_frame,
SystemConfig, make_singlet, NamedElements)
import libzmx
import surface
import unittest
import numpy
import os
import time
import tempfile
from itertools import count
# TODO :
# - test for chief ray solves on coordinate break parameters
class ConnectionTestCase(unittest.TestCase):
    """Smoke test: the running Zemax application must be reachable."""
    def runTest(self):
        conn = Connection()
        self.assertTrue(conn.GetVersion(), "Can't connect")
class CopyLensTestCase(unittest.TestCase):
    """Round-trip a modified lens between server memory and the editor."""
    def runTest(self):
        conn = Connection()
        self.assertFalse(conn.NewLens(), "Can't create new lens")
        # count surfaces before and after inserting one
        before = conn.GetSystem()[0]
        self.assertFalse(conn.InsertSurface(2), "Can't insert a surface")
        after = conn.GetSystem()[0]
        self.assertEqual(after, before + 1,
                         "Number of surfaces didn't increase")
        # push the modified lens to the editor, then wipe server memory
        self.assertFalse(conn.PushLens(), "Can't push lens to editor window")
        self.assertFalse(conn.NewLens(), "Can't create new lens")
        self.assertNotEqual(conn.GetSystem()[0], after,
                            "New lens has same number of surfaces as old "
                            "(modified) one")
        # pull the editor copy back into server memory and verify it
        conn.GetRefresh()
        self.assertEqual(conn.GetSystem()[0], after, "Didn't get our lens back")
class ClientSurfaceLabels(unittest.TestCase):
    """Surface labels can be set, read back, and searched for."""
    def setUp(self):
        self.z = Connection()
        self.n = self.z.GetSystem()[0] + 1
        self.z.NewLens()
        # clear every label so the assertions below start from a known state
        for surf in range(self.n):
            self.z.SetLabel(surf, 0)
    def setlabel(self, surf, label):
        """Assign *label* to *surf* and verify both lookup directions."""
        self.assertNotEqual(self.z.GetLabel(surf), label)
        self.z.SetLabel(surf, label)
        self.assertEqual(self.z.GetLabel(surf), label)
        self.assertEqual(self.z.FindLabel(label), surf, str(label))
    def testAll(self):
        # probe small, medium, and extreme (2**31 - 1) values, both signs
        for exponent in [2, 16, 31]:
            value = 2 ** exponent - 1
            self.setlabel(1, value)
            self.setlabel(1, -value)
    def testSurfaceSequence(self):
        self.setlabel(1, SurfaceSequence.max_surf_id)
class ClientInsertSurface(unittest.TestCase):
    """Check that we understand the semantics of the InsertSurface method
    Insert 0 => No effect
    Insert 1 => Adds surface after object
    Insert n-1 => Adds surface before image
    Insert i (where i>=n) => As Insert n-1
    """
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        # self.model = SurfaceSequence(self.z)
        self.n = self.get_len()
        # every pre-existing surface is labelled original_label so that new
        # surfaces (label 0) can be spotted afterwards
        self.original_label = 1
        self.new_label = 2
        for i in range(self.n):
            self.z.SetLabel(i, self.original_label)
    def get_len(self):
        """Total surface count; GetSystem()[0] appears to exclude the object
        surface, hence the +1 (NOTE(review): confirm against zemaxclient)."""
        response = self.z.GetSystem()
        return response[0]+1
    def mark_new_surfaces(self):
        """Relabel every surface not carrying original_label as new_label.

        NOTE(review): scans only indices 0..n-1, so it relies on inserted
        surfaces landing within the first n positions.
        """
        for i in range(self.n):
            if not self.z.GetLabel(i) == self.original_label:
                self.z.SetLabel(i, self.new_label)
    def testAllLabels(self):
        # sanity: marking with nothing inserted must not disturb any label
        z = self.z
        n = self.n
        for i in range(n):
            self.assertEqual(z.GetLabel(i), self.original_label)
        self.mark_new_surfaces()
        for i in range(n):
            self.assertEqual(z.GetLabel(i), self.original_label)
    def testInsertAtZero(self):
        z = self.z
        n = self.n
        # Insert at zero does nothing
        z.InsertSurface(0)
        self.assertEqual(n, self.get_len())
    def testInsertAtOne(self):
        z = self.z
        n = self.n
        # Insert at 1 adds after object surface
        z.InsertSurface(1)
        self.assertEqual(n+1, self.get_len())
        self.mark_new_surfaces()
        self.assertEqual(z.GetLabel(1), self.new_label)
    def testInsertAtNminus1(self):
        z = self.z
        n = self.n
        # Insert at n-1 adds before image surface
        z.InsertSurface(n-1)
        self.assertEqual(n+1, self.get_len())
        self.mark_new_surfaces()
        # self.assertEqual(model[n-1].comment.value, self.new_label)
        self.assertEqual(z.GetLabel(n-1), self.new_label)
    def testInsertAt100(self):
        z = self.z
        n = self.n
        # Insert at i>>n adds before image surface
        z.InsertSurface(100)
        self.assertEqual(n+1, self.get_len())
        self.mark_new_surfaces()
        self.assertEqual(z.GetLabel(n-1), self.new_label)
class ClientDeleteSurface(unittest.TestCase):
    """Check that we understand the semantics of the DeleteSurface method.
    Delete 0 => No effect
    Delete 1 => Delete surface 1 (first surface after object)
    Delete n-1 => Delete last surface (penultimate surface becomes image)
    Delete i (where i>=n) => As Delete n-1
    """
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        # add two surfaces so there is something non-trivial to delete
        self.z.InsertSurface(1)
        self.z.InsertSurface(1)
        self.n = self.get_len()
        # label each surface with its own index so deletions are traceable
        for i in range(self.n):
            self.z.SetLabel(i, i)
    def get_len(self):
        """Total surface count; GetSystem()[0] appears to exclude the object
        surface, hence the +1 (NOTE(review): confirm against zemaxclient)."""
        response = self.z.GetSystem()
        return response[0]+1
    def ListLabels(self):
        """Debug helper (not a test): print every surface's label."""
        for i in range(self.get_len()):
            print((i, self.z.GetLabel(i)))
    def testFind(self):
        # each index-valued label must be findable at its own index
        for i in range(self.n):
            j = self.z.FindLabel(i)
            self.assertEqual(i, j)
    def testDeleteAtZero(self):
        z = self.z
        n = self.n
        # Delete at zero does nothing
        z.DeleteSurface(0)
        self.assertEqual(n, self.get_len())
    def testDeleteAtOne(self):
        z = self.z
        n = self.n
        # Delete at 1 deletes after object surface
        z.DeleteSurface(1)
        self.assertEqual(n-1, self.get_len())
        # i=1 removed
        self.assertRaises(SurfaceLabelError, lambda: z.FindLabel(1))
    def testDeleteAtEnd(self):
        z = self.z
        n = self.n
        # Delete at n-1 removes image surface
        z.DeleteSurface(n-1)
        self.assertEqual(n-1, self.get_len())
        # i=n-1 removed
        self.assertRaises(SurfaceLabelError, lambda: z.FindLabel(n-1))
    def testDeleteAt100(self):
        z = self.z
        n = self.n
        # Delete at i>>n removes image surface
        z.DeleteSurface(100)
        self.assertEqual(n-1, self.get_len())
        # i=n-1 removed
        self.assertRaises(SurfaceLabelError, lambda: z.FindLabel(n-1))
class SurfaceSequenceManipulate(unittest.TestCase):
    """Check that semantics of SurfaceSequence are close to Python list type.
    """
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        self.model = SurfaceSequence(self.z, empty=True)
        self.model[0].comment.value = "OBJ"
        self.model[-1].comment.value = "IMG"
        # _list is a plain-list shadow of the expected surface comments
        self._list = ["OBJ", "IMG"]
    def verifyIdentical(self):
        """Assert the live model's comments match the shadow list exactly."""
        self.z.GetUpdate()
        self.assertEqual(len(self._list), len(self.model))
        for a, s in zip(self._list, self.model):
            self.assertEqual(a, s.comment.value)
    def testInit(self):
        self.verifyIdentical()
    def testInsert(self):
        # apply the same positive- and negative-index inserts to both sides
        self.model.insert_new(1, surface.Standard, "Inserted 1")
        self._list.insert(1, "Inserted 1")
        self.model.insert_new(-1, surface.Standard, "Inserted -1")
        self._list.insert(-1, "Inserted -1")
        self.verifyIdentical()
    def testDelete(self):
        self.model.insert_new(1, surface.Standard, "Inserted 1")
        self._list.insert(1, "Inserted 1")
        self.model.insert_new(-1, surface.Standard, "Inserted -1")
        self._list.insert(-1, "Inserted -1")
        self.verifyIdentical()
        # del must mirror list semantics for positive and negative indices
        del self.model[1]
        del self._list[1]
        self.verifyIdentical()
        del self.model[-2]
        del self._list[-2]
        self.verifyIdentical()
    def testGetItem(self):
        self.model.insert_new(1, surface.Standard, "Inserted 1")
        self._list.insert(1, "Inserted 1")
        for i in range(len(self.model)):
            self.assertEqual(self.model[i].comment.value, self._list[i])
            self.assertEqual(self.model[-i].comment.value, self._list[-i])
    def testAppendItem(self):
        # Here the behaviour differs. The surface is inserted before
        # the last (image) surface
        self.model.insert_new(1, surface.Standard, "Inserted 1")
        self._list.insert(1, "Inserted 1")
        self.model.append_new(surface.Standard, "Appended")
        self._list.insert(-1, "Appended")
        self.verifyIdentical()
    def testIndexing(self):
        new_surf = self.model.insert_new(1, surface.Grating, "Inserted 1")
        indexed_surf = self.model[1]
        self.assertEqual(new_surf.id, indexed_surf.id)
        # check that model retrieves surface object with correct class
        self.assertEqual(new_surf.__class__, indexed_surf.__class__)
class SetSurfaceAttributes(unittest.TestCase):
    """Each surface attribute round-trips through .value set/get."""
    def runTest(self):
        z = Connection()
        z.NewLens()
        model = SurfaceSequence(z)
        # id = model.insert_surface(1)
        # m1 = surface.Standard(z,id)
        m1 = model.insert_new(1, surface.Toroidal)
        m1.comment.value = "M1"
        self.assertEqual(m1.comment.value, "M1")
        m1.glass.value = "MIRROR"
        self.assertEqual(m1.glass.value, "MIRROR")
        m1.curvature.value = 1
        self.assertAlmostEqual(m1.curvature.value, 1)
        m1.ignored.value = True
        self.assertEqual(m1.ignored.value, True)
        m1.semidia.value = 2
        self.assertAlmostEqual(m1.semidia.value, 2)
        m1.thermal_expansivity.value = .001
        self.assertAlmostEqual(m1.thermal_expansivity.value, .001)
        m1.coating.value = "METAL"
        self.assertEqual(m1.coating.value, "METAL")
        # "extra" type parameters
        m1.num_poly_terms = 11
        self.assertEqual(m1.num_poly_terms.value, 11)
        m1.norm_radius = 123.0
        self.assertAlmostEqual(m1.norm_radius.value, 123.0)
        # unknown attribute names must raise, not silently create parameters
        def access_missing_attr():
            item = m1.this_will_never_ever_exist
            print("Item: " + str(item))
            return item
        self.assertRaises(AttributeError, access_missing_attr)
        def access_missing_attr_linked():
            item = m1.this_will_never_ever_exist_linked
            return item
        self.assertRaises(AttributeError, access_missing_attr_linked)
class SetExtraAttributes(unittest.TestCase):
    """Extra ("aux") parameters may be supplied directly to insert_new."""
    def runTest(self):
        conn = Connection()
        conn.NewLens()
        surfaces = SurfaceSequence(conn)
        # setting the extra parameter on surface creation requires the
        # surface type to be set first, internally
        toroid = surfaces.insert_new(1, surface.Toroidal, num_poly_terms=11)
        self.assertEqual(toroid.num_poly_terms.value, 11)
class SetSurfaceAttributesOnInitialisation(unittest.TestCase):
    """Keyword arguments passed to insert_new become surface attributes."""
    def runTest(self):
        conn = Connection()
        conn.NewLens()
        surfaces = SurfaceSequence(conn)
        mirror = surfaces.insert_new(1, surface.Standard, "M1",
                                     glass="MIRROR", curvature=1,
                                     ignored=True, semidia=2,
                                     thermal_expansivity=.001,
                                     coating="METAL")
        # every constructor keyword must be readable back via .value
        self.assertEqual(mirror.comment.value, "M1")
        self.assertEqual(mirror.glass.value, "MIRROR")
        self.assertAlmostEqual(mirror.curvature.value, 1)
        self.assertEqual(mirror.ignored.value, True)
        self.assertAlmostEqual(mirror.semidia.value, 2)
        self.assertAlmostEqual(mirror.thermal_expansivity.value, .001)
        self.assertEqual(mirror.coating.value, "METAL")
class AccessSurfaceAttributes(unittest.TestCase):
    """Parameter values are reachable via accessors and direct assignment."""
    def runTest(self):
        conn = Connection()
        conn.NewLens()
        surfaces = SurfaceSequence(conn)
        mirror = surfaces.insert_new(1, surface.Standard)
        # explicit accessor pair
        mirror.coating.set_value("METAL")
        self.assertEqual(mirror.coating.value, "METAL")
        self.assertEqual(mirror.coating.get_value(), "METAL")
        # We allow direct access to attributes values (similar to Django Model)
        mirror.coating = "METAL2"
        self.assertEqual(mirror.coating.value, "METAL2")
        self.assertEqual(repr(mirror.coating), repr("METAL2"))
        self.assertEqual(str(mirror.coating), "METAL2")
        # numeric parameters round-trip through repr()/str() too
        width = 3.1
        mirror.thickness = width
        self.assertAlmostEqual(mirror.thickness.value, width)
        self.assertAlmostEqual(float(repr(mirror.thickness)), width)
        self.assertAlmostEqual(float(str(mirror.thickness)), width)
class SetSurfacePickups(unittest.TestCase):
    """Pickup solves built from .linked() expressions track their source."""
    def setUp(self):
        # two toroidal surfaces (m1 fully populated, m2 blank) plus two
        # coordinate breaks (cb1 populated, cb2 blank) to link against
        self.z = Connection()
        self.z.NewLens()
        model = SurfaceSequence(self.z)
        m1 = model.insert_new(1, surface.Toroidal)
        m1.comment.value = "M1"
        m1.glass.value = "MIRROR"
        m1.curvature.value = 1
        m1.conic.value = 0.1
        m1.ignored.value = True
        m1.semidia.value = 2
        m1.thermal_expansivity.value = .001
        m1.coating.value = "METAL"
        m1.thickness.value = 30
        m1.norm_radius = 123.0
        m1.num_poly_terms = 5
        self.m1 = m1
        self.m2 = model.insert_new(-1, surface.Toroidal)
        cb1 = model.insert_new(-1, surface.CoordinateBreak)
        cb1.rotate_x.value = 34
        cb1.rotate_y.value = 42
        cb1.rotate_z.value = 83
        cb1.offset_x.value = 2.63
        cb1.offset_y.value = 753.3
        cb1.thickness.value = 322.3
        self.cb1 = cb1
        self.cb2 = model.insert_new(-1, surface.CoordinateBreak)
    def testIdenticalColRefs(self):
        # pickups between the SAME column of two like surfaces;
        # GetUpdate() is needed after each assignment so Zemax re-evaluates
        z = self.z
        m1 = self.m1
        m2 = self.m2
        m2.semidia.value = 3*m1.semidia.linked()
        z.GetUpdate()
        self.assertAlmostEqual(m2.semidia.value, 3*m1.semidia.value)
        m2.curvature.value = 2*(+m1.curvature.linked())
        z.GetUpdate()
        self.assertAlmostEqual(m2.curvature.value, 2*(+m1.curvature.value))
        m2.thickness.value = 10 - (-m1.thickness.linked()/2.0)
        z.GetUpdate()
        self.assertAlmostEqual(m2.thickness.value,
                               10 - (-m1.thickness.value/2.0))
        m2.glass.value = m1.glass.linked()
        z.GetUpdate()
        self.assertEqual(m1.glass.value, m2.glass.value)
        m2.conic.value = 4*m1.conic.linked()
        z.GetUpdate()
        self.assertAlmostEqual(m2.conic.value, 4*m1.conic.value)
        # "extra" type parameters
        m2.norm_radius = 0.7*m1.norm_radius.linked()
        z.GetUpdate()
        self.assertAlmostEqual(m2.norm_radius.value, 0.7*m1.norm_radius.value)
        m2.num_poly_terms = 2*m1.num_poly_terms.linked()
        z.GetUpdate()
        self.assertEqual(m2.num_poly_terms.value, 2*m1.num_poly_terms.value)
        # check we catch inappropriate pickup expressions
        # (offsets, where only scaling permitted).
        def offset1():
            m2.semidia.value = 1 + m1.semidia.linked()
        self.assertRaises(TypeError, offset1)
        def offset2():
            m2.curvature.value = 1 + m1.curvature.linked()
        self.assertRaises(TypeError, offset2)
    def testOtherColRefs(self):
        # pickups across DIFFERENT columns and surface types
        z = self.z
        m1 = self.m1
        cb1 = self.cb1
        cb2 = self.cb2
        # Check we can dereference SurfaceParameter instances
        cb2.offset_x = 1 - 2*m1.thickness.linked()
        z.GetUpdate()
        self.assertAlmostEqual(cb2.offset_x.value, 1 - 2*m1.thickness.value)
        # Check we can dereference SurfaceAuxParameter instances
        cb2.rotate_x = 1 - 2*cb1.rotate_y.linked()
        z.GetUpdate()
        self.assertAlmostEqual(cb2.rotate_x.value, 1 - 2*cb1.rotate_y.value)
        # Check we can dereference columns from the same surface
        self.assertNotAlmostEqual(cb1.rotate_x.value, cb1.rotate_y.value)
        cb1.rotate_y = cb1.rotate_x.linked()
        z.GetUpdate()
        self.assertAlmostEqual(cb1.rotate_x.value, cb1.rotate_y.value)
class NamedSurfaces(unittest.TestCase):
    """Tests for naming surfaces via tags embedded in the surface comment.

    ``NamedElements`` exposes tagged surfaces as attributes; the tag is
    stored inside the Zemax comment field but hidden from ``comment.value``.
    """
    def testTagging(self):
        z = Connection()
        z.NewLens()
        model = SurfaceSequence(z)
        els = NamedElements(model)
        surf = model.insert_new(1, surface.Standard)
        comment = "first comment"
        surf.comment = comment
        # if no tag set, comment matches "comment" verbatim
        # (_client_get_value returns the raw string stored in Zemax)
        self.assertEqual(comment, surf.comment._client_get_value())
        self.assertEqual(comment, surf.comment.value)
        # Setting the surface as an attribute of a NamedElements instance,
        # causes the name to be stored as a tag on the surface.
        els.m1 = surf
        # value in zemax model now has tag embedded
        self.assertNotEqual(comment, surf.comment._client_get_value())
        # the tag is invisible in the value of comment
        self.assertEqual(comment, surf.comment.value)
        # the tag can be accessed, however
        self.assertEqual("m1", surf.comment.tag)
        # Updating the comment does not alter the tag
        comment = "second comment"
        self.assertNotEqual(comment, surf.comment.value)
        surf.comment = comment
        self.assertEqual(comment, surf.comment.value)
        # the tag is unchanged
        self.assertEqual("m1", surf.comment.tag)
        # we can access the surface as a property of a newly-created
        # NamedElements instance
        els2 = NamedElements(model)
        self.assertEqual(comment, els2.m1.comment.value)
        self.assertEqual(surf.id, els2.m1.id)
        # named surfaces can be discovered in models
        self.assertTrue("m1" in dir(els2))
    def testSaving(self):
        """Tags and comments survive a save/reload round trip."""
        z = Connection()
        z.NewLens()
        model = SurfaceSequence(z)
        # Zemax surface comments must be 32 characters or less, to
        # survive saving and reloading
        surf = model.insert_new(1, surface.Standard)
        id = surf.id
        # 20-char comment + 9-char tag keeps the stored string within
        # CommentParameter.max_len (asserted below)
        comment = "c"*20
        tag = "t"*9
        surf.comment = comment
        surf.comment.tag = tag
        self.assertEqual(len(surf.comment._client_get_value()),
                         libzmx.CommentParameter.max_len)
        # Surface can be retrieved from unsaved model
        els = NamedElements(model)
        self.assertEqual(id, getattr(els, tag).id)
        # Save model
        (fd, modelf) = tempfile.mkstemp(".ZMX")
        z.SaveFile(modelf)
        n = len(model)
        z.NewLens()
        self.assertNotEqual(n, len(model))
        # Reload model
        z.LoadFile(modelf)
        els2 = NamedElements(model)
        # Comment and tag are intact
        s2 = getattr(els2, tag)
        self.assertEqual(id, s2.id)
        self.assertEqual(s2.comment.value, comment)
        os.close(fd)
        os.remove(modelf)
    def testLimitLength(self):
        """Over-long comments and tags are rejected with ValueError."""
        z = Connection()
        z.NewLens()
        model = SurfaceSequence(z)
        # Zemax surface comments must be 32 characters or less, to
        # survive saving and reloading
        surf = model.insert_new(1, surface.Standard)
        def set_comment():
            surf.comment = "z" * (libzmx.CommentParameter.max_len+1)
        self.assertRaises(ValueError, set_comment)
        # a 10-char tag on a 20-char comment exceeds the limit
        # (contrast testSaving, where a 9-char tag fits)
        comment = "c"*20
        tag = "t"*10
        surf.comment = comment
        def set_tag():
            surf.comment.tag = tag
        self.assertRaises(ValueError, set_tag)
class Optimisation(unittest.TestCase):
    """Exercise marking surface parameters as optimisation variables."""
    def runTest(self):
        conn = Connection()
        conn.NewLens()
        seq = SurfaceSequence(conn)
        surf = seq.insert_new(1, surface.Standard)
        # A freshly inserted surface has nothing marked adjustable.
        self.assertEqual(0, len(surf.fix_variables()))
        # Flag two parameters as free variables...
        surf.thickness.vary()
        surf.curvature.vary()
        # ...fix_variables() reports (and clears) exactly those two...
        self.assertEqual(2, len(surf.fix_variables()))
        # ...so a second call finds nothing left to fix.
        self.assertEqual(0, len(surf.fix_variables()))
def build_coord_break_sequence(model):
    """Populate *model* with a fixed mixture of standard surfaces and
    coordinate breaks, appending each one at the end of the sequence.

    Returns a (first, last) pair of surface numbers delimiting the span
    of inserted surfaces (first/last standard surface of the chain).
    """
    at_end = -1  # insertion index meaning "append before the image surface"

    def add_standard(thickness):
        # One plain refracting surface with the given axial thickness.
        surf = model.insert_new(at_end, surface.Standard)
        surf.thickness.value = thickness
        return surf

    def add_break(rx, ry, rz, dx, dy, thickness, rotate_first=False):
        # One coordinate break with the given rotations (degrees) and
        # decentres; optionally applying the rotation before the offset.
        surf = model.insert_new(at_end, surface.CoordinateBreak)
        surf.rotate_x.value = rx
        surf.rotate_y.value = ry
        surf.rotate_z.value = rz
        surf.offset_x.value = dx
        surf.offset_y.value = dy
        surf.thickness.value = thickness
        if rotate_first:
            surf.rotate_before_offset.value = True
        return surf

    model[0].thickness.value = 1
    first = add_standard(5).get_surf_num()
    add_break(34, 42, 83, 2.63, 753.3, 322.3)
    add_break(75, 85, 21, 543.64, 654.32, 543.43, rotate_first=True)
    add_break(34, 65, 84, 543.324, 43.23, 0, rotate_first=True)
    add_standard(-38.21)
    add_break(54, 88, 22, 43.85, 92.84, 0)
    add_break(34, 43, 54, 643.54, 127.3, 23.63)
    add_break(0, 0, 0, 0, 0, 23.63)
    last = add_standard(3.6).get_surf_num()
    return (first, last)
class CoordinateReturn(unittest.TestCase):
    """Tests for restoring a coordinate frame after a chain of breaks.

    Each test builds a sequence of coordinate breaks (via
    build_coord_break_sequence) and then creates a "coordinate return"
    so that the surface after it sits in the same global frame as the
    first surface of the chain.
    """
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        self.model = SurfaceSequence(self.z, empty=True)
        self.first, self.last = build_coord_break_sequence(self.model)
    def testZemaxCoordinateReturn(self):
        """Set the coordinate return via raw Zemax surface-data codes."""
        cb = self.model.append_new(surface.CoordinateBreak)
        return_surf = cb.get_surf_num()
        # code 81: surface to return to; code 80: return mode
        self.z.SetSurfaceData(return_surf, 81, self.first)
        self.z.SetSurfaceData(return_surf, 80, 3) # orientation + offset
        self.z.GetUpdate()
        self.coord_return_common_tests(return_surf)
    def testLibraryCoordinateReturn(self):
        """Set the coordinate return via the library's return_to() API."""
        cb = self.model.append_new(surface.CoordinateBreak)
        cb.return_to(self.model[self.first])
        self.z.GetUpdate()
        self.coord_return_common_tests(cb.get_surf_num())
        # To unset the coordinate return, pass None (has no effect here)
        cb.return_to(None) # unset coordinate return status
    def testFull(self):
        """return_to_coordinate_frame with explicit compensating breaks."""
        return_surf = return_to_coordinate_frame(self.model, self.first,
                                                 self.last)
        self.z.GetUpdate()
        self.coord_return_common_tests(return_surf)
    def testOmitZeroThicknesses(self):
        """Null transforms may be skipped without changing the result."""
        self.z.GetUpdate()
        return_surf = return_to_coordinate_frame(self.model,
                                                 self.first,
                                                 self.last,
                                                 include_null_transforms=False)
        self.z.GetUpdate()
        self.coord_return_common_tests(return_surf)
    def testWithCursor(self):
        """Supply a custom factory that inserts breaks at successive indices."""
        insert_point = self.last
        insertion_point_sequence = count(insert_point+1)
        def factory():
            return self.model.insert_new(next(insertion_point_sequence),
                                         surface.CoordinateBreak)
        self.z.GetUpdate()
        return_surf = return_to_coordinate_frame(self.model,
                                                 self.first,
                                                 self.last,
                                                 include_null_transforms=False,
                                                 factory=factory)
        self.z.GetUpdate()
        self.coord_return_common_tests(return_surf)
    def testWithAppend(self):
        """Supply a custom factory that appends breaks at the end."""
        def factory():
            return self.model.append_new(surface.CoordinateBreak)
        self.z.GetUpdate()
        return_surf = return_to_coordinate_frame(self.model,
                                                 self.first,
                                                 self.last,
                                                 include_null_transforms=False,
                                                 factory=factory)
        self.z.GetUpdate()
        self.coord_return_common_tests(return_surf)
    def coord_return_common_tests(self, return_surf):
        """Shared assertions: the frame after *return_surf* equals the
        frame of the first surface, and genuinely differs from the last."""
        first_rot, first_offset = self.z.GetGlobalMatrix(self.first)
        last_rot, last_offset = self.z.GetGlobalMatrix(self.last)
        return_rot, return_offset = self.z.GetGlobalMatrix(return_surf + 1)
        # check coordinate frames are identical
        self.assertAlmostEqual(abs(first_rot - return_rot).max(), 0)
        self.assertAlmostEqual(abs(first_offset - return_offset).max(), 0)
        # check we have finite rotation matrices
        self.assertNotAlmostEqual(abs(first_rot).max(), 0)
        # check that first and last frames differ
        self.assertNotAlmostEqual(abs(first_rot - last_rot).max(), 0)
        self.assertNotAlmostEqual(abs(first_offset - last_offset).max(), 0)
class ChangeGlobalReferenceSurface(unittest.TestCase):
    """Walk the global-reference flag along every surface in a model."""
    def runTest(self):
        conn = Connection()
        conn.NewLens()
        seq = SurfaceSequence(conn, empty=True)
        build_coord_break_sequence(seq)
        previous = None
        for current in seq:
            # Promote this surface to be the global reference frame.
            current.make_global_reference()
            self.assertTrue(current.is_global_reference)
            # Promotion must have demoted the previous holder.
            if previous is not None:
                self.assertFalse(previous.is_global_reference)
            # The reference surface sits at the origin with an
            # identity rotation in global coordinates.
            rot, offset = conn.GetGlobalMatrix(current.get_surf_num())
            self.assertAlmostEqual(abs(rot - numpy.eye(3)).max(), 0)
            self.assertAlmostEqual(abs(offset).max(), 0)
            previous = current
class ConfigureSystemParameters(unittest.TestCase):
    """Round-trip tests for SystemConfig's system-level parameters.

    The commented list below records the SystemParameter codes the
    SystemConfig class exposes (kept for reference).
    """
    # numsurfs = SystemParameter(0, int)
    # unitcode = SystemParameter(1, int)
    # stopsurf = SystemParameter(2, int)
    # nonaxialflag = SystemParameter(3, bool)
    # rayaimingtype = SystemParameter(4, int)
    # adjustindex = SystemParameter(5, bool)
    # temperature = SystemParameter(6, float)
    # pressure = SystemParameter(7, float)
    # globalrefsurf = SystemParameter(8, float)
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        self.system = SystemConfig(self.z)
        self.model = SurfaceSequence(self.z)
    def testSurfaceNumbers(self):
        # a new lens has two surfaces; appending one bumps the count
        n = self.system.numsurfs
        self.assertEqual(n, 2)
        self.model.append_new(surface.Standard)
        self.assertEqual(n+1, self.system.numsurfs)
    def testGlobalReferenceSurface(self):
        newglref = 2
        oldglref = self.system.globalrefsurf
        self.assertNotEqual(newglref, oldglref)
        self.system.globalrefsurf = newglref
        # self.z.GetUpdate()
        self.assertEqual(self.system.globalrefsurf, newglref)
    def testStopSurface(self):
        newstop = 2
        oldstop = self.system.stopsurf
        self.assertNotEqual(newstop, oldstop)
        self.system.stopsurf = newstop
        self.assertEqual(self.system.stopsurf, newstop)
    def testAdjustIndex(self):
        # boolean parameter: toggle and read back
        old = self.system.adjustindex
        new = not old
        self.system.adjustindex = new
        self.assertEqual(self.system.adjustindex, new)
    def testTemperature(self):
        new = -40.0
        old = self.system.temperature
        self.assertNotAlmostEqual(new, old)
        self.system.temperature = new
        self.assertAlmostEqual(new, self.system.temperature)
    def testPressure(self):
        new = 1.1
        old = self.system.pressure
        self.assertNotAlmostEqual(new, old)
        self.system.pressure = new
        self.assertAlmostEqual(new, self.system.pressure)
    def testRayAiming(self):
        new = 1
        old = self.system.rayaimingtype
        self.assertNotEqual(new, old)
        self.system.rayaimingtype = new
        self.assertEqual(self.system.rayaimingtype, new)
class RayCoordinates(unittest.TestCase):
    """Ray-tracing tests on a folded singlet system.

    setUp builds: object plane, a fold mirror tilted by a coordinate
    break, a chief-ray-aligned break, and a BK7 singlet, focused via a
    marginal-ray solve on the back surface.
    """
    # pupil y-coordinate used for the marginal-ray focus solve
    marginal_ray_solve_pupil_coordinate = 0.7
    tracing_accuracy = 4 # expected accuracy in decimal places
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
        self.model = SurfaceSequence(self.z)
        self.system = SystemConfig(self.z)
        self.system.rayaimingtype = 0
        self.model[0].thickness = 10.0
        # insert fold mirror
        self.model.append_new(surface.CoordinateBreak, rotate_y=40.0,
                              rotate_z=10.0)
        self.model.append_new(surface.Standard, glass="MIRROR")
        cb = self.model.append_new(surface.CoordinateBreak, thickness=-20.0)
        # align the following surfaces with the reflected chief ray
        cb.rotate_x.align_to_chief_ray()
        cb.rotate_y.align_to_chief_ray()
        # biconvex singlet (back curvature picked up from the front)
        front = self.model.append_new(surface.Standard,
                                      curvature=-0.05, glass="BK7",
                                      thickness=-1.0)
        back = self.model.append_new(surface.Standard,
                                     curvature=-front.curvature.linked())
        self.z.SetSystemAper(3, front.get_surf_num(), 2.5)
        back.thickness.focus_on_next(self.marginal_ray_solve_pupil_coordinate)
        self.z.GetUpdate()
    def testFocus(self):
        """The marginal ray used by the focus solve meets the chief ray
        at the image plane."""
        image = self.model[-1]
        chief = image.get_ray_intersect()
        marginal = image.get_ray_intersect(
            (0, 0), (0, self.marginal_ray_solve_pupil_coordinate), 0)
        self.assertAlmostEqual(abs(marginal.intersect - chief.intersect).max(),
                               0.0)
    def testDirectTracing(self):
        """Verify that we can launch rays using normalised pupil coordinates and local
        surface cartesian coordinates, with consistent results."""
        pc = (0.3, 0.5) # normalised pupil coordinate under test
        image = self.model[-1]
        # find ray intersection on image plane
        (status, vigcode, im_intersect, im_exit_cosines, normal,
         intensity) = image.get_ray_intersect((0, 0), pc)
        for surf in self.model:
            # get ray intersection on surface
            (status, vigcode, surf_intersect, exit_cosines, normal,
             intensity) = surf.get_ray_intersect((0, 0), pc)
            # Launch ray directly using the obtained origin and exit cosines.
            # GetTraceDirect launches a ray from startsurf coordinate
            # frame, but the ray does not interact with startsurf.
            (status, vigcode, intersect, cosines, normal,
             intensity) = self.z.GetTraceDirect(0, 0,
                                                surf.get_surf_num(),
                                                image.get_surf_num(),
                                                surf_intersect,
                                                exit_cosines)
            # verify that the ray is the same as obtained with
            # normalised pupil coordinates on the image plane
            self.assertAlmostEqual(abs(intersect - im_intersect).max(), 0.0,
                                   self.tracing_accuracy)
            self.assertAlmostEqual(abs(cosines - im_exit_cosines).max(), 0.0,
                                   self.tracing_accuracy)
            # sanity check: intermediate surfaces intersect the ray at a
            # genuinely different point than the image plane does
            if surf.id != image.id:
                self.assertNotAlmostEqual(
                    abs(surf_intersect - im_intersect).max(),
                    0.0, self.tracing_accuracy)
    def testMatrixCoordinateTransforms(self):
        """Check we can acquire and use global transformation matrices.
        Make each surface the coordinate global reference in turn.
        For each iteration check we can recover the original global
        reference of each surface by applying the inverse of the new
        global reference."""
        def trans_mat(rotation, offset):
            # pack a 3x3 rotation and 3-offset into a 4x4 homogeneous matrix
            m = numpy.zeros((4, 4), float)
            m[0:3, 0:3] = rotation
            m[0:3, 3] = offset
            m[3, 3] = 1.0
            return numpy.matrix(m)
        surf_ids = range(len(self.model))
        initial_global_ref = self.system.globalrefsurf
        initial_surface_coord_frames = [
            trans_mat(*self.z.GetGlobalMatrix(i)) for i in surf_ids]
        for i in surf_ids:
            if isinstance(self.model[i], surface.CoordinateBreak):
                # coordinate breaks as global reference surfaces give
                # unexpected results
                continue
            self.system.globalrefsurf = i
            # find inverse transformation
            trans = initial_surface_coord_frames[i].I
            self.z.GetUpdate()
            for j in surf_ids:
                new_frame = trans_mat(*self.z.GetGlobalMatrix(j))
                # calc_frame = numpy.dot(trans,
                #    initial_surface_coord_frames[j])
                calc_frame = trans * initial_surface_coord_frames[j]
                self.assertAlmostEqual(abs(new_frame - calc_frame).max(), 0)
    def testCheckRayTraceResults(self):
        """Cross-check traced ray coordinates against merit-function
        operands (REAX/REAY/REAZ local, RAGX/RAGY/RAGZ global)."""
        pc = (0.3, 0.5) # normalised pupil coordinate under test
        for surf in self.model:
            n = surf.get_surf_num()
            # get ray intersection on surface in local coordinates
            ray = surf.get_ray_intersect((0, 0), pc)
            # compare with values obtained from operands
            for val, op in zip(ray.intersect, ("REAX", "REAY", "REAZ")):
                opval = self.z.OperandValue(op, n, 0, 0.0, 0.0, pc[0], pc[1])
                self.assertAlmostEqual(opval, val, places=5)
            # get ray intersection on surface in global coordinates
            glray = surf.get_ray_intersect((0, 0), pc, _global=True)
            # compare with direct global coordinates from operands
            for val, op in zip(glray.intersect, ("RAGX", "RAGY", "RAGZ")):
                opval = self.z.OperandValue(op, n, 0, 0.0, 0.0, pc[0], pc[1])
                self.assertAlmostEqual(opval, val, self.tracing_accuracy)
class Temporaryfile(unittest.TestCase):
    """Tests for zemaxclient.tmpfile_callback's temporary-file cleanup."""
    def testOpenWithError(self):
        """The temporary file is removed even if the callback raises."""
        def open_tmpfile_with_error():
            def fails(path):
                # remember the path so cleanup can be asserted below
                self.path = path
                assert(False)
            with zemaxclient.tmpfile_callback(fails) as (response, f, path):
                # never get here
                self.assertTrue(False)
        # if temporary file cannot be removed, this will raise
        # WindowsError instead
        self.assertRaises(AssertionError, open_tmpfile_with_error)
        # check that file was removed despite the AssertionError
        self.assertFalse(os.path.exists(self.path))
    def testRemovedWhenNoError(self):
        """The file exists inside the context and is gone afterwards."""
        with zemaxclient.tmpfile_callback(len) as (response, f, path):
            self.assertTrue(os.path.exists(path))
        self.assertFalse(os.path.exists(path))
    def testPrematureRemove(self):
        """Deleting the file inside the callback makes cleanup fail."""
        z = Connection()
        def open_tmpfile_and_delete():
            # NOTE(review): the other tests unpack a 3-tuple
            # (response, f, path); confirm the 2-tuple here is intentional
            # and not masking the expected WindowsError with a ValueError.
            with zemaxclient.tmpfile_callback(os.remove) as (response, f):
                pass
        # if temporary file cannot be removed, this will raise WindowsError
        self.assertRaises(WindowsError, open_tmpfile_and_delete)
        del z
class ZemaxTextOutput(unittest.TestCase):
    """Tests for retrieving Zemax text reports as strings/file objects."""
    def setUp(self):
        self.z = Connection()
        self.z.NewLens()
    def testSpt(self):
        # "Spt" = spot diagram text report
        text = self.z.GetTextFileString("Spt")
        nlines = len(text.splitlines())
        # simply check we received a number of lines
        self.assertTrue(nlines >= 23, "Received %d lines" % nlines)
    def testPre(self):
        # "Pre" = prescription report; check its known first line
        text = self.z.GetTextFileString("Pre")
        first = text.splitlines()[0]
        self.assertEqual(u"System/Prescription Data", unicode(first))
    def testContext(self):
        # the file-object variant supports the context-manager protocol
        with self.z.GetTextFileObject("Pre") as f:
            first = next(f).strip()
            self.assertEqual(u"System/Prescription Data", first)
class ExportModelToCAD(unittest.TestCase):
    """Tests for exporting a lens model to an IGES CAD file."""
    def setUp(self):
        self.z = Connection()
        self.model = SurfaceSequence(self.z, empty=True)
        make_singlet(self.z)
    def testLength(self):
        # make_singlet should leave a 4-surface model
        self.assertEqual(len(self.model), 4)
    def testExport(self):
        (fd, resultsf) = tempfile.mkstemp(".IGS")
        response = self.z.ExportCAD(resultsf, 0)
        self.assertEqual(response.split()[0], "Exporting")
        # the export runs asynchronously; poll until it completes
        while self.z.ExportCheck():
            time.sleep(0.2)
        # check exported file is not empty
        sz = os.stat(resultsf).st_size
        self.assertNotEqual(sz, 0)
        os.close(fd)
        os.remove(resultsf)
if __name__ == "__main__":
    # The tests drive a live Zemax instance; it must already be running
    # and in sequential mode or they will fail.
    print("Please ensure Zemax is in sequential mode before running the "
          "unit tests")
    unittest.main()
| {
"repo_name": "dariussullivan/libzmx",
"path": "tests.py",
"copies": "1",
"size": "38825",
"license": "bsd-3-clause",
"hash": 7288405261197799000,
"line_mean": 32.6730268864,
"line_max": 86,
"alpha_frac": 0.60128783,
"autogenerated": false,
"ratio": 3.711759082217973,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4813046912217973,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dark00ps'
# Tutorial script demonstrating user input, int/float conversion and eval().
# USER INPUT
# The simplest use of the input function assigns a string to a variable.
print('Please enter your first name::::\n', end='')
inputFirstName = input()
print('\nAnd your last name::::\n')
inputLastName = input()
print('\nMy full name is::::', inputFirstName, inputLastName, '\n')
# The input function only produces strings.
# One can use the int function to convert a properly formed string of digits into an integer
print('Please enter a number:::::\n')
inputNumber = input()
# print('The number entered is', inputNumber, '\n')
myNumber = int(inputNumber)
print('The user input string', inputNumber, 'is now an integer as can be confirmed by the type here:', type(myNumber), '\n')
# Calculations can also be done based on user input.
totalCount = myNumber * myNumber
print('The number entered multiplied by itself =', totalCount, '\n')
# You can also combine input and int functions into one statement(technique known as functional composition).
xxxx = int(input('\nPlease enter a string that will be turned into type int::::\n'))
print('\nThe int value of xxxx is:', xxxx, '\n')
# EVAL FUNCTION
# Input from a user produces a string.
# That input can be treated as a number.
myNumber = int(input('\nPlease enter a number to be converted::::\n'))
print('The user input has been converted into a number with the value of', myNumber, 'of type', type(myNumber), '\n')
# What if we wish evalX1 to be of type integer when the user enters 2 and evalX1 to be a floating point if the user
# enters 2.0
# The eval function can attempts to evaluate the string the same way that an interactive shell would evaluate it.
# WARNING: eval() executes arbitrary Python expressions. Fine for a
# tutorial at an interactive prompt, but never use it on untrusted input
# (ast.literal_eval is the safe alternative for literals).
evalX1 = eval(input('Enter number you would wish to be evaluated::::\n'))
print('evalX1 = ', evalX1, 'which has type:', type(evalX1))
# Set the type of variable before user input
inputAnyNumber = float(input('\nPlease enter a floated number::::\n'))
print('\nThe number entered is now floated:', inputAnyNumber, '\n')
inputAnyNumber = int(input('\nPlease enter an int::::\n'))
print('\nThe number entered is now an int:', inputAnyNumber, '\n')
# Evaluate 2 Inputs from a user
# (eval of "a, b" yields a tuple, which is unpacked here -- same warning applies)
num1, num2 = eval(input('Please enter number 1, and a number 2 making use of a comma::::\n'))
print("The total for number 1 and number 2 is", num1 + num2, '\n')
print(eval(input('Enter anything'))) | {
"repo_name": "panherz/MyPyCode",
"path": "UserInput.py",
"copies": "1",
"size": "2333",
"license": "apache-2.0",
"hash": -1752450161902282500,
"line_mean": 40.6785714286,
"line_max": 124,
"alpha_frac": 0.7183883412,
"autogenerated": false,
"ratio": 3.6003086419753085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818696983175308,
"avg_score": null,
"num_lines": null
} |
__author__ = 'darkoa'
import io, os
from django.core.management.base import BaseCommand
from workflows.models import *
class Command(BaseCommand):
    """
    This command generates TextFlows user documentation. In particular it generates a ReStructuredText file which can be processed with Spyhx and transformed to HTML.
    The "Example usage" field should contain links to workflows on the production TextFlows server, so the script should be run there.
    """
    help = 'This command generates TextFlows documentation. In particular it generates a ReStructuredText file which can be processed with Spyhx and transformed to HTML.'
    # output file handle, opened in handle() and shared by the print_* methods
    f = None
    # RST section-underline characters, one per nesting level (outer first)
    titleUnderlines = '=-~`+\''
    def handle(self, *args, **options):
        """Entry point: walk all non-basic top-level categories and emit
        the RST document to docs/tf_user_doc.rst."""
        self.f = io.open("docs" + os.sep + "tf_user_doc.rst", "w", encoding='utf8')
        categories = Category.objects.all()
        for c0 in categories:
            if not c0.is_basic():
                self.print_categories(c0)
        self.f.close()
    def print_categories(self, c0):
        """Emit one top-level category: up to three nesting levels of
        sub-categories, each followed by its widgets. Progress is echoed
        to stdout with indentation matching the nesting depth."""
        if not c0.parent:
            if not c0.user:
                print " "*2 + c0.name
                self.f.write( unicode( self.print_section_title('Category ' + c0.name, self.titleUnderlines[0]) ) )
                for c1 in c0.children.all():
                    print " "*4 + c1.name
                    self.f.write( unicode( self.print_section_title('Category ' + c1.name, self.titleUnderlines[1]) ) )
                    for c2 in c1.children.all():
                        print " "*6 + c2.name
                        self.f.write( unicode( self.print_section_title('Category ' + c2.name, self.titleUnderlines[2]) ) )
                        for w in c2.widgets.all():
                            print " "*8 + " <W> " + w.name
                            self.print_widget( self.create_widget_dict(w), self.titleUnderlines[3])
                    for w in c1.widgets.all():
                        print " "*6 + " [W] " + w.name
                        self.print_widget( self.create_widget_dict(w), self.titleUnderlines[2])
                for w in c0.widgets.all():
                    print " "*4 + " [W] " + w.name
                    self.print_widget( self.create_widget_dict(w), self.titleUnderlines[1])
    def print_section_title(self, my_title, underline_char):
        """Return an RST section title: the text plus a full-width
        underline of *underline_char*."""
        my_text = my_title + "\n"
        l = len(my_title)
        my_text += (underline_char * l) + "\n"
        return my_text
    def clean_description(self, text_value):
        """Normalise line endings and indent continuation lines so a
        multi-line description stays inside its RST list item."""
        res = text_value
        res = res.replace("\r\n", "\n")
        res = res.replace("\n", "\n ")
        return res
    def get_widget_public_workflows(self, widget):
        """ Gets a list of public workflows where an abstract widget has been used.
        :param widget:
        :return: list
        """
        aw = widget
        res_wf = []
        all_widgets = aw.instances.all()
        for w in all_widgets:
            if w.workflow.public:
                res_wf.append( w.workflow )
        return res_wf
    def get_widget_io(self, widget):
        """Get inputs, parameters and outputs for one widget
        :param widget:
        :return: a tuple ( list of inputs, list of parameters, list of outputs )
        """
        inp, params, out = ([], [], [])
        # Get all inputs and parameters for widget
        inp_list = widget.inputs.all()
        for myinp in inp_list:
            s = {}
            s['text'] = "%s (%s)" % ( myinp.name, self.clean_description(myinp.description) )
            # skip the parenthesised description when it is empty or redundant
            if len( myinp.description.strip() )==0 or myinp.name==myinp.description:
                s['text'] = myinp.name
            arr_opt = myinp.options.all()
            if len(arr_opt) > 0:
                s["possible_values"] = [opt.name for opt in arr_opt]
            if len(myinp.default) > 0:
                s["default_value"] = myinp.default
            # inputs and parameters share the same table; the flag splits them
            if myinp.parameter==False:
                inp.append( s )
            else:
                params.append( s )
        # Get all outputs for widget
        inp_list = widget.outputs.all()
        for myout in inp_list:
            s = {}
            s['text'] = "%s (%s)" % (myout.name, self.clean_description(myout.description) )
            if len(myout.description.strip()) == 0 or myout.name == myout.description:
                s['text'] = myout.name
            out.append( s )
        return (inp, params, out)
    def create_widget_dict(self, my_widget):
        """Collect everything print_widget needs into a plain dict:
        name, action, description, icon path, I/O lists, interactivity
        flags and (if any) a link to a public example workflow.
        Returns {} if anything goes wrong."""
        name = my_widget.name
        act = my_widget.action
        desc = my_widget.description.strip()
        img = my_widget.static_image.strip()
        package_name = my_widget.package
        full_img_path = "workflows" + os.sep + package_name + os.sep + "static" + os.sep + package_name + os.sep + "icons" + os.sep + "widget" + os.sep
        try:
            if len(img)>0 and not( os.path.isfile(full_img_path+img) ):
                print "Missing IMG: " + full_img_path + img
            # fall back to a generic question-mark icon when missing
            if len(img) == 0 or not( os.path.isfile(full_img_path+img) ):
                # print "No IMG set for w: " + name
                full_img_path = 'workflows/static/widget-icons/question-mark.png'
                img = ""
            inp, params, out = self.get_widget_io(my_widget)
            vis = len( my_widget.visualization_view.strip() ) > 0
            res_dict = {'name': name, 'act':act, 'desc':desc, 'img':full_img_path + img, 'inp':inp, 'params':params, 'out':out,
                        'interact':my_widget.interactive, 'vis':vis, 'publ_wf':""}
            publ_wf = self.get_widget_public_workflows(my_widget)
            if len(publ_wf)>0:
                # link only the first public workflow that uses this widget
                res_dict["publ_wf"] = "http://textflows.ijs.si/workflow/%d/" % (publ_wf[0].id)
                res_dict["publ_wf_n"] = publ_wf[0].name
            return res_dict
        except Exception, e:
            # log and skip the widget rather than aborting the whole run
            print "-------Exception for widget: " + my_widget.name
            print str(e)
            return {}
    def print_widget(self, widget_dict, underline_char):
        """Emit the RST section for one widget: title, icon, description,
        then bulleted Input/Parameter/Output lists and an optional
        example-usage link."""
        base_dir = "../"
        # NOTE(review): unlike the other writes, this one is not wrapped
        # in unicode(); with io.open this relies on the title being ASCII.
        self.f.write( self.print_section_title("\n" + 'Widget: ' + widget_dict["name"], underline_char) )
        self.f.write( unicode( ".. image:: " + base_dir + widget_dict['img'] + "\n") )
        self.f.write( unicode( " :width: 50" + "\n" + " :height: 50" + "\n") )
        if len( widget_dict['desc'] ) > 0:
            self.f.write( unicode(widget_dict['desc'] + "\n" + "\n" ) )
        else:
            print " '%s' <<---- missing doc." % (widget_dict["name"])
        for my_inp in widget_dict['inp']:
            self.f.write('* Input: ' + my_inp['text'] + "\n" )
            if my_inp.has_key("possible_values"):
                self.f.write(unicode("\n" + " * Possible values: " + "\n\n" ))
                for val in my_inp["possible_values"]:
                    self.f.write(' * ' + val + "\n" )
        for my_param in widget_dict['params']:
            self.f.write('* Parameter: ' + my_param['text'] + "\n" )
            if my_param.has_key("possible_values"):
                self.f.write(unicode("\n" + " * Possible values: " + "\n\n" ))
                for val in my_param["possible_values"]:
                    self.f.write(' * ' + val + "\n" )
            if my_param.has_key("default_value"):
                self.f.write(unicode("\n" + " * Default value: " + my_param["default_value"] + "\n" ))
        for my_out in widget_dict['out']:
            self.f.write('* Output: ' + my_out['text'] + "\n" )
        # widgets with no declared outputs present results via a popup
        if len( widget_dict['out'] ) == 0:
            if widget_dict['interact']==True:
                self.f.write(unicode('* Outputs: Interactive Popup window which shows widget\'s results and allows manipulation' + "\n" ))
            else:
                if widget_dict['vis']==True:
                    self.f.write(unicode('* Outputs: Popup window which shows widget\'s results' + "\n" ))
                else:
                    print " --- --- --- w: " + widget_dict["name"] + " GreskaNoOuputsNotInteractiveNotVis "
        if len(widget_dict['publ_wf']) > 0:
            # RST hyperlink syntax: `Python <http://www.python.org/>`_
            self.f.write(unicode("* Example usage: `%s <%s>`_\n" % (widget_dict['publ_wf_n'], widget_dict['publ_wf']) ))
        self.f.write( unicode("\n") )
| {
"repo_name": "xflows/textflows",
"path": "workflows/management/commands/generate_tf_user_doc.py",
"copies": "1",
"size": "8277",
"license": "mit",
"hash": 7079680889217982000,
"line_mean": 35.9508928571,
"line_max": 170,
"alpha_frac": 0.5213241513,
"autogenerated": false,
"ratio": 3.601827676240209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559121048603088,
"avg_score": 0.012806155787424429,
"num_lines": 224
} |
__author__ = 'DarkStar1'
import fileinput, glob, os, re, shutil, sys, urllib
from bs4 import BeautifulSoup
def encodeImgSrc(file, encodedFiles):
    """Rewrite every <img> src in the open HTML *file* as an inline
    base64 data URI, looking each filename up in *encodedFiles*
    ({filename: base64 string}).

    The file object must be open for reading and writing: it is parsed,
    rewritten from the start and truncated to the new length.
    (BeautifulSoup with html5lib is used; lxml missed matches in testing.)
    """
    markup = BeautifulSoup(file.read(), "html5lib")
    for image_tag in markup.find_all('img'):
        # Swap the file reference for an embedded data URI.
        image_tag['src'] = base64Prefix + encodedFiles[image_tag['src']]
    file.seek(0)
    file.write(str(markup))
    file.truncate()
# holds a map of the png file and its base64 encoding in the form of {"xx.png":"hsbudbud..."}
encodedFiles = {}
# the string prefix for base64-encoded inline images
base64Prefix = "data:image/png;base64,"
# The pdf file to convert (argv[1]); argv[2] is the target HTML filename
SOURCE_FILE = os.path.abspath(sys.argv[1])
# change to the source file directory since we can't be sure alfresco would
os.chdir(os.path.split(SOURCE_FILE)[0])
# First call poppler's pdftohtml to convert the file from pdf to html.
# WARNING: os.system with an unquoted, user-supplied path is vulnerable to
# shell injection; consider subprocess with an argument list instead.
os.system("pdftohtml -s -c " + SOURCE_FILE )
# The previous call adds a -html to the result of the conversion so we need
# to create an interim file name that takes this into account
INTERIM_FILE = os.path.splitext(SOURCE_FILE)[0]
INTERIM_FILE += "-html.html"
# The string is usually escaped so we need to remove the '\' from the string
INTERIM_FILE = INTERIM_FILE.replace("\ ", " ")
# print("\n\nThe INTERIM FILE name is now: " + INTERIM_FILE+"\n\n")
# Look in the current directory and base64-encode all png files into a map
# keyed by the original src values (expensive: every icon is held in memory)
image_list = glob.glob("*.png")
if len(image_list) > 0:
    for file in image_list:
        with open(file, 'rb') as fh:
            # add to the map (Python 2 str.encode('base64'); strip newlines)
            encodedFiles[file] = fh.read().encode('base64').replace('\n', '')
# Replace each img src in the html file with its base64 data URI
HTMLFILE = open(os.path.abspath(INTERIM_FILE), 'r+')
encodeImgSrc(HTMLFILE, encodedFiles)
HTMLFILE.close()
# For alfresco, rename the interim file to the target filename sys.argv[2]
shutil.move(INTERIM_FILE, sys.argv[2])
encodedFiles.clear() | {
"repo_name": "magenta-aps/htmlthumbnail",
"path": "src/main/resources/alfresco/extension/scripts/python/pdfToHtml.py",
"copies": "1",
"size": "2234",
"license": "apache-2.0",
"hash": -7951289694446587000,
"line_mean": 36.8813559322,
"line_max": 111,
"alpha_frac": 0.7041181737,
"autogenerated": false,
"ratio": 3.344311377245509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9508398818887076,
"avg_score": 0.008006146411686484,
"num_lines": 59
} |
# excel2html: convert a spreadsheet to HTML via soffice and inline every
# generated PNG as a base64 data URI.  Python 2 script (uses urllib.quote).
__author__ = 'DarkStar1'
import fileinput, glob, os, re, shutil, sys, urllib
#holds a map of the png file and it's base64 encoding in the form of {"xx.png":"hsbudbud..."}
encodedFiles = {}
# prefix that turns a base64 payload into an <img src=...> data URI
base64Prefix = "data:image/png;base64,"
#The excel file to convert
SOURCE_FILE = os.path.basename(sys.argv[2])
# change to the source file directory since we can't be sure alfresco would
os.chdir(os.path.split(sys.argv[2])[0])
def isPresentInfile(key, file):
    """Return True if the regular expression *key* matches anywhere in
    the contents of *file*.

    NOTE(review): *key* is treated as a regex, not a literal substring --
    callers pass urllib-quoted filenames, in which '.' matches any
    character.  Confirm this looseness is intended before tightening.
    """
    # Bind the handle to a fresh name instead of rebinding the 'file'
    # parameter (the original shadowed it with the with-target).
    with open(file, 'r') as fh:
        return re.search(key, fh.read()) is not None
def replaceAll(file, searchExp, replaceExp):
    """Replace every occurrence of searchExp with replaceExp inside file,
    editing it in place (a '.bak' backup of the original is kept)."""
    # Guard clause: skip the rewrite entirely when the pattern is absent.
    if not isPresentInfile(searchExp, file):
        print (searchExp + ": not found in " + file + ". <=")
        return
    # fileinput with inplace=1 redirects stdout into the file, so every
    # line must be written back, changed or not.
    for line in fileinput.input(file, inplace=1, backup='.bak'):
        if searchExp in line:
            line = line.replace(searchExp, replaceExp)
        sys.stdout.write(line)
# First call open office with to convert the file
os.system(sys.argv[1] + " --headless --convert-to html:\"HTML (StarCalc)\" " + SOURCE_FILE + "")
#The interim file is the result of the soffice conversion
INTERIM_FILE = os.path.splitext(SOURCE_FILE)[0]
INTERIM_FILE += ".html"
# un-escape any escaped spaces in the generated name
INTERIM_FILE = INTERIM_FILE.replace("\ ", " ")
#print("\n\nThe INTERIM FILE name is now: " + INTERIM_FILE+"\n\n")
#Look in the current directory and base 64 encode all png files
for file in glob.glob("*.png"):
    with open(file, 'rb') as fh:
        #add to the map
        # NOTE(review): .encode('base64') is Python-2-only; use
        # base64.b64encode() if this script is ever ported to Python 3.
        encodedFiles[file] = fh.read().encode('base64').replace('\n', '')
# Look for the and replace the urlencoded string in the html file
for key in encodedFiles:
    # print("\nThe key being searched: " + urllib.quote(key) + "\n")
    replaceAll(INTERIM_FILE, urllib.quote(key), base64Prefix + encodedFiles[key])
#For alfresco, change the name of the interim file to the target filename *argv[2]
# os.system("mv " + INTERIM_FILE + " " + sys.argv[3])
shutil.move(INTERIM_FILE, sys.argv[3])
"repo_name": "magenta-aps/htmlthumbnail",
"path": "src/main/resources/alfresco/extension/scripts/python/excel2html.py",
"copies": "1",
"size": "2123",
"license": "apache-2.0",
"hash": -2972254246506837500,
"line_mean": 38.3333333333,
"line_max": 96,
"alpha_frac": 0.6481394253,
"autogenerated": false,
"ratio": 3.246177370030581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4394316795330581,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Darwin Monroy'
from binascii import hexlify, unhexlify
class BaseCodec(object):
    """Arbitrary-base string codec.

    Values are stringified, hex-encoded, interpreted as one big integer
    and re-expressed in the digit *alphabet*.  With a block size ``bs``
    greater than 1 the input is zero-padded to a multiple of ``bs`` and
    the trailing (zero) digits are replaced by the padding char ``pc``.
    """

    @staticmethod
    def encode(data, alphabet, bs=1, pc='='):
        """Encode *data* as a string over *alphabet*.

        :param data: value to encode (stringified first)
        :param alphabet: digit alphabet; its length is the base
        :param bs: block size; >1 enables zero-padding + pc suffix
        :param pc: padding character appended once per padded byte
        :return: encoded string
        """
        raw = '{0}'.format(data).encode()
        pad_count = 0
        padding = ''
        if bs > 1:
            pad_count = bs - (len(raw) % bs)
            if pad_count == bs:
                pad_count = 0
            raw += b'\x00' * pad_count
            padding = pc * pad_count
        # Hex string -> one big integer ('0x0' prefix keeps '' valid).
        as_int = int('0x0' + hexlify(raw).decode('utf8'), 16)
        if as_int == 0:
            return alphabet[0]
        base = len(alphabet)
        digits = []
        while as_int:
            as_int, rem = divmod(as_int, base)
            digits.append(alphabet[rem])
        digits.reverse()
        if bs == 1 or pad_count == 0:
            return ''.join(digits)
        # Drop the trailing (all-zero) digits and mark them with padding.
        return ''.join(digits[:-1 * pad_count] + [padding])

    @staticmethod
    def decode(string, alphabet, pc='='):
        """Decode *string* (produced by :meth:`encode`) back to text.

        :param string: encoded string
        :param alphabet: digit alphabet used for encoding
        :param pc: padding char
        :return: decoded value as str
        """
        base = len(alphabet)
        # Powers are taken against the *padded* length so the digits that
        # encode() dropped are restored as zeros.
        total_len = len(string)
        pad = 0
        if pc in string:
            pad = len(string) - string.index(pc)
            string = string[:-1 * pad]
        num = 0
        for pos, char in enumerate(string):
            num += alphabet.index(char) * (base ** (total_len - (pos + 1)))
        hex_repr = '%x' % num
        if len(hex_repr) % 2:
            hex_repr = '0' + hex_repr
        raw = unhexlify(hex_repr.encode('utf8'))
        if pad:
            # Strip the zero bytes that encode() appended.
            raw = raw[:-1 * pad]
        return raw.decode('utf8')
| {
"repo_name": "dmonroy/dmonroy.codec",
"path": "dmonroy/codec/base.py",
"copies": "1",
"size": "2136",
"license": "mit",
"hash": 1495946535606036500,
"line_mean": 23.5517241379,
"line_max": 63,
"alpha_frac": 0.4911048689,
"autogenerated": false,
"ratio": 4.14757281553398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513867768443398,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dasDachs'
__version__ = '0.1'
"""
The main part of the app. The center is the app factory that returns the Flask
app with all the setting needed to run in your environment.
"""
from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from config import config
# Unbound extension instances; app_factory() attaches them to a concrete
# Flask app via init_app() (standard app-factory pattern).
# NOTE(review): this ``api`` is shadowed inside app_factory() by
# ``from .api import api`` -- confirm which instance blueprints use.
api = Api()
db = SQLAlchemy()
# App factory
# ===========
# Read about it in Miguel Grinberg Flask Web Development
# or check out his talk at PyCon 2014 talk Flask by Example
# (https://github.com/miguelgrinberg/flask-pycon2014))
def app_factory(config_name):
    """
    Build and return a configured Flask application.

    Given a configuration name (development, testing, production) this
    creates the Flask instance, loads the matching config object,
    initialises the extensions and registers the blueprints.
    """
    flask_app = Flask(__name__)
    flask_app.config.from_object(config[config_name])
    # Extensions: the db must be bound before Flask-Migrate wraps it.
    db.init_app(flask_app)
    migrate = Migrate(flask_app, db)
    from .api import api
    api.init_app(flask_app)
    # Serves robots.txt / humans.txt / sitemap.xml.  If the webserver
    # already serves those files itself, unregister (delete) this
    # blueprint and follow the webserver's documentation instead.
    from .robots import robots as robots_blueprint
    flask_app.register_blueprint(robots_blueprint)
    return flask_app
| {
"repo_name": "dasdachs/flask-blog",
"path": "backend/app/__init__.py",
"copies": "1",
"size": "1372",
"license": "mit",
"hash": 3202192684446380000,
"line_mean": 25.3846153846,
"line_max": 78,
"alpha_frac": 0.7004373178,
"autogenerated": false,
"ratio": 3.8217270194986073,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5022164337298607,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dash'
import os
import numpy as np
from PIL import Image
# from keras.preprocessing.sequence import pad_sequences
from collections import Counter
import cPickle
import random
class BucketData(object):
    """Accumulates (image, label, filename) triples and emits padded
    batches for a bucketed encoder/decoder model.

    Attributes:
        max_width: widest datum (last axis) seen since the last flush.
        max_label_len: longest label sequence seen since the last flush.
        data_list / label_list / file_list: pending batch contents.
    """

    def __init__(self):
        self.max_width = 0
        self.max_label_len = 0
        self.data_list = []
        self.label_list = []
        self.file_list = []

    def append(self, datum, label, filename):
        """Add one sample and return the new pending-batch size."""
        self.data_list.append(datum)
        self.label_list.append(label)
        self.file_list.append(filename)
        # datum.shape[-1] is the image width; batch-wide maxima drive
        # bucket selection and padding in flush_out().
        self.max_width = max(datum.shape[-1], self.max_width)
        self.max_label_len = max(len(label), self.max_label_len)
        return len(self.data_list)

    def flush_out(self, bucket_specs, valid_target_length=float('inf'),
                  go_shift=1):
        """Pad and package the pending samples into one batch dict.

        :param bucket_specs: list of (encoder_len, decoder_len) buckets
        :param valid_target_length: cap on the number of 1-weights per target
        :param go_shift: positions skipped at the start of each target
        :return: batch dict, or None if no bucket fits (batch is dropped)
        """
        res = dict(bucket_id=None,
                   data=None, zero_paddings=None, encoder_mask=None,
                   decoder_inputs=None, target_weights=None, real_len=None)

        def get_bucket_id():
            # 16 is the encoder's width downsampling factor (original
            # note: "yaad karke change to 16 later").
            for idx in range(0, len(bucket_specs)):
                if bucket_specs[idx][0] >= self.max_width // 16 - 1 \
                        and bucket_specs[idx][1] >= self.max_label_len:
                    return idx
            return None

        res['bucket_id'] = get_bucket_id()
        if res['bucket_id'] is None:
            # No bucket fits: drop the pending batch and reset state.
            self.data_list, self.label_list = [], []
            self.max_width, self.max_label_len = 0, 0
            return None
        encoder_input_len, decoder_input_len = bucket_specs[res['bucket_id']]

        # ENCODER PART.  ``//`` makes the (Python 2) integer division
        # explicit so the shapes below stay ints under Python 3 as well.
        res['data'] = np.array(self.data_list)
        real_len = max(self.max_width // 16 - 1, 0)
        padd_len = encoder_input_len - real_len
        res['zero_paddings'] = np.zeros([len(self.data_list), padd_len, 512],
                                        dtype=np.float32)
        encoder_mask = np.concatenate(
            (np.ones([len(self.data_list), real_len], dtype=np.float32),
             np.zeros([len(self.data_list), padd_len], dtype=np.float32)),
            axis=1)
        # One (batch, 1) column per encoder time step.
        res['encoder_mask'] = [a[:, np.newaxis] for a in encoder_mask.T]
        res['real_len'] = self.max_width

        # DECODER PART: zero-pad every label to decoder_input_len and
        # build matching 1/0 target weights.
        target_weights = []
        for l_idx in range(len(self.label_list)):
            label_len = len(self.label_list[l_idx])
            if label_len <= decoder_input_len:
                self.label_list[l_idx] = np.concatenate((
                    self.label_list[l_idx],
                    np.zeros(decoder_input_len - label_len, dtype=np.int32)))
                one_mask_len = min(label_len - go_shift, valid_target_length)
                target_weights.append(np.concatenate((
                    np.ones(one_mask_len, dtype=np.float32),
                    np.zeros(decoder_input_len - one_mask_len,
                             dtype=np.float32))))
            else:
                # Truncation is deliberately unsupported.
                raise NotImplementedError
        # Transpose to time-major lists of per-step arrays.
        res['decoder_inputs'] = [a.astype(np.int32) for a in
                                 np.array(self.label_list).T]
        res['target_weights'] = [a.astype(np.float32) for a in
                                 np.array(target_weights).T]
        assert len(res['decoder_inputs']) == len(res['target_weights'])
        res['filenames'] = self.file_list

        self.data_list, self.label_list, self.file_list = [], [], []
        self.max_width, self.max_label_len = 0, 0
        return res

    def __len__(self):
        return len(self.data_list)

    def __iadd__(self, other):
        self.data_list += other.data_list
        self.label_list += other.label_list
        # BUG FIX: file_list was not merged, silently desynchronising
        # filenames from data after an in-place merge.
        self.file_list += other.file_list
        self.max_label_len = max(self.max_label_len, other.max_label_len)
        self.max_width = max(self.max_width, other.max_width)
        # BUG FIX: __iadd__ must return self, otherwise ``a += b``
        # rebinds ``a`` to None.
        return self

    def __add__(self, other):
        res = BucketData()
        res.data_list = self.data_list + other.data_list
        res.label_list = self.label_list + other.label_list
        # BUG FIX: keep filenames aligned with the merged data.
        res.file_list = self.file_list + other.file_list
        res.max_width = max(self.max_width, other.max_width)
        res.max_label_len = max((self.max_label_len, other.max_label_len))
        return res
| {
"repo_name": "dashayushman/air-script",
"path": "src/data_util/bucketdata.py",
"copies": "1",
"size": "4424",
"license": "mit",
"hash": -5169069644023132000,
"line_mean": 37.8070175439,
"line_max": 87,
"alpha_frac": 0.5495027125,
"autogenerated": false,
"ratio": 3.533546325878594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9578579551476296,
"avg_score": 0.0008938973804595306,
"num_lines": 114
} |
__author__ = 'dat'
'''
generate descriptors from a protein in pdb format and a directory of ligands in mol2 format
'''
import os
import glob
import sys
import csv
import logging
from optparse import OptionParser
from rfscore.config import logger
from rfscore.credo import contacts
from rfscore.ob import get_molecule
def parse_options():
    """Read the command-line options for the batch descriptor generator.

    Exits with status 1 (after printing help) when an unknown descriptor
    scheme is requested.
    """
    parser = OptionParser(usage="%prog [options]")
    parser.add_option("--debug", action="store_true", dest="debug", default=False,
                      help='Set logging level to debug and print more verbose output.')
    parser.add_option("-B", "--binsize", dest="binsize", type=float, default=2,
                      help="Bin size (in Angstrom) to use for binning contacts based on inter-atomic distance.")
    parser.add_option("-F", "--format", dest="format", default='csv',
                      help="Format to use for writing the SIFt of the protein-ligand complex.")
    parser.add_option("-O", "--output", dest="output",
                      default="/home/dat/WORK/DB/Inna/hm_2esm_all329_elements_c12b2.csv",
                      help="File to which the data will be written (default=STDOUT).")
    parser.add_option("-L", "--inputligdir", dest="inputligdir",
                      default="/home/dat/WORK/DB/Inna/201308_all329",
                      help="Input directory of ligands in mol2 format (default=/home/).")
    parser.add_option("-P", "--inputpdb", dest="inputpdb",
                      default="/home/dat/WORK/DB/Inna/hm_2esm.pdb",
                      help="Input of protein in pdb format (default=/home/).")
    parser.add_option("-D", "--descriptor", dest="descriptor", default='elements',
                      help="Descriptor to use. Valid descriptors are 'credo', 'elements' and 'sybyl'.")
    opts, _args = parser.parse_args()
    # Only these three descriptor schemes are implemented downstream.
    if opts.descriptor not in ('elements', 'credo', 'sybyl'):
        logger.fatal("Invalid descriptor: {0}.".format(opts.descriptor))
        parser.print_help()
        sys.exit(1)
    return opts
def main():
    """Compute one descriptor row per ligand mol2 file against a single
    protein pdb and stream the rows out as CSV."""
    options = parse_options()
    # THIS OPTION WILL PRODUCE MORE VERBOSE OUTPUT
    if options.debug: logger.setLevel(logging.DEBUG)
    # NOTE(review): opened in 'wb' while csv.writer writes str -- fine on
    # Python 2, would need 'w' (newline='') on Python 3.
    if options.output: fh = open(options.output,'wb')
    else: fh = sys.stdout
    # CHOOSE HOW THE OUPTPUT DATA WILL BE WRITTEN
    if options.format == 'csv':
        writer = csv.writer(fh, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # Header row is emitted once, on the first complex.
    HEADER = True
    # iterate through all mol2 files in inputdir
    for mol2file in glob.glob(os.path.join(options.inputligdir, '*.mol2')):
        #lig_path = os.path.join(option.inputdir, file)
        lig_path = mol2file
        protein_path = options.inputpdb
        if not os.path.isfile(protein_path):
            # NOTE(review): the .format() argument is unused -- the message
            # has no {0} placeholder, so the path is never printed.
            logger.fatal("The protein file does not exist.".format(options.inputpdb))
            sys.exit(1)
        protein = get_molecule(protein_path)
        ligand = get_molecule(lig_path)
        # calculate descriptor based on the sum of interacting element pairs
        if options.descriptor == 'elements':
            # calculate element pair descriptor for this complex
            descriptor, labels = contacts.element_descriptor(protein, ligand,
                                                             binsize=options.binsize)
        # calculate descriptor based on the sum of interacting element pairs
        elif options.descriptor == 'sybyl':
            # calculate element pair descriptor for this complex
            descriptor, labels = contacts.sybyl_atom_type_descriptor(protein, ligand,
                                                                     binsize=options.binsize)
        # calculate descriptor using structural interaction fingerprints
        elif options.descriptor == 'credo':
            # get the protein-ligand structural interaction fingerprint
            descriptor, labels = contacts.sift_descriptor(protein, ligand,
                                                          binsize=options.binsize)
        if HEADER:
            # UPDATE COLUMN LABELS
            labels.append('ligand')
            writer.writerow(labels)
            HEADER = False
        if options.format == 'csv':
            #ligandname = "\"" + os.path.basename(lig_path).split('.')[0] + "\""
            ligandname = os.path.basename(lig_path).split('.')[0]
            #print(ligandname)
            # FIRST COLUMN OF OUTPUT ROW
            row = descriptor.tolist() + [ligandname]
            writer.writerow(row)
# NOTE(review): runs on import -- there is no ``if __name__ == '__main__'`` guard.
main()
| {
"repo_name": "mrknight/Py_ML-scoring",
"path": "bin/generate.desc.dir.py",
"copies": "1",
"size": "5026",
"license": "mit",
"hash": 7966719107200612000,
"line_mean": 34.3943661972,
"line_max": 117,
"alpha_frac": 0.5527258257,
"autogenerated": false,
"ratio": 4.259322033898305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5312047859598305,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dat'
'''
generate descriptors from a protein in pdb format and a directory of ligands in mol2 format
'''
import os
import re
import sys
import csv
import logging
from math import log10
from operator import itemgetter
from optparse import OptionParser
from rfscore.config import config, logger
from rfscore.credo import contacts
from rfscore.ob import get_molecule
def parse_options():
    """Parse and validate command-line options for the PDBbind descriptor
    generator.

    Exits with status 1 when the PDBbind directory or index file is
    missing/invalid, or when an unknown descriptor scheme is requested.
    """
    # PARSE COMMAND LINE
    usage = "%prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("--debug",
                      action="store_true",
                      dest="debug",
                      default=False,
                      help='Set logging level to debug and print more verbose output.')
    parser.add_option("-B", "--binsize",
                      dest="binsize",
                      type=float,
                      default=0.0,
                      help="Bin size (in Angstrom) to use for binning contacts based on inter-atomic distance.")
    parser.add_option("-F", "--format",
                      dest="format",
                      default='csv',
                      help="Format to use for writing the SIFt of the protein-ligand complex.")
    parser.add_option("-O", "--output",
                      dest="output",
                      default=None,
                      help="File to which the data will be written (default=STDOUT).")
    # BUG FIX: these two options were missing even though main() reads
    # options.pdbbind and options.index (cf. the sibling CASF script,
    # which declares the same pair).
    parser.add_option("-P", "--pdbbind-dir",
                      dest="pdbbind",
                      default=None,
                      help="PDBbind directory.")
    parser.add_option("-I", "--index",
                      dest="index",
                      default=None,
                      help="PDBbind data index file for a specific data set (core,refined,general).")
    parser.add_option("-D", "--descriptor",
                      dest="descriptor",
                      default='elements',
                      help="Descriptor to use. Valid descriptors are 'credo', 'elements' and 'sybyl'.")
    # GET COMMAND LINE OPTIONS
    (options, args) = parser.parse_args()
    # Validation mirrors the sibling script: fail fast with an explanation.
    if not options.pdbbind:
        logger.error("The PDBbind directory must be provided.")
        parser.print_help()
        sys.exit(1)
    elif not os.path.exists(options.pdbbind):
        logger.fatal("The specified PDBbind directory does not exist.")
        sys.exit(1)
    if not options.index:
        logger.error("A path to a PDBbind data index file must be provided.")
        parser.print_help()
        sys.exit(1)
    elif not os.path.exists(options.index):
        logger.fatal("The specified PDBbind data index file does not exist.")
        sys.exit(1)
    if options.descriptor not in ('elements', 'credo', 'sybyl'):
        logger.fatal("Invalid descriptor: {0}.".format(options.descriptor))
        parser.print_help()
        sys.exit(1)
    return options
def main():
    """Generate one descriptor row (with pKd/pKi) per PDBbind entry and
    stream the rows out as CSV.

    NOTE(review): ``parse_index`` and ``get_pkd`` are referenced below but
    are neither defined nor imported in this module (see the import list
    at the top of the file) -- calling main() raises NameError as-is.
    ``from math import log10`` is imported yet unused, suggesting a
    get_pkd() helper was meant to live in this file.
    """
    options = parse_options()
    # THIS OPTION WILL PRODUCE MORE VERBOSE OUTPUT
    if options.debug: logger.setLevel(logging.DEBUG)
    pdbbindconf = config['standard']
    data = parse_index(options.pdbbind, options.index)
    if options.output: fh = open(options.output,'wb')
    else: fh = sys.stdout
    # CHOOSE HOW THE OUPTPUT DATA WILL BE WRITTEN
    if options.format == 'csv':
        writer = csv.writer(fh, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # Header row is emitted once, on the first complex.
    HEADER = True
    # ITERATE THROUGH ALL PROTEIN-LIGAND COMPLEXES
    for pdb in data:
        # NORMALISE ACTIVITY TO NANOMOLAR
        pkd = get_pkd(float(data[pdb]['value']), data[pdb]['unit'])
        # THE PDBBIND DIRECTORY CONTAINING ALL THE STRUCTURES FOR THIS PDB ENTRY
        entry_dir = os.path.join(options.pdbbind,pdb)
        # CHECK IF THE DIRECTORY ACTUALLY EXISTS
        if not os.path.exists(entry_dir):
            logger.error("The PDBbind directory for PDB entry {0} does not exist.".format(pdb))
            continue
        # CREATE THE PATHS TO THE PROTEIN AND LIGAND USING THE SPECIFIC _<POCKET,PROTEIN,LIGAND,ZINC> LABEL
        prot_path = os.path.join(entry_dir,'{0}_{1}.pdb'.format(pdb,pdbbindconf['protein']))
        lig_path = os.path.join(entry_dir,'{0}_{1}.mol2'.format(pdb,pdbbindconf['ligand']))
        if not os.path.exists(prot_path):
            logger.error("The protein pocket structure for PDB entry {0} cannot be found.".format(pdb))
            continue
        elif not os.path.exists(lig_path):
            logger.error("The ligand structure for PDB entry {0} cannot be found.".format(pdb))
            continue
        protein = get_molecule(prot_path)
        ligand = get_molecule(lig_path)
        # CALCULATE DESCRIPTOR USING STRUCTURAL INTERACTION FINGERPRINTS
        if options.descriptor == 'credo':
            # GET THE PROTEIN-LIGAND STRUCTURAL INTERACTION FINGERPRINT
            descriptor, labels = contacts.sift_descriptor(protein, ligand, binsize=options.binsize)
        # CALCULATE DESCRIPTOR BASED ON THE SUM OF INTERACTING ELEMENT PAIRS
        elif options.descriptor == 'elements':
            # CALCULATE ELEMENT PAIR DESCRIPTOR FOR THIS COMPLEX
            descriptor, labels = contacts.element_descriptor(protein, ligand, binsize=options.binsize)
        # CALCULATE DESCRIPTOR BASED ON THE SUM OF INTERACTING ELEMENT PAIRS
        elif options.descriptor == 'sybyl':
            # CALCULATE ELEMENT PAIR DESCRIPTOR FOR THIS COMPLEX
            descriptor, labels = contacts.sybyl_atom_type_descriptor(protein, ligand, binsize=options.binsize)
        if HEADER:
            # UPDATE COLUMN LABELS
            labels.insert(0,'pKd/pKi')
            labels.append('pdb')
            writer.writerow(labels)
            HEADER = False
        if options.format == 'csv':
            # KEEP ONLY THE TWO MOST SIGNIFICANT BITS
            pkdstring = "{0:.2f}".format(pkd)
            # FIRST COLUMN OF OUTPUT ROW
            row = [pkdstring] + descriptor.tolist() + [pdb]
            writer.writerow(row)
# NOTE(review): runs on import -- there is no ``if __name__ == '__main__'`` guard.
main()
| {
"repo_name": "mrknight/Py_ML-scoring",
"path": "bin/generate.desc.py",
"copies": "1",
"size": "5132",
"license": "mit",
"hash": 944375666518962800,
"line_mean": 32.7631578947,
"line_max": 117,
"alpha_frac": 0.597817615,
"autogenerated": false,
"ratio": 3.870286576168929,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9882168702506935,
"avg_score": 0.01718709773239886,
"num_lines": 152
} |
__author__ = 'dat'
import os
import re
import sys
import csv
import logging
from libRMSD import *
from operator import itemgetter
from optparse import OptionParser
#from config import config, logger
#from credo import contacts
#from ob import get_molecule
from rfscore.config import config, logger
from rfscore.credo import contacts
from rfscore.ob import get_molecule
def parse_options():
    """Parse and validate the CASF pose-descriptor command line.

    Exits with status 1 when the PDBbind directory or index file is
    missing/invalid, or when an unknown descriptor scheme is requested.
    """
    parser = OptionParser(usage="%prog [options]")
    parser.add_option("--debug", action="store_true", dest="debug", default=False,
                      help='Set logging level to debug and print more verbose output.')
    parser.add_option("-B", "--binsize", dest="binsize", type=float, default=0.0,
                      help="Bin size (in Angstrom) to use for binning contacts based on inter-atomic distance.")
    parser.add_option("-F", "--format", dest="format", default='csv',
                      help="Format to use for writing the SIFt of the protein-ligand complex.")
    parser.add_option("-O", "--output", dest="output",
                      default="/home/dat/WORK/DB/DESCRIPTORS/CASF2014-refined_SIFt_RMSD.csv",
                      help="File to which the data will be written (default=STDOUT).")
    parser.add_option("-P", "--pdbbind-dir", dest="pdbbind",
                      default="/home/dat/WORK/DB/PDBbind/v2014-refined/",
                      help="PDBbind directory.")
    parser.add_option("-I", "--index", dest="index",
                      default="/home/dat/WORK/DB/PDBbind/v2014-refined/INDEX_refined_data.2014",
                      help="PDBbind data index file for a specific data set (core,refined,general).")
    parser.add_option("-D", "--descriptor", dest="descriptor", default='credo',
                      help="Descriptor to use. Valid descriptors are 'credo', 'elements' and 'sybyl'.")
    opts, _args = parser.parse_args()
    # --- validation: each failure logs context and exits(1) ---
    if not opts.pdbbind:
        logger.error("The PDBbind directory must be provided.")
        parser.print_help()
        sys.exit(1)
    if not os.path.exists(opts.pdbbind):
        logger.fatal("The specified PDBbind directory does not exist.")
        sys.exit(1)
    if not opts.index:
        logger.error("A path to a PDBbind data index file must be provided.")
        parser.print_help()
        sys.exit(1)
    if not os.path.exists(opts.index):
        logger.fatal("The specified PDBbind data index file does not exist.")
        sys.exit(1)
    if opts.descriptor not in ('elements', 'credo', 'sybyl'):
        logger.fatal("Invalid descriptor: {0}.".format(opts.descriptor))
        parser.print_help()
        sys.exit(1)
    return opts
def parse_index(path, index):
    """Parse a PDBbind index file into a dict keyed by PDB code.

    :param path: unused -- NOTE(review): kept only for call-compatibility;
                 confirm callers before removing.
    :param index: path of the PDBbind INDEX_* file to parse
    :return: {pdb: {'resolution','year','pKx','type','relation','value','unit'}}
             with all values as strings
    """
    regexp = r"""^
    (?P<pdb>\w{4})\s+
    (?P<resolution>\d[.]\d{2}|NMR)\s+
    (?P<year>\d{4})\s+
    (?P<pKx>\d{1,2}[.]\d{2})\s+
    (?P<type>\w{2,4})
    (?P<relation>[<>=~]{1,2})
    (?P<value>\d+[.]\d+|\d+)
    (?P<unit>\w{2}).+"""
    pattern = re.compile(regexp, re.VERBOSE)
    data = {}
    # 'with' closes the index file (the original iterated a bare open()
    # and leaked the handle).
    with open(index) as fh:
        for line in fh:
            # Skip comment lines.
            if line.startswith('#'):
                continue
            match = pattern.match(line)
            # PRINT A WARNING IF REGULAR EXPRESSION FAILED ON A LINE
            if not match:
                logger.warn("Could not parse line: {0}".format(line))
                continue
            rowdata = match.groupdict()
            data[rowdata.pop('pdb')] = rowdata
    return data
def listFiles(path, pattern):
    """Return the entries of *path* whose names contain *pattern*;
    an empty list (after a diagnostic print) when *path* is missing."""
    if not os.path.exists(path):
        print(path + " not exists")
        return []
    return [entry for entry in os.listdir(path) if entry.find(pattern) > -1]
# Docking scoring functions whose pose directories are scanned below
# ('goldscore' currently disabled).
dockingMethods = ['asp', 'plp', 'chemscore']#, 'goldscore']
# Root directory holding <method>/<pdb>/gold_soln* pose files.
posesDir = "/home/dat/WORK/output/v2014-refined/"
def main():
    """For every PDBbind entry, score each docked pose from every docking
    method: compute its RMSD to the crystal ligand plus a contact
    descriptor, and stream one CSV row per pose."""
    options = parse_options()
    # THIS OPTION WILL PRODUCE MORE VERBOSE OUTPUT
    if options.debug: logger.setLevel(logging.DEBUG)
    pdbbindconf = config['pdbbind']
    data = parse_index(options.pdbbind, options.index)
    # NOTE(review): opened in 'wb' while csv.writer writes str -- fine on
    # Python 2, would need 'w' (newline='') on Python 3.
    if options.output: fh = open(options.output,'wb')
    else: fh = sys.stdout
    # CHOOSE HOW THE OUPTPUT DATA WILL BE WRITTEN
    if options.format == 'csv':
        writer = csv.writer(fh, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # Header row is emitted once, on the first pose.
    HEADER = True
    # counter tracks (ligand, method) pairs processed; only incremented,
    # never reported.
    counter = 0
    # ITERATE THROUGH ALL PROTEIN-LIGAND COMPLEXES
    for pdb in data:
        # THE PDBBIND DIRECTORY CONTAINING ALL THE STRUCTURES FOR THIS PDB ENTRY
        entry_dir = os.path.join(options.pdbbind,pdb)
        # CHECK IF THE DIRECTORY ACTUALLY EXISTS
        if not os.path.exists(entry_dir):
            logger.error("The PDBbind directory for PDB entry {0} does not exist.".format(pdb))
            continue
        # CREATE THE PATHS TO THE PROTEIN AND LIGAND USING THE SPECIFIC _<POCKET,PROTEIN,LIGAND,ZINC> LABEL
        prot_path = os.path.join(entry_dir,'{0}_{1}.pdb'.format(pdb,pdbbindconf['protein']))
        ref_lig_path = os.path.join(entry_dir,'{0}_{1}.mol2'.format(pdb,pdbbindconf['ligand']))
        #for each protein, the ligand gets generated docking poses from x docking methods,
        #
        if not os.path.exists(prot_path):
            logger.error("The protein pocket structure for PDB entry {0} cannot be found.".format(pdb))
            continue
        for score in dockingMethods:
            pose_path = os.path.join(posesDir, score, pdb)
            # \TODO: add pattern for each docking method, right now only works with gold
            lig_pattern = "gold_soln"
            # RMSD dict for all poses
            counter = counter + 1
            print("Calculating RMSDs for ligand " + pdb + ", docking method " + score)
            RMSDs = calcRMSDPoses(ref_lig_path, pose_path, lig_pattern)
            for pose in listFiles(pose_path, lig_pattern):
                lig_path = os.path.join(posesDir, score, pdb, pose)
                poseRMSD = RMSDs[pose]
                # e.g. gold_soln_..._1_asp -> unique per pose + method
                poseID = pose.split('.')[0] + '_' + score
                if not os.path.exists(lig_path):
                    logger.error("The ligand structure for PDB entry {0} cannot be found.".format(pdb))
                    continue
                protein = get_molecule(prot_path)
                ligand = get_molecule(lig_path)
                # CALCULATE DESCRIPTOR USING STRUCTURAL INTERACTION FINGERPRINTS
                if options.descriptor == 'credo':
                    # GET THE PROTEIN-LIGAND STRUCTURAL INTERACTION FINGERPRINT
                    descriptor, labels = contacts.sift_descriptor(protein, ligand, binsize=options.binsize)
                # CALCULATE DESCRIPTOR BASED ON THE SUM OF INTERACTING ELEMENT PAIRS
                elif options.descriptor == 'elements':
                    # CALCULATE ELEMENT PAIR DESCRIPTOR FOR THIS COMPLEX
                    descriptor, labels = contacts.element_descriptor(protein, ligand, binsize=options.binsize)
                # CALCULATE DESCRIPTOR BASED ON THE SUM OF INTERACTING ELEMENT PAIRS
                elif options.descriptor == 'sybyl':
                    # CALCULATE ELEMENT PAIR DESCRIPTOR FOR THIS COMPLEX
                    descriptor, labels = contacts.sybyl_atom_type_descriptor(protein, ligand, binsize=options.binsize)
                if HEADER:
                    # UPDATE COLUMN LABELS
                    labels.insert(0,'RMSD')
                    labels.append('ligandID')
                    writer.writerow(labels)
                    HEADER = False
                if options.format == 'csv':
                    # KEEP ONLY THE TWO MOST SIGNIFICANT BITS
                    #pkdstring = "{0:.2f}".format(pkd)
                    # FIRST COLUMN OF OUTPUT ROW
                    row = [poseRMSD] + descriptor.tolist() + [poseID]
                    writer.writerow(row)
# NOTE(review): runs on import -- there is no ``if __name__ == '__main__'`` guard.
main()
| {
"repo_name": "mrknight/Py_ML-scoring",
"path": "bin/generate.CASF.poses.py",
"copies": "1",
"size": "8388",
"license": "mit",
"hash": 9171349089458772000,
"line_mean": 34.0962343096,
"line_max": 118,
"alpha_frac": 0.5577014783,
"autogenerated": false,
"ratio": 3.8389016018306634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9828552133352753,
"avg_score": 0.013610189355582126,
"num_lines": 239
} |
__author__ = 'dat'
import subprocess
import os
import csv
# Scratch file that receives the stdout of each rms_analysis run.
TMP_FILE = '/home/dat/rmsd.tmp'
def calcRMSD(refLigand, calcLigand):
    """Run the external ``rms_analysis`` tool on the two ligand files,
    capturing its stdout in TMP_FILE.

    Returns None; read the result back with parseRMSDoutput().
    """
    run_cmd = "rms_analysis " + refLigand + " " + calcLigand
    # 'with' closes the handle even if subprocess.call raises
    # (the original left the file object open).
    with open(TMP_FILE, "w") as f:
        subprocess.call(run_cmd.split(), stdout=f)
def parseRMSDoutput(outputFile=None):
    """Return the first whitespace-separated token of the LAST line of
    *outputFile* (default: TMP_FILE), i.e. the RMSD printed by
    rms_analysis.

    NOTE(review): the original scanned for a line containing 'Distance'
    but its ``break`` was commented out, so the last line was always the
    one used; that behaviour is preserved here and the dead branch
    removed.
    """
    if outputFile is None:
        # Late-bound default: TMP_FILE is only needed when no explicit
        # path is given (backward-compatible with the old default).
        outputFile = TMP_FILE
    # 'with' closes the handle (the original leaked it).
    with open(outputFile, 'r') as fh:
        lines = fh.readlines()
    return lines[-1].split()[0]
# Compute RMSDs for all poses in poseDir; pose files are assumed to start
# with the given prefix (GOLD writes them as gold_soln*).
# \TODO: change the prefix per docking method
def calcRMSDPoses(refLigand, poseDir, pattern="gold_soln"):
    """Return {pose_filename: rmsd_string} for every file in *poseDir*
    whose name starts with *pattern*, measured against *refLigand*."""
    RMSDs = {}
    for ligand in os.listdir(poseDir):
        if ligand.startswith(pattern):
            # calcRMSD returns None; its real output is TMP_FILE, which
            # parseRMSDoutput() reads straight back.  (The original bound
            # the None return to an unused 'rmsd' variable.)
            calcRMSD(refLigand, os.path.join(poseDir, ligand))
            RMSDs[ligand] = parseRMSDoutput()
    return RMSDs
# Persist a ligand-id -> RMSD mapping as a two-column CSV file.
def writeRMSD2CSV(RMSDs, output):
    """Write *RMSDs* ({ligand_id: rmsd}) to *output* as CSV with an
    ``ID,RMSDs`` header row."""
    # 'with' replaces the manual open/close pair and also closes the file
    # when a write raises mid-way.
    with open(output, 'w') as fh:
        writer = csv.writer(fh, delimiter=',')
        # write the csv header
        writer.writerow(["ID", "RMSDs"])
        for ligandID in RMSDs.keys():
            writer.writerow([ligandID] + [RMSDs[ligandID]])
| {
"repo_name": "mrknight/Py_ML-scoring",
"path": "bin/libRMSD.py",
"copies": "1",
"size": "1273",
"license": "mit",
"hash": 6409745624911895000,
"line_mean": 28.6046511628,
"line_max": 104,
"alpha_frac": 0.6276512176,
"autogenerated": false,
"ratio": 2.9742990654205608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41019502830205606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'davburge'
import collections
import Tkinter as tk
# Module-level Tk variable placeholders.  All start as None and are meant
# to be replaced with tk.StringVar instances / dicts by setup(), which
# must run after the Tk root window exists.
# NOTE(review): ``range`` below shadows the builtin for this whole module.
shipClass = None
shipMods = None
damageType = None
resistType = None
focusSkill = None
focusLevel = None
classSkill = None
classLevel = None
subSkill_1 = None
subSkill_2 = None
subSkill_3 = None
subskill_1Level = None
subskill_2Level = None
subskill_3Level = None
augTweakLevel = None
impTweakLevel = None
# Per-stat {'initial','bonus','overall'} dicts, filled in by setup().
shieldBank = None
shieldCharge = None
energyBank = None
energyCharge = None
hull = None
speed = None
damage = None
RoF = None
range = None
vis = None
# Ordered mapping of stat name -> per-stat dict, filled in by setup().
baseInputs = None
augNumber = None
augs = None
def _stat_triplet():
    """One initial/bonus/overall group of Tk variables for a ship stat."""
    return {
        'initial': tk.StringVar(),
        'bonus': tk.StringVar(),
        'overall': tk.StringVar(),
    }
def setup():
    """Create every Tk variable the calculator GUI binds to.

    Must be called after the Tk root window exists (tk.StringVar requires
    one).  Finishes by loading the default selections via setDefaults().
    """
    global shipClass, shipMods, damageType, resistType
    global focusSkill, focusLevel, classSkill, classLevel
    global subSkill_1, subSkill_2, subSkill_3
    global subskill_1Level, subskill_2Level, subskill_3Level
    global augTweakLevel, impTweakLevel
    # BUG FIX: the per-stat dicts below were assigned without a ``global``
    # statement, so the module-level placeholders (declared None at the
    # top of the file) stayed None and the dicts were reachable only
    # through baseInputs.
    global shieldBank, shieldCharge, energyBank, energyCharge, hull
    global speed, damage, RoF, range, vis
    global baseInputs, augNumber, augs
    shipClass = tk.StringVar()
    shipMods = collections.OrderedDict([
        ('shieldBank', tk.StringVar()),
        ('shieldCharge', tk.StringVar()),
        ('energyBank', tk.StringVar()),
        ('energyCharge', tk.StringVar()),
        ('hull', tk.StringVar()),
        ('speed', tk.StringVar()),
        ('damage', tk.StringVar()),
        ('RoF', tk.StringVar()),
        ('range', tk.StringVar()),
        ('vis', tk.StringVar()),
        ('resist', tk.StringVar()),
        ('elecTemp', tk.StringVar()),
        ('inbuiltElec', tk.StringVar()),
    ])
    damageType = tk.StringVar()
    resistType = tk.StringVar()
    focusSkill = tk.StringVar()
    focusLevel = tk.StringVar()
    classSkill = tk.StringVar()
    classLevel = tk.StringVar()
    subSkill_1 = tk.StringVar()
    subSkill_2 = tk.StringVar()
    subSkill_3 = tk.StringVar()
    subskill_1Level = tk.StringVar()
    subskill_2Level = tk.StringVar()
    subskill_3Level = tk.StringVar()
    augTweakLevel = tk.StringVar()
    impTweakLevel = tk.StringVar()
    # Per-stat initial/bonus/overall variable groups (the repeated dict
    # literal is factored into _stat_triplet()).
    shieldBank = _stat_triplet()
    shieldCharge = _stat_triplet()
    energyBank = _stat_triplet()
    energyCharge = _stat_triplet()
    hull = _stat_triplet()
    speed = _stat_triplet()
    damage = _stat_triplet()
    RoF = _stat_triplet()
    range = _stat_triplet()
    vis = _stat_triplet()
    # resist/elecTemp have no module-level placeholder; as in the
    # original they stay local and are reachable via baseInputs only.
    resist = _stat_triplet()
    elecTemp = _stat_triplet()
    baseInputs = collections.OrderedDict([
        ('shieldBank', shieldBank),
        ('shieldCharge', shieldCharge),
        ('energyBank', energyBank),
        ('energyCharge', energyCharge),
        ('hull', hull),
        ('speed', speed),
        ('damage', damage),
        ('RoF', RoF),
        ('range', range),
        ('vis', vis),
        ('resist', resist),
        ('elecTemp', elecTemp),
    ])
    augNumber = tk.StringVar()
    augs = []
    setDefaults()
def setDefaults():
    """Reset every user-facing selection to its initial value."""
    # ``global`` is not strictly needed for .set() calls (they mutate in
    # place), but is kept to match the module's declaration style.
    global shipClass, damageType, resistType, focusSkill, classSkill
    global subSkill_1, subSkill_2, subSkill_3
    global augTweakLevel, impTweakLevel, augNumber
    defaults = (
        (shipClass, 'lfi'),
        (damageType, 'Energy'),
        (resistType, 'Energy'),
        (focusSkill, 'combat_focus'),
        (classSkill, 'berserker'),
        (subSkill_1, '0'),
        (subSkill_2, '0'),
        (subSkill_3, '0'),
        (augTweakLevel, '0'),
        (impTweakLevel, '0'),
        (augNumber, '0'),
    )
    for var, value in defaults:
        var.set(value)
class augmenter(collections.OrderedDict):
    # One ship augmenter: an ordered mapping of stat name -> tk.StringVar
    # holding that augmenter's bonus for the stat (plus a 'name' label).
    def __init__(self):
        """Create an augmenter with one empty StringVar per stat.

        NOTE(review): tk.StringVar() requires an existing Tk root window,
        so instances can only be created after the GUI is initialised.
        """
        collections.OrderedDict.__init__(self)
        self.update([
            ('name', tk.StringVar()),
            ('shieldBank', tk.StringVar()),
            ('shieldCharge', tk.StringVar()),
            ('energyBank', tk.StringVar()),
            ('energyCharge', tk.StringVar()),
            ('hull', tk.StringVar()),
            ('speed', tk.StringVar()),
            ('damage', tk.StringVar()),
            ('RoF', tk.StringVar()),
            ('range', tk.StringVar()),
            ('vis', tk.StringVar()),
            ('resist', tk.StringVar()),
            ('elecTemp', tk.StringVar()),
        ])
"repo_name": "dburgess560/sscalc",
"path": "ss_inputs.py",
"copies": "1",
"size": "5513",
"license": "apache-2.0",
"hash": 8886802175397620000,
"line_mean": 21.9782608696,
"line_max": 46,
"alpha_frac": 0.5405405405,
"autogenerated": false,
"ratio": 3.6389438943894388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4679484434889439,
"avg_score": null,
"num_lines": null
} |
__author__ = 'davburge'
import collections
# Ship class code -> display name. The 'all' entry is filtered out of the
# radio buttons in the GUI; presumably it marks bonuses that apply to every
# hull (see the 'all' lookup in ss_math.getFocusBonus) — TODO confirm.
ships = {
    'lfi': "Light Fighter",
    'hfi': "Heavy Fighter",
    'sfr': "Support Freighter",
    'ifr': "Industrial Freighter",
    'cap': "Capital Ship",
    'all': "All",
}
# Skill hierarchy: focus skill -> class skills -> subskills.  Every node
# carries a 'name' display string alongside its child entries, so consumers
# must skip the 'name' key when iterating children.
skill_tree = {
    'combat_focus': {
        'name': "Combat Focus",
        'berserker': {
            'name': "Berserker",
            'impervious_armor': "Impervious Armor",
            'ordinance_supremacy': "Ordinance Supremacy",
            'arsenal_expertise': "Arsenal Expertise"
        },
        'sniper': {
            'name': "Sniper",
            'stalking': "Stalking",
            'sharpshooting': "Sharpshooting",
            'efficiency': "Efficiency"
        }
    },
    'recon_focus': {
        'name': "Recon Focus",
        'speed_demon': {
            'name': "Speed Demon",
            'speedy_movement': "Speedy Movement",
            'speedy_firing': "Speedy Firing",
            'dogfighting': "Dogfighting",
        },
        'seer': {
            'name': "Seer",
            'psionic_shrouding': "Psionic Shrouding",
            'psychic_sight': "Psychic Sight",
            'shadow_ambush': "Shadow Ambush",
        }
    },
    'support_focus': {
        'name': "Support Focus",
        'shield_monkey': {
            'name': "Shield Monkey",
            'shield_boosting': "Shield Boosting",
            'shield_manipulation': "Shield Manipulation",
            'shield_transference': "Shield Transference",
        },
        'engineer': {
            'name': "Engineer",
            'drone_mastery': "Drone Mastery",
            'beam_mastery': "Beam Mastery",
            'damage_control': "Damage Control",
        }
    },
    'fleet_focus': {
        'name': "Fleet Focus",
        'fleet_commander': {
            'name': "Fleet Commander",
            'slave_mastery': "Slave Mastery",
            'radiation_expert': "Radiation Expert",
            'flight_controller': "Flight Controller"
        },
        'gunner': {
            'name': "Gunner",
            'big_guns': "Big Guns",
            'destruction': "Destruction",
            'missile_mastery': "Missile Mastery",
        }
    },
}
# Stat keys that ss_math.calculate() actually computes; each key must also
# exist in statNames (and in ss_inputs.baseInputs).
calculatedStats = [
    'shieldBank',
    'shieldCharge',
    'energyBank',
    'energyCharge',
    'hull',
    'speed',
    'damage',
    'RoF',
    'range',
    'vis',
    'elecTemp',
    'resist',
]
# Stat key -> label text shown next to its entry field in the GUI.
statNames = {
    'name': "Name:",
    'shieldBank': "Shield:",
    'shieldCharge': "Shield Charge:",
    'energyBank': "Energy:",
    'energyCharge': "Energy Charge:",
    'hull': "Hull:",
    'speed': "Speed:",
    'damage': "Damage:",
    'RoF': "Rate of Fire:",
    'range': "Range:",
    'vis': "Vis:",
    'elecTemp': "Elec Tempering:",
    'resist': "Resistance:",
    'inbuiltElec': "Inbuilt Elec:",
    'damageType': "Damage Type:",
    'resistType': "Resist Type:",
}
#This dictionary is reversed item: key order due to the way the menus work for choosing resists/damage types
# i.e. display label -> internal code; the codes are prefixed onto "Damage"/
# "Resist" to form skill_tree bonus keys (see ss_math.getClassBonus).
elementTypes = collections.OrderedDict([
    ("Energy", 'energy'),
    ("Laser", 'laser'),
    ("Heat", 'heat'),
    ("Physical", 'phys'),
    ("Radiation", 'rad'),
    ("Surgical", 'surg'),
    ("Mining", 'mine'),
    ("Transference", 'trans'),
])
# Help text shown by the "Help" button of the ship-edit popup.
shipHelp = "This includes hull inbuilt bonus (As listed on the ship tab when buying a ship) as well as " \
           "any inbuilt bonuses in items (OL and Diffusers too) that do not have the star label (*). " \
           "Add all of the same stat bonuses together.\n\ne.g. if you have a cloak with +5% speed and a " \
           "shield with +35% speed, enter in +40%.\n\nFor negative bonuses, make sure to do " \
           "Bonus/(1+Bonus) before adding to other bonuses. Sorry for the inconvenience." | {
"repo_name": "dburgess560/sscalc",
"path": "ss_constants.py",
"copies": "1",
"size": "3857",
"license": "apache-2.0",
"hash": -1578861088436656000,
"line_mean": 28.3858267717,
"line_max": 111,
"alpha_frac": 0.5040186674,
"autogenerated": false,
"ratio": 3.40423654015887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44082552075588705,
"avg_score": null,
"num_lines": null
} |
__author__ = 'davburge'
import re
percentRegex = re.compile('(^[-+]?[0]?[.]{1}[\d]*|^[-+]?[123456789]{1,}[\d]*[.]?[\d]*|^[-+0]{1}|^[-+]?[0]?)\Z')
# Signed floats; a leading 0 is only allowed as "+-0.###" or "0.###"
bankRegex = re.compile('(^[123456789]{1,}[\d]*)\Z')
# Positive integers with no leading zero
decimalRegex = re.compile('(^[123456789]{1,}[\d]*[.]?[\d]*|^[0]?[.]{1}[\d]*|^[0]{1})\Z')
# Unsigned floats; a leading 0 is only allowed as "0.###" or a lone "0"
auraRegex = re.compile('([-+]?[0]?[.]{1}[\d]*[%]?|[-+]?[123456789]{1,}[\d]*[.]?[\d]*[%]?|[-+]?[0]?)\Z')
# Signed floats optionally suffixed with a percent sign
def shipStatValidate(action, index, value_if_allowed, prior_value, text):
    '''Tk 'key' validator for integer bank stats; deletions always pass.'''
    # action == '0' is Tk's code for a deletion
    return action == '0' or bankRegex.match(value_if_allowed) is not None
def shipDecimalValidate(action, index, value_if_allowed, prior_value, text):
    '''Tk 'key' validator for unsigned decimal stats; deletions always pass.'''
    return action == '0' or decimalRegex.match(value_if_allowed) is not None
def bonusStatValidate(action, index, value_if_allowed, prior_value, text):
    '''Tk 'key' validator for signed percentage bonuses; deletions always pass.'''
    return action == '0' or percentRegex.match(value_if_allowed) is not None
def auraValidate(action, index, value_if_allowed, prior_value, text):
    '''Tk 'key' validator for aura entries (signed, optional %); deletions always pass.'''
    return action == '0' or auraRegex.match(value_if_allowed) is not None
| {
"repo_name": "dburgess560/sscalc",
"path": "ss_validators.py",
"copies": "1",
"size": "1725",
"license": "apache-2.0",
"hash": 1952602333283602000,
"line_mean": 36.3333333333,
"line_max": 111,
"alpha_frac": 0.5779710145,
"autogenerated": false,
"ratio": 3.218283582089552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4296254596589552,
"avg_score": null,
"num_lines": null
} |
__author__ = 'davburge'
import ss_constants
import ss_inputs
import ss_math
import ss_validators
import Tkinter as tk
from Tkconstants import *
class Application(tk.Frame):
def __init__(self, master=None):
'''Main frame of the application'''
tk.Frame.__init__(self, master)
self.master = master
self.grid(column=0, row=0, sticky=(N, W, E, S))
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
# Ensures that tk variables are setup before use
ss_inputs.setup()
# These two Widgets get deleted and reformed, start as None to prevent errors
self.classWidget = None
self.subskillWidget = None
self.createWidgets()
def createWidgets(self):
'''Builds gui widgets'''
self.buildShipClass()
self.buildSkillDisplay()
self.buildStatDisplay()
self.buildAugDisplay()
self.buildModDisplay()
self.buildCalcButton()
self.buildQuitButton()
def buildShipClass(self):
'''Builds the ship class radiobuttons and edit ship button'''
shipClassLabel = tk.LabelFrame(self.master, text='Ship Type')
shipClassLabel.grid(column=0, row=0, rowspan=2, sticky=N+S, padx=2)
for key, value in ss_constants.ships.items():
if key != 'all':
radiobutton = tk.Radiobutton(shipClassLabel, text=value, variable=ss_inputs.shipClass, value=key)
if key == 'lfi':
i=0
elif key == 'hfi':
i=1
elif key == 'sfr':
i=2
elif key == 'ifr':
i=3
elif key == 'cap':
i=4
else:
i=-1
radiobutton.grid(column=0, row=i, sticky=W)
shipEditButton = tk.Button(shipClassLabel, text="Edit Ship Mods", command=self.buildShipEditDisplay)
shipEditButton.grid(column=0, row=5)
def buildShipEditDisplay(self):
'''Builds edit ship popup'''
if hasattr(self, 'shipEditWindow') and self.shipEditWindow is not None:
self.shipEditWindow.deiconify()
else:
self.shipEditWindow = tk.Toplevel(self)
self.shipEditWindow.resizable(0,0)
self.shipEditWindow.title("Ship")
self.shipEditWindow.protocol("WM_DELETE_WINDOW", self.shipEditWindow.withdraw)
inbuiltStatsLabel = tk.LabelFrame(self.shipEditWindow, text="Ship Inbuilt Stats")
inbuiltStatsLabel.grid(column=0, row=0, columnspan=2, padx=1, sticky=W+E)
vcmd_all = (self.shipEditWindow.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
i=0
for key, value in ss_inputs.shipMods.items():
# Uses a percent sign for all built inbuilt elec charge
if key == 'inbuiltElec':
symbol = '/sec'
vcmd = (self.shipEditWindow.register(ss_validators.shipStatValidate),
'%d', '%i', '%P', '%s', '%S')
else:
symbol = '%'
vcmd = vcmd_all
statLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames[key], padx=3)
statLabel.grid(column=0, row=i, sticky=E)
statEntry = tk.Entry(inbuiltStatsLabel, width=6, justify=RIGHT, textvariable=value,
validate='key', validatecommand=vcmd)
statEntry.grid(column=1, row=i)
percentLabel = tk.Label(inbuiltStatsLabel, text=symbol, padx=3)
percentLabel.grid(column=2, row=i)
i+=1
damageLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames['damageType'], padx=3)
damageLabel.grid(column=0, row=i, sticky=E)
damageMenubutton = tk.Menubutton(inbuiltStatsLabel, textvariable=ss_inputs.damageType, relief=RAISED)
damageMenubutton.grid(column=1, columnspan=2, row=i)
damageMenubutton.menu = tk.Menu(damageMenubutton, tearoff=0)
damageMenubutton['menu'] = damageMenubutton.menu
for key, value in ss_constants.elementTypes.items():
damageMenubutton.menu.add_radiobutton(label=key, variable=ss_inputs.damageType)
i+=1
resistLabel = tk.Label(inbuiltStatsLabel, text=ss_constants.statNames['resistType'], padx=3)
resistLabel.grid(column=0, row=i, sticky=E)
resistMenubutton = tk.Menubutton(inbuiltStatsLabel, textvariable=ss_inputs.resistType, relief=RAISED)
resistMenubutton.grid(column=1, columnspan=2, row=i)
resistMenubutton.menu = tk.Menu(resistMenubutton, tearoff=0)
resistMenubutton['menu'] = resistMenubutton.menu
for key, item in ss_constants.elementTypes.items():
resistMenubutton.menu.add_radiobutton(label=key, variable=ss_inputs.resistType)
i+=1
helpButton = tk.Button(self.shipEditWindow, text="Help", command=self.shipHelp)
helpButton.grid(column=0, row=i)
updateButton = tk.Button(self.shipEditWindow, text="Update and Close", command=self.shipEditWindow.withdraw)
updateButton.grid(column=1, row=i, pady=2)
def shipHelp(self):
'''Builds help popup for ship edit popup'''
if hasattr(self, 'helpWindow') and self.helpWindow is not None:
self.helpWindow.deiconify()
else:
self.helpWindow = tk.Toplevel(self)
self.helpWindow.resizable(0,0)
self.helpWindow.title("Help")
self.helpWindow.protocol("WM_DELETE_WINDOW", self.helpWindow.withdraw)
helpMessage = tk.Message(self.helpWindow, text=ss_constants.shipHelp, justify=CENTER)
helpMessage.grid(column=0, row=0)
def buildSkillDisplay(self):
'''Builds the skill display section'''
self.skillDisplayLabel = tk.LabelFrame(self.master, text='Skills')
self.skillDisplayLabel.grid(column=1, row=0, columnspan=3, rowspan=2, sticky=N, padx=2)
self.buildFocusSkill()
self.setDefaultClass()
self.buildSubskills()
self.buildMiscSkills()
def buildFocusSkill(self):
'''Builds the focus skill radiobuttons and level entry'''
focusLabel = tk.LabelFrame(self.skillDisplayLabel, text='Focus Skill')
focusLabel.grid(column=0, row=0, sticky=N+S)
for key, value in ss_constants.skill_tree.items():
radiobutton = tk.Radiobutton(focusLabel, text=value['name'], variable=ss_inputs.focusSkill,
value=key, command=self.setDefaultClass)
if key == 'combat_focus':
i=0
elif key =='recon_focus':
i=1
elif key == 'support_focus':
i=2
elif key == 'fleet_focus':
i=3
else: # Unused
i=-1
radiobutton.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(focusLabel, text='Level (0-22):')
level = tk.Spinbox(focusLabel, from_=0, to=22, width=3, textvariable=ss_inputs.focusLevel)
label.grid(column=0, row=4, sticky=W)
level.grid(column=1, row=4, sticky=E, padx=4)
def buildClassSkill(self):
'''Builds the class skill radiobuttons and level entry'''
if self.classWidget is not None:
# Removes widget entirely to reset proper skill selection
self.classWidget.grid_remove()
classLabel = tk.LabelFrame(self.skillDisplayLabel, text='Class Skill')
classLabel.grid(column=1, row=0, columnspan=3, sticky=N+S)
for key, value in ss_constants.skill_tree[ss_inputs.focusSkill.get()].items():
if key != 'name':
radiobutton = tk.Radiobutton(classLabel, text=value['name'], variable=ss_inputs.classSkill,
value=key, command=self.buildSubskills)
# Chooses index based upon typical in-game listing of which skills are first
if (key == 'berserker' or key == 'speed_demon'
or key == 'shield_monkey' or key == 'fleet_commander'):
i=0
else:
i=1
radiobutton.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(classLabel, text='Level (0-22):')
level = tk.Spinbox(classLabel, from_=0, to=22, width=3, textvariable=ss_inputs.classLevel)
label.grid(column=0, row=2, sticky=W)
level.grid(column=1, row=2, sticky=E, padx=4)
self.classWidget = classLabel
# Runs buildSubskills() to reset subskills radiobuttons based upon new chosen first index
self.buildSubskills()
def buildSubskills(self):
'''Builds the subskill radiobuttons and level entries'''
if self.subskillWidget is not None:
# Removes widget entirely to reset proper skill selection
self.subskillWidget.grid_remove()
subskillLabel = tk.LabelFrame(self.skillDisplayLabel, text='Subskills')
subskillLabel.grid(column=4, row=0, sticky=NSEW)
i=0
for key, value in ss_constants.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()].items():
if key != 'name':
skill = None
skill_level = None
# Uses subSkill_#/subSkill_#Level to reduce variable usage
if i==0:
ss_inputs.subSkill_1.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_1Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
elif i==2:
ss_inputs.subSkill_2.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_2Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
elif i==4:
ss_inputs.subSkill_3.set(key)
skillName = tk.Label(subskillLabel, text=value)
skillName.grid(column=0, columnspan=2, row=i, sticky=W)
label = tk.Label(subskillLabel, text='Level (0-20):')
level = tk.Spinbox(subskillLabel, from_=0, to=20, width=3, textvariable=ss_inputs.subskill_3Level)
label.grid(column=0, row=i+1, sticky=W)
level.grid(column=1, row=i+1, sticky=E, padx=4)
i+=2
self.subskillWidget = subskillLabel
def buildMiscSkills(self):
'''Builds aug tweak and imperial tweak inputs'''
augTweakLabel = tk.Label(self.skillDisplayLabel, text="Aug Tweaking (0-25):")
augTweakLabel.grid(column=0, row=1, sticky=E)
augTweaklevel = tk.Spinbox(self.skillDisplayLabel, from_=0, to=25, width=3,
textvariable=ss_inputs.augTweakLevel)
augTweaklevel.grid(column=1, row=1, sticky=W)
impTweakLabel = tk.Label(self.skillDisplayLabel, text="Imperial Tweaking (0-5):")
impTweakLabel.grid(column=2, row=1, columnspan=3, padx=40, sticky=E)
impTweaklevel = tk.Spinbox(self.skillDisplayLabel, from_=0, to=10, width=3,
textvariable=ss_inputs.impTweakLevel)
impTweaklevel.grid(column=4, row=1, padx=6, sticky=E)
def setDefaultClass(self):
'''Called by choosing a focus skill, resets proper pre-selected class'''
if (ss_inputs.focusSkill.get() == 'combat_focus'):
ss_inputs.classSkill.set('berserker')
elif (ss_inputs.focusSkill.get() == 'recon_focus'):
ss_inputs.classSkill.set('speed_demon')
elif (ss_inputs.focusSkill.get() == 'support_focus'):
ss_inputs.classSkill.set('shield_monkey')
elif (ss_inputs.focusSkill.get() == 'fleet_focus'):
ss_inputs.classSkill.set('fleet_commander')
self.buildClassSkill()
def buildStatDisplay(self):
'''Builds stat inputs, bonus displays, and total displays'''
displayLabel = tk.LabelFrame(self.master, text='Input and Output')
displayLabel.grid(column=0, row=2, columnspan=2, sticky=W+E, padx=2)
self.buildBaseInput(displayLabel)
self.buildBonusAmount(displayLabel)
self.buildFinalDisplay(displayLabel)
def buildBaseInput(self, master):
'''Builds stat inputs with validator'''
label = tk.Label(master, text='Base Input', padx=20)
baseInputLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
baseInputLabel.grid(column=0, row=0, sticky=W+E)
vcmd_all = (master.register(ss_validators.shipStatValidate),
'%d', '%i', '%P', '%s', '%S')
i=0
for key, value in ss_inputs.baseInputs.items():
statName = tk.Label(baseInputLabel, text=ss_constants.statNames[key])
statName.grid(column=0, row=i, sticky=W)
if key == 'shieldCharge' or key == 'energyCharge' or key == 'RoF' or key == 'resist':
width = 5
vcmd = (master.register(ss_validators.shipDecimalValidate),
'%d', '%i', '%P', '%s', '%S')
if key == 'RoF':
unitText = 's'
elif key == 'shieldCharge':
unitText = '/s'
elif key == 'energyCharge':
unitText = '/1.2s'
elif key == 'resist':
unitText = '%'
vcmd = (master.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
else:
unitText = 'err'
unit = tk.Label(baseInputLabel, text=unitText)
unit.grid(column=2, row=i, sticky=W)
else:
width = 10
vcmd = vcmd_all
amount = tk.Entry(baseInputLabel, width=width, justify=RIGHT, textvariable=value['initial'],
validate='key', validatecommand=vcmd)
# Width/5 so that larger entries span 2 columns
amount.grid(column=1, row=i, columnspan=(width/5), sticky=W, padx=2)
if key == 'elecTemp':
statName.configure(text="Firing Energy:")
statName.update()
i+=1
def buildBonusAmount(self, master):
'''Builds the display for bonus amount'''
label = tk.Label(master, text='Bonus', padx=10)
bonusAmountLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
bonusAmountLabel.grid(column=1, row=0, sticky=NW+E)
i=0
for key, value in ss_inputs.baseInputs.items():
mathSign = u"\u2715" # is the multiplication sign
if key == 'RoF':
mathSign = u"\u00F7" # is the division sign
symbol = tk.Label(bonusAmountLabel, text=mathSign)
amount = tk.Entry(bonusAmountLabel, width=4, state="readonly",
justify=RIGHT, textvariable=value['bonus'])
symbol.grid(column=0, row=i, sticky=W+E)
amount.grid(column=1, row=i, sticky=E, padx=6)
i+=1
def buildFinalDisplay(self, master):
'''Builds the display for the total amount'''
label = tk.Label(master, text='Overall', padx=15)
finalLabel = tk.LabelFrame(master, labelwidget=label, borderwidth=0)
finalLabel.grid(column=2, row=0, sticky=W+E)
i=0
for key, value in ss_inputs.baseInputs.items():
equalsSign = tk.Label(finalLabel, text="=")
equalsSign.grid(column=0, row=i, sticky=W)
width = 10
if key == 'shieldCharge' or key == 'energyCharge' or key == 'RoF':
width = 5
amount = tk.Entry(finalLabel, width=width, justify=RIGHT, state="readonly", textvariable=value['overall'])
# Width/5 so that larger entries span 2 columns
amount.grid(column=1, row=i, columnspan=(width/5), sticky=W, padx=2)
if width == 5:
if key == 'shieldCharge':
unitText = '/s'
elif key == 'energyCharge':
unitText = '/1.2s'
elif key == 'RoF':
unitText = 's'
else:
unitText = 'err'
unit = tk.Label(finalLabel, text=unitText)
unit.grid(column=2, row=i, sticky=W)
i+=1
def buildAugDisplay(self):
'''Builds aug display section'''
self.augDisplayLabel = tk.LabelFrame(self.master, text='Augmenters')
self.augDisplayLabel.grid(column=2, row=2, columnspan=2, rowspan=3, sticky=NE+W, padx=2)
self.buildAugConfig()
def buildAugConfig(self):
'''Builds aug number change input and freeze box'''
self.augLabelList = []
self.augButtonList = []
self.augClearButtonList = []
label = tk.Label(self.augDisplayLabel, text='Number:')
label.grid(column=0, row=0, padx=4, sticky=E)
self.augNumber = tk.Spinbox(self.augDisplayLabel, from_=0, to=6, width=2, state="readonly",
textvariable=ss_inputs.augNumber, command=self.changeAugAmount)
self.augNumber.grid(column=1, row=0, sticky=E)
self.freezeAugsCheck = tk.IntVar()
checkbutton = tk.Checkbutton(self.augDisplayLabel, text="Freeze",
variable=self.freezeAugsCheck, command=self.freezeAugs)
checkbutton.grid(column=2, row=0, columnspan=2, sticky=E)
def changeAugAmount(self):
'''Adds or removes a set of aug edit/reset buttons'''
if int(ss_inputs.augNumber.get()) > len(self.augButtonList):
numberToAdd = int(ss_inputs.augNumber.get())-(len(self.augButtonList))
for i in range(numberToAdd):
position = len(self.augButtonList) + 1
name = "Aug " + str(position) + ":"
augLabel = tk.Label(self.augDisplayLabel, width=15, anchor=W, text=name)
augLabel.grid(column=0, row=position, columnspan=3, padx=4, sticky=W)
self.augLabelList.append(augLabel)
editButton = tk.Button(self.augDisplayLabel, text="Edit",
command=lambda: self.editAug(position))
editButton.grid(column=3, row=position, sticky=E)
self.augButtonList.append(editButton)
resetButton = tk.Button(self.augDisplayLabel, text="Reset",
command=lambda: self.resetAug(position))
resetButton.grid(column=4, row=position, padx=4, sticky=W)
self.augClearButtonList.append(resetButton)
ss_inputs.augs.append(ss_inputs.augmenter())
else:
for i in range(len(self.augButtonList)-(int(ss_inputs.augNumber.get()))):
toRemove = self.augLabelList.pop()
toRemove.grid_remove()
toRemove = self.augButtonList.pop()
toRemove.grid_remove()
toRemove = self.augClearButtonList.pop()
toRemove.grid_remove()
toRemove = None
ss_inputs.augs.pop()
def editAug(self, augNum):
'''Edit augmenter popup'''
augNum = augNum - 1
if not hasattr(self, 'augWindows'):
self.augWindows = []
if len(self.augWindows) > augNum and self.augWindows[augNum] is not None:
self.augWindows[augNum].deiconify()
else:
popup = augToplevel(master=self, augNum=augNum)
popup.resizable(0,0)
self.augWindows.insert(augNum, popup)
title = ss_inputs.augs[augNum]['name'].get()
if title == "":
title = "Aug " + str(augNum + 1)
popup.title(title)
statLabel = tk.Label(popup, text=ss_constants.statNames['name'], padx=3)
statLabel.grid(column=0, row=0, sticky=W+E)
statEntry = tk.Entry(popup, width=15, justify=RIGHT, textvariable=ss_inputs.augs[augNum].get('name'))
statEntry.grid(column=1, row=0, columnspan=3)
augFrame= tk.LabelFrame(popup, text="Augmenter Stats")
augFrame.grid(column=0, row=1, columnspan=4, padx=1, sticky=W+E)
vcmd = (popup.register(ss_validators.bonusStatValidate),
'%d', '%i', '%P', '%s', '%S')
# vcmd = (popup.register(self.bonusStatValidate),
# '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
# %v = validation_type, %V = trigger_type, %W = widget_name
i=1
for key, value in ss_inputs.augs[augNum].items():
if key != 'enabled' and key != 'name':
statLabel = tk.Label(augFrame, text=ss_constants.statNames[key], padx=3)
statLabel.grid(column=0, row=i, sticky=E)
statEntry = tk.Entry(augFrame, width=6, justify=RIGHT, textvariable=value,
validate='key', validatecommand=vcmd)
statEntry.grid(column=1, row=i)
percentLabel = tk.Label(augFrame, text="%", padx=3)
percentLabel.grid(column=2, row=i)
i+=1
resetButton = tk.Button(popup, text="Reset", command=lambda: self.resetAug(augNum))
resetButton.grid(column=0, row=2)
updateButton = tk.Button(popup, text="Update and Close", command=lambda: popup.destroy())
updateButton.grid(column=1, row=2, columnspan=3)
def updateAug(self, augNum):
'''Update aug name in main frame'''
name = ss_inputs.augs[augNum]['name'].get()
if name == "":
name = "Aug " + str(augNum + 1) + ":"
self.augLabelList[augNum].configure(text=name)
self.augLabelList[augNum].update()
def resetAug(self, augNum):
'''Reset all values to null for the aug required'''
for key, value in ss_inputs.augs[augNum-1].items():
if key != 'enabled':
value.set("")
self.augLabelList[augNum-1].configure(text="Aug " + str(augNum) + ":")
self.augLabelList[augNum-1].update()
def freezeAugs(self):
'''Toggles the freezing of the amount of augs available and the input being disabled'''
if self.freezeAugsCheck.get(): # frozen
self.augNumber.configure(state=DISABLED)
self.augNumber.update()
else: # unfrozen
self.augNumber.configure(state="readonly")
self.augNumber.update()
def buildModDisplay(self):
'''Builds the buttons for item mods and auras'''
modDisplayLabel = tk.LabelFrame(self.master, text='Misc. Mods')
modDisplayLabel.grid(column=2, row=1, columnspan=3, rowspan=3, sticky=SW+SE)
button = tk.Button(modDisplayLabel, text="Edit Item Mods", command=self.buildItemModInput)
button.grid(column=0, row=0, sticky=E, padx=1, pady=1)
button = tk.Button(modDisplayLabel, text="Edit Auras", command=self.buildAuraInput)
button.grid(column=1, row=0, sticky=E, padx=1, pady=1)
def buildItemModInput(self):
'''Builds item mod input popup'''
itemModPopup = tk.Toplevel(master=self)
itemModPopup.resizable(0,0)
itemModPopup.title("Item Mods")
addModButton = tk.Button(itemModPopup, text="Add Item Mod", command=self.addItemMod)
addModButton.grid(column=0, row=0, pady=2, padx=2)
resetAllButton = tk.Button(itemModPopup, text="Reset and Remove All Mods", command=self.resetItemMods)
resetAllButton.grid(column=1, row=0, pady=2, padx=2)
self.modDisplayFrame = tk.LabelFrame(itemModPopup, text="Mods:")
self.modDisplayFrame.grid(column=0, row=1, columnspan=2, padx=2, sticky=NSEW)
message = tk.Message(self.modDisplayFrame, name='helpMessage', text='Section Disabled')
message.grid(column=0, row=0)
def addItemMod(self):
for name, widget in self.modDisplayFrame.children.items():
if name == 'helpMessage':
if widget.grid_info():
widget.grid_remove()
editModPopup = itemModToplevel(master=self)
editModPopup.resizable(0,0)
editModPopup.title("This function is disabled")
def resetItemMods(self):
for name, widget in self.modDisplayFrame.children.items():
if name == 'helpMessage':
widget.grid(column=0, row=0)
pass
def updateMods(self):
print "test"
def buildAuraInput(self):
'''Builds aura input popup'''
popup = tk.Toplevel(master=self)
popup.resizable(0,0)
popup.title("This function is disabled")
message = tk.Message(master=popup, text="(July 11, 2014): This is disabled until I can make the gui for it")
message.pack()
def buildCalcButton(self):
'''Builds the calculate button'''
calcButton = tk.Button(self.master, text='Calculate', command=ss_math.calculate)
calcButton.grid(column=0, row=4, sticky=NE, pady=2)
def buildQuitButton(self):
'''Builds the quit button'''
quitButton = tk.Button(self.master, text='Quit', command=self.quit)
quitButton.grid(column=1, row=4, sticky=NE, pady=2)
def destroy(self):
'''Overrides destroy due to possible exception, forces a hard quit'''
try:
tk.Frame.destroy(self)
except tk.TclError or TypeError:
pass
class augToplevel(tk.Toplevel):
    '''Extension of Tkinter.Toplevel to allow for updating an aug on destroy of the popup'''
    def __init__(self, master, augNum=None, *args, **kwargs):
        tk.Toplevel.__init__(self, master=master)
        self.master = master
        # 0-based index of the aug this popup edits (set by Application.editAug)
        self.augNum = augNum
    def destroy(self):
        # Sync the aug's display name back to the main window, then hide the
        # popup instead of destroying it so it can be reused via deiconify().
        self.master.updateAug(self.augNum)
        tk.Toplevel.withdraw(self)
class itemModToplevel(tk.Toplevel):
    '''Extension of Tkinter.Toplevel to allow for updating the itemMod list on destroy of the popup'''
    def __init__(self, master, *args, **kwargs):  # fixed typo: was **kwards
        tk.Toplevel.__init__(self, master=master)
        self.master = master
    def destroy(self):
        # Refresh the mod list, then hide (not destroy) so the popup is reusable
        self.master.updateMods()
        tk.Toplevel.withdraw(self)
"repo_name": "dburgess560/sscalc",
"path": "ss_gui.py",
"copies": "1",
"size": "27774",
"license": "apache-2.0",
"hash": -4543361302779968000,
"line_mean": 45.2380952381,
"line_max": 120,
"alpha_frac": 0.5681212645,
"autogenerated": false,
"ratio": 3.7634146341463413,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9807742624201468,
"avg_score": 0.0047586548889748176,
"num_lines": 588
} |
__author__ = 'davburge'
import ss_constants
import ss_inputs
import ss_skills
def calculate():
    '''Recompute every calculated stat and push the results into the GUI.

    Stats with a zero/blank base input get their bonus and overall fields
    cleared instead of being computed.
    '''
    atBonus = getATBonus()
    for stat in ss_constants.statNames.keys():
        if stat not in ss_constants.calculatedStats:
            continue
        baseAmount = getBaseAmount(stat)
        if baseAmount == 0:
            setBonusField(stat, "")
            setFinalField(stat, "")
            continue
        augBonus = getAugBonus(stat, atBonus)
        skillBonus = getSkillsBonus(stat)
        auraBonus = getAuraBonus(stat)
        # Item-mod bonuses are not implemented yet, hence the fixed 1
        getOverallBonus(stat, baseAmount, augBonus, skillBonus, 1, auraBonus)
def getBaseAmount(stat):
    '''Read the user's base input for *stat* and return it as a float.

    Blank entries count as 0 (calculate() then skips the stat).  A lone "."
    counts as 0 and thousands separators are stripped before conversion.
    '''
    try:
        baseAmount = ss_inputs.baseInputs[stat]['initial'].get()
        if baseAmount != '':
            baseAmount = baseAmount.replace(',', '')
            if baseAmount == '.':
                baseAmount = '0'
            baseAmount = float(baseAmount)
        else:
            baseAmount = 0
    # FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        # NOTE(review): silently falling back to 1 hides bad input -- confirm
        # this is the intended behaviour rather than clearing the field.
        baseAmount = 1
    return baseAmount
def getATBonus():
    '''Return the aug-tweaking multiplier (1.0 means no bonus).

    +4% per Aug Tweaking level and +2% per Imperial Tweaking level; the
    engineer class adds its own per-level aug-tweak bonus on top.
    '''
    augLevel = int(ss_inputs.augTweakLevel.get() or '0')  # blank entry -> 0
    impLevel = int(ss_inputs.impTweakLevel.get() or '0')
    bonus = (0.04 * augLevel) + (0.02 * impLevel)
    if ss_inputs.classSkill.get() == 'engineer':
        bonus += ss_skills.engineer['augTweak'] * int(ss_inputs.classLevel.get())
    return bonus + 1
def getAugBonus(stat, atBonus):
    '''Combine every augmenter's percentage for *stat* into one multiplier.

    Negative entries are first rescaled with x/(1-|x|) so penalties sum
    linearly; the aug-tweak multiplier *atBonus* is applied before the +1
    baseline, and getAugMods() then folds in the ship's inbuilt bonus.
    The statement order here is deliberate -- the rounding and the scaling
    of the negative branch depend on it.
    '''
    augBonus = 0
    for aug in ss_inputs.augs:
        augStat = aug[stat].get()
        # Skip blanks and half-typed entries (lone sign or decimal point)
        if augStat != '' and augStat != '.' and augStat != '-' and augStat != '+':
            augStat = float(augStat) / 100  # change from percentage
            if augStat > 0:  # positive
                augBonus += augStat
            elif augStat < 0:  # negative
                # Undo the game's penalty scaling so penalties add linearly
                augBonus += (augStat / (1 - abs(augStat)))
    augBonus = round(augBonus, 2)
    if augBonus > 0:
        augBonus *= atBonus
        augBonus = augBonus + 1
    elif augBonus < 0:
        augBonus *= atBonus
        # Re-apply the penalty scaling after the tweak multiplier
        augBonus = augBonus / (1 - augBonus)
        augBonus += 1
    else:  # If equals 0, then no bonus
        augBonus = 1
    return getAugMods(stat, round(augBonus, 2))
def getAugMods(stat, augBonus):
    '''Add the ship's inbuilt percentage bonus for *stat* to the augmenter
    multiplier and return the combined value.'''
    raw = ss_inputs.shipMods[stat].get()
    inbuilt = float(raw or '0') / 100  # a blank entry counts as zero
    return augBonus + inbuilt
def getSkillsBonus(stat):
    '''Return the combined skill multiplier for *stat*.

    The product of the focus, class and up-to-three subskill bonuses;
    1 when the focus level field is blank.  Subskills with a blank or
    zero level contribute a factor of 1.
    '''
    overallSkillBonus = 1
    if ss_inputs.focusLevel.get() != '':
        overallSkillBonus = getFocusBonus(stat) * getClassBonus(stat)
        # The three subskill slots are identical apart from their variables
        slots = ((ss_inputs.subSkill_1, ss_inputs.subskill_1Level),
                 (ss_inputs.subSkill_2, ss_inputs.subskill_2Level),
                 (ss_inputs.subSkill_3, ss_inputs.subskill_3Level))
        for skillVar, levelVar in slots:
            level = levelVar.get()
            if level != '' and level != '0':
                overallSkillBonus *= getSubSkillBonus(stat, skillVar.get(), int(level))
    return round(overallSkillBonus, 2)
def getFocusBonus(stat):
    '''Return the focus-skill multiplier for *stat* (1 when no bonus applies).

    skill_tree entries may key their bonuses by specific ship class or under
    an 'all' entry; the nested try/KeyError probing picks whichever exists.
    '''
    focusBonus = 1
    focusLevel = int(ss_inputs.focusLevel.get())
    try:  # check if class type in focus skill
        shipClassStats = ss_skills.skill_tree[ss_inputs.focusSkill.get()]['shipClass'][ss_inputs.shipClass.get()]
        try:  # check if stat has bonus and apply
            focusBonus += (focusLevel * shipClassStats[stat])
            return focusBonus
        except KeyError:  # no bonuses to this stat
            return focusBonus
    except KeyError:  # use all class type
        shipClassStats = ss_skills.skill_tree[ss_inputs.focusSkill.get()]['shipClass']['all']
        if shipClassStats is None:  # no bonuses to this ship
            return 1
        else:
            try:
                focusBonus += (focusLevel * shipClassStats[stat])
                return focusBonus
            except KeyError:  # no bonuses to this stat
                return 1
def getClassBonus(stat):
    '''Return the class-skill multiplier for *stat* (1 when no bonus applies).

    Missing skill_tree entries simply mean "no bonus" (KeyError -> skip).
    For damage/resist, the element-specific bonus (e.g. 'energyDamage',
    built from elementTypes) is added on top of the generic stat bonus.
    '''
    classBonus = 1
    classLevel = ss_inputs.classLevel.get()
    if classLevel != '':
        classLevel = int(classLevel)
        try:
            classStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][stat]
            classBonus += (classLevel * classStat)
        except KeyError:
            pass
        if stat == 'damage':
            try:
                damageType = ss_constants.elementTypes[ss_inputs.damageType.get()] + "Damage"
                classStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][damageType]
                classBonus += (classLevel * classStat)
            except KeyError:
                pass
        elif stat == 'resist':
            try:
                resistType = ss_constants.elementTypes[ss_inputs.resistType.get()] + "Resist"
                classStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][resistType]
                classBonus += (classLevel * classStat)
            except KeyError:
                pass
    return classBonus
def getSubSkillBonus(stat, subSkill, subSkillLevel):
    '''Return the multiplier one subskill grants to *stat*.

    Mirrors getClassBonus: missing skill_tree entries mean "no bonus", and
    for damage/resist the element-specific entry is added on top of the
    generic one.
    '''
    subSkillBonus = 1
    try:
        subSkillStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][subSkill][stat]
        subSkillBonus += (subSkillLevel * subSkillStat)
    except KeyError:
        pass
    if stat == 'damage':
        try:
            damageType = ss_constants.elementTypes[ss_inputs.damageType.get()] + "Damage"
            subSkillStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][subSkill][damageType]
            subSkillBonus += (subSkillLevel * subSkillStat)
        except KeyError:
            pass
    elif stat == 'resist':
        try:
            resistType = ss_constants.elementTypes[ss_inputs.resistType.get()] + "Resist"
            subSkillStat = ss_skills.skill_tree[ss_inputs.focusSkill.get()][ss_inputs.classSkill.get()][subSkill][resistType]
            subSkillBonus += (subSkillLevel * subSkillStat)
        except KeyError:
            pass
    return subSkillBonus
def getAuraBonus(stat):
    '''Returns a tuple of (isPercent, amount).

    Placeholder: aura bonuses are not implemented yet, so every stat
    reports a percentage-type bonus of zero.
    '''
    isPercent, amount = True, 0
    return (isPercent, amount)
def getOverallBonus(stat, baseAmount, augBonus, skillBonus, itemBonus, auraBonus):
    """Combine every multiplier, compute the final value for *stat*, and push
    the results into the bonus/overall UI fields.

    :param stat: stat key (str)
    :param baseAmount: base value of the stat (number)
    :param augBonus: augmentation multiplier (number)
    :param skillBonus: skill multiplier (number)
    :param itemBonus: item multiplier (number)
    :param auraBonus: (isPercent, amount) tuple as returned by getAuraBonus
    """
    overallBonus = augBonus * skillBonus * itemBonus
    if stat == 'energyCharge':
        baseAmount += getInbuiltElec()
    # A percentage-type aura scales the combined multiplier.
    if auraBonus[0] and auraBonus[1] != 0:
        overallBonus *= auraBonus[1]
    # BUG FIX: the original did `finalStat += float(auraBonus)` *after*
    # converting finalStat to a string, which raised a TypeError (float() of
    # a tuple, then str + float).  Apply the flat amount numerically before
    # formatting instead.  TODO confirm the intended point of application.
    flatAura = float(auraBonus[1]) if not auraBonus[0] else 0.0
    if stat == 'resist':
        # Resists are percentages; the bonus shrinks the *unresisted*
        # fraction (1 - resist) when the base resist is positive.
        baseAmount = baseAmount / 100
        if baseAmount > 0:
            finalStat = 1 - ((1 - baseAmount) / overallBonus)
        else:
            finalStat = baseAmount / overallBonus
        overallBonus = "???"  # combined multiplier is not meaningful here
        finalStat = (finalStat + flatAura) * 100
        finalStat = str(round(finalStat, 2)) + "%"
    elif stat == 'RoF':
        # Rate of fire improves by *dividing* the base delay.
        finalStat = str(round(baseAmount / overallBonus + flatAura, 3))
    else:
        finalStat = str(round(baseAmount * overallBonus + flatAura, 3))
    setBonusField(stat, overallBonus)
    setFinalField(stat, finalStat)
def getInbuiltElec():
    """Return the ship's inbuilt electricity contribution.

    Reads the 'inbuiltElec' ship-mod field -- a string that may contain
    thousands separators -- and scales it by 1.2.  An empty field counts
    as 0.
    """
    raw = ss_inputs.shipMods['inbuiltElec'].get()
    if raw == '':
        return 0
    return float(raw.replace(',', '')) * 1.2
def setBonusField(stat, bonus):
    # Push the combined multiplier (or "???" for resists) into the stat's
    # 'bonus' UI field as a string.
    ss_inputs.baseInputs[stat]['bonus'].set(str(bonus))
def setFinalField(stat, final):
ss_inputs.baseInputs[stat]['overall'].set(final) | {
"repo_name": "dburgess560/sscalc",
"path": "ss_math.py",
"copies": "1",
"size": "8519",
"license": "apache-2.0",
"hash": 9121442646172817000,
"line_mean": 35.047826087,
"line_max": 125,
"alpha_frac": 0.5810541143,
"autogenerated": false,
"ratio": 3.334246575342466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4415300689642466,
"avg_score": null,
"num_lines": null
} |
__author__ = 'davburge'
#Stealth?
#Crits
#Recoil?
#Firing Energy?
#Shadow Ambush (Seer)
# Stat keys currently wired into the calculator's UI and math.
stats = [
    'shieldBank',
    'shieldCharge',
    'energyBank',
    'energyCharge',
    'hull',
    'speed',
    'damage',
    'RoF',
    'range',
    'vis',
    'multifire',
    'docking',
    'firingEnergy',
    'hostility',
    'resist',
    'physDamage',
    'ethDamage',
]
# Stat keys that exist in the skill data but are not wired up yet.
# NOTE(review): 'docking' appears in both lists -- confirm which is intended.
unused_stats = [
    'tracking',
    'weaponHold',
    'projVelocity',
    'equipSize',
    'critChance',
    'critResist',
    'critDamage',
    'radar',
    'thrust',
    'agility',
    'stealth',
    'docking',
    'defDroneOps',
    'offDroneOps',
    'lastDroneOps',
]
# Damage/resist element families; 'all' applies to every element.
resists = [
    'energy',
    'laser',
    'heat',
    'phys',
    'rad',
    'surj',
    'mine',
    'trans',
    'all',
]
# Berserker Skills
# Values are per-level fractional bonuses; ss_math multiplies them by the
# trained skill level (negative values are penalties).
impervious_armor = {
    'resist':0.05,
    'speed':-0.02
}
ordinance_supremacy = {
    'energyBank':0.04,
    'damage':0.01
}
arsenal_expertise = {
    'multifire':0.05,
    'weaponHold':0.025,
    'damage':0.01
}
# Sniper Skills
# Values are per-level fractional bonuses; ss_math multiplies them by the
# trained skill level.
stalking = {
    'tracking':0.06,
    'stealth':0.03,
    'damage':0.02
}
sharpshooting = {
    'range':0.06,
    'damage':0.01,
    # BUG FIX: keys renamed from the misspelled 'critChange'/'projVeloc' to
    # match the spelling used everywhere else in this module ('critChance'
    # in speedy_firing/shadow_ambush, 'projVelocity' in unused_stats) --
    # otherwise lookups by the canonical name would never find these.
    'critChance':0.01,
    'projVelocity':0.05,
    'recoil':0.05
}
efficiency = {
    'damage':0.05,
    'recoil':0.025
}
# Speed Demon Skills
speedy_movement = {
    'speed':0.02,
    'thrust':0.03
}
speedy_firing = {
    'RoF':0.03,
    'damage':0.02,
    'critChance':0.01
}
dogfighting = {
    'damage':0.02,
    'tracking':0.02,
    'turning':0.02,
    'range':-0.02
}
# Seer Skills
psionic_shrouding = {
    'stealth':0.05,
    'critResist':0.05,
}
psychic_sight = {
    'radar':0.1,
    'critDamage':0.05,
    'damage':0.02,
}
shadow_ambush = {
    'critChance':0.01, # May be 0.03. There is a +2% crit Chance when unseen by target on top of this
    'damage':0.02,
}
# Shield Monkey Skills
shield_boosting = {
    'shieldBank':0.025,
    'shieldCharge':0.025,
}
shield_manipulation = {
    'damage':0.015,
    'shieldCharge':0.01,
    # Nested 'slaves' table: bonuses applied to slave ships rather than
    # the player's own ship.
    'slaves': {
        'damage':0.03,
    }
}
shield_transference = {
    'transDamage':0.065,
    'transResist':0.04,
}
# Engineer Skills
drone_mastery = {
    'defDroneOps':0.01,
    'offDroneOps':0.01,
    'lastDroneOps':0.01,
}
beam_mastery = {
    'beamPower':0.05,
    'beamRange':0.05,
}
damage_control = {
    'resist':0.02,
    'critResist':0.02,
}
# Fleet Commander Skills
slave_mastery = {
    'slaves': {
        'shieldBank':0.0015,
        'energyBank':0.0015,
        'speed':0.0015,
        'turning':0.0015,
        'damage':0.0015,
        'shieldCharge':0.0015,
        'energyCharge':0.0015,
        'thrust':0.0015,
        'vis':0.0015,
        'hull':0.0015,
        'weight':0.0015,
        'tracking':0.0015,
        'radar':0.0015,
        'range':0.0015,
    }
}
radiation_expert = {
    'radDamage':0.1,
    'wildSlaves':0.03,
}
flight_controller = {
    'fighters':0.02,
    'numFighters':0.05,
}
# Gunner Skills
big_guns = {
    # TODO: resolve the ship-class split for this skill.
    # The wiki states +3% Damage / +1% Mining for capital ships only,
    # and +2% for the other ship classes; the flat keys below cannot
    # express that distinction yet.
    'damage':0.03,
    'mineDamage':0.01,
    'otherDamage':0.02,
}
destruction = {
    # TODO: skill effect unknown -- intentionally empty until clarified.
}
missile_mastery = {
    'missileDamage':0.025,
}
#TODO: Finish Adv. Subskills
# All sub-skills below are placeholders: 0 means "no data entered yet",
# not "no effect".  They are grouped by the class they belong to.
# General Subskills
shake_it_off = 0
strategic_deployment = 0
centered = 0
slave_PhD = 0
resilient = 0
droney = 0
# Focus Subskills
big_banker = 0
expansionist = 0
really_super = 0
extra_tweaked = 0
# Berserker Subskill
berserker_classic = 0
weapons_master = 0
berserking_berseker = 0
eye_for_an_eye = 0
# Sniper Subskill
long_scope = 0
quick_scope = 0
grooved_bore = 0
marksman = 0
# Speed Demon Subskill
master_scout = 0
acrobat = 0
fighter_ace = 0
lucky_devil = 0
# Seer Subskill
wraith = 0
clairvoyant = 0
assassin = 0
brooding_gaze = 0
# Shield Monkey Subskill
funky_monkey = 0
flying_monkey = 0
tanky_monkey = 0
enveloping_monkey = 0
# Engineer Subskill
mechanical_engineer = 0
computer_engineer = 0
electrical_engineer = 0
civil_engineer = 0
# I'm one of those ^
# Fleet Commander Subskill
fleet_protector = 0
fleet_admiral = 0
wild_man = 0
advanced_flight_controller = 0
# Gunner Subskill
advanced_targeting_computers = 0
automated_reloading = 0
mass_destruction = 0
gunboat_diplomat = 0
# Class tables: each class dict mixes per-level stat bonuses (float values)
# with references to its three class-skill dicts defined above.  A value of
# None marks a stat the class affects in a way not expressible as a
# per-level float yet.
# Combat Focus
berserker = {
    'multifire':None,
    'hostility':0.02,
    'damage':0.03,
    'equipSize':-0.01,
    'impervious_armor': impervious_armor,
    'ordinance_supremacy': ordinance_supremacy,
    'arsenal_expertise': arsenal_expertise,
}
sniper = {
    'damage':0.04,
    'elecTemp':0.025,
    'physDamage':0.05,
    'stalking': stalking,
    'sharpshooting': sharpshooting,
    'efficiency': efficiency,
}
# Recon Focus
speed_demon = {
    'speed':0.02,
    'RoF':0.03,
    'docking':0.1,
    'elecTemp':-0.01,
    'speedy_movement': speedy_movement,
    'speedy_firing': speedy_firing,
    'dogfighting': dogfighting,
}
seer = {
    'damage':0.02,
    'ethDamage':0.01,
    'radar':0.03,
    'stealth':0.02,
    'psionic_shrouding': psionic_shrouding,
    'psychic_sight': psychic_sight,
    'shadow_ambush': shadow_ambush,
}
# Support Focus
shield_monkey = {
    'shieldBank':0.05,
    'shieldCharge':0.03,
    'shield_boosting': shield_boosting,
    'shield_manipulation': shield_manipulation,
    'shield_transference': shield_transference,
}
engineer = {
    'shieldBank':0.005,
    'energyBank':0.005,
    'speed':0.005,
    'turning':0.005,
    'damage':0.005,
    'shieldCharge':0.005,
    'energyCharge':0.005,
    'thrust':0.005,
    'tracking':0.005,
    'docking':0.005,
    'RoF':0.005,
    'radar':0.005,
    'range':0.005,
    'elecTemp':-0.005,
    'firingEnergy':-0.005,
    'augTweak':0.005,
    'drone_mastery': drone_mastery,
    'beam_mastery': beam_mastery,
    'damage_control': damage_control,
}
# Fleet Focus
fleet_commander = {
    'auraPower':0.04,
    'slave_mastery': slave_mastery,
    'radiation_expert': radiation_expert,
    'flight_controller': flight_controller,
}
gunner = {
    'multifire':None,
    'weaponHold':0.05,
    'damage':0.02,
    'big_guns': big_guns,
    'destruction': destruction,
    'missile_mastery': missile_mastery,
}
# Focus Skills: each focus dict carries a 'shipClass' table of per-hull
# bonuses ('all' is the fallback used by ss_math when the selected hull has
# no entry; None means "no bonus at all") plus its two class dicts.
combat_focus = {
    'shipClass': {
        'hfi': {
            'shieldBank':0.03,
            'damage':0.01
        },
        'all': {
            'shieldBank':0.01,
            'equipSize':-0.02,
        }
    },
    'berserker': berserker,
    'sniper': sniper
}
recon_focus = {
    'shipClass': {
        'lfi': {
            'agility':0.1,
            'thrust':0.1,
            'radar':0.05,
            'hull':0.015,
        },
        'all': {
            'agility':0.05,
            'thrust':0.05,
        }
    },
    'speed_demon': speed_demon,
    'seer': seer
}
support_focus = {
    'shipClass': {
        'sfr': {
            'hull':0.09,
            'weight':-0.02,
        },
        'ifr': {
            'hull':0.09,
            'weight':-0.02,
        },
        'hfi': {
            'hull':0.04
        },
        'cap': {
            'hull':0.04
        },
        'all': None
    },
    'shield_monkey': shield_monkey,
    'engineer': engineer
}
fleet_focus = {
    'shipClass': {
        'cap': {
            'non-offensive-stats':0.02,
            'hull':0.08
        },
        'hfi': {
            'basic-stats':0.01,
            'hull':0.04
        },
        'sfr': {
            'basic-stats':0.01,
            'hull':0.04
        },
        'ifr': {
            'basic-stats':0.01,
            'hull':0.04
        },
        'all': None
    },
    'fleet_commander': fleet_commander,
    'gunner': gunner
}
# Skill Tree
skill_tree = {
'combat_focus': combat_focus,
'recon_focus': recon_focus,
'support_focus': support_focus,
'fleet_focus': fleet_focus,
} | {
"repo_name": "dburgess560/sscalc",
"path": "ss_skills.py",
"copies": "1",
"size": "8340",
"license": "apache-2.0",
"hash": 49329021847712900,
"line_mean": 16.0562770563,
"line_max": 101,
"alpha_frac": 0.5220623501,
"autogenerated": false,
"ratio": 2.6808100289296046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3702872379029604,
"avg_score": null,
"num_lines": null
} |
from __future__ import division
import numpy as np
def boundary_separation(array, idx):
    """
    Distance on S1 between the last point of quadrant *idx* and the first
    point of the (cyclically) next non-empty quadrant.

    :param array: list of sorted angle sub-arrays, one per non-empty quadrant (tuple)
    :param idx: index of the current sub-array (int)
    :return: (float)
    """
    current = array[idx]
    following = array[(idx + 1) % len(array)]
    if following[0] > current[-1]:
        return following[0] - current[-1]
    # The "next" quadrant wrapped past 2*pi -- unwind the winding.
    return following[0] - (current[-1] - (2 * np.pi))
def min_arc_length(array):
    """
    Shortest closed arc on the unit circle containing every input angle --
    equivalently, 2*pi minus the largest empty gap between adjacent points
    (the minimum closed set containing all elements in the standard S1
    topology).

    :param array: array of angles (list)
    :return: shortest path length (float)
    """
    two_pi = 2 * np.pi
    half_pi = np.pi / 2
    # Bucket the angles (mod 2*pi, killing winding numbers) into the four
    # quadrants of the circle, then sort each bucket ascending.
    quadrants = ([], [], [], [])
    for angle in array:
        reduced = angle % two_pi
        quadrants[int(reduced // half_pi)].append(reduced)
    for bucket in quadrants:
        bucket.sort()
    # Widest gap between neighbouring points inside any single quadrant.
    widest = 0
    for bucket in quadrants:
        for left, right in zip(bucket, bucket[1:]):
            gap = right - left
            if gap > widest:
                widest = gap
    # Widest gap across the boundaries of adjacent non-empty quadrants.
    occupied = [bucket for bucket in quadrants if bucket]
    seams = [boundary_separation(occupied, k) for k in range(len(occupied))]
    widest = max(seams + [widest])
    return two_pi - widest
def translate(arr, n_permute):
    """
    Rotate *arr* left by *n_permute* positions.

    :param arr: (list)
    :param n_permute: (int) order of the permutation
    :return: (list) permuted array
    """
    # Slicing already copies; the element-by-element identity comprehensions
    # in the original were redundant.  list() keeps the return type a list
    # even for tuple/str inputs, matching the original behavior.
    return list(arr[n_permute:]) + list(arr[:n_permute])
def loss(m1, m2):
    """
    Angular loss between two sets of slopes: arctan(m1/m2 - 1), with
    m1 == 0 mapped to ratio 0 and m2 == 0 mapped to ratio 1e10 (m2 == 0
    takes precedence when both are zero).

    :param m1: slopes of line set 1 (numpy array)
    :param m2: slopes of line set 2 (numpy array)
    :return: (numpy array)
    :raises ValueError: if either input is not a numpy array
    """
    if not isinstance(m1, np.ndarray) or not isinstance(m2, np.ndarray):
        # BUG FIX: message used to claim "TypeError" while raising
        # ValueError; the exception type is kept for caller compatibility.
        raise ValueError('m1 and m2 must be numpy arrays.')
    # BUG FIX: the division ran before the zero entries were masked,
    # emitting divide-by-zero / invalid-value RuntimeWarnings.  The masked
    # entries are overwritten below exactly as before, so suppress the
    # warnings for the raw division only.
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = m1 / m2
    ratio[m1 == 0] = 0
    ratio[m2 == 0] = 1e10
    return np.arctan(ratio - 1)
def health(population, metric='L2', order=1):
    """
    :param population: loss functions for set of objects (numpy array)
    :param metric: metric space measure (string, optional, 'L2' by default, accepts: 'L1', 'minkowski',
    'vector_length', 'arc_length')
    :param order: (float, optional, only used with Minkowski p-adic measure, 1 by default)
    :return: metric space norm of population (float)
    :raises ValueError: for an unknown *metric* name
    """
    assert isinstance(population, np.ndarray)
    if metric == 'L2':
        return np.sqrt(np.dot(population, population))
    elif metric == 'L1':
        return np.sum(abs(population))
    elif metric == 'minkowski':
        t = np.sum(abs(population) ** order)
        return t ** (1. / order)
    elif metric == 'vector_length':
        v = np.array((np.cos(population).sum(), np.sin(population).sum()))
        # NOTE(review): len(v) is always 2 here; if the mean resultant
        # length was intended, the divisor should be len(population).
        # Preserved as-is pending confirmation.
        return np.sqrt(np.dot(v, v)) / len(v)
    elif metric == 'arc_length':
        return min_arc_length(population)
    # BUG FIX: an unknown metric name used to fall through and silently
    # return None; fail loudly instead.
    raise ValueError('unknown metric: %r' % (metric,))
| {
"repo_name": "brainsqueeze/Image_correction",
"path": "src/workers/optimization_utils.py",
"copies": "1",
"size": "4137",
"license": "mit",
"hash": -201138871256458800,
"line_mean": 31.0697674419,
"line_max": 107,
"alpha_frac": 0.5806139715,
"autogenerated": false,
"ratio": 3.2574803149606297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9335593843086423,
"avg_score": 0.0005000886748415143,
"num_lines": 129
} |
import cv2
from skimage import io
from skimage.transform import probabilistic_hough_line
import matplotlib.pyplot as plt
import os
import warnings
import random
import numpy as np
warnings.filterwarnings('ignore', category=RuntimeWarning)
class CorrectImage(object):
    # Detects straight lines in an image (Canny edge detection followed by a
    # probabilistic Hough transform) and provides helpers for sampling and
    # mutating line pairs -- presumably the population for a genetic-style
    # skew-correction search (see mutation / line_pair); confirm against the
    # optimization module.
    def __init__(self):
        # All state is filled in lazily by add_path / add_image.
        self.path = ""
        self.name = ""
        self.image = None   # raw image data loaded by _load_image
        self.edges = None   # Canny edge map
        self.lines = None   # line segments from the Hough transform
    def _load_image(self, image):
        """
        :param image: image file name (str)
        :return: skimage image data
        """
        filename = os.path.join(self.path, image)
        return io.imread(filename)
    def add_path(self, image_path):
        """
        Adds image to the list of images
        :param image_path: (string)
        """
        self.path = image_path + '/'
    def add_image(self, filename):
        """
        Adds image to the list of images

        Setting the image immediately triggers edge detection and the
        Hough transform (populates self.edges and self.lines).
        :param filename: (string)
        """
        self.name = filename
        self.hough_transform()
    def _detect_edges(self, image, vary=False, plot=False):
        """
        :param image: image file name (str)
        :param vary: turn tunable plotting on
        :param plot: turn plotting on
        :return: detected edges with variable filters
        """
        self.image = self._load_image(image)
        if vary:
            # Interactive tuning window with two threshold trackbars.
            def nothing(x):
                pass
            cv2.namedWindow('image')
            cv2.createTrackbar('th1', 'image', 0, 255, nothing)
            cv2.createTrackbar('th2', 'image', 0, 255, nothing)
            while True:
                th1 = cv2.getTrackbarPos('th1', 'image')
                th2 = cv2.getTrackbarPos('th2', 'image')
                edges = cv2.Canny(self.image, th1, th2)
                cv2.imshow('image', edges)
                k = cv2.waitKey(1) & 0xFF
                # ESC exits the tuning loop.
                if k == 27:
                    break
            cv2.destroyAllWindows()
        # NOTE(review): the interactively tuned thresholds are discarded --
        # the returned edge map always uses fixed thresholds (255, 255).
        edges = cv2.Canny(self.image, 255, 255)
        if plot:
            cv2.namedWindow('image')
            cv2.imshow('image', edges)
            cv2.waitKey(5000)
            cv2.destroyAllWindows()
        return edges
    def hough_transform(self, vary=False, plot=False):
        """
        :param vary: turn edge detection tunable plotting on
        :param plot: turn plotting on
        :return: numpy array of probabilistically found straight lines
        """
        if self.name == "":
            raise ValueError('Missing image: you need to specify the image file using add_image.')
        self.edges = self._detect_edges(self.name, vary=vary, plot=plot)
        self.lines = probabilistic_hough_line(self.edges, threshold=10, line_length=5, line_gap=3)
        if plot:
            for line in self.lines:
                p0, p1 = line
                plt.plot((p0[0], p1[0]), (p0[1], p1[1]))
            plt.show()
    @staticmethod
    def slope(lines):
        """
        :param lines: array of coordinates (ie. [((x0, y0), (xf, yf)), ...]
        :return: array of slope values with the same number of entries as lines
        """
        # for doing vectorized subtraction across all line pairs,
        # we need the first line of each pair to be the negative of itself
        sign_op = np.ones_like(lines)
        sign_op[:, :, 0] *= -1
        # get the differences between x and y coordinates (start, end), respectively
        slopes = np.sum(sign_op * lines, axis=2)
        # compute the slopes of each line for every line pair
        # NOTE(review): this divides component 0 by component 1; whether
        # that is dy/dx or dx/dy depends on the coordinate order in
        # self.lines -- confirm against probabilistic_hough_line's output.
        slopes = slopes[:, :, 0] / slopes[:, :, 1]
        # turn infinite values to a finite, but very large value
        slopes[np.isinf(slopes)] = 1e6
        # this catches cases when the line - as defined - is actually a point and the slope doesn't exist
        slopes[np.isnan(slopes)] = 0
        return slopes
    def line_pair(self, num_pairs):
        """
        :param num_pairs: number of line pairs to take (int)
        :return: line pairs (array)
        """
        # Sample 2*num_pairs line indices with replacement, then group the
        # endpoints into (num_pairs, 2 lines, 2 points, 2 coords).
        idx = np.random.randint(len(self.lines), size=num_pairs * 2)
        lines = np.array(self.lines)[idx]
        return lines.reshape(num_pairs, 2, 2, 2)
    @staticmethod
    def mutation(pairs, p_mutate=0.01):
        """
        :param pairs: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines
        :param p_mutate: (float) probability of a mutation
        :return: (numpy array with dimensions (n_pairs, 2, 2, 2)) pairs of lines with mutations
        """
        # Mutates in place AND returns the same array.
        for i in range(len(pairs)):
            if p_mutate > random.random():
                # column = np.random.randint(low=0, high=2)
                for column in [0, 1]:
                    t = pairs[i, :, :, column]
                    low, high = np.min(t), np.max(t)
                    # Widen a degenerate range so randint has room.
                    # NOTE(review): if low == high == 0, doubling leaves the
                    # range empty and randint would raise -- confirm inputs
                    # are positive pixel coordinates.
                    if high == low:
                        high *= 2
                    pairs[i, :, :, column] = np.random.randint(low=low, high=high, size=t.shape)
        return pairs
| {
"repo_name": "brainsqueeze/Image_correction",
"path": "src/workers/correct.py",
"copies": "1",
"size": "4955",
"license": "mit",
"hash": -6673365139962833000,
"line_mean": 31.8145695364,
"line_max": 105,
"alpha_frac": 0.550554995,
"autogenerated": false,
"ratio": 3.847049689440994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9895162481118396,
"avg_score": 0.0004884406645195391,
"num_lines": 151
} |
__author__="daveshepard"
__date__ ="$Jun 10, 2011 4:11:10 PM$"
import MySQLdb
import threading
import time
import simplejson
import logging
host = "localhost"
username = "root"
password = "GIS4ucla"
database = "hcnow"
TWITTER_TABLE = "tweets"
#connection = None
def get_connection():
    """Open a new MySQL connection to the configured hcnow database.

    Forces utf8 on both the connection parameters and the session so
    tweet text round-trips correctly.
    """
    conn = MySQLdb.connect(
        host=host, user=username, passwd=password, db=database,
        use_unicode=True, charset="utf8"
    )
    conn.set_character_set("utf8")
    return conn
def get_cursor(conn=None):
    """Return a DictCursor, reusing *conn* when given, else opening a
    fresh connection."""
    source = conn if conn is not None else get_connection()
    return source.cursor(MySQLdb.cursors.DictCursor)
class TweetStore(threading.Thread):
    # Background thread that buffers incoming tweet result-groups and
    # periodically flushes them into the MySQL `tweets` table.
    # NOTE(review): stop() is defined twice in this class; the second
    # definition (at the bottom) silently shadows this earlier one.
    # NOTE(review): self.running is only set in run(); calling stop()
    # before start() would raise AttributeError.
    TWEET_TABLE = 'tweets'
    def __init__(self, commit_threshold=10, interval=60, daemon=True):
        # commit_threshold and daemon are accepted but never used below.
        self.commit_threshold = commit_threshold
        self.tweets = []
        self.interval = interval
        self.logger = logging.getLogger('database')
        def generator(key):
            # NOTE(review): defined but never used.
            return lambda item: item[key]
        self.field_correlations = {
            'from_user_id': '',
            'profile_image_url': None,
        }
        threading.Thread.__init__(self)
    def log(self, message):
        # Plain stdout logging; the logger-based variants were disabled.
        print "[Database] " + message
        #self.logger.info({'query_url_id': 'Database', 'message': message})
        #self.logger.info(message,extra={ 'query_url_id':'Database'})
        # self.logger.info("[Database] " + message)
    def add_tweets(self, items):
        # Queue one result-group dict ({"url_id": ..., "results": [...]})
        # for the next commit.
        self.tweets.append(items)
    def run(self):
        # Main loop: commit the buffered groups every self.interval seconds.
        self.log("Starting TweetStore")
        time.sleep(10)
        self.running = True
        while self.running:
            self.commit()
            time.sleep(self.interval)
    def stop(self):
        self.running = False
        self.log("Stopping tweetstore thread from top stop.")
    def commit(self):
        # Flush every buffered result-group into the tweets table.  The
        # INSERT column list and placeholder string are grown in lockstep
        # for the optional fields.
        conn = get_connection()
        for tweet_group in self.tweets:
            url_id = tweet_group["url_id"]
            self.log("Committing %s tweets for query_url_id %s" % (len(tweet_group["results"]), url_id))
            for tweet in tweet_group["results"]:
                fields = """query_url_id, from_user_id, profile_image_url, tweeted_at,
                    from_user, twitter_id, text, source, json"""
                value_string = u"""%s, %s, %s, %s, %s, %s,%s, %s, %s"""
                # Placeholder user record for tweets missing a user object.
                if "user" not in tweet:
                    tweet["user"] = {
                        "id_str" : "No ID",
                        "profile_image_url" : "nothing",
                        "screen_name": "userless tweet",
                    }
                values = (
                    url_id, tweet["user"]['id_str'],
                    tweet["user"]['profile_image_url'],
                    time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(tweet['created_at'],
                        '%a %b %d %H:%M:%S +0000 %Y'
                    )),
                    tweet["user"]['screen_name'], tweet['id_str'],
                    tweet['text'],
                    tweet['source'], simplejson.dumps(tweet),
                )
                if "location" in tweet:
                    fields += ", location"
                    value_string += ", %s"
                    values += (tweet['location'], )
                if "iso_language_code" in tweet["metadata"]:
                    fields += ", iso_language_code "
                    value_string += ", %s"
                    values += (tweet["metadata"]['iso_language_code'], )
                if "geo" in tweet and tweet["geo"]:
                    fields += ", reported_lat, reported_lon, reported_geometry, reported_geometry_pt"
                    value_string += u", %s, %s, %s, GeomFromText(%s)"
                    values += (
                        tweet['geo']['coordinates'][1], tweet['geo']['coordinates'][0],
                        "GeomFromText('POINT(%s %s)')" % (tweet['geo']['coordinates'][1],
                            tweet['geo']['coordinates'][0]),
                        #"GeomFromText('POINT(%s %s)')" % (tweet['geo']['coordinates'][1],
                        "POINT(%s %s)" % (tweet['geo']['coordinates'][1],
                            tweet['geo']['coordinates'][0]),
                    )
                if "in_reply_to_user_id_str" in tweet and tweet["in_reply_to_user_id_str"]:
                    fields += ", to_user_id"
                    value_string += ", %s"
                    values += (tweet['in_reply_to_user_id_str'], )
                if "retweet_id" in tweet and tweet["retweet_id"]:
                    fields += ", retweet_id"
                    value_string += ", %s "
                    values += (tweet["retweet_id"], )
                query = "INSERT INTO " + self.TWEET_TABLE + "(" + fields + ", created_at, updated_at) VALUES (" + value_string + ", NOW(), NOW())"
                cursor = conn.cursor(MySQLdb.cursors.DictCursor)
                cursor.execute(query, values)
        conn.commit()
        self.log("Commit finished")
        self.tweets = []
    def commit_safe(self):
        """ Saves tweets to database.
            Attempts thread safety using get_tweet_group
        """
        # Variant of commit() for the older search-API tweet shape
        # (from_user/from_user_id fields instead of a nested "user" object).
        self.log("Doing safe commit")
        conn = get_connection()
        while True:
            tweet_group = self.get_tweet_group()
            if not tweet_group:
                break
            url_id = tweet_group["url_id"]
            for tweet in tweet_group["results"]:
                fields = """query_url_id, from_user_id, location, profile_image_url, tweeted_at,
                    from_user, twitter_id, text, iso_language_code, source"""
                value_string = u"""%s, %s, %s, %s, %s, %s,%s, %s, %s, %s"""
                values = (
                    url_id, tweet['from_user_id'], tweet['location'],
                    tweet['profile_image_url'],
                    time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(tweet['created_at'],
                        "%a, %d %b %Y %H:%M:%S +0000")),
                    tweet['from_user'], tweet['id'],
                    tweet['text'],
                    tweet['iso_language_code'], tweet['source']
                )
                if "geo" in tweet and tweet["geo"]:
                    fields += ", reported_lat, reported_lon, reported_geometry_pt"
                    value_string += u", %s, %s, %s"
                    values += (
                        tweet['geo']['coordinates'][1], tweet['geo']['coordinates'][0],
                        "GeomFromText('POINT(%s,%s)')" % (tweet['geo']['coordinates'][1],
                            tweet['geo']['coordinates'][0]),
                    )
                if "to_user_id" in tweet and tweet["to_user_id"]:
                    fields += ", to_user_id"
                    value_string += ", %s"
                    values += (tweet['to_user_id'], )
                if "retweet_id" in tweet and tweet["retweet_id"]:
                    fields += ", retweet_id"
                    value_string += ", %s "
                    values += (tweet["retweet_id"], )
                query = "INSERT INTO " + self.TWEET_TABLE + "(" + fields + ", created_at, updated_at) VALUES (" + value_string + ", NOW(), NOW())"
                cursor = conn.cursor(MySQLdb.cursors.DictCursor)
                cursor.execute(query, values)
            conn.commit()
    def get_tweet_group(self):
        """ Returns tweet group. Operates destructively on list.
        """
        if len(self.tweets) > 0:
            tweet_group = self.tweets[0]
            self.tweets = self.tweets[1:]
            return tweet_group
        else:
            return None
    def stop(self):
        # NOTE(review): duplicate definition -- this replaces the stop()
        # defined earlier in the class.
        if len(self.tweets) > 0:
            self.commit()
        self.running = False
        # threading.Thread.stop()
        self.log("Stopping tweetstore thread from lower stop.")
| {
"repo_name": "shepdl/stream-daemon",
"path": "database.py",
"copies": "1",
"size": "7889",
"license": "mit",
"hash": 8849506309473328000,
"line_mean": 42.3461538462,
"line_max": 146,
"alpha_frac": 0.4794016986,
"autogenerated": false,
"ratio": 4.022947475777665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002349174377665,
"avg_score": null,
"num_lines": null
} |
__author__ = 'David Anderson'
"""
Flask-Flywheel
--------------
Adds Flywheel support to your Flask application.
"""
import codecs
import os
import re
from setuptools import setup, find_packages
def find_version(*file_paths):
    """Extract ``__version__`` from a source file without importing it.

    :param file_paths: path components relative to this setup.py's directory
    :return: the version string
    :raises RuntimeError: when no ``__version__`` assignment is found
    """
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, *file_paths), 'r') as handle:
        contents = handle.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
setup(
name="flask-spring",
version=find_version("flask_spring", "__init__.py"),
url="https://github.com/oggthemiffed/Flask-Spring",
license="MIT",
author="David Anderson",
author_email="herbaliser1978@gmail.com",
description="Adds the Spring framework support to your Flask application",
long_description=__doc__,
packages=find_packages( include=["flask_spring"], exclude=['contrib', 'docs', 'tests*']),
zip_safe=False,
install_requires=[
"springpython",
"flask",
"pyyaml"
],
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
include_package_data=False,
) | {
"repo_name": "oggthemiffed/Flask-Spring",
"path": "setup.py",
"copies": "1",
"size": "1631",
"license": "mit",
"hash": -1570287149767743500,
"line_mean": 29.7924528302,
"line_max": 93,
"alpha_frac": 0.6088289393,
"autogenerated": false,
"ratio": 3.8649289099526065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49737578492526063,
"avg_score": null,
"num_lines": null
} |
"""
Converts UseCaseMaker XML file to ASCIIDOC source
Copyright (c) David Avsajanishvili, 2009
"""
import ucm_xmls
from pyxmls import *
import getopt, os, re, codecs
TOP_COMMENT = \
r"""// ''''''''''''''''''''''''''''''''''''''''''''''''''
// THIS FILE IS GENERATED AUTOMATICALLY - DON'T EDIT!
// ''''''''''''''''''''''''''''''''''''''''''''''''''
// Use Cases retrieved from UseCaseMaker
// XML file with help of ucm2asciidoc script.
// ''''''''''''''''''''''''''''''''''''''''''''''''''
"""
TAB_HEADER = """
[cols="3m,12",options="header"]
|==========================================================
"""
TAB_FOOTER = """
|==========================================================
"""
def asciidoc_labeled(caps, vals):
ret = ""
for i in range(len(caps)):
if vals[i]:
ret += "%s:: \n%s\n" % (caps[i],vals[i])
return "\n--\n" + ret + "--\n" if ret.strip() else ""
def uc_maketable(uc):
"""
Makes AsciiDoc-formatted tables from Use Cases
"""
ret = ""
# Main flow
ret += TAB_HEADER + "|%(Prefix)s%(ID)s | Main success scenario" % uc.__dict__
alts = [] #(alternatives list)
for s in uc.steps:
if s.stepType in ('Default', 'Child'):
ret += "\n|%(Name)s |%(description)s" % s.__dict__
if s.stepType == 'Alternative':
alts.append(s)
ret += TAB_FOOTER
# Extensions
for als in alts:
ret += TAB_HEADER + "|Extension %(Name)s | %(description)s" % als.__dict__
for s in uc.steps:
if s.ID == als.ID and s.Prefix == als.Prefix and s.stepType == 'AlternativeChild':
ret += "\n|%(Name)s |%(description)s" % s.__dict__
ret += TAB_FOOTER
ret += "\n\n"
return ret
def ucmodel_to_asciidoc(
    U,
    cpt_char = r'=',
    title_char = r'-',
    cpt_title = ''):
    """
    Renders UseCaseMaker model, deserialized from XML file, to ASCIIDOC.

    :param U: deserialized UCMDocument model
    :param cpt_char: underline character for the document title ('' to skip)
    :param title_char: underline character for each use-case title
    :param cpt_title: document title text (only used when cpt_char is set)
    :return: AsciiDoc source as a string
    """
    ret = ""
    # Render Use Cases
    for uc in U.model.usecases:
        # Title line "Name (PREFIXid) " underlined to its own length.
        uc_nm = "%(Name)s (%(Prefix)s%(ID)s) " % uc.__dict__
        uc_ttl = title_char * len(uc_nm)
        dsc = uc.attributes.description
        uc_table = uc_maketable(uc)
        # Optional admonition block for free-form notes.
        notes = "" if (not uc.attributes.notes) else (
            """
[NOTE]
============================
%s
============================
""" % (uc.attributes.notes,))
        labels = asciidoc_labeled(
            ['Pre-Conditions', 'Post-Conditions', 'Trigger (%s)' % uc.trigger.eventType],
            [uc.preconditions, uc.postconditions, uc.trigger.description])
        prose = uc.prose
        # The template is filled from the local names computed above.
        ret += """
%(uc_nm)s
%(uc_ttl)s
// Description:
%(dsc)s
// Labeled list:
%(labels)s
// Notes:
%(notes)s
// Step Flows:
%(uc_table)s
// Prose:
%(prose)s
// <<<< End of UseCase <<<<
""" % locals()
    #Adding Main Caption title
    if cpt_char and cpt_title:
        ret = """%s
%s
%s""" % (cpt_title, cpt_char * len(cpt_title), ret)
    return ret
def main(argv):
    """
    Converts UseCaseMaker file to Use Cases
    in different human-readable formats,
    using AsciiDoc.
    Usage:
        %(command)s [options] usecasemaker_filename
    Options:
        -c, --title-char=TITLECHAR
            Characters for titles. When only one character
            specified, it is used to underline titles of Use Cases.
            If more than one, the first is used to underline
            title of document, the second - to underline titles
            of the Use Cases.
            Default: "=-"
        -f, --format
            Output format. Format can be:
            * asciidoc -- AsciiDoc source
            * docbook -- DocBook XML, generated by asciidoc tool
            * html -- HTML, generated by asciidoc tool
            * chunked -- chunked HTML (a2x)
            * htmlhelp -- HtmlHelp files (a2x)
            * manpage -- MAN page (a2x)
            * pdf -- PDF file (a2x, dblatex)
            * text -- plain text file (a2x)
            * xhtml -- XHTML file (a2x)
            * dvi -- DVI file (a2x, dblatex)
            * ps -- PostScript file (a2x, dblatex)
            * tex -- TeX file (a2x, dblatex).
        -o, --output=FILENAME
            Output file. By default - usecasemaker_filename with
            appropriate extension
        -v, --verbose
            Verbosely print processing information
        -t, --title
            Title of AsciiDoc document. Used only in case when
            --title-char consists of two characters.
            Default: usecasemaker_filename without extension
    Notes:
        * To build a separate document default --title-char
        must be used. This option is useful when
        including Use Cases in another AsciiDoc file
        as a section.
        * When using --format other than asciidoc,
        the AsciiDoc must be installed (asciidoc and a2x
        executables must be available).
    License:
        Copyright (c) 2009, David Avsajanishvili
        The tool is released under modified BSD license
    See Also:
        * UseCaseMaker: http://use-case-maker.sourceforge.net
        * AsciiDoc: http://www.methods.co.nz/asciidoc/index.html
    """
    command = os.path.split(argv[0])[1]
    params = {}
    cpt_char = None
    verbose = False
    format = 'asciidoc'
    #Extract options
    try:
        opts, args = getopt.getopt(
            argv[1:],
            "c:f:o:vt:",
            ["title-char=", "format=",
            "output=", "verbose", "title="])
        infile = unicode(args[0],"utf8")
        outfile = None
        title = outfile
    except getopt.GetoptError, err:
        # Print the usage text (this docstring) before the error.
        print main.__doc__ % locals()
        print "Error: %s" % err
        return -2
    except IndexError, err:
        print main.__doc__ % locals()
        print "Error: File not specified."
        return -2
    for o, a in opts:
        if o in ("-c", "--title-char"):
            # First char underlines the document title, second underlines
            # use-case titles; a single char means "no document title".
            a = unicode(a,"utf8").strip()
            if len(a) > 1:
                params['cpt_char'] = a[0]
                params['title_char'] = a[1]
            else:
                params['cpt_char'] = ''
                params['title_char'] = a
        elif o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-o", "--output"):
            outfile = unicode(a,"utf8")
        elif o in ("-f", "--format"):
            format = unicode(a,"utf8")
        elif o in ("-t", "--title"):
            title = unicode(a,"utf8")
    params['cpt_title'] = title
    # Default output name: input basename with a format-appropriate extension.
    outfile = outfile or u"%s.%s" % (
        os.path.splitext(os.path.split(infile)[1])[0],
        {'asciidoc':'asciidoc',
        'docbook':'xml',
        'html':'html'}.get(format,'xml'))
    # Deserialize
    serializer = XmlSerializer()
    serializer.silent = not verbose
    U = serializer.deserialize(infile, ucm_xmls.UCMDocument)
    # Convert Model to ASCIIDOC
    ret = TOP_COMMENT
    ret = ret + ucmodel_to_asciidoc(U, **params)
    if format == 'asciidoc':
        # Write ASCIIDOC
        f = open(outfile, "w")
        f.write(codecs.encode(ret,"utf8"))
        f.close()
    else:
        # Generate ASCIIDOC
        # Pipe the generated AsciiDoc into the asciidoc tool via stdin.
        try:
            (f1,f2) = os.popen4('asciidoc -b%s -o"%s" %s -' %
                (
                    ('xhtml11' if format=='html' else 'docbook'),
                    outfile,
                    '--verbose' if verbose else '',
                )
            )
            f1.write(codecs.encode(ret,"utf8"))
            f1.close()
            print f2.read()
        finally:
            if f1: f1.close()
            if f2: f2.close()
        # a2x post-processes the DocBook output into the final format.
        if format in ['chunked','htmlhelp','manpage','pdf','text','xhtml','dvi','ps','tex']:
            os.system('a2x -f%s -s %s "%s"' %
                (
                    format,
                    '--verbose' if verbose else '',
                    outfile
                )
                )
| {
"repo_name": "avsd/ucm2asciidoc",
"path": "ucm2asciidoc/xmls2asciidoc.py",
"copies": "1",
"size": "8090",
"license": "bsd-3-clause",
"hash": 3537896486285629400,
"line_mean": 26.8006872852,
"line_max": 94,
"alpha_frac": 0.4896168109,
"autogenerated": false,
"ratio": 3.6739327883742052,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9617763583648458,
"avg_score": 0.009157203125149534,
"num_lines": 291
} |
"""
Helper package to make Python scripts with
command-line options, converting database table contents
to ASCIIDOC table.
Connects to database specified in the Conneciton string
and prints containment of the table
in ASCIIDOC format.
Requires Python 2.6 and cx_Oracle
to be installed on the workstation
"""
import sys, os, getopt, codecs
def get_table(sql, connstr, nls_lang=None):
    """
    Retrieves data from table and returns it as list

    :param sql: SELECT statement to execute
    :param connstr: cx_Oracle connection string
    :param nls_lang: optional NLS_LANG value, e.g. "AMERICAN_AMERICA.UTF8";
        when set, string cells are decoded using its charset suffix
    :return: list of row tuples from cursor.fetchall()
    """
    ###########################################
    # cx_Oracle module is needed for specified
    # version of Oracle (9,10 or 11).
    # It could be downloaded from SourceForge:
    #
    # http://cx-oracle.sourceforge.net/
    #
    # For more help see:
    # http://www.orafaq.com/wiki/Python
    ###########################################
    try:
        import cx_Oracle
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # narrowing to ImportError would be safer.
    except:
        raise ImportError("""Required cx_Oracle module not found.
        The module could be obtained from here: http://cx-oracle.sourceforge.net/
        See also: http://www.orafaq.com/wiki/Python""")
    # if NLS_LANG is defined, set it to environment variable
    if nls_lang:
        import os
        os.environ["NLS_LANG"] = nls_lang
    connection = cx_Oracle.connect(connstr)
    cursor = connection.cursor()
    cursor.execute(sql)
    ret = cursor.fetchall()
    cursor.close()
    connection.close()
    if nls_lang:
        try:
            # Decode string cells using the charset after the '.' in
            # NLS_LANG; an unknown codec (LookupError) leaves rows as-is.
            enc = nls_lang.split('.')[-1]
            ret2 = [tuple(
                [isinstance(b,basestring) and unicode(b,enc) or b
                for b in a]
                )for a in ret]
        except LookupError:
            return ret
        ret = ret2
    return ret
def make_asciidoc(dct):
    """
    Render the query result *dct* (an iterable of row tuples) as the body
    rows of an ASCIIDOC table: each cell becomes "|value ", literal pipe
    characters are escaped as "\\|", and each row ends with a newline.
    Falsy input yields an empty string.
    """
    if not dct:
        return ""
    rendered_rows = []
    for row in dct:
        pieces = []
        for cell in row:
            # None (or other falsy cells) render as an empty cell.
            text = unicode(cell or "").replace(r"|", r"\|")
            pieces.append("|%s " % text)
        pieces.append("\n")
        rendered_rows.append("".join(pieces))
    return "".join(rendered_rows)
def main(argv):
    """
    %(command)s - Prints ASCIIDOC of table contents from Oracle database
    that's connection and SQL passed as command-line in argv.
    Usage:
    %(command)s [options] sql_command
    Options:
    -c, --connection-string=CONNSTRING
    Connection string to connect to Oracle DB, mandatory.
    -h, --help
    Display this help message.
    -n, --nls
    NLS language definition (for example, "AMERICAN_AMERICA.UTF8")
    -o, --output=FILENAME
    Output file. If not specified, goes to standard
    output (stdout).
    -v, --verbose
    Write detailed information to stderr.
    """
    # NOTE: the docstring above doubles as the runtime help text; it is
    # rendered with "main.__doc__ % locals()", so the %(command)s
    # placeholders are filled from the local variable "command" below.
    # log_error always writes to stderr; log is a no-op unless -v/--verbose
    # rebinds it to log_error in the option loop.
    def log_error(s):
        sys.stderr.write(s)
        sys.stderr.write('\n')
    def log(s):
        pass
    # Basename of the invoking script, used by the help text.
    command = os.path.split(argv[0])[1]
    params = {}
    cpt_char = None
    #Extract options
    try:
        opts, args = getopt.getopt(
            argv[1:],
            "n:o:c:vh",
            ["output=", "connection-string=", "verbose", "help", "nls="])
        # Any non-option arguments are joined into the SQL statement;
        # None means "read SQL from stdin" later on.
        sql = args and " ".join(args) or None
        connstr = None
        outfile = None
        nls = None
    except getopt.GetoptError, err:
        print main.__doc__ % locals()
        print "Error: %s" % err
        return -2
    # NOTE(review): this IndexError branch looks unreachable — nothing in
    # the try block above indexes into args — kept for safety.
    except IndexError, err:
        print main.__doc__ % locals()
        print "Error: SQL not specified."
        return -2
    # Apply parsed command-line options.
    for o, a in opts:
        if o in ("-v", "--verbose"):
            # Verbose mode: route progress messages to stderr.
            log = log_error
        elif o in ("-o", "--output"):
            outfile = a
        elif o in ("-n", "--nls"):
            nls = a
        elif o in ("-c", "--connection-string"):
            connstr = a
        elif o in ("-h", "--help"):
            print main.__doc__ % locals()
            return 0
    log("Generating ASCIIDOC from Oracle table")
    log("=====================================")
    # Connection string is mandatory.
    if not connstr:
        log_error("Oracle connection string not specified!")
        return -2
    try:
        # Get SQL (fall back to reading the whole of stdin)
        if not sql:
            sql = sys.stdin.read()
            sys.stdin.close()
        # Get data from Oracle
        log("Executing script: \n\t%s" % sql)
        ctnt = get_table(sql, connstr, nls)
        # Generate
        log("Generating ASCIIDOC...")
        ret = make_asciidoc(ctnt)
        # Write ASCIIDOC (UTF-8 encoded) to the output file or stdout
        log("Writing file %s ..." % (outfile or 'stdout'))
        f = outfile and open(outfile, "w") or sys.stdout
        f.write(codecs.encode(ret,"utf8"))
        f.close()
        log("Done!")
    except Exception,err:
        # NOTE(review): format string has no "% err" argument, so this
        # prints the literal "Error: %s"; the exception is re-raised
        # below, which surfaces the real traceback anyway.
        log_error("Error: %s\n")
        raise
    log("")
    return 0
| {
"repo_name": "avsd/sql2asciidoc",
"path": "sql2asciidoc/oracle2asciidoc.py",
"copies": "1",
"size": "4986",
"license": "bsd-3-clause",
"hash": -8306055356921918000,
"line_mean": 24.6631016043,
"line_max": 77,
"alpha_frac": 0.5038106699,
"autogenerated": false,
"ratio": 4.110469909315746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010117214191160481,
"num_lines": 187
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.