text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
import click
import langml
# Root command group; `--version` reports the installed langml version.
@click.group()
@click.version_option(version=langml.__version__)
def cli():
    """LangML client"""
    pass
def main():
    """Console-script entry point: register sub-commands, then dispatch.

    The baseline sub-command is imported here rather than at module level —
    presumably to keep plain `langml --version` startup light; confirm.
    """
    from langml.baselines.cli import baseline
    cli.add_command(baseline)
    cli(prog_name='langml', obj={})
|
from functools import reduce
def score(motifs):
    """Score each column of the motif matrix.

    Rotates the matrix so columns become rows, then accumulates each
    column's nucleotides into a MotifColumnScore. Returns an iterator of
    MotifColumnScore objects, one per column.

    Replaces two conflicting earlier definitions: the first referenced an
    undefined name (motifColumn); the second abused reduce() — the
    accumulator method returns None, so the running score was lost, and it
    folded over whole rows instead of per-column nucleotides.
    """
    def _score_column(column):
        # One fresh scorer per column; feed it every nucleotide in turn.
        column_score = MotifColumnScore()
        for nucleotide in column:
            column_score.addToScore(nucleotide)
        return column_score

    return map(_score_column, rotate(motifs))
def rotate(motifs):
    """Rotate the motif matrix 90 degrees: column i becomes row i.

    The original built ``tmp = [[]]`` and assigned into indices that never
    existed (IndexError on any non-trivial input) and returned None.
    Rebuilt as a transpose that returns the rotated matrix; an empty input
    yields an empty result.
    """
    if not motifs:
        return []
    return [[row[i] for row in motifs] for i in range(len(motifs[0]))]
class MotifColumnScore:
    """Counts of each nucleotide observed in one motif column."""

    def __init__(self):
        # Per-instance dict: the original used a class-level dict, which is
        # shared by every instance, so all columns accumulated into one score.
        self.score = {'A': 0, 'C': 0, 'G': 0, 'T': 0}

    def addToScore(self, nucleotide):
        """Count one nucleotide; raise on anything outside A/C/G/T.

        The original omitted ``self`` and tested ``hasattr(score, ...)``,
        which checks attributes, not dict keys — membership is the correct
        test here.
        """
        if nucleotide in self.score:
            self.score[nucleotide] += 1
        else:
            raise Exception("invalid nucleotide", nucleotide)
def minDistance(pattern, string):
    """Return the smallest distance between *pattern* and any k-mer of *string*.

    Fixes in this revision:
      * iterate over range(...) — the original iterated over an int (TypeError)
      * slice a k-long window starting at i — the original always sliced
        string[i:len(pattern)], shrinking to empty as i grew
      * append to distances — the original index-assigned into an empty list
    """
    k = len(pattern)
    distances = []
    for i in range(len(string) - k + 1):
        currentMer = string[i:i + k]
        # NOTE(review): `distance` is defined elsewhere in this project;
        # assumed to score currentMer against the pattern in scope — confirm
        # its signature (it may need `pattern` passed explicitly).
        distances.append(distance(currentMer))
    return min(distances)
def medianString(k, patterns):
|
class Solution:
    def maxVowels(self, s: str, k: int) -> int:
        """Return the maximum number of vowels in any substring of length k.

        Classic fixed-size sliding window: grow the window to size k, then
        slide it one character at a time, adjusting the running vowel count.
        """
        vowel_set = {'a', 'e', 'i', 'o', 'u'}
        best = 0
        current = 0
        left = 0
        for right, ch in enumerate(s):
            if ch in vowel_set:
                current += 1
            # Once the window reaches size k, record and slide.
            if right >= k - 1:
                best = max(best, current)
                if s[left] in vowel_set:
                    current -= 1
                left += 1
        return best
|
import numpy as np
import os
from matplotlib import pyplot as plt
# Scatter-plot predicted vs. DFT band gaps for the train/test split, then
# plot per-epoch MAE curves from ee.txt.
error_list = np.genfromtxt("error_result", dtype=np.float64).reshape((-1,))
e_gap = np.genfromtxt("label_of_qe", dtype=np.float64).reshape((-1,))
# predicted gap = true gap + model error; first 25725 rows are train,
# rows 25725:28600 are test (split sizes assumed from the slicing — confirm)
e_pre = error_list + e_gap[:28600]
plt.scatter(e_gap[:25725], e_pre[:25725], s=3, marker="o",
            alpha=0.7, label='train')
# fixed typo: label was 'text'
plt.scatter(e_gap[25725:28600], e_pre[25725:], s=3, marker="^",
            alpha=0.7, label='test')
plt.xlabel('Egap(DFT)')
plt.ylabel('Egap(predicted)')
# fixed: legend('train test') treats the string as an iterable of labels,
# yielding one-character entries; no-arg legend() uses the scatter labels.
plt.legend()
plt.show()
data = np.genfromtxt("ee.txt", dtype=np.float64)
plt.scatter(np.arange(80), data[:, 1].T, s=20, marker="o",
            alpha=0.7, label='train')
plt.scatter(np.arange(80), data[:, 2].T, s=20, marker="^",
            alpha=0.7, label='test')
plt.xlabel('epoch')
plt.ylabel('MAE')
plt.legend()
plt.show()
|
# Generated by Django 3.2.8 on 2021-11-01 18:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Contact.mesage -> Contact.message (fixes a field-name typo)."""

    dependencies = [
        ('dastugo_school', '0003_alter_contact_options'),
    ]

    operations = [
        migrations.RenameField(
            model_name='contact',
            old_name='mesage',
            new_name='message',
        ),
    ]
|
from icalendar import Calendar, Event
from django.conf import settings
from datetime import datetime, timedelta
def create_google_cal_link(event):
    """Build a Google Calendar 'add event' template URL for *event*.

    Events are assumed to last two hours (end = start + 2h), matching
    create_ics_output below.

    Fixed: the original only replaced spaces with '+', so '&', '=', '#'
    etc. in the event name/details produced a broken query string. All
    parameters are now percent-encoded with urlencode.
    """
    from urllib.parse import urlencode  # local import: module deps unchanged

    time_fmt = '%Y%m%dT%H%M%SZ'
    start = event.start_time
    end = start + timedelta(hours=2)
    params = {
        'action': 'TEMPLATE',
        'text': event.name.strip(),
        'dates': '{}/{}'.format(start.strftime(time_fmt), end.strftime(time_fmt)),
        'details': event.description.strip(),
        'location': event.location_name.strip(),
        'sf': 'true',
        'output': 'xml',
    }
    return 'https://www.google.com/calendar/render?' + urlencode(params)
def create_ics_output(event):
    """Serialize *event* as an iCalendar (.ics) byte string.

    The event is given a fixed two-hour duration (dtend = dtstart + 2h).
    """
    ics_event = Event()
    for key, value in (
        ('summary', event.name),
        ('location', event.location_name),
        ('description', event.description),
        ('dtstart', event.start_time),
        ('dtend', event.start_time + timedelta(hours=2)),
        ('dtstamp', event.start_time),
        ('uid', event.ocd_id),
    ):
        ics_event.add(key, value)

    cal = Calendar()
    cal.add('version', '2.0')
    cal.add('prodid', '-//NYC Councilmatic//nyc.councilmatic.org//EN')
    cal.add('x-wr-timezone', "US/Eastern")
    cal.add('method', 'publish')
    cal.add_component(ics_event)
    return cal.to_ical()
|
# Generated by Django 2.1.7 on 2019-04-02 01:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add File.file_session_key so an upload can be tied to the session
    that created it."""

    dependencies = [
        ('translate', '0004_auto_20190327_0915'),
    ]

    operations = [
        migrations.AddField(
            model_name='file',
            name='file_session_key',
            # max_length=40 presumably mirrors Django's Session.session_key
            # field size — confirm against the sessions backend in use.
            field=models.CharField(default='', max_length=40, verbose_name='Session Key'),
        ),
    ]
|
# Interactive grocery-list tracker: collect name/quantity/price per item,
# then print an itemized receipt and grand total.
grocery_item = {}     # scratch dict, copied into the history each round
grocery_history = []  # list of item dicts entered so far
stop = 'go'           # any value other than 'q' keeps the loop running
while stop != 'q':
    item_name = input('Item name: ')
    quantity = input('Quantity purchased: ')
    cost = input('Price per item: ')
    grocery_item['name'] = item_name
    grocery_item['number'] = int(quantity)
    grocery_item['price'] = float(cost)
    # copy() so the next round's edits don't mutate the stored entry
    grocery_history.append(grocery_item.copy())
    stop = input("Would you like to enter another item? \nType 'c' for continue or 'q' to quit: ")
grand_total = 0
# fixed: dropped the unused enumerate() index and the dead `item_total = 0`
# reset at the bottom of the loop (item_total is reassigned every iteration)
for item in grocery_history:
    item_total = item['number'] * item['price']
    grand_total += item_total
    print('%d %s @ $%.2f ea $%.2f' % (item['number'], item['name'], item['price'], item_total))
print('Grand total: $%.2f' % grand_total)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9a1 on 2015-11-01 04:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add User.moderated flag, defaulting to True.

    help_text/verbose_name are Russian (roughly: "indicates the user passed
    moderation" / "passed moderation"); these are runtime strings and are
    left untouched.
    """

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='moderated',
            field=models.BooleanField(default=True, help_text='Указывает, что пользователь прошёл модерацию.', verbose_name='Прошёл модерацию'),
        ),
    ]
|
#coding=utf8
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import time
import uuid
from items import TitleItem, TitleGroup
import libs.hound
import config
from bs4 import BeautifulSoup
from pymongo import MongoClient
from scrapy import log
import json
import httplib, urllib
import urlparse
import time
class FilterPipeline(object):
    """Drop entries whose title text is empty from item['titles']."""

    def process_item(self, item, spider):
        # Rebuild via slice assignment so the stored list object keeps its
        # identity, matching the original collect-then-remove() behaviour.
        kept = [entry for entry in item["titles"] if len(entry["title"]) != 0]
        item["titles"][:] = kept
        return item
class UrlFullFillPipeline(object):
    """Resolve scheme-less (relative) title URLs against the item's site URL."""

    def process_item(self, item, spider):
        base = item["siteurl"]
        for entry in item["titles"]:
            parsed = urlparse.urlparse(entry["url"])
            if not parsed.scheme:
                entry["url"] = urlparse.urljoin(base, entry["url"])
        return item
class DuplicatedPipeline(object):
    """Diff freshly-scraped titles against the MongoDB snapshot, persist the
    new ones, and notify a local HTTP server of how many were found.

    Python 2 code (print statements, httplib/urllib). Indentation below is
    reconstructed from a whitespace-mangled source — NOTE(review): confirm
    nesting, in particular that the HTTP notification sits inside the
    "new items found" branch.
    """

    def process_item(self, item, spider):
        log.msg("procesing deplicatedpipeline..............", level=log.INFO)
        siteurl = item['siteurl']
        mongo = MongoClient()
        # NOTE(review): assumes siteCol always has a document for this
        # siteurl — find_one() returning None would raise TypeError here.
        sitename = mongo.siteDb.siteCol.find_one({"siteurl":siteurl})["sitename"]
        print "sitename:",sitename
        print "type:",type(sitename)
        print "after:",type(sitename.encode("utf-8"))
        db = mongo.spiderDb
        # Fall back to an empty snapshot if this site was never crawled.
        find = db.snapshotCol.find_one({"siteurl":siteurl}, {"_id":0, "titles":1}) or {"titles":[]}
        titlesfromdb = [piece["title"] for piece in find["titles"]]
        titlesfromspi = [piece["title"] for piece in item["titles"]]
        #newItems: ["title"]
        # set difference: titles the spider saw that are not yet stored
        newItems = list(set(titlesfromspi)-set(titlesfromdb))
        log.msg("----------get new:%d"%len(newItems), level=log.INFO)
        if len(newItems)>0:
            #if any new found, update db to the new
            # (third arg True = upsert in the legacy pymongo update() API)
            db.snapshotCol.update({"siteurl":siteurl}, {"$set":{"updatetime":time.time()}}, True)
            db.snapshotCol.update({"siteurl":siteurl}, {"$set":{"titles":item["titles"]}}, True)
            db.crawltitleCol.update({"siteurl":siteurl}, {"$set":{"updatetime":time.time()}}, True)
            #insert the newitems into db
            for piece in newItems:
                # look up the URL belonging to this new title
                for piece2 in item["titles"]:
                    if piece2["title"] == piece:
                        db.crawltitleCol.update({"siteurl":siteurl}, {"$push":{"titles":{"title":piece, "url":piece2["url"], "isnew":True}}}, True)
                        break
            # send a new items msg to server
            conn = httplib.HTTPConnection("localhost:8888")
            try:
                parms = urllib.urlencode({"sitename":sitename,"siteurl":siteurl,"count":len(newItems)})
                conn.request("GET","/message/new_message?%s"%parms)
                resp = conn.getresponse()
                log.msg("send to server:%d, get:%s"%(resp.status,resp.read()))
            except:
                # best-effort notification: a failed HTTP ping must not
                # abort the pipeline (bare except kept deliberately)
                log.msg("-------send update message to server error!!!", level=log.INFO)
            finally:
                conn.close()
        mongo.close()
        return item
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class wizard_multi_charts_accounts(osv.osv_memory):
    """Indonesian-localization override of the multi-chart-of-accounts wizard.

    Adds multi-currency account generation plus user-defined bank/cash
    accounts when the 'Basic Indonesian Chart of Account' template is used.
    Python 2 / legacy OpenERP OSV code (`<>`, `except X, e`). Indentation
    below is reconstructed from a whitespace-mangled source — NOTE(review):
    confirm the nesting of the guarded sections flagged inline.
    """
    _name='wizard.multi.charts.accounts'
    _inherit = 'wizard.multi.charts.accounts'
    _columns = {
        # NOTE: rel table 'l10n_curency_wizard_rel' keeps its original
        # (misspelled) name — renaming would break existing databases.
        'currency_use_ids': fields.many2many(string='Currency', obj='res.currency', rel='l10n_curency_wizard_rel', id1='wizard_id', id2='currency_id', required=True),
        'bank_account_ids': fields.one2many(obj='account.set_bank_account', fields_id='wizard_id', string='Set Bank Account'),
        'cash_account_ids': fields.one2many(obj='account.set_cash_account', fields_id='wizard_id', string='Set Cash Account'),
    }

    def default_get(self, cr, uid, fields, context=None):
        """Prefill the wizard: default bank/cash rows, the current user's
        company and currency, the Indonesian chart template if installed,
        its default sale/purchase taxes, and 10% tax rates."""
        res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
        tax_templ_obj = self.pool.get('account.tax.template')
        if 'bank_accounts_id' in fields:
            res.update({'bank_accounts_id': [{'acc_name': _('Cash'), 'account_type': 'cash'},{'acc_name': _('Bank'), 'account_type': 'bank'}]})
        if 'company_id' in fields:
            res.update({'company_id': self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0].company_id.id})
        if 'currency_id' in fields:
            company_id = res.get('company_id') or False
            if company_id:
                # derive the default currency from the company's country
                company_obj = self.pool.get('res.company')
                country_id = company_obj.browse(cr, uid, company_id, context=context).country_id.id
                currency_id = company_obj.on_change_country(cr, uid, company_id, country_id, context=context)['value']['currency_id']
                res.update({'currency_id': currency_id})
        ids = self.pool.get('account.chart.template').search(cr, uid, [('visible', '=', True),('name', '=', 'Basic Indonesian Chart of Account')], context=context)
        if ids:
            if 'chart_template_id' in fields:
                res.update({'only_one_chart_template': len(ids) == 1, 'chart_template_id': ids[0]})
            if 'sale_tax' in fields:
                sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "=", ids[0]), ('type_tax_use', 'in', ('sale','all'))], order="sequence")
                res.update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False})
            if 'purchase_tax' in fields:
                purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "=", ids[0]), ('type_tax_use', 'in', ('purchase','all'))], order="sequence")
                res.update({'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
        # Indonesian VAT default rate
        res.update({
            'purchase_tax_rate': 10.0,
            'sale_tax_rate': 10.0,
        })
        return res

    def _load_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, account_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
        '''
        This function generates all the objects from the templates

        :param template_id: id of the chart template to load
        :param company_id: id of the company the wizard is running for
        :param code_digits: integer that depicts the number of digits the accounts code should have in the COA
        :param obj_wizard: the current wizard for generating the COA from the templates
        :param acc_ref: Mapping between ids of account templates and real accounts created from them
        :param taxes_ref: Mapping between ids of tax templates and real taxes created from them
        :param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
        :returns: return a tuple with a dictionary containing
            * the mapping between the account template ids and the ids of the real accounts that have been generated
              from them, as first item,
            * a similar dictionary for mapping the tax templates and taxes, as second item,
            * a last identical containing the mapping of tax code templates and tax codes
        :rtype: tuple(dict, dict, dict)
        '''
        if account_ref is None:
            account_ref = {}
        if taxes_ref is None:
            taxes_ref = {}
        if tax_code_ref is None:
            tax_code_ref = {}
        template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_acc_tax = self.pool.get('account.tax')
        obj_tax_temp = self.pool.get('account.tax.template')
        obj_acc_template = self.pool.get('account.account.template')
        obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
        # create all the tax code.
        tax_code_ref.update(obj_tax_code_template.generate_tax_code(cr, uid, template.tax_code_root_id.id, company_id, context=context))
        # Generate taxes from templates.
        tax_templates = [x for x in template.tax_template_ids]
        generated_tax_res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref, company_id, context=context)
        taxes_ref.update(generated_tax_res['tax_template_to_tax'])
        # Generating Accounts from templates.
        account_template_ref = obj_acc_template.generate_account(cr, uid, template_id, taxes_ref, account_ref, code_digits, company_id, context=context)
        account_ref.update(account_template_ref)
        # writing account values on tax after creation of accounts
        for key,value in generated_tax_res['account_dict'].items():
            if value['account_collected_id'] or value['account_paid_id']:
                obj_acc_tax.write(cr, uid, [key], {
                    'account_collected_id': account_ref.get(value['account_collected_id'], False),
                    'account_paid_id': account_ref.get(value['account_paid_id'], False),
                })
        # NOTE(review): reconstructed nesting — the three generation calls
        # below are assumed to be skipped for the Indonesian chart (they are
        # handled separately by execute()); confirm against upstream.
        if template.name <> 'Basic Indonesian Chart of Account':
            # Create Journals
            self.generate_journals(cr, uid, template_id, account_ref, company_id, context=context)
            # generate properties function
            self.generate_properties(cr, uid, template_id, account_ref, company_id, context=context)
            # Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
            obj_fiscal_position_template.generate_fiscal_position(cr, uid, template_id, taxes_ref, account_ref, company_id, context=context)
        return account_ref, taxes_ref, tax_code_ref

    def execute(self, cr, uid, ids, context=None):
        '''
        This function is called at the confirmation of the wizard to generate the COA from the templates. It will read
        all the provided information to create the accounts, the banks, the journals, the taxes, the tax codes, the
        accounting properties... accordingly for the chosen company.
        '''
        obj_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        obj_wizard = self.browse(cr, uid, ids[0])
        company_id = obj_wizard.company_id.id
        self.pool.get('res.company').write(cr, uid, [company_id], {'currency_id': obj_wizard.currency_id.id})
        # When we install the CoA of first company, set the currency to price types and pricelists
        if company_id==1:
            for ref in (('product','list_price'),('product','standard_price'),('product','list0'),('purchase','list0')):
                try:
                    tmp2 = obj_data.get_object_reference(cr, uid, *ref)
                    if tmp2:
                        self.pool.get(tmp2[0]).write(cr, uid, tmp2[1], {
                            'currency_id': obj_wizard.currency_id.id
                        })
                except ValueError, e:
                    # missing xml-id: module providing it is not installed
                    pass
        # If the floats for sale/purchase rates have been filled, create templates from them
        self._create_tax_templates_from_rates(cr, uid, obj_wizard, company_id, context=context)
        # Install all the templates objects and generate the real objects
        acc_template_ref, taxes_ref, tax_code_ref = self._install_template(cr, uid, obj_wizard.chart_template_id.id, company_id, code_digits=obj_wizard.code_digits, obj_wizard=obj_wizard, context=context)
        # write values of default taxes for product as super user
        # NOTE(review): SUPERUSER_ID is not imported anywhere in this file —
        # confirm it is injected elsewhere, otherwise these calls NameError.
        if obj_wizard.sale_tax and taxes_ref:
            ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.product', "taxes_id", [taxes_ref[obj_wizard.sale_tax.id]], for_all_users=True, company_id=company_id)
        if obj_wizard.purchase_tax and taxes_ref:
            ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.product', "supplier_taxes_id", [taxes_ref[obj_wizard.purchase_tax.id]], for_all_users=True, company_id=company_id)
        if obj_wizard.chart_template_id.name <> 'Basic Indonesian Chart of Account':
            # Create Bank journals
            self._create_bank_journals_from_o2m(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
        else:
            # Indonesian chart: build currency/cash/bank accounts instead
            self.create_currency_account(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
            self.create_cash_account(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
            self.create_bank_account(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
        return {}

    def create_currency_account(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
        """Create one real account per (template name in kriteria_account)
        per selected currency, with a fresh unused account code."""
        obj_account = self.pool.get('account.account')
        obj_account_template = self.pool.get('account.account.template')
        code_digits = obj_wizard.code_digits
        # Template names that get one copy per foreign currency.
        # NOTE: 'Purchase Discont' spelling must match the template data.
        kriteria_account = ['Account Receivable',
            'Advance Payment For Purchase',
            'Deposit For Purchase',
            'Trade Payable',
            'Trade Payable Import',
            'Purchase Discont',
            'Purchase Return',
            'Advance Payment For Sales',
            'Deposit For Sales',
            'Sales Revenue',
            'Sales Return',
            'Sales Price Different',
            'Sales Discount'
        ]
        if obj_wizard.currency_use_ids:
            kriteria = [('name', 'in', kriteria_account)]
            account_template_ids = obj_account_template.search(cr, uid, kriteria)
            for account_template in obj_account_template.browse(cr, uid, account_template_ids):
                current_num = 1
                for currency in obj_wizard.currency_use_ids:
                    check = 0
                    # bump the numeric suffix until the code is unused
                    while check == 0:
                        new_code = str(account_template.code.ljust(code_digits-len(str(current_num)), '0')) + '0' + str(current_num)
                        kriteria_check_new_code = [('code', '=', new_code)]
                        check_new_code_ids = obj_account_template.search(cr, uid, kriteria_check_new_code)
                        if not check_new_code_ids:
                            check += 1
                        else:
                            current_num += 1
                    # copy type/user_type from the template's first child (IDR account)
                    kriteria_type = [('parent_id', '=', account_template.id)]
                    account_idr_ids = obj_account_template.search(cr, uid, kriteria_type)
                    account_idr = obj_account_template.browse(cr, uid, account_idr_ids)[0]
                    vals = {
                        'code' : new_code,
                        'name': account_template.name + ' ' + currency.name,
                        'user_type': account_idr.user_type.id,
                        'type': account_idr.type,
                        'currency_id': currency.id,
                        'parent_id' : acc_template_ref[account_template.id],
                    }
                    obj_account.create(cr, uid, vals, context=context)
                    current_num += 1
        return True

    def create_cash_account(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
        """Create one liquidity account per wizard cash row, parented under
        the 'Cash' template account."""
        obj_account = self.pool.get('account.account')
        obj_account_template = self.pool.get('account.account.template')
        code_digits = obj_wizard.code_digits
        obj_data = self.pool.get('ir.model.data')
        kriteria_account = ['Cash']
        if obj_wizard.currency_use_ids:
            kriteria = [('name', 'in', kriteria_account)]
            account_template_ids = obj_account_template.search(cr, uid, kriteria)
            for account_template in obj_account_template.browse(cr, uid, account_template_ids):
                current_num = 1
                for cash_account in obj_wizard.cash_account_ids:
                    check = 0
                    # bump the numeric suffix until the code is unused
                    while check == 0:
                        new_code = str(account_template.code.ljust(code_digits-len(str(current_num)), '0')) + '0' + str(current_num)
                        kriteria_check_new_code = [('code', '=', new_code)]
                        check_new_code_ids = obj_account_template.search(cr, uid, kriteria_check_new_code)
                        if not check_new_code_ids:
                            check += 1
                        else:
                            current_num += 1
                    tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_cash')
                    cash_type = tmp and tmp[1] or False
                    vals = {
                        'code' : new_code,
                        'name': cash_account.name,
                        'user_type': cash_type,
                        'type': 'liquidity',
                        'currency_id': cash_account.currency_id and cash_account.currency_id.id or False,
                        'parent_id' : acc_template_ref[account_template.id],
                    }
                    #raise osv.except_osv(_('Error !'), _('%s')%vals)
                    obj_account.create(cr, uid, vals, context=context)
                    current_num += 1
        return True

    def create_bank_account(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
        """Create one liquidity account per wizard bank row, parented under
        the 'Bank' template account."""
        obj_account = self.pool.get('account.account')
        obj_account_template = self.pool.get('account.account.template')
        code_digits = obj_wizard.code_digits
        obj_data = self.pool.get('ir.model.data')
        kriteria_account = ['Bank']
        if obj_wizard.currency_use_ids:
            kriteria = [('name', 'in', kriteria_account)]
            account_template_ids = obj_account_template.search(cr, uid, kriteria)
            for account_template in obj_account_template.browse(cr, uid, account_template_ids):
                current_num = 1
                for bank_account in obj_wizard.bank_account_ids:
                    check = 0
                    # bump the numeric suffix until the code is unused
                    while check == 0:
                        new_code = str(account_template.code.ljust(code_digits-len(str(current_num)), '0')) + '0' + str(current_num)
                        kriteria_check_new_code = [('code', '=', new_code)]
                        check_new_code_ids = obj_account_template.search(cr, uid, kriteria_check_new_code)
                        if not check_new_code_ids:
                            check += 1
                        else:
                            current_num += 1
                    tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_bank')
                    bank_type = tmp and tmp[1] or False
                    vals = {
                        'code' : new_code,
                        'name': bank_account.name,
                        'user_type': bank_type,
                        'type': 'liquidity',
                        'currency_id': bank_account.currency_id and bank_account.currency_id.id or False,
                        'parent_id' : acc_template_ref[account_template.id],
                    }
                    #raise osv.except_osv(_('Error !'), _('%s')%vals)
                    obj_account.create(cr, uid, vals, context=context)
                    current_num += 1
        return True

# instantiate to register the model with the OSV pool (legacy OpenERP convention)
wizard_multi_charts_accounts()
|
import folium
import pandas as pd
from jobs.routes import *
from jobs import db
NY_COORDINATES = (40.7128, -74.0060)  # (lat, lng) of New York City
# presumably returns a list of dicts with 'lat'/'lng' keys — TODO confirm
# against jobs.routes.lat_long_trace
job_locations = lat_long_trace()
# create empty map zoomed in on New York
ny_map = folium.Map(location=NY_COORDINATES,tiles = 'Stamen Toner', zoom_start=12)

def map_job_locations():
    """Add one popup marker per job location to the shared ny_map and
    return it."""
    for item in job_locations:
        popup = folium.Popup(agencies_per_location([item['lat'], item['lng']]), parse_html=True)
        marker = folium.Marker(location = [item['lat'], item['lng']], popup = popup)
        marker.add_to(ny_map)
    return ny_map

# Render the map and persist it as a standalone HTML file.
location_map = map_job_locations()
location_map.save('NYC_Jobs_Locations.html')
# add a marker for every record in the filtered data, use a clustered view
# for item in job_locations:
#     map.simple_marker(location = [item['lat'], item['lng'])
#
# map.create_map(path='ny_map.html')
# display(map)
|
import numpy as np
np.set_printoptions(threshold=np.inf)
from scipy import ndimage
from skimage.morphology import thin
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize,binary_closing
import mahotas as mh
import math
from utilities import bcolors
class EnergyCalibrator:
    """Calibrate the energy of a reconstructed cluster ('supercluster').

    The cluster image is thinned to a 1-pixel skeleton, sliced into circles
    centered along that skeleton, and each slice's light integral is
    converted to keV with a density-dependent saturation correction
    (saturationFactorNLO).
    """

    def __init__(self,params,debugmode=False):
        # p0..p4, norm, xscale: parameters of the saturation formula
        self.p0 = params['p0']
        self.p1 = params['p1']
        self.p2 = params['p2']
        self.p3 = params['p3']
        self.p4 = params['p4']
        self.norm = params['norm']
        self.xscale = params['xscale']
        self.noiseThreshold = params['noiseThr']
        self.sliceRadius = params['sliceRadius']
        self.length = -1  # set by getSlices(); -1 means "not yet computed"
        self.debug = debugmode

    def getClusterMatrix(self,hits):
        """Pack sparse (x, y, z) hits into a dense 2-D array of z values."""
        xs = [x[0] for x in hits]
        ys = [x[1] for x in hits]
        xmin = int(min(xs)); xmax = int(max(xs))
        ymin = int(min(ys)); ymax = int(max(ys))
        data = np.zeros((int(xmax-xmin),int(ymax-ymin)), dtype=float)
        for x,y,z in hits:
            # NOTE(review): the extra -1 shift maps x == xmin to index -1
            # (wraps to the last row) — looks like a possible off-by-one,
            # confirm intended.
            data[int(x-xmin-1),int(y-ymin-1)] = z
        return data

    def branchedPoints(self,skel):
        """Hit-or-miss detection of skeleton branch points (2 = don't care)."""
        branch1=np.array([[2, 1, 2], [1, 1, 1], [2, 2, 2]])
        branch2=np.array([[1, 2, 1], [2, 1, 2], [1, 2, 1]])
        branch3=np.array([[1, 2, 1], [2, 1, 2], [1, 2, 2]])
        branch4=np.array([[2, 1, 2], [1, 1, 2], [2, 1, 2]])
        branch5=np.array([[1, 2, 2], [2, 1, 2], [1, 2, 1]])
        branch6=np.array([[2, 2, 2], [1, 1, 1], [2, 1, 2]])
        branch7=np.array([[2, 2, 1], [2, 1, 2], [1, 2, 1]])
        branch8=np.array([[2, 1, 2], [2, 1, 1], [2, 1, 2]])
        branch9=np.array([[1, 2, 1], [2, 1, 2], [2, 2, 1]])
        br1=mh.morph.hitmiss(skel,branch1)
        br2=mh.morph.hitmiss(skel,branch2)
        br3=mh.morph.hitmiss(skel,branch3)
        br4=mh.morph.hitmiss(skel,branch4)
        br5=mh.morph.hitmiss(skel,branch5)
        br6=mh.morph.hitmiss(skel,branch6)
        br7=mh.morph.hitmiss(skel,branch7)
        br8=mh.morph.hitmiss(skel,branch8)
        br9=mh.morph.hitmiss(skel,branch9)
        return br1+br2+br3+br4+br5+br6+br7+br8+br9

    def endPoints(self,skel):
        """Hit-or-miss detection of skeleton end points (2 = don't care)."""
        endpoint1=np.array([[0, 0, 0],
                            [0, 1, 0],
                            [2, 1, 2]])
        endpoint2=np.array([[0, 0, 0],
                            [0, 1, 2],
                            [0, 2, 1]])
        endpoint3=np.array([[0, 0, 2],
                            [0, 1, 1],
                            [0, 0, 2]])
        endpoint4=np.array([[0, 2, 1],
                            [0, 1, 2],
                            [0, 0, 0]])
        endpoint5=np.array([[2, 1, 2],
                            [0, 1, 0],
                            [0, 0, 0]])
        endpoint6=np.array([[1, 2, 0],
                            [2, 1, 0],
                            [0, 0, 0]])
        endpoint7=np.array([[2, 0, 0],
                            [1, 1, 0],
                            [2, 0, 0]])
        endpoint8=np.array([[0, 0, 0],
                            [2, 1, 0],
                            [1, 2, 0]])
        ep1=mh.morph.hitmiss(skel,endpoint1)
        ep2=mh.morph.hitmiss(skel,endpoint2)
        ep3=mh.morph.hitmiss(skel,endpoint3)
        ep4=mh.morph.hitmiss(skel,endpoint4)
        ep5=mh.morph.hitmiss(skel,endpoint5)
        ep6=mh.morph.hitmiss(skel,endpoint6)
        ep7=mh.morph.hitmiss(skel,endpoint7)
        ep8=mh.morph.hitmiss(skel,endpoint8)
        ep = ep1+ep2+ep3+ep4+ep5+ep6+ep7+ep8
        return ep

    def pruning(self,skeleton, size):
        '''remove iteratively end points "size"
        times from the skeleton
        '''
        for i in range(0, size):
            endpoints = self.endPoints(skeleton)
            endpoints = np.logical_not(endpoints)
            skeleton = np.logical_and(skeleton,endpoints)
        return skeleton

    def points_in_circle_np(self, radius, x0=0, y0=0):
        """Return integer lattice points within *radius* of (x0, y0)."""
        x_ = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)
        y_ = np.arange(y0 - radius - 1, y0 + radius + 1, dtype=int)
        # boolean distance mask over the bounding box; np.where yields the
        # row/col indices of in-circle points
        x, y = np.where((np.hypot((x_-x0)[:,np.newaxis], y_-y0)<= radius))
        points = []
        for x, y in zip(x_[x], y_[y]):
            points.append((x, y))
        return points

    def uncalibIntegral(self,hits):
        """Raw (uncalibrated) light integral: sum of z over all hits."""
        return sum([h[2] for h in hits])

    def density(self, sliceOfClu):
        """Mean light per above-noise hit in one slice (0 if none)."""
        nhits = len([h for h in sliceOfClu if h[2]>self.noiseThreshold])
        integral = max(sum([h[2] for h in sliceOfClu]),0)
        return integral/nhits if nhits>0 else 0

    def saturationFactorNLO(self,density):
        ## this gives eV/ph
        if density<=0: # protection for the formula below
            ret = 0.85 # seems to provide some continuity
        else:
            x = density/self.xscale
            ret = (self.p3 + self.p4*x)/(self.p0 * (1-math.exp(-1*(math.pow(x,self.p2)/self.p1))))/self.norm
        return ret

    def calibratedEnergy(self,hits):
        """Return (total keV, per-slice keV list, slice centers)."""
        slices,centers = self.getSlices(hits)
        integrals = [max(0.,sum([h[2] for h in sl])) for sl in slices]
        densities = [self.density(sl) for sl in slices]
        ## the energy is now in keV
        calibSlicesEnergy = [self.saturationFactorNLO(densities[sl]) * integrals[sl] / 1000. for sl in range(len(densities))]
        calibEnergy = sum(calibSlicesEnergy)
        if self.debug:
            print (bcolors.OKBLUE + "Slices bare sum = {bsum:.1f}".format(bsum=sum(integrals)) + bcolors.ENDC)
            print ("Slices integral = " + ', '.join('{:.1f}'.format(i) for i in integrals))
            print ("Slices densities = " + ', '.join('{:.1f}'.format(i) for i in densities))
            print ("Slices calib energy = " + ', '.join('{:.1f}'.format(i) for i in calibSlicesEnergy))
            print ("Slices centers = " + ', '.join('({:.1f},{:.1f})'.format(i[0],i[1]) for i in centers))
            print (bcolors.OKGREEN + "supercluster calibrated integral = {ene:.1f} keV".format(ene=calibEnergy) + bcolors.ENDC)
        return calibEnergy,calibSlicesEnergy,centers

    def getSlices(self,hits):
        """Partition the cluster into circular slices centered on skeleton
        points; also sets self.length (slice count x slice radius)."""
        cluster_matrix = self.getClusterMatrix(hits) # this has x,y,z
        cluster_img = cluster_matrix != 0 # this is the binary version to run the skeletonization
        skeleton = thin(cluster_img) # this is the 1-pixel wide skeleton of the cluster
        pruned = self.pruning(skeleton,10) # remove little branches
        skel_points = np.column_stack(np.nonzero(pruned))
        remaining_skel_points = [(point[0],point[1]) for point in skel_points] # simpler with an array of tuples
        # NOTE: remaining_cluster aliases cluster_img (no copy) — the loop
        # below mutates it; cluster_img is not reused afterwards, so this is
        # safe as written.
        remaining_cluster = cluster_img
        slices = []
        slice_centers = []
        while len(remaining_skel_points):
            p = remaining_skel_points[-1]
            clu_slice = []
            circlepoints = self.points_in_circle_np(self.sliceRadius,p[0],p[1])
            for cp in circlepoints:
                ix = cp[0]; iy = cp[1];
                if ix>=cluster_matrix.shape[0] or iy>=cluster_matrix.shape[1] or ix<0 or iy<0:
                    continue
                z = cluster_matrix[ix,iy]
                if remaining_cluster[ix,iy]:
                    # claim this pixel for the current slice
                    clu_slice.append((ix,iy,z))
                    remaining_cluster[ix,iy] = False
                # this includes the center and all the intersection of the circle with the skeleton
                if cp in remaining_skel_points:
                    remaining_skel_points.remove(cp)
            #remaining_skel_points = np.setdiff1d(remaining_skel_points,circlepoints)
            slices.append(clu_slice)
            slice_centers.append((p[0],p[1]))
        #print ("slices ",slices)
        #print ("Found ",len(slices)," slices")
        # this is a better estimate of the length of a curved cluster (in pixels)
        #self.length = len(skeleton) # this would be correct if there are no points ZS along the path. If there are many, then it's an underestimate
        # approximate locally with the radius of the slice. It's ok for small enough slice radius
        # N.B. use radius, not diameter, since the center is the first available point along the skeleton
        self.length = len(slices)*self.sliceRadius
        return slices,slice_centers

    def clusterLength(self):
        """Return the skeleton-based cluster length, or -999 if
        calibratedEnergy/getSlices has not been run yet."""
        l = -999
        if self.length<0:
            print ("ERROR! You asked for EnergyCalibrator.length() before getting the calibrated energy, and this is not yet set!!")
        else:
            l = self.length
        return l
from skimage import io
from skimage.util import img_as_ubyte
if __name__ == '__main__':
    ## this tests the calibrator with saved numpy array of one cluster
    # load hits
    hits = np.load('debug_code/supercluster3.npy')
    # SECURITY NOTE: eval() executes arbitrary code from the config file.
    # Tolerable only because the file is local and trusted; prefer
    # ast.literal_eval (or json) for a plain parameter dict.
    # Fixed: the file handle was previously opened and never closed.
    with open('modules_config/energyCalibrator.txt', 'r') as filePar:
        params = eval(filePar.read())
    calibrator = EnergyCalibrator(params)
    uncal = calibrator.uncalibIntegral(hits)
    print ("Uncalibrated integral (photons) = ",uncal)
    cal = calibrator.calibratedEnergy(hits)
    print ("Calibrated energy (keV) = ",cal)
    ## example figure of the method: show the pruned skeleton of the cluster
    cluster_matrix = calibrator.getClusterMatrix(hits) # this has x,y,z
    image = cluster_matrix != 0 # binary version used for skeletonization
    thinned = thin(image)
    pruned = calibrator.pruning(thinned,10)
    fig, ax = plt.subplots(figsize=(10,10))
    font = {'family': 'arial',
            'color': 'black',
            'weight': 'normal',
            'size': 24,
            }
    ax.imshow(pruned, cmap=plt.cm.gray_r)
    ax.set_title('supercluster axis',font,pad=40)
    ax.invert_yaxis()
    plt.xlabel('x (pixels)', font, labelpad=20)
    plt.ylabel('y (pixels)', font, labelpad=20)
    fig.tight_layout()
    for ext in ['pdf','png']:
        plt.savefig('skeleton_paper.{ext}'.format(ext=ext))
|
from django.conf import settings
from rest_framework import serializers
from openbook_auth.models import User, UserProfile
from openbook_auth.validators import username_characters_validator, user_username_exists
from openbook_common.models import Badge
from openbook_common.serializers_fields.user import IsFollowingField, FollowListsField
from openbook_follows.models import Follow
from openbook_lists.models import List
from openbook_lists.validators import list_id_exists
class RequestToFollowUserSerializer(serializers.Serializer):
    """Validates the payload for requesting to follow a user."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
class ApproveUserFollowRequestSerializer(serializers.Serializer):
    """Validates the username when approving a pending follow request."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
class RejectUserFollowRequestSerializer(serializers.Serializer):
    """Validates the username when rejecting a pending follow request."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
class FollowUserRequestSerializer(serializers.Serializer):
    """Validates a follow request: target username plus optional list ids the
    followed user should be added to."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
    # ids of the follower's lists to file this follow under
    lists_ids = serializers.ListSerializer(
        required=False,
        child=serializers.IntegerField(validators=[list_id_exists])
    )
class UserProfileBadgeSerializer(serializers.ModelSerializer):
    """Serializes a profile badge (keyword and its human-readable description)."""
    class Meta:
        model = Badge
        fields = (
            'keyword',
            'keyword_description'
        )
class UserProfileSerializer(serializers.ModelSerializer):
    """Read serializer for a user's profile: name, avatar and badges."""
    badges = UserProfileBadgeSerializer(many=True)

    class Meta:
        model = UserProfile
        fields = (
            'name',
            'avatar',
            'badges'
        )
class FollowUserListSerializer(serializers.ModelSerializer):
    """Minimal list representation (id + name) embedded in follow responses."""
    class Meta:
        model = List
        fields = (
            'id',
            'name',
        )
class FollowUserSerializer(serializers.ModelSerializer):
    """Serializes a followed user together with the requester's relationship
    to them (is_following flag and the lists the follow is filed under)."""
    is_following = IsFollowingField()
    follow_lists = FollowListsField(list_serializer=FollowUserListSerializer)

    class Meta:
        model = User
        fields = (
            'id',
            'username',
            'is_following',
            'follow_lists'
        )
class FollowSerializer(serializers.ModelSerializer):
    """Serializes a Follow row, embedding the followed user's details."""
    followed_user = FollowUserSerializer(many=False)

    class Meta:
        model = Follow
        fields = (
            'id',
            'user',
            'lists',
            'followed_user',
        )
class ReceivedFollowRequestsRequestSerializer(serializers.Serializer):
    """Pagination parameters (max_id cursor, count capped at 20) for listing
    received follow requests."""
    max_id = serializers.IntegerField(
        required=False,
    )
    count = serializers.IntegerField(
        required=False,
        max_value=20
    )
class DeleteFollowSerializer(serializers.Serializer):
    """Validates the username whose follow should be removed."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
class UpdateFollowSerializer(serializers.Serializer):
    """Validates a follow update: target username plus the new set of list ids."""
    username = serializers.CharField(
        max_length=settings.USERNAME_MAX_LENGTH,
        allow_blank=False,
        validators=[
            username_characters_validator,
            user_username_exists
        ],
        required=False
    )
    lists_ids = serializers.ListSerializer(
        required=False,
        child=serializers.IntegerField(validators=[list_id_exists])
    )
|
import sys
import time
def text2word(filename):
    """Read *filename* and return its whitespace-separated tokens as a list.

    The original wrapped str.split() in a redundant identity comprehension;
    split() already returns a list.
    """
    with open(filename) as f:
        return f.read().split()
#########################
# levenschtein distance #
#########################
def levenschtein(source, target):
    """Classic Levenshtein edit distance (unit-cost insert/delete/substitute)."""
    # Prepend a sentinel so index 0 represents the empty prefix.
    src = ' ' + source
    tgt = ' ' + target
    rows = len(src)
    cols = len(tgt)
    # dist[i][j] = edit distance between the first i chars of source and the
    # first j chars of target.
    dist = [[0] * cols for _ in range(rows)]
    dist[0] = list(range(cols))
    for i in range(rows):
        dist[i][0] = i
    for i in range(1, rows):
        for j in range(1, cols):
            subst = 0 if src[i] == tgt[j] else 1
            dist[i][j] = min(dist[i - 1][j] + 1,          # deletion
                             dist[i][j - 1] + 1,          # insertion
                             dist[i - 1][j - 1] + subst)  # substitution / match
    return dist[rows - 1][cols - 1]
##################
# osa distance 2 #
##################
def osa(source, target):
    """Optimal string alignment distance: Levenshtein plus adjacent transposition.

    Unlike full Damerau-Levenshtein, a transposed pair may not be edited again
    afterwards.
    """
    source = ' ' + source  # sentinel: index 0 is the empty prefix
    target = ' ' + target
    n = len(source)
    m = len(target)
    # distance matrix over prefixes
    d = [[0] * m for _ in range(n)]
    d[0] = list(range(m))
    for i in range(n):
        d[i][0] = i
    for i in range(1, n):
        for j in range(1, m):
            cost = 0 if source[i] == target[j] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
            # BUG FIX: the original tested `i>1 and i>1`, so with j == 1 the
            # transposition case could read d[i-2][-1] (the end of a row) and
            # corrupt the result.  Both indices must be > 1.
            if i > 1 and j > 1 and source[i] == target[j - 1] and source[i - 1] == target[j]:
                d[i][j] = min(d[i][j], d[i - 2][j - 2] + cost)
    return d[n - 1][m - 1]
####################
# damerau distance #
####################
def damerau(source, target):
    """True Damerau-Levenshtein distance (insert/delete/substitute/transpose).

    Full algorithm with the `da` last-occurrence table, so transposed
    characters may be edited again afterwards (unlike OSA).  Only lowercase
    ASCII letters are supported, since `da` is keyed on a-z.
    """
    n = len(source) + 2
    m = len(target) + 2
    # da[c] = 1-based row index of the last occurrence of c in source.
    # BUG FIX: the original zipped 26 letters against only 24 zeros, silently
    # dropping 'y' and 'z' and raising KeyError for words containing them.
    letters = 'abcdefghijklmnopqrstuvwxyz'
    da = {letter: 0 for letter in letters}
    # The matrix has an extra "-1" row/column (reached via Python negative
    # indexing) holding maxdist, so out-of-range transposition candidates
    # never win the min().
    d = [[0] * m for _ in range(n)]
    maxdist = len(source) + len(target)
    d[-1][-1] = maxdist
    for i in range(len(source) + 1):
        d[i][-1] = maxdist
        d[i][0] = i
    # BUG FIX: this loop ran over range(len(source)+1), leaving columns beyond
    # len(source) uninitialised (zero) whenever target was longer than source,
    # which corrupted the result (e.g. damerau('ca','abc') no longer gave 2).
    for j in range(len(target) + 1):
        d[-1][j] = maxdist
        d[0][j] = j
    for i in range(1, len(source) + 1):
        db = 0  # column of the last match within this row
        for j in range(1, len(target) + 1):
            k = int(da[target[j - 1]])
            l = db
            if source[i - 1] == target[j - 1]:
                cost = 0
                db = j
            else:
                cost = 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost,  # substitution
                          d[k - 1][l - 1] + (i - k - 1) + 1 + (j - l - 1))  # transposition
        da[source[i - 1]] = i
    return d[len(source)][len(target)]
print(damerau('ca','abc')) # should be 2
# input_word is a list of possibly misspelled words
# right_word is a list of correct words
def correct_words(input_word, right_word, mode):
    """Spell-correct each word in *input_word* against dictionary *right_word*.

    mode selects the metric: '1' Levenshtein, '2' OSA, '3' Damerau-Levenshtein.
    Returns a list of [original, best_match, distance] triples; a word already
    in the dictionary maps to itself at distance 0 without scanning.
    Raises ValueError for an unknown mode.
    """
    distance_fn = {'1': levenschtein, '2': osa, '3': damerau}
    if mode not in distance_fn:
        # BUG FIX: the original fell through to an undefined `dist` (NameError)
        # for unknown modes; fail fast with a meaningful error instead.
        raise ValueError("mode must be '1', '2' or '3', got %r" % (mode,))
    dist_of = distance_fn[mode]
    result = []
    for word in input_word:
        if word in right_word:
            result.append([word, word, 0])
            continue
        best_dist = 1000000
        best_word = None
        for candidate in right_word:
            dist = dist_of(word, candidate)
            if dist < best_dist:
                best_dist = dist
                best_word = candidate
        result.append([word, best_word, best_dist])
    return result
def correct_words1(input_word, right_word, mode):
    """Variant of correct_words without the exact-match shortcut.

    Every input word is scanned against the whole dictionary even when it is
    already spelled correctly.  Unknown modes yield an empty result list.
    """
    metric = {'1': levenschtein, '2': osa, '3': damerau}.get(mode)
    result = []
    if metric is not None:
        for word in input_word:
            best_dist = 1000000
            best_word = None
            for candidate in right_word:
                dist = metric(word, candidate)
                if dist < best_dist:
                    best_dist = dist
                    best_word = candidate
            result.append([word, best_word, best_dist])
    return result
def main():
    """CLI entry point: <mode> <input_file> <dictionary_file> <output_file>.

    mode: '1' Levenshtein, '2' OSA, '3' Damerau-Levenshtein.
    Writes one "corrected_word distance" line per input word.
    """
    mode = sys.argv[1]
    input_path = sys.argv[2]  # renamed: `input` shadowed the builtin
    word_dict = sys.argv[3]
    output = sys.argv[4]
    input_word = text2word(input_path)
    right_word = text2word(word_dict)
    ##################
    # Tasks 1/2/3    #
    ##################
    # BUG FIX: the original only handled modes '1' and '2'; mode '3' fell
    # through and crashed on the undefined `result` below.  correct_words
    # dispatches on mode itself, so one timed call covers all tasks.
    start = time.time()
    result = correct_words(input_word, right_word, mode)
    end = time.time()
    duration = end - start  # kept for the optional timing dump below
    # write out to a file
    with open(output, 'w') as f:
        for entry in result:
            f.write("{} {} {}".format(entry[1], entry[2], '\n'))
        #f.write(str(duration))


if __name__ == '__main__':
    main()
|
import argparse
import skrf as rf
from pathlib import Path
from scipy.interpolate import interp1d
from numpy import log10
# FIX: corrected user-facing typos in the help text ("S-paramter" ->
# "S-parameter", "freqeuncy" -> "frequency").
parser = argparse.ArgumentParser(description = 'script to return the amplitude of a circuit at a given frequency based on its measured S-parameter file')
parser.add_argument("file", help="The S-parameter file", type=Path)
parser.add_argument("-f", help="Frequency.", type=float)
parser.add_argument("-pin", help="Input port. Default is 1.", type=int, default=1)
parser.add_argument("-pout", help="Output port. Default is 2.", type=int, default=2)
parser.add_argument('-l','--linear', action='store_true', help="Prints the results in linear value. Otherwise the result is in dB")
args = parser.parse_args()

# skrf.Network can not accept a Path type as argument
nw = rf.Network(str(args.file.resolve()))
# |S[pout,pin]| over the measured frequency grid (CLI ports are 1-based)
sp = abs(nw.s[:,(args.pout-1),(args.pin-1)])
# cubic interpolation so -f need not lie exactly on a measured point
f = interp1d(nw.f, sp, kind="cubic")
ret = f(args.f)
txt = "S%d%d: " % (args.pout, args.pin)
if args.linear:
    print(txt + f"{ret:.2}")
else:
    # magnitude to dB
    db = 20*log10(ret)
    print(txt + f"{db:.2f} dB")
|
from django.test import TestCase
from requests.exceptions import HTTPError
from requests.models import Response
from generic_api.errors_handler import HttpErrorsHandler
class GenericErrorsHandlerTestCase(TestCase):
    """Unit tests for HttpErrorsHandler.validate."""

    def setUp(self):
        self.errors_handler = HttpErrorsHandler()

    def test_response_valid(self):
        # A 2xx response should be decoded to its JSON payload.
        # (note: {'success': 1} == {'success': True} because 1 == True)
        response = Response()
        response._content = b'{"success": 1}'
        response.status_code = 200
        self.assertEqual(self.errors_handler.validate(response), {'success': True})

    def test_response_invalid(self):
        # A 4xx response must raise requests' HTTPError.
        response = Response()
        response._content = b'{"success": 0}'
        response.status_code = 400
        with self.assertRaises(HTTPError):
            self.errors_handler.validate(response)
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Time : 18-7-26 下午4:59
# @Author : viaeou
# @Site :
# @File : tensorboard_start.py
# @Software: PyCharm
import os
import tensorflow as tf
# Silence TensorFlow's C++ INFO/WARNING log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

with tf.name_scope('inputs'):
    # define placeholder for inputs to network (column vectors of floats)
    train_x = tf.placeholder(tf.float32, shape=[None, 1], name='train_x')
    train_y = tf.placeholder(tf.float32, shape=[None, 1], name='train_y')
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Fully-connected layer: inputs @ W + b, optionally activated.

    All ops are grouped under name scopes so the graph renders cleanly in
    TensorBoard.
    """
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            w = tf.Variable(tf.random_normal([in_size, out_size]), name='w')
        with tf.name_scope('bias'):
            # small positive init to keep ReLU units alive at the start
            b = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('add'):
            pre_activation = tf.add(tf.matmul(inputs, w), b)
        if activation_function is None:
            return pre_activation
        return activation_function(pre_activation, )
# add hidden layer
l1 = add_layer(train_x, 1, 10, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, activation_function=None)

with tf.name_scope('loss'):
    # mean squared error, summed across the output dimension first
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(train_y - prediction),
                                        reduction_indices=[1]))
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess = tf.Session()
# Dump the graph definition so TensorBoard can visualise it.
writer = tf.summary.FileWriter("logs/", sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
# direct to the local dir and run this in terminal:
# $ tensorboard --logdir=logs
|
#!/usr/bin/python
def main():
    # Python 2 script: read a count (unused) and a list of ints, then print
    # the middle element of the sorted list.  len(l)/2 is integer division in
    # Python 2, so for even lengths this is the upper median.
    n = input()
    l = map(int, raw_input().split())
    print sorted(l)[len(l)/2]

if __name__ == '__main__':
    main()
|
from unittest import TestCase, main
from Utilities import StringOperations
class TestStringOperations(TestCase):
    """Unit tests for Utilities.StringOperations."""

    def test_getLastChar(self):
        last_char = StringOperations.getLastChar("PBD-7541")
        self.assertEqual("1", last_char)

    def test_getLastChar_arg_not_string(self):
        # non-string input must raise ValueError
        self.assertRaises(ValueError, StringOperations.getLastChar, 123)

    def test_getDay(self):
        # per this suite's contract, 04-03-2018 maps to weekday number 7
        sunday = StringOperations.getDay("04-03-2018")
        self.assertEqual(7, sunday)

    def test_getDay_invalid_arg(self):
        self.assertRaises(ValueError, StringOperations.getDay, 2018)

    def test_getDay_invalid_string(self):
        self.assertRaises(ValueError, StringOperations.getDay, "day")
# Allow running the suite directly with `python`.
if __name__ == "__main__":
    main()
|
# In-memory message store: channel name -> list of messages, oldest first.
storage = {}


def add(channel, message):
    """Append *message* to *channel*'s history (creating the channel on first
    use) and return the message unchanged."""
    storage.setdefault(channel, []).append(message)
    return message


def get(channel, fromDate=None, toDate=None):
    """Return the full message list for *channel*, or None for an unknown
    channel.  NOTE(review): fromDate/toDate are accepted but not applied -
    confirm whether date filtering is still planned."""
    return storage.get(channel)
"""
Abstraction layer over an agent's neural network.
"""
from typing import Tuple
import torch
import torch.nn.functional as F
from model import ActorCritic
class ActorCriticAgent(object):
    """
    Abstraction layer over an agent's neural network.

    Wraps a worker-local ActorCritic model that is synchronised from a shared
    (cross-process) model at construction time.
    """

    def __init__(self, model: 'ActorCritic', shared_model: 'ActorCritic'):
        self.model = model
        self.shared_model = shared_model
        # Start from the shared parameters so every worker begins in sync.
        self.model.load_state_dict(shared_model.state_dict())

    def act(self,
            state: torch.Tensor,
            hidden_state: torch.Tensor) -> Tuple[int, torch.Tensor,
                                                 torch.Tensor, torch.Tensor,
                                                 torch.Tensor]:
        """
        Sample an action from the current policy, plus the quantities the
        A3C-style loss needs.

        BUG FIX: the return annotation previously declared a 4-tuple although
        five values are returned.

        Returns: action, value, relevant_log_prob, entropy, hidden_state
        """
        raw_probs, value, hidden_state = self.model(state, hidden_state)
        probs = F.softmax(raw_probs, dim=1)
        log_probs = F.log_softmax(raw_probs, dim=1)
        # Policy entropy, commonly used as an exploration bonus in the loss.
        entropy = -(log_probs * probs).sum(1, keepdim=True)
        action = probs.multinomial(num_samples=1)
        relevant_log_prob = log_probs.gather(1, action)
        return action.item(), value, relevant_log_prob, entropy, hidden_state
|
#!/bin/python3

import sys

# Kangaroo problem: kangaroo 1 starts at x1 hopping v1 per jump, kangaroo 2
# at x2 hopping v2.  They land on the same spot iff
# x1 + n*v1 == x2 + n*v2 for some integer n >= 0.
x1,v1,x2,v2 = input().strip().split(' ')
x1,v1,x2,v2 = [int(x1),int(v1),int(x2),int(v2)]

if v2 == v1:
    # Equal speeds: the gap never changes, so they meet only if they already
    # share a position.
    if x1 != x2:
        print("NO")
    else:
        print("YES")
else:
    # Solve x1 + n*v1 == x2 + n*v2 for n.
    n = (x1 - x2) / (v2 - v1)
    # BUG FIX: the original required n > 0, wrongly printing "NO" when both
    # kangaroos start on the same spot (n == 0) with different speeds.
    if n.is_integer() and n >= 0:
        print("YES")
    else:
        print("NO")
|
#!/usr/bin/env python3
# UPS patcher based on UPS module for Rom Patcher JS v20180930 - Marc Robledo 2017-2018
# Author: MinN
import sys
import zlib
CHECKSUM_TARGET = True  # verify the patched output's CRC32 against the patch footer
CHECKSUM_PATCH = False  # skip the patch's own CRC32: NUPS writes a wrong value (known bug)
class ChecksumError(Exception):
    """Raised when a size or CRC32 recorded in the patch does not match the data."""
    pass
def encode_vlv(data):
    """Encode a non-negative integer as a UPS variable-length value.

    Seven payload bits per byte, least-significant first; the high bit marks
    the final byte.  Each continuation subtracts one so every integer has a
    unique encoding.
    """
    out = bytearray()
    while True:
        low7 = data & 0x7f
        data >>= 7
        if not data:
            out.append(0x80 | low7)
            return out
        out.append(low7)
        data -= 1
def read_vlv(array, ptr):
    """Decode a UPS variable-length value from *array* starting at *ptr*.

    Inverse of encode_vlv.  Returns (value, next_ptr).
    """
    value = 0
    weight = 1
    while True:
        byte = array[ptr]
        ptr += 1
        if byte == 2 ** 32:
            # NOTE(review): unreachable for real byte arrays (values < 256);
            # kept to mirror the original JS implementation's guard.
            raise Exception('Can\'t read UPS VLV at 0x{:X}'.format(ptr - 1))
        value += (byte & 0x7f) * weight
        if byte & 0x80:
            return value, ptr
        weight <<= 7
        value += weight
def get_checksum(patch):
    """Read the three CRC32 values stored in a UPS patch's 12-byte footer.

    The footer holds, little-endian: source CRC, target CRC, patch CRC.
    Returns (input_crc, output_crc, patch_crc), or -1 if the file cannot be
    read.
    """
    try:
        with open(patch, "rb") as handle:
            blob = handle.read()
        crc_source = int.from_bytes(blob[-12:-8], byteorder="little")
        crc_target = int.from_bytes(blob[-8:-4], byteorder="little")
        crc_patch = int.from_bytes(blob[-4:], byteorder="little")
        return crc_source, crc_target, crc_patch
    except Exception:
        return -1
def checksum(file):
    """Return the CRC32 of the entire file at path *file*."""
    with open(file, "rb") as handle:
        data = handle.read()
    return zlib.crc32(data)
def patch_ups(source, patch, target):
    """Apply the UPS patch file *patch* to *source*, writing *target*.

    UPS layout: "UPS1" magic, source size (VLV), target size (VLV), then
    hunks of (relative-offset VLV, XOR bytes, 0x00 terminator), then a
    12-byte footer of CRC32s (source, target, patch).
    Raises ChecksumError on any enabled size/CRC mismatch.
    """
    with open(source, "rb") as file:
        source_file = file.read()
    with open(patch, "rb") as file:
        patch_file = file.read()
    # verify the source ROM against the CRC recorded in the footer
    checksum_input = int.from_bytes(patch_file[-12:-8], byteorder="little")
    if checksum_input != zlib.crc32(source_file):
        raise ChecksumError
    # patch self-CRC check is disabled by default (see CHECKSUM_PATCH: NUPS
    # writes a wrong value)
    checksum_patch = int.from_bytes(patch_file[-4:], byteorder="little")
    if CHECKSUM_PATCH and checksum_patch != zlib.crc32(patch_file[:-4]):
        raise ChecksumError
    ptr = 4  # skip the 4-byte "UPS1" magic
    size_input, ptr = read_vlv(patch_file, ptr)
    size_output, ptr = read_vlv(patch_file, ptr)
    # start from the source, zero-padded up to the declared output size
    target_file = bytearray(source_file).ljust(size_output, b'\0')
    if len(source_file) != size_input:
        raise ChecksumError
    rom_offset = 0
    # hunks run until the 12-byte CRC footer begins
    while ptr < len(patch_file) - 12:
        offset, ptr = read_vlv(patch_file, ptr)
        diff = []
        while patch_file[ptr] != 0:
            diff.append(patch_file[ptr])
            ptr += 1
        ptr += 1  # consume the hunk's 0x00 terminator
        rom_offset += offset  # offsets are relative to the previous hunk's end
        for i in range(len(diff)):
            target_file[rom_offset] = target_file[rom_offset] ^ diff[i]
            rom_offset += 1
        rom_offset += 1  # the terminator also advances the ROM position (UPS spec)
    checksum_output = int.from_bytes(patch_file[-8:-4], byteorder="little")
    if CHECKSUM_TARGET and checksum_output != zlib.crc32(target_file):
        raise ChecksumError
    target_fd = open(target, "wb")
    target_fd.write(target_file)
    target_fd.close()
def make_ups(original, modified, target):
    """Create a UPS patch at *target* that transforms *original* into *modified*.

    Scans both files in lockstep, recording each run of differing bytes as a
    (relative offset, XOR data) hunk, then writes magic, sizes, hunks and the
    three-CRC footer.
    """
    with open(original, "rb") as file:
        original_file = file.read()
    with open(modified, "rb") as file:
        modified_file = file.read()
    target_file = open(target, "wb")
    po = 0  # read position in the original file
    pm = 0  # read position in the modified file
    last_diff = 1  # 1-based end of the previous hunk, for relative offsets
    diff_list = []  # collected (relative offset, xor bytes) hunks
    while pm < len(modified_file):
        # past the end of the original, its bytes are treated as 0x00
        b1 = original_file[po] if po < len(original_file) else 0
        b2 = modified_file[pm]
        po += 1
        pm += 1
        if b1 != b2:
            curr_diff = pm
            xor = bytearray()
            # extend the hunk until the files agree again (or input ends)
            while b1 != b2:
                xor.append(b1 ^ b2)
                if pm == len(modified_file):
                    break
                b1 = original_file[po] if po < len(original_file) else 0
                b2 = modified_file[pm]
                po += 1
                pm += 1
            diff_list.append((curr_diff - last_diff, xor))
            last_diff = curr_diff + len(xor) + 1  # +1 for the hunk terminator
    buffer = bytearray()
    buffer += b"UPS1"
    buffer += encode_vlv(len(original_file))
    buffer += encode_vlv(len(modified_file))
    for offset, xor in diff_list:
        buffer += (encode_vlv(offset))
        buffer += xor
        buffer += b"\0"
    # footer: CRC32 of the source, of the target, and of the patch itself
    buffer += zlib.crc32(original_file).to_bytes(4, byteorder="little")
    buffer += zlib.crc32(modified_file).to_bytes(4, byteorder="little")
    buffer += zlib.crc32(buffer).to_bytes(4, byteorder="little")
    target_file.write(buffer)
    target_file.close()
def help():
    """Print CLI usage.  (Shadows the `help` builtin; name kept for CLI compat.)"""
    usage = (
        "Commands:",
        "ups.py patch unmodified_rom patch_file target",
        "ups.py make unmodified_rom modified_rom target",
    )
    for line in usage:
        print(line)
def main():
    """Dispatch the CLI: both commands need 5 argv entries (script name,
    command, and three file paths)."""
    # BUG FIX: the original accepted len(sys.argv) == 4 for "patch" and then
    # crashed with IndexError on sys.argv[4]; both commands need >= 5 entries.
    if len(sys.argv) < 5:
        help()
    elif sys.argv[1] == "patch":
        patch_ups(sys.argv[2], sys.argv[3], sys.argv[4])
    elif sys.argv[1] == "make":
        make_ups(sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        help()


if __name__ == '__main__':
    main()
|
import dataset
import torch
import pickle
import pytorch_lightning as pt
from pytorch_lightning.trainer.supporters import CombinedLoader
import model
class MyDataModule(pt.LightningDataModule):
    """LightningDataModule pairing the triplet dataset with the "big" dataset.

    Every train/val batch is a dict {"triplet": ..., "big": ...} produced by a
    CombinedLoader in max_size_cycle mode (the shorter loader is cycled).
    """

    def __init__(self, vocab, vocab_size, csv_path, batch_size, batch_size_val, **kwargs):
        super().__init__()
        self.vocab = vocab
        self.vocab_size = vocab_size
        self.csv_path = csv_path
        self.batch_size = batch_size
        self.batch_size_val = batch_size_val
        # extra kwargs are forwarded to every DataLoader (num_workers, etc.)
        self.kwargs = kwargs

    def setup(self, stage):
        # NOTE(review): the validation "big" dataset is built from the 'test'
        # split - confirm this is intentional.
        self.triplet_dataset_train = dataset.AmazonDataset(self.csv_path, self.vocab, self.vocab_size, 'train')
        self.big_dataset_train = dataset.BigDataset(self.vocab, self.vocab_size, 'train')
        self.triplet_dataset_val = dataset.AmazonDataset(self.csv_path, self.vocab, self.vocab_size, 'val')
        self.big_dataset_val = dataset.BigDataset(self.vocab, self.vocab_size, 'test')

    def train_dataloader(self):
        triplet_train = torch.utils.data.DataLoader(self.triplet_dataset_train, shuffle=True,
                                                    batch_size=self.batch_size, **self.kwargs)
        # the "big" loader uses a fixed batch size of 2000
        big_train = torch.utils.data.DataLoader(self.big_dataset_train, shuffle=True,
                                                batch_size=2000, **self.kwargs)
        loaders = {"triplet": triplet_train, "big": big_train}
        combined_loaders = CombinedLoader(loaders, "max_size_cycle")
        return combined_loaders

    def val_dataloader(self):
        triplet_val = torch.utils.data.DataLoader(self.triplet_dataset_val, shuffle=False,
                                                  batch_size=self.batch_size_val, **self.kwargs)
        big_val = torch.utils.data.DataLoader(self.big_dataset_val, shuffle=False,
                                              batch_size=2000, **self.kwargs)
        loaders = {"triplet": triplet_val, "big": big_val}
        combined_loaders = CombinedLoader(loaders, "max_size_cycle")
        return combined_loaders
class MyValDataModule(pt.LightningDataModule):
    """Validation-only variant of MyDataModule (no training loaders).

    NOTE(review): `batch_size` is accepted but never stored - confirm it can
    be dropped from the signature (kept now for call-site compatibility).
    """

    def __init__(self, vocab, vocab_size, csv_path, batch_size, batch_size_val, **kwargs):
        super().__init__()
        self.vocab = vocab
        self.vocab_size = vocab_size
        self.csv_path = csv_path
        self.batch_size_val = batch_size_val
        self.kwargs = kwargs

    def setup(self, stage):
        # as in MyDataModule: the val "big" dataset uses the 'test' split
        self.triplet_dataset_val = dataset.AmazonDataset(self.csv_path, self.vocab, self.vocab_size, 'val')
        self.big_dataset_val = dataset.BigDataset(self.vocab, self.vocab_size, 'test')

    def val_dataloader(self):
        triplet_val = torch.utils.data.DataLoader(self.triplet_dataset_val, shuffle=False,
                                                  batch_size=self.batch_size_val, **self.kwargs)
        big_val = torch.utils.data.DataLoader(self.big_dataset_val, shuffle=False,
                                              batch_size=2000, **self.kwargs)
        loaders = {"triplet": triplet_val, "big": big_val}
        combined_loaders = CombinedLoader(loaders, "max_size_cycle")
        return combined_loaders
def get_data_args(parser):
    """Register the data/IO command-line options on *parser* and return it."""
    int_options = (
        ('--batch_size', 30, 'batch size of the training'),
        ('--batch_size_val', 100, 'batch size of the validation sets'),
    )
    str_options = (
        ('--csv_path', './Data/triplet_data.csv', 'path of the train/val data csv'),
        ('--dict_path', './Data/etm_amazonreviews_K_30_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0', 'path of the pretrained ETM'),
        ('--vocab_path', './Data/vocab.pkl', 'path of the overall corpus vocab, that the ETM was trained on'),
        ('--emb_path', './Data/embeddings.emb', 'path of the pretrained ETM embeddings'),
        ('--emb_np_path', './Data/embeddings.npz.npy', 'path of the preloaded pretrained ETM embeddings as a numpy file'),
    )
    for flag, default, description in int_options:
        parser.add_argument(flag, type=int, default=default, help=description)
    for flag, default, description in str_options:
        parser.add_argument(flag, type=str, default=default, help=description)
    return parser
def get_vocab(vocab_path):
    """Load the pickled corpus vocabulary.

    Returns:
        vocab (list): list of words in the corpus
        vocab_size (int): size of the vocab of the corpus
    """
    with open(vocab_path, 'rb') as handle:
        vocab = pickle.load(handle)
    return vocab, len(vocab)
def generate_embeddings(vocab, vocab_size, device, save_path):
    """Thin wrapper delegating to model.ETM.generate_embeddings.

    Args:
        vocab (list): list of words in the corpus
        vocab_size (int): size of the vocab of the corpus
        device (torch.device): device on which to perform computation
        save_path (str): path where the embedding numpy array gets saved
    """
    model.ETM.generate_embeddings(vocab, vocab_size, device, save_path)
def load_embeddings(emb_path, device):
    """Thin wrapper delegating to model.ETM.load_embeddings.

    Args:
        emb_path (str): path of the embeddings numpy file
        device (torch.device): device on which to perform computation
    Return:
        (tensor) pretrained embeddings
    """
    return model.ETM.load_embeddings(emb_path, device)
def get_test_loader(vocab, vocab_size, csv_path, batch_size_test, **kwargs):
    """Build the combined test-time loader (min_size: stops with the shorter loader).

    NOTE(review): the triplet dataset is built from the 'val' split and with
    shuffle=True even though this is the test loader - confirm intent.
    """
    triplet_dataset_test = dataset.AmazonDataset(csv_path, vocab, vocab_size, 'val')
    big_dataset_test = dataset.BigDataset(vocab, vocab_size, 'test')
    triplet_test = torch.utils.data.DataLoader(triplet_dataset_test, shuffle=True,
                                               batch_size=batch_size_test, **kwargs)
    big_test = torch.utils.data.DataLoader(big_dataset_test, shuffle=False,
                                           batch_size=2000, **kwargs)
    loaders = {"triplet": triplet_test, "big": big_test}
    combined_loaders = CombinedLoader(loaders, "min_size")
    return combined_loaders
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float64
class JointPub(object):
    """Publishes Float64 position commands to the eight SpotMini joint
    position controllers (upper-leg and lower-leg motors M1-M4)."""

    def __init__(self):
        self.publishers_array = []
        self._pub_upperlegM1_joint_position = rospy.Publisher(
            '/spotmini/head_upperlegM1_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_upperlegM2_joint_position = rospy.Publisher(
            '/spotmini/head_upperlegM2_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_upperlegM3_joint_position = rospy.Publisher(
            '/spotmini/head_upperlegM3_joint_position_controller/command',
            Float64,
            queue_size=1)
        # BUG FIX: the M4 publishers previously reused the M3 topics
        # (copy-paste), so the M4 joints never received their own commands
        # and M3 got duplicates.
        self._pub_upperlegM4_joint_position = rospy.Publisher(
            '/spotmini/head_upperlegM4_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_lowerlegM1_joint_position = rospy.Publisher(
            '/spotmini/upperlegM1_lowerlegM1_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_lowerlegM2_joint_position = rospy.Publisher(
            '/spotmini/upperlegM2_lowerlegM2_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_lowerlegM3_joint_position = rospy.Publisher(
            '/spotmini/upperlegM3_lowerlegM3_joint_position_controller/command',
            Float64,
            queue_size=1)
        self._pub_lowerlegM4_joint_position = rospy.Publisher(
            '/spotmini/upperlegM4_lowerlegM4_joint_position_controller/command',
            Float64,
            queue_size=1)
        self.publishers_array.append(self._pub_upperlegM1_joint_position)
        self.publishers_array.append(self._pub_upperlegM2_joint_position)
        self.publishers_array.append(self._pub_upperlegM3_joint_position)
        self.publishers_array.append(self._pub_upperlegM4_joint_position)
        self.publishers_array.append(self._pub_lowerlegM1_joint_position)
        self.publishers_array.append(self._pub_lowerlegM2_joint_position)
        self.publishers_array.append(self._pub_lowerlegM3_joint_position)
        self.publishers_array.append(self._pub_lowerlegM4_joint_position)

    def move_joints(self, joints_array):
        """Publish joints_array[i] to the i-th controller (order: upper M1-M4,
        then lower M1-M4)."""
        for i, publisher_object in enumerate(self.publishers_array):
            joint_value = Float64()
            joint_value.data = joints_array[i]
            rospy.loginfo(str(joint_value))
            publisher_object.publish(joint_value)

    def start_loop(self, rate_value = 2.0):
        """Alternate between two hard-coded gait poses at rate_value Hz until
        ROS shutdown."""
        rospy.loginfo("Start Loop")
        pos1 = [-0.3,-0.0,-0.3, 0.0,-0.2,-0.0, -0.2,0.0]
        pos2 = [0.0,-0.3,-0.0, -0.3,0.0, -0.2, -0.0,-0.2]
        position = "pos1"
        rate = rospy.Rate(rate_value)
        while not rospy.is_shutdown():
            if position == "pos1":
                self.move_joints(pos1)
                position = "pos2"
            else:
                self.move_joints(pos2)
                position = "pos1"
            rate.sleep()
if __name__=="__main__":
    # Standalone smoke test: publish alternating gait poses at 0.5 Hz.
    rospy.init_node('joint_publisher_node')
    joint_publisher = JointPub()
    rate_value = 0.5
    joint_publisher.start_loop(rate_value)
|
# Packaging config for the `front` CLI client.  Each console_scripts entry
# maps a shell command to a Click command object defined in endpoints.py.
from setuptools import setup

setup(
    name = 'front',
    version = '0.1',
    install_requires = ['Click',],
    py_modules = ['endpoints'],
    entry_points= '''
        [console_scripts]
        hello=endpoints:hello
        signup=endpoints:SignUp
        login=endpoints:Login
        refreshtoken=endpoints:RefreshToken
        gettasks=endpoints:GetAllTasks
        gettask=endpoints:GetOneTask
        createtask=endpoints:CreateATask
        deletetask=endpoints:DeleteOneTask
        deletetasks=endpoints:DeleteAllTasks
        _help=endpoints:help
    '''
)
import discord
from discord.ext import commands
from weather import Weather, Unit #weather-api
from weatheralerts import WeatherAlerts #weatheralerts
class StormCog:
    '''For Important Weather Alert Parsing'''

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['weather'], pass_context=True)
    async def current(self, ctx, *, givenLocation : str = None):
        """Gets the current weather of a specified location (defaults to Des Moines, Iowa)."""
        nws = Weather(unit=Unit.FAHRENHEIT)
        # BUG FIX: the default branch referenced `result` before assignment
        # (UnboundLocalError).  Both branches built the same reply, so the
        # default location is folded into a single code path.
        if givenLocation is None:
            givenLocation = 'des moines, iowa'
        location = nws.lookup_by_location(givenLocation)
        condition = location.condition
        # description starts with a fixed 7-char prefix that is stripped here
        result = (location.description[7:] + '\n' +
                  'Currently:\t' + condition.text + ', ' + condition.temp + "°F")
        await self.bot.say(result)

    @commands.command(pass_context=True)
    async def severe(self, ctx):
        """Gets any Severe Weather Reports for Ames and Des Moines."""
        result = "Ames:\n"
        nws = WeatherAlerts(samecodes='019169')  # SAME code for Story County (Ames)
        for alert in nws.alerts:
            result = result + alert.title + "\n"
        result = result + "\nDes Moines:\n"
        nws = WeatherAlerts(samecodes='019153')  # SAME code for Polk County (Des Moines)
        for alert in nws.alerts:
            result = result + alert.title + "\n"
        await self.bot.say(result)
# discord.py extension hook: called by bot.load_extension to register the cog.
def setup(bot):
    bot.add_cog(StormCog(bot))
|
#!/usr/bin/env python
from time import sleep
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.mei_message import *
from pymodbus.exceptions import *
from ModbusSocketFramerHMAC import ModbusSocketFramerHMAC as ModbusFramer
# debugging / logging setup
import logging
logging.basicConfig()
log = logging.getLogger()
#log.setLevel(logging.DEBUG)

IP_SLAVE = "localhost"  # address of the Modbus slave
COIL_PUERTA = 0         # coil index that drives the door ("puerta")
MAX_TEMP = 40           # open the door above this temperature
MIN_TEMP = 35           # close the door below this temperature
def cerrarPuerta(client):
    """Close the door: write False to the door coil.  Raises on I/O failure."""
    log.debug("Cerrando puerta")
    values = client.write_coil(COIL_PUERTA, False)
    # pymodbus returns a ModbusIOException object (instead of raising) on I/O loss
    if type(values) is ModbusIOException:
        raise Exception('Disconnected')
    return 0
def abrirPuerta(client):
    """Open the door: write True to the door coil.  Raises on I/O failure."""
    log.debug("Abriendo puerta")
    values = client.write_coil(COIL_PUERTA, True)
    if type(values) is ModbusIOException:
        raise Exception('Disconnected')
    return 0
def leerTemperatura(client):
    """Read the temperature from holding registers 0-1.

    Register 1 holds humidity, which is currently discarded.
    """
    log.debug("Leyendo temperatura")
    values = client.read_holding_registers(address=0x00, count=2)
    if type(values) is ModbusIOException:
        raise Exception('Disconnected')
    temp, hum = values.registers
    return temp
def puertaAbierta(client):
    """Return True if the door coil is set (door open)."""
    log.debug("Leyendo estado de la puerta")
    return client.read_coils(COIL_PUERTA).getBit(0)
def main():
    # Connect to the slave, then poll forever: open the door above MAX_TEMP,
    # close it below MIN_TEMP.  (Python 2 script.)
    log.debug("Conectando al esclavo")
    client = ModbusTcpClient(IP_SLAVE, framer=ModbusFramer)
    connected = client.connect()
    while not connected:
        print "Connection failed"
        sleep(5)
        connected = client.connect()
    log.debug("Conexion establecidad, leyendo informacion del dispositivo")
    rq = ReadDeviceInformationRequest()
    rr = client.execute(rq)
    print "Conectado a dispositivo " + rr.information[0]
    while True:
        try:
            temp = leerTemperatura(client)
            estadoPuerta = puertaAbierta(client)
            print "Temperatura: " , temp, "Puerta abierta? ", estadoPuerta
            if temp > MAX_TEMP:
                abrirPuerta(client)
            if temp < MIN_TEMP:
                cerrarPuerta(client)
        except Exception as e:
            # NOTE(review): the reconnect drops the HMAC framer used above -
            # confirm whether framer=ModbusFramer should also be passed here.
            print "reconectando"
            client = ModbusTcpClient(IP_SLAVE)
        sleep(2)


if __name__ == "__main__":
    main()
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# github: https://github.com/houm01
# blog: https://houm01.com
from django.shortcuts import render
from datetime import datetime
from data import courses_db
from houm01_demo.models import Courses
import json
def readme(request):
    """Render README.html with course/teacher data from the Courses table."""
    # %w -> weekday number (0=Sunday); exposed to the template via locals()
    mytime = int(datetime.now().strftime("%w"))
    network_python = '介绍'
    readme = 'readme_body'
    # courses_list = ['安全','Python','DC']
    courses_list = []
    teacher_list = []
    courses_result = Courses.objects.all()
    for x in courses_result:
        courses_list.append(x.courses_name)
        teacher_list.append({'courses':x.courses_name,'teacher': x.courses_teacher})
    # teacher_list = [{'courses':'安全','teacher':'现任明教教主'},
    #                 {'courses':'安全','teacher':'马老师'},
    #                 {'courses':'数据中心','teacher':'安德'},
    #                 {'courses':'教主VIP','teacher':'现任明教教主'},]
    # locals() passes every local variable into the template context
    return render(request,'README.html',locals())
def sec_course(request):
    """Render course.html for the security ('安全') course."""
    c = Courses.objects.get(courses_name='安全')
    # Keys are the Chinese display labels consumed directly by the template.
    courses_sec = {'方向': c.courses_name,
                   '摘要': c.courses_summary,
                   '授课老师': c.courses_teacher,
                   '授课方式': c.courses_method,
                   '课程特色': c.courses_characteristic,
                   '实验环境': c.courses_provide_lab,}
    # '具体课程': json.loads(c.courses_detail)}
    return render(request,'course.html',{'courseinfo': courses_sec})
def dc_course(request):
    """Render course.html for the data-center ('数据中心') course."""
    c = Courses.objects.get(courses_name='数据中心')
    # Keys are the Chinese display labels consumed directly by the template.
    courses_dc = {'方向': c.courses_name,
                  '摘要': c.courses_summary,
                  '授课老师': c.courses_teacher,
                  '授课方式': c.courses_method,
                  '课程特色': c.courses_characteristic,
                  '实验环境': c.courses_provide_lab,
                  }
    # '具体课程': json.loads(c.courses_detail)}
    return render(request, 'course.html', {'courseinfo': courses_dc})
|
# side = 7
# i = 4
# j = 7
# while side <= 7:
# for i in range(0, 4):
# for j in range(0, 7):
# if((j == 0 or j == 6) and i <= 2) or ((j == 1 or j == 5) and i <= 1) or ((j == 2 or j == 4) and i <= 0):
# print(" " ,end= " ")
# else:
# print("*", end= " ")
# print()
# break
# Print a right-aligned triangle of stars of the requested height.
num = int(input("Choose a number for the height of your triangle: "))
# IDIOM FIX: the original wrapped this loop in `while side <= 7: ... break`,
# which always executed exactly once; the wrapper and `side` did nothing.
for j in range(1, num + 1):
    print(" " * (num - j) + "* " * j)
import json
import requests
from os import path
from logger import Logger
class Puller:
    """ Class for pulling data from GitHub via API """

    api_url = 'https://api.github.com/'

    def __init__(self, config):
        self.auth_params = config[u'auth']
        self.rate_limit = 5000  # rate limit remaining
        self.rate_reset = 0  # timestamp of rate reset
        self.logger = Logger(__file__)

    def get_rate_limit(self):
        return self.rate_limit

    def get_rate_reset(self):
        return int(self.rate_reset)

    def request(self, method, get_params=None):
        """GET api_url + method with auth + extra params; return the decoded JSON."""
        # FIX: `dict(a.items() + b.items())` is Python-2-only; copy/update
        # works on both versions and also avoids the shared mutable default.
        params = dict(self.auth_params)
        params.update(get_params or {})
        response = requests.get(Puller.api_url + method, params=params)
        if response.status_code != 200:
            pass  # TODO: Throw an exception
        self.rate_limit = int(response.headers['x-ratelimit-remaining'])
        self.rate_reset = response.headers['x-ratelimit-reset']
        self.logger.info("Get %s; rate limit: %d" % (method, self.rate_limit))
        return json.loads(response.text)

    def pull_repositories(self, since_repo_id):
        """Aggregate per-language byte counts for repositories listed after
        `since_repo_id`, split into 'new' and 'fork' buckets."""
        repositories = self.request('repositories', {'since': since_repo_id})
        repositories_langs = {
            'new': {},
            'fork': {}
        }
        for repo in repositories:
            # BUG FIX: `repositories` holds JSON dicts, so the original
            # attribute access (repo.fork / repo.full_name) raised
            # AttributeError; use dict indexing.
            # NOTE(review): the list endpoint may omit u'parent'; confirm
            # before relying on the fork branch.
            is_fork = repo[u'fork']
            if is_fork:
                repo_full_name = repo[u'parent'][u'full_name']
            else:
                repo_full_name = repo[u'full_name']
            method = '/repos/%s/languages' % repo_full_name
            languages = self.request(method)
            if languages:
                repo_type = 'fork' if is_fork else 'new'
                stats = repositories_langs[repo_type]
                for lang_name in languages:
                    if lang_name not in stats:
                        stats[lang_name] = 0
                    stats[lang_name] += languages[lang_name]
        return repositories_langs

    def get_commits_stat(self):
        """Collect per-commit file-extension counts from recent public PushEvents.

        Returns a list of {"cid": sha, "data": {ext: count}} dicts, or False
        when no push events are available.
        """
        events = self.request('events')
        push_events = [event for event in events if event[u'type'] == 'PushEvent']
        if len(push_events) == 0:
            return False
        # fetch commits
        commits = []
        commits_stat = []
        for event in push_events:
            commits += event[u'payload'][u'commits']
        commits_urls = [commit[u'url'] for commit in commits]
        for commit_url in commits_urls:
            files_stat = {}
            commit_id = commit_url.split('/')[-1]
            commit = requests.get(commit_url, params=self.auth_params)
            commit_data = json.loads(commit.text)
            # update rate limit and reset time
            self.rate_limit -= 1
            self.rate_reset = commit.headers['x-ratelimit-reset']
            if u'files' in commit_data:
                file_names = [file_obj[u'filename'] for file_obj in commit_data[u'files']]
                file_types = [path.splitext(filename)[1] for filename in file_names]
                for file_type in file_types:
                    # BUG FIX: the original membership test used the dotted
                    # extension while the stored key was the undotted one, so
                    # the guard never matched the stored key (the re-assigned
                    # value was identical, but the inconsistency was a trap).
                    if file_type[1:] not in files_stat:
                        files_stat[file_type[1:]] = file_types.count(file_type)
            commits_stat.append({"cid": commit_id, "data": files_stat})
        self.logger.info("returning %d commits; rate limit: %d" % (len(commits_stat), self.rate_limit))
        return commits_stat
from django.contrib import admin
from business.models import Business, Category, Neighborhood
class BusinessAdmin(admin.ModelAdmin):
    """Default admin configuration for the Business model."""


class CategoryAdmin(admin.ModelAdmin):
    """Default admin configuration for the Category model."""


class NeighborhoodAdmin(admin.ModelAdmin):
    """Default admin configuration for the Neighborhood model."""


# Register each model with its (currently default) admin class.
for model, model_admin in (
    (Business, BusinessAdmin),
    (Category, CategoryAdmin),
    (Neighborhood, NeighborhoodAdmin),
):
    admin.site.register(model, model_admin)
|
# -*- coding: utf-8 -*-
# @Author: Dang Kai
# @Email : 1370465454@qq.com
# @Date: 2019-04-28 18:12:53
# @Last Modified time: 2019-04-28 18:12:53
from flask import render_template,request,flash,session,url_for
from Api_Manager import server
from Api_Manager.controller.login_form import LoginForm
from Api_Manager.model.user_model import User
@server.route('/login',methods=['GET','POST'])
def login():
    """Render the login page and authenticate submitted credentials."""
    form=LoginForm()
    if form.validate_on_submit():
        # Get the username/password submitted with the form.
        # username=form.username.data
        # Look the user up in the database by username.
        user=User.query.filter_by(username=form.username.data).first()
        # NOTE(review): passwords are compared in plain text here -- consider
        # storing and checking a password hash instead.
        if user is not None and user.password == form.password.data:
            flash('登录成功')
            return render_template('/main/success.html',title="首页")
    return render_template('/main/login.html',title="登录",form=form)
|
import numpy as np
import pandas as pd
import os
from scipy import optimize
from scipy.special import expit
class main:
class NN:
def __init__(self, hidden_layer_count, hidden_layer_size, lambda_val):
    """Feed-forward neural network trained by backpropagation.

    The network has `hidden_layer_count` hidden layers of
    `hidden_layer_size` units each (plus the input and output layers).
    Forward propagation computes the layer activations, the regularized
    logistic cost penalizes both misclassification and large weights
    (scaled by `lambda_val`), and backpropagation distributes the output
    error over the inner nodes to produce the gradient used by the
    optimizer.
    """
    self.hlc = hidden_layer_count
    self.hls = hidden_layer_size
    # Total layer count: hidden layers plus input and output layers.
    self.L = self.hlc + 2
    self.lambda_val = lambda_val
def cost_func(self, theta):
    """Regularized logistic (cross-entropy) cost of the network.

    theta: all layer weight matrices flattened into one 1-D vector.
    Returns the scalar cost over the training set stored in self.X/self.y.
    """
    m = self.m
    X, y = self.X, self.y
    origin, layer1s, layer2s = 0, (self.ils+1), (self.hls)
    layer = X.copy()
    for i in range(1, self.L):  # forward prop
        layer_wb = np.asarray([np.ones(len(layer)), *layer.T]).T  # layer with bias unit
        theta_curr = theta[origin: (origin + layer1s*layer2s)].reshape(layer1s, layer2s)  # current layer theta
        # BUG FIX: advance the unrolled-theta offset cumulatively.  The
        # original `origin = layer1s*layer2s` only worked for a single
        # hidden layer; with more layers it re-read the wrong slice.
        origin += layer1s*layer2s
        layer = self.hx(layer_wb, theta_curr)
        layer1s = self.hls + 1
        layer2s = self.hls if i < self.L - 2 else self.ols
    cost = (-1/m)*( y*np.log(layer) + (1-y)*np.log(1-layer) ).sum()
    # NOTE(review): the bias-unit weights are included in this penalty;
    # conventionally they are excluded -- confirm intended behavior.
    reg_term = (self.lambda_val/(2*m))*((theta**2).sum())
    return cost + reg_term
def hx(self, X, theta):
    """Sigmoid activation of the affine map X @ theta.

    X: (m, n+1) matrix of m examples with a bias column already prepended.
    theta: (n+1, units) weight matrix (the +1 row is the bias weight).
    Returns an (m, units) activation matrix.
    """
    z = X @ theta
    return expit(z)
def grad(self, theta):
    """Gradient of the regularized cost w.r.t. the unrolled weights *theta*.

    Forward-propagates to collect layer activations, backpropagates deltas,
    then accumulates every layer's gradient into one raveled vector with the
    same layout as *theta* (the shape fmin_cg expects).
    """
    X = self.X
    y = self.y
    # NOTE(review): `origin = layer1s*layer2s` inside the loop below does not
    # accumulate, so the theta slicing is only correct when there is exactly
    # one hidden layer (the configuration used by run()) -- confirm before
    # using deeper networks.
    origin, layer1s, layer2s = 0, (self.ils+1), (self.hls)
    layer_ls = []  # all unbiased layers in increasing order
    theta_ls = []  # all thetas in increasing order, theta1 .. thetaL-1
    biased_ls = []  # all bias-augmented layers l1 .. lL-1 (not lL)
    layer = X.copy()
    layer_ls.append(layer.copy())
    for i in range(1, self.L):  # forward prop
        layer_wb = np.asarray([np.ones(len(layer)), *layer.T]).T  # layer with bias unit
        biased_ls.append(layer_wb.copy())
        theta_curr = theta[origin: (origin + layer1s*layer2s)].reshape(layer1s,layer2s)  # current layer theta
        theta_ls.append(theta_curr.copy())
        origin = layer1s*layer2s
        layer = self.hx(layer_wb, theta_curr)
        layer_ls.append(layer.copy())
        layer1s = self.hls + 1
        layer2s = self.hls if i < self.L - 2 else self.ols
    layer_idx = -1
    bias_idx = -1
    # Output-layer delta: activation minus target.
    del_last = layer_ls[layer_idx] - y
    del_ls = []  # deltas in DECREASING layer order (last layer at index 0)
    del_ls.append(del_last.copy())
    # Backpropagate: del_l = (del_{l+1} @ theta_l.T) .* a_l .* (1 - a_l),
    # where a_l .* (1 - a_l) is the sigmoid gradient.  We iterate self.L - 2
    # times: the output layer is already done and the input layer has no delta.
    for i in range(1, self.L-1):  # backward prop
        if i>1:
            # For layers past the first back-step, strip the bias column of
            # the previous delta: no earlier layer ever feeds a bias node,
            # so the bias "error" must not flow further back.
            del_ = (del_ls[-1][:,1:]@theta_ls[bias_idx].T)*biased_ls[bias_idx]*(1-biased_ls[bias_idx])
        else: del_ = (del_ls[-1]@theta_ls[bias_idx].T)*biased_ls[bias_idx]*(1-biased_ls[bias_idx])
        # (The output-layer delta has shape m x output_nodes and carries no
        # bias column, hence the i == 1 special case above.)
        del_ls.append(del_.copy())
        bias_idx -= 1
    raveled_accum = []
    del_idx = -1
    biased_idx = 0
    for i in range(1, self.L-1):  # theta gradients; last theta handled after the loop
        Theta_grad = (1/self.m)*(del_ls[del_idx][:,1:].T@biased_ls[biased_idx]).T
        # NOTE(review): this adds the *sum* of the regularization terms (a
        # scalar) to every entry of the gradient; conventional L2
        # regularization adds (lambda/m)*theta elementwise -- confirm.
        theta_grad_reg = (self.lambda_val/self.m)*(theta_ls[biased_idx][:,1:]).sum()
        Theta_grad+= theta_grad_reg
        raveled_accum.extend(Theta_grad.ravel())
        del_idx-=1
        biased_idx += 1
    Thetalast_grad = (1/self.m)*(del_ls[0].T@biased_ls[-1]).T  # no bias unit in last layer
    thetalast_grad_reg = (self.lambda_val/self.m)*(theta_ls[-1][:,1:]).sum()
    Thetalast_grad+= thetalast_grad_reg
    raveled_accum.extend(Thetalast_grad.ravel())
    print(".",end="",flush=True)  # progress tick per gradient evaluation
    return np.asarray(raveled_accum).ravel()
def fit(self, X, y):
    """Train the network on X (m x n) and one-hot targets y via fmin_cg.

    Stores the optimized, unrolled weight vector in self.Theta.
    """
    self.m, self.n = X.shape
    self.ils = self.n        # input layer size
    self.ols = len(y[0])     # output layer size
    self.X = X
    self.y = y
    # (removed a no-op `self.hls = self.hls` and dead commented-out init code)
    # Random initial weights for every layer, unrolled into one vector:
    # (ils+1)*hls for the first matrix, (hls+1)*hls for each additional
    # hidden layer, and (hls+1)*ols for the output matrix.
    n_weights = ((self.ils + 1) * self.hls
                 + (self.L - 3) * (self.hls + 1) * self.hls
                 + (self.hls + 1) * self.ols)
    # fmin_cg needs the cost function, initial thetas and the gradient;
    # theta is passed to both callbacks automatically.
    self.Theta = optimize.fmin_cg(self.cost_func,
                                  np.random.rand(n_weights),
                                  self.grad,
                                  disp=0)
    print('\nTheta Optimized', self.Theta)
def predict(self, X):
    """Forward-propagate X through the trained network.

    Returns the (m, ols) output-layer activation matrix.
    """
    theta = self.Theta
    origin, layer1s, layer2s = 0, (self.ils+1), (self.hls)
    layer = X.copy()
    for i in range(1, self.L):
        layer_wb = np.asarray([np.ones(len(layer)), *layer.T]).T  # layer with bias unit
        theta_curr = theta[origin: (origin + layer1s*layer2s)].reshape(layer1s, layer2s)  # current layer theta
        # BUG FIX: cumulative offset (the original reset it each iteration,
        # breaking networks with more than one hidden layer).
        origin += layer1s*layer2s
        layer = self.hx(layer_wb, theta_curr)
        layer1s = self.hls + 1
        layer2s = self.hls if i < self.L - 2 else self.ols
    return layer
"""
def predict1(self, X):
Theta1= self.Theta[:(self.n+1)*(self.hls)].reshape(self.n+1,self.hls)
Theta2= self.Theta[(self.n+1)*(self.hls):].reshape(self.hls + 1, len(self.y[0]))
X = np.asarray([np.ones(len(X)), *X.T]).T
hx2=self.hx(X, Theta1)#shape is (m, hls)'s corresponding, m here test's examples' count
hx2_wb = np.asarray([np.ones(len(hx2)), *hx2.T]).T
hx3=self.hx(hx2_wb,Theta2)
prediction = hx3
return prediction """
def make_vects(self, label_list):
"""
Takes in the classification label_list, and assigns a 0 1 vector to it.
Ex: If its a binary classification with label like anything like A and B, then
vectors alloted or mapped for A is [1, 0] and for B is [0, 1]
"""
self.uniqs = list(set(label_list))
l_uniq = len(self.uniqs)
l_ll = len(label_list)
vects = np.zeros((l_ll, l_uniq))
for i in range(l_ll):
vects[i][self.uniqs.index(label_list[i])] = 1
return np.asarray(vects)
def run(self, input_file):
    """End-to-end driver.

    Loads *input_file* (CSV, last column = class label), splits it
    60/20/20 into train/cv/test, selects the regularization strength on
    the cv split, then reports accuracy on the held-out test split.
    """
    if not os.path.exists(input_file):
        # (the original followed this raise with an unreachable exit())
        raise FileNotFoundError(f"file {input_file} not found")
    # Shuffle once so the three splits are random samples.
    df = pd.read_csv(input_file)
    df = df.sample(frac=1).reset_index(drop=True)
    n = len(df)
    df_train = df.iloc[: 6 * n // 10].copy()
    df_cv = df.iloc[6 * n // 10: 8 * n // 10].copy()
    df_test = df.iloc[8 * n // 10:].copy()
    X_train, y_train = df_train[df_train.columns[:-1]].to_numpy(), df_train[df_train.columns[-1]].to_numpy()
    X_cv, y_cv = df_cv[df_cv.columns[:-1]].to_numpy(), df_cv[df_cv.columns[-1]].to_numpy()
    X_test, y_test = df_test[df_test.columns[:-1]].to_numpy(), df_test[df_test.columns[-1]].to_numpy()
    # Network hyperparameters; lambda is chosen by cross-validation below.
    hidden_layer_count = 1
    hidden_layer_size = 25
    lambda_val_l = [1e-3, 5e-3, 1e-2, 5e-2, 1e-1, 5e-1]
    combo_lambda_score = []
    for lambda_val in lambda_val_l:
        print("\nCross Validating at lambda: {}".format(lambda_val))
        model = self.NN(hidden_layer_count, hidden_layer_size, lambda_val)
        model.fit(X_train, self.make_vects(y_train))
        prediction_vects = model.predict(X_cv)
        prediction = pd.Series(
            np.argmax(prediction_vects, axis=1).ravel()).apply(lambda x: self.uniqs[x]).to_numpy()
        df_res = pd.DataFrame({"p": prediction, "y": y_cv})
        count_success = (df_res.p == df_res.y).sum()
        combo_lambda_score.append((count_success / len(df_res), lambda_val, model))
    # Sort on (score, lambda) only -- never compare the model objects.
    cls_sorted = sorted(combo_lambda_score, key=lambda t: t[:2], reverse=True)
    score_max, lambda_val_final, model = cls_sorted[0]
    print("Best lambda found: {}; score: {}".format(lambda_val_final, score_max))
    print("Model Trained.", "Testing...", sep="\n")
    # Evaluate the selected model on the untouched test split.
    prediction_vects = model.predict(X_test)
    print("-" * 20)
    prediction = pd.Series(np.argmax(prediction_vects, axis=1).ravel()).apply(lambda x: self.uniqs[x]).to_numpy()
    df_res = pd.DataFrame({"p": prediction, "y": y_test})
    count_success = (df_res.p == df_res.y).sum()
    count_failure = len(df_res) - count_success
    print(df_res.head())
    print(f" success: {count_success}, failure: {count_failure}, odds of win: {count_success/len(df_res)}")
# deciding the size of hidden layers, deciding lambda, and such values has huge effect on training the stuff.
# Do some analysis on how you could have found optimum value of such consts in this example.
# even the sample size if affecting the model on large scale even when negligible changes are taken there like
# 9/10 -> 8/10.
# Entry point: train and evaluate on the bundled digits dataset.
if __name__ == "__main__":
    main().run("digits.csv")
"""
For lambda selection, we do as i did up there.
For learning curves:
When you are computing the training set error,
make sure you compute it on the training subset (i.e., X(1:n,:) and y(1:n))
(instead of the entire training set).However, for the cross validation error,
you should compute it over the entire cross validation set.
So, plotting the errors jtrain and jcv (j is the cost function) against a growing training-set size n lets us decide whether collecting more data will help: it will if jcv and jtrain are trending toward the desired error as n grows; if they both converge to a large error instead, more data collection should not be the focus.
When the training error is low but there is a gap between the training and cross-validation errors, this indicates a high
variance problem (overfitting).
When both the train error and cross validation error are high when the number of training examples is increased, indicating high bias(underfitting).
This is super super easy to remember since when there'll be overfitting, jtrain will be seriously low and jcv won't thus gap, and lowness of jtrain. On the other hand, when underfitting, both jtrain and jcv are big.
"""
|
import pandas as pd
def get_monthly_pred(city, BASE_DIR):
    """Load the monthly LSTM AQI predictions for *city*.

    Reads BASE_DIR/app_aerify/monthly_predicted_data/pred_<City>.csv and
    returns {"aqi": [...], "date": [...]} built from the file's "lstm" and
    "date" columns.

    Raises ValueError for an unsupported city name (the original 40-branch
    if/elif chain crashed with UnboundLocalError instead).
    """
    # City -> prediction-file stem.  Most cities map to themselves; the
    # exceptions below reproduce the original chain exactly.
    special = {
        "Asanol": "Asansol",  # file uses the conventional spelling
        "Mumbai": "NaviMumbai",
        # Copy-paste artifact preserved from the original: Thiruvanthapuram
        # read the Visakhapatnam file.  NOTE(review): fix once a dedicated
        # prediction file exists.
        "Thiruvanthapuram": "Visakhapatnam",
    }
    cities = {
        "Agra", "Ahmedabad", "Alwar", "Amaravati", "Amritsar", "Bathinda",
        "Bengaluru", "Chandrapur", "Delhi", "Durgapur", "Faridabad",
        "Ghaziabad", "Haldia", "Howrah", "Hyderabad", "Jaipur", "Jalandhar",
        "Jodhpur", "Kanpur", "Kolkata", "Kota", "Lucknow", "Ludhiana",
        "Nagpur", "Nashik", "Noida", "Patna", "Pune", "Satna", "Singrauli",
        "Tirupati", "Udaipur", "Ujjain", "Varanasi", "Vijayawada",
        "Visakhapatnam",
    }
    if city == "Dewas":
        # NOTE(review): the original used an inconsistent "/src/..." prefix
        # for Dewas only; preserved byte-for-byte.
        path = BASE_DIR + "/src/app_aerify/monthly_predicted_data/pred_Dewas.csv"
    elif city in special:
        path = BASE_DIR + "/app_aerify/monthly_predicted_data/pred_%s.csv" % special[city]
    elif city in cities:
        path = BASE_DIR + "/app_aerify/monthly_predicted_data/pred_%s.csv" % city
    else:
        raise ValueError("unsupported city: %r" % city)
    data_df = pd.read_csv(path)
    return {"aqi": list(data_df["lstm"]), "date": list(data_df["date"])}
|
from unittest import TestCase
import unittest
from src.Year2016.Day14 import Day14
class Day14Test(TestCase):
    """Regression tests for the 2016 Day 14 one-time-pad puzzle."""

    def setUp(self):
        self.day = Day14()

    def test_abc(self):
        # Example from the puzzle statement.
        result = self.day.one_time_pads('abc', 64, self.day.simple_md5)
        self.assertEqual(result[63], 22728)

    def test_puzzle(self):
        result = self.day.one_time_pads('ihaygndm', 64, self.day.simple_md5)
        self.assertEqual(result[63], 15035)

    def test_part2_hash_of_hash(self):
        # The 2016-times-stretched hash of 'abc0'.
        self.assertEqual(self.day.hash_2016('abc0'),
                         'a107ff634856bb300138cac6568c0f24')

    @unittest.skip("Take up to 2 minutes")
    def test_puzzle_part2(self):
        result = self.day.one_time_pads('ihaygndm', 64, self.day.hash_2016)
        self.assertEqual(result[63], 19968)
|
import os, json, time, csv
from datetime import datetime, timedelta
from betfair_python_rest.managers import (
BetFairAPIManagerBetting,
BetFairAPIManagerAccounts,
)
from betfair_python_rest.forms import (
MarketFilterAndTimeGranularityForm,
MarketFilterAndLocaleForm,
ListMarketCatalogueForm,
ListMarketBookForm,
ListRunnerBookForm,
ListMarketProfitAndLossForm,
ListCurrentOrdersForm,
LimitOrder,
ListClearedOrdersForm,
PlaceOrderForm,
CancelOrdersForm,
ReplaceOrdersForm,
PlaceInstruction,
CancelInstruction,
ReplaceInstruction,
UpdateOrdersForm,
UpdateInstruction,
)
from betfair_python_rest.enums.betting import (
TimeGranularity,
BetStatus,
OrderType,
PersistenceType,
SideChoices,
MarketProjection,
PriceData,
MatchProjection,
)
'''KEY DETaiLS GO HERE'''
# Betfair credentials and the CSV output location.
# NOTE(review): fill these in locally; never commit real credentials.
api_key = "API KEY GOES HERE"
login = "USERNAME GOES HERE"
password = "PASSWORD GOES HERE"
outputPath = "outputFile.csv"
class CustomBetFairAPIManagerBetting(BetFairAPIManagerBetting):
    """Betting API manager wired to the local client-certificate files.

    The certificate paths are class attributes, resolved once at import
    time relative to the current working directory's certFiles/ folder.
    """
    cwd = os.getcwd()
    crt_path = os.path.join(cwd, "certFiles", "client-2048.crt")
    crt_key_path = os.path.join(cwd, "certFiles", "client-2048.key")
# Single shared API manager used by every request below.
# NOTE(review): raise_exceptions=False presumably makes API errors come back
# in the response instead of raising -- confirm against the library docs.
test_manager = CustomBetFairAPIManagerBetting(
    login, password, api_key, log_mode=True, raise_exceptions=False
)
"""LIST COMPETITIONS IN ENGLISH"""
def list_competitions(apiManager):
    """Return competitions matching the text query 'English'."""
    query_form = MarketFilterAndLocaleForm(text_query="English")
    return apiManager.list_competitions(request_class_object=query_form)
"""LIST EVENTS IN COMPETITION"""
def list_events(apiManager, competitionId):
    """Return the events belonging to a single competition id."""
    filter_form = MarketFilterAndLocaleForm(competitions_ids=[competitionId])
    return apiManager.list_events(request_class_object=filter_form)
"""LIST MARKETS IN EVENT"""
def list_market_catalogue(apiManager, eventId):
    """Return the market catalogue for one event, including runner
    descriptions and metadata."""
    catalogue_form = ListMarketCatalogueForm(
        market_projection=[
            MarketProjection.RUNNER_DESCRIPTION.name,
            MarketProjection.RUNNER_METADATA.name,
        ],
        event_ids=[eventId],
    )
    return apiManager.list_market_catalogue(request_class_object=catalogue_form)
"""LIST MARKETS BOOK"""
def list_market_book(apiManager, marketId):
    """Fetch the market book (traded prices) for a single market id."""
    # (the original also built an unused MatchProjection value; removed)
    book_form = ListMarketBookForm(
        market_ids=[marketId], price_data=[PriceData.EX_TRADED.name]
    )
    return apiManager.list_market_book(request_class_object=book_form)
# CSV header for the per-runner rows written by the scraping loop below.
titleRow = [
    "League",
    "EventID",
    "Event Name",
    "Open Time",
    "Market ID",
    "Market Name",
    "Market Total Matched",
    "Market Total Available",
    "SelectionID",
    "Selection Name",
    "Selection Handicap",
    "Last Price Traded",
    "Total Matched",
]

# Start the output file fresh with just the header row.
with open(outputPath, "w+") as csv_file:
    writer = csv.writer(csv_file, delimiter=",")
    writer.writerow(titleRow)

#leaguesJson = test_list_competitions(test_manager)
# Betfair competition ids (only the Premier League is used below).
premierLeague = 10932509
faCup = 30558
championship = 7129730
league1 = 35
laLiga = 117

eventsJson = list_events(test_manager, premierLeague)
print("--------------------------------DATA--------------------------------")
# Walk every Premier League event -> market -> active runner and append one
# CSV row per runner.  Bare `except:` clauses were narrowed to KeyError
# (they guard against optional fields missing from the API payloads).
for event in eventsJson:
    event = event["event"]
    eventId = event["id"]
    eventName = event["name"]
    eventOpenDate = event["openDate"]
    marketsJson = list_market_catalogue(test_manager, eventId)
    print("\n")
    print(eventName + " :: " + eventId)
    print(
        "------------------------------------------------------------------------------"
    )
    for market in marketsJson:
        marketId = market["marketId"]
        marketName = market["marketName"]
        runnersList = {}
        for runner in market["runners"]:
            runnersList[runner["selectionId"]] = {"name": runner["runnerName"]}
        marketJson = list_market_book(test_manager, marketId)
        print(marketJson)
        if "faultcode" in json.dumps(marketJson):
            continue  # API fault for this market; skip it
        marketJson = marketJson[0]
        marketTotalMatched = marketJson["totalMatched"]
        marketTotalAvailable = marketJson["totalAvailable"]
        print(marketName + " :: " + marketId)
        print("Market Total Matched: " + str(marketTotalMatched))
        print("Market Total Available: " + str(marketTotalAvailable))
        print("\n")
        for runner in marketJson["runners"]:
            if runner["status"] != "ACTIVE":
                continue
            try:
                selectionId = runner["selectionId"]
                runnersList[selectionId]["handicap"] = runner["handicap"]
                runnersList[selectionId]["totalMatched"] = runner["totalMatched"]
                try:
                    runnersList[selectionId]["lastPriceTraded"] = runner[
                        "lastPriceTraded"
                    ]
                except KeyError:
                    # Not every runner has traded yet.
                    runnersList[selectionId]["lastPriceTraded"] = 0.0
                print(
                    str(runnersList[selectionId]["name"])
                    + " :: "
                    + str(runnersList[selectionId]["handicap"])
                    + " :: "
                    + str(runnersList[selectionId]["lastPriceTraded"])
                )
                line = []
                line.append("Premier League")
                line.append(eventId)
                line.append(eventName)
                line.append(eventOpenDate)
                line.append(marketId)
                line.append(marketName)
                line.append(marketTotalMatched)
                line.append(marketTotalAvailable)
                line.append(selectionId)
                line.append(runnersList[selectionId]["name"])
                line.append(runnersList[selectionId]["handicap"])
                try:
                    line.append(runnersList[selectionId]["lastPriceTraded"])
                except KeyError:
                    line.append(0.0)
                try:
                    line.append(runnersList[selectionId]["totalMatched"])
                except KeyError:
                    line.append(0.0)
                with open(outputPath, "a+") as csv_file:
                    writer = csv.writer(csv_file, delimiter=",")
                    writer.writerow(line)
            except KeyError:
                print("Malformed Row")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from skymap import *
def main():
    """Draw a northern-sky map (UMa, UMi, Draco, Cepheus, Corona Borealis).

    Star data: 5th Revised edition of the Yale Bright Star Catalog, 1991,
    ftp://adc.gsfc.nasa.gov/pub/adc/archives/catalogs/5/5050.
    """
    cat = Catalog("YBS.edb")
    obs = ephem.Observer()
    obs.lat = ephem.degrees(str(LATITUD))
    obs.lon = ephem.degrees(str(LONGITUD))
    obs.date = "23/9/2015 23:00"
    fig = figure()

    # Each constellation is a list of (star, star) edges.
    # BUG FIX: the original drew ("UMi Epsilon", "UMi Delta") twice; the
    # duplicate is removed here.
    constellations = [
        # Ursa Major (the Big Dipper)
        [("Merak", "Dubhe"), ("Merak", "Phecda"), ("Dubhe", "Megrez"),
         ("Megrez", "Phecda"), ("Alioth", "Megrez"), ("Alioth", "Mizar"),
         ("Mizar", "Alcaid")],
        # Ursa Minor
        [("Polaris", "UMi Delta"), ("UMi Epsilon", "UMi Delta"),
         ("UMi Epsilon", "UMi Zeta"), ("UMi Eta", "UMi Zeta"),
         ("UMi Eta", "UMi Gamma"), ("UMi Gamma", "UMi Beta"),
         ("UMi Beta", "UMi Zeta")],
        # Draco
        [("Dra Kappa", "Dra Lambda"), ("Dra Alpha", "Dra Kappa"),
         ("Dra Alpha", "Dra Iota"), ("Dra Theta", "Dra Iota"),
         ("Dra Theta", "Dra Eta"), ("Dra Zeta", "Dra Eta"),
         ("Dra Zeta", "Dra Chi"), ("Dra Epsilon", "Dra Chi"),
         ("Dra Epsilon", "Dra Delta"), ("Dra Xi", "Dra Delta"),
         ("Dra Xi", "Dra Nu1"), ("Dra Beta", "Dra Nu1"),
         ("Dra Beta", "Dra Gamma"), ("Dra Xi", "Dra Gamma")],
        # Cepheus
        [("Cep Gamma", "Cep Iota"), ("Cep Gamma", "Cep Beta"),
         ("Cep Iota", "Cep Zeta"), ("Cep Zeta", "Cep Alpha"),
         ("Cep Eta", "Cep Alpha"), ("Cep Beta", "Cep Alpha"),
         ("Cep Epsilon", "Cep Zeta")],
        # Corona Borealis
        [("CrB Alpha", "CrB Beta"), ("CrB Theta", "CrB Beta"),
         ("CrB Alpha", "CrB Delta"), ("CrB Epsilon", "CrB Delta"),
         ("CrB Epsilon", "CrB Iota")],
    ]
    for edges in constellations:
        constellation = Constellation(obs, cat)
        for star_a, star_b in edges:
            constellation.add_edge(fig, star_a, star_b)

    # Render the complete graph.
    showGraph(fig)
# Run only when executed as a script.
if (__name__=='__main__'):
    main()
|
import numpy as np
from pyflann import *
import math
import clusterAssign
import fileStructure
import tfidf
import ml_metrics as metrics
import hamEmbed
# Hamming-embedding threshold: feature pairs whose binary signatures differ
# in fewer than this many bits are counted as matches (see the xor test in
# the scoring loop below).
he_threshold = 52

invFileTable = np.load('matrices/data72/invFileTable20000k3.npy')
centers = np.load('matrices/data72/codebook.npy')
numcenters = invFileTable.shape[0]

# removing a percentage of the most popular words from list
del_rows_sum = np.sum(invFileTable, axis=1)
del_rows = np.argsort(-del_rows_sum)
num_del_rows1 = int(0.85*numcenters)  # hyperparameter to prune off very popular features
num_del_rows2 = int(1*numcenters)  # hyperparameter to prune off unpopular features
del_rows1 = del_rows[:num_del_rows1]
del_rows2 = del_rows[num_del_rows2:]
del_rows = np.concatenate((del_rows1,del_rows2))
del_rows = np.sort(del_rows)

# Map each kept visual word to its row index after deletion; deleted rows
# keep -1 so later lookups can skip them.
row_index = -np.ones(numcenters)
del_row_index = 0
good_row_count = 0
for i in range(numcenters):
    if del_row_index < del_rows.shape[0] and del_rows[del_row_index] == i:
        del_row_index = del_row_index+1
    else:
        row_index[i] = good_row_count
        good_row_count = good_row_count+1
invFileTable = np.delete(invFileTable,del_rows, axis=0)
row_index = row_index.astype('int')

# tf-idf for all remaining words
idftrain, invFileTable= tfidf.tfidf(invFileTable)
# Load the test features and score them against the inverted file table.
# (Python 2 script: note the print statements.)
print "Loading test data..........."
Xtest = np.loadtxt('tempX.txt', delimiter=',')
Ytest = np.loadtxt('tempY.txt', delimiter=',')

print "Assigning Clusters........"
testClusterID, testDist = clusterAssign.clusterAssign(Xtest,1,centers)

print "Computing corresponding vectors......."
testStructure = fileStructure.imgFeatures(Ytest, testClusterID, testDist, centers)
# removing rows corresponding to popular words
testStructure = np.delete(testStructure,del_rows, axis=0)

print "Computing TF-IDF score.........."
idftest, testStructure = tfidf.tfidf(testStructure)

print "loading medians and embeddings......."
median = np.load('matrices/data72/median72.npy')
npz = np.load('matrices/data72/hamEmb72.npz')
ML = npz['arr_0']
CIL = npz['arr_1']

print "computing hamming embeddings of test data...."
HE = hamEmbed.binEmbeddings(Xtest, testClusterID, median, 1)

# NOTE(review): Ytest comes from np.loadtxt, so numTestImgs is a float here;
# it is used below for array sizing and range() -- confirm an int cast is safe.
numTestImgs = Ytest[-1]
numTrainImgs = invFileTable.shape[1]
numTestFeatures = Xtest.shape[0]
scores = np.zeros([numTestImgs,numTrainImgs])  # matrix to store scores

print "computing scores for each test image......."
for i in range(numTestFeatures):
    tfc = row_index[testClusterID[i]]
    if tfc==-1:
        continue  # feature's visual word was pruned above
    indm = ML[testClusterID[i]]
    # iterating over features assigned to the corresponding cluster
    for j in range(np.shape(indm)[0]):
        g = np.logical_xor(HE[i,:],indm[j,:])
        if np.sum(g) < he_threshold:
            indv = CIL[testClusterID[i]][j]-1
            scores[Ytest[i]-1, indv] = scores[Ytest[i]-1, indv] + idftrain[tfc] * idftest[tfc]#testStructure[tfc,Ytest[i]-1] *trainStructure[tfc,indv]

scores = np.divide(scores,np.sum(invFileTable, axis=0))
ranks = np.argsort(-scores, axis=1)
# NOTE(review): testImgsperFolder and trainImgsperFolder are not defined
# anywhere in this script -- this print raises NameError as written.
for i in range(numTestImgs):
    print i/testImgsperFolder+1,">>>>>", ranks[i,:10]/trainImgsperFolder+1
# Write one ranking file per test image listing every training image and its
# folder.
with open('folders.txt') as f:
    folders = [x.strip() for x in f.readlines()]
with open('images.txt') as f:
    images = [x.strip() for x in f.readlines()]

filename = 'test$.txt'
# BUG FIX: the original iterated `1+range(numTestImgs)` (a TypeError: cannot
# add int to a list) and called file.write() with two arguments (another
# TypeError).  `//` keeps integer division under both Python 2 and 3.
for i in range(1, int(numTestImgs) + 1):
    fileID = open(filename.replace("$", str(i)), 'w')
    for j in range(numTrainImgs):
        # NOTE(review): intended separator unknown; one "<image> <folder>"
        # pair per line assumed -- confirm with downstream consumers.
        fileID.write("%s %s\n" % (images[j], folders[j // 72]))
    fileID.close()
|
import abc
import os
import re
import threading
import time
import socket
import subprocess
import typing
import devcluster as dc
class AtomicOperation(metaclass=abc.ABCMeta):
    """
    Only have one atomic operation in flight at a time. You must wait for it to finish but you may
    request it ends early if you know you will ignore its output.
    An example would be a connector which is trying to connect to the master binary, except if the
    master binary has already exited, we will want to exit the connector.
    """

    @abc.abstractmethod
    def __str__(self) -> str:
        """Return a one-word summary of what the operation is"""
        pass

    @abc.abstractmethod
    def cancel(self) -> None:
        """Request that the operation end early (its output will be ignored)."""
        pass

    @abc.abstractmethod
    def join(self) -> None:
        """Block until the operation has fully finished."""
        pass
class ConnCheck(threading.Thread, AtomicOperation):
    """ConnCheck is an AtomicOperation.

    Polls a TCP endpoint until it accepts a connection, a 30-second deadline
    passes, or cancel() is called, then writes a one-byte result ("S"/"F")
    to report_fd.  Starts itself on construction.
    """

    def __init__(self, host: str, port: int, report_fd: int):
        self.host = host
        self.port = port
        self.report_fd = report_fd
        self.quit = False

        threading.Thread.__init__(self)
        # AtomicOperations should not need a start() call.
        self.start()

    def __str__(self) -> str:
        return "connecting"

    def run(self) -> None:
        success = False
        try:
            # 30 seconds to succeed
            deadline = time.time() + 30
            while time.time() < deadline:
                if self.quit:
                    break
                s = socket.socket()
                try:
                    # try every 20ms
                    waittime = time.time() + 0.02
                    s.settimeout(0.02)
                    s.connect((self.host, self.port))
                except (socket.timeout, ConnectionError):
                    now = time.time()
                    if now < waittime:
                        time.sleep(waittime - now)
                    continue
                else:
                    success = True
                    break
                finally:
                    # BUG FIX: the original only closed the socket on the
                    # success path, leaking one fd per failed attempt (up to
                    # ~1500 over the 30s window).  Close on every attempt.
                    s.close()
        finally:
            # "S"uccess or "F"ail
            os.write(self.report_fd, b"S" if success else b"F")

    def cancel(self) -> None:
        self.quit = True
class LogCheck(AtomicOperation):
    """
    Wait for a log stream to print out a phrase before allowing the state to progress.

    Registers itself as a logger callback; reports "S" on report_fd when the
    pattern appears on the watched stream, or "F" if canceled first.
    """

    def __init__(
        self,
        logger: dc.Logger,
        stream: str,
        report_fd: int,
        regex: typing.Union[str, bytes],
    ):
        self.logger = logger
        self.stream = stream
        self.report_fd = report_fd
        self.pattern = re.compile(dc.asbytes(regex))
        self.canceled = False

        self.logger.add_callback(self.log_cb)

    def __str__(self) -> str:
        return "checking"

    def cancel(self) -> None:
        if self.canceled:
            return
        self.canceled = True
        os.write(self.report_fd, b"F")
        self.logger.remove_callback(self.log_cb)

    def join(self) -> None:
        pass

    def log_cb(self, msg: bytes, stream: str) -> None:
        # Ignore other streams and lines that don't match the pattern.
        if stream != self.stream or not self.pattern.findall(msg):
            return
        os.write(self.report_fd, b"S")
        self.logger.remove_callback(self.log_cb)
class AtomicSubprocess(AtomicOperation):
    """Run a command as a subprocess, forwarding its output through the logger.

    Once both stdout and stderr pipes have closed and the process has exited, a
    single status byte is written to report_fd: b"S" on success (exit code 0
    and not canceled), b"F" otherwise.
    """
    def __init__(
        self,
        poll: dc.Poll,
        logger: dc.Logger,
        stream: str,
        report_fd: int,
        cmd: typing.List[str],
        quiet: bool = False,
        callbacks: typing.Iterable[typing.Callable[[bool], None]] = (),
    ) -> None:
        self.poll = poll
        self.logger = logger
        self.stream = stream
        self.report_fd = report_fd
        # Each callback is invoked with the success flag just before the
        # report byte is written.
        self.callbacks = callbacks
        self.start_time = time.time()
        self.dying = False
        self.proc = subprocess.Popen(
            cmd,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL if quiet else subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) # type: typing.Optional[subprocess.Popen]
        # self.out / self.err hold raw pipe fds; None once closed (or quiet).
        self.out = None
        if not quiet:
            assert self.proc.stdout
            self.out = self.proc.stdout.fileno()
        assert self.proc.stderr
        self.err = self.proc.stderr.fileno() # type: typing.Optional[int]
        # Pipes are drained non-blockingly from the poll loop.
        if self.out is not None:
            dc.nonblock(self.out)
        dc.nonblock(self.err)
        if self.out is not None:
            self.poll.register(self.out, dc.Poll.IN_FLAGS, self._handle_out)
        self.poll.register(self.err, dc.Poll.IN_FLAGS, self._handle_err)
    def __str__(self) -> str:
        return "building"
    def _maybe_wait(self) -> None:
        """Only respond after both stdout and stderr have closed."""
        assert self.proc
        if self.out is None and self.err is None:
            ret = self.proc.wait()
            self.proc = None
            success = False
            if self.dying:
                self.logger.log(f" ----- {self} canceled -----\n", self.stream)
            elif ret != 0:
                self.logger.log(f" ----- {self} exited with {ret} -----\n", self.stream)
            else:
                build_time = time.time() - self.start_time
                self.logger.log(
                    f" ----- {self} complete! (%.2fs) -----\n" % (build_time),
                    self.stream,
                )
                success = True
            for cb in self.callbacks:
                cb(success)
            # "S"uccess or "F"ail, exactly one byte.
            os.write(self.report_fd, b"S" if success else b"F")
    def _handle_out(self, ev: int, _: int) -> None:
        # Forward stdout chunks; on pipe error/EOF close the fd and possibly
        # finish via _maybe_wait().
        assert self.out
        if ev & dc.Poll.IN_FLAGS:
            self.logger.log(os.read(self.out, 4096), self.stream)
        if ev & dc.Poll.ERR_FLAGS:
            self.poll.unregister(self._handle_out)
            os.close(self.out)
            self.out = None
            self._maybe_wait()
    def _handle_err(self, ev: int, _: int) -> None:
        # Same as _handle_out, for stderr.
        assert self.err
        if ev & dc.Poll.IN_FLAGS:
            self.logger.log(os.read(self.err, 4096), self.stream)
        if ev & dc.Poll.ERR_FLAGS:
            self.poll.unregister(self._handle_err)
            os.close(self.err)
            self.err = None
            self._maybe_wait()
    def cancel(self) -> None:
        assert self.proc
        self.dying = True
        self.proc.kill()
    def join(self) -> None:
        pass
class DockerRunAtomic(AtomicSubprocess):
    """AtomicSubprocess specialization for `docker run`; always quiet."""

    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        # Force quiet mode regardless of what the caller passed.
        super().__init__(*args, **{**kwargs, "quiet": True})

    def __str__(self) -> str:
        return "starting"

    def cancel(self) -> None:
        # Don't support canceling at all; it creates a race condition where we don't know when
        # we can docker kill the container.
        pass
|
class Solution(object):
    """Builds a De Bruijn sequence B(k, n): the shortest string over digits
    0..k-1 containing every length-n combination as a substring (LeetCode 753).
    """
    def __init__(self):
        self.ans = ""

    def DFS(self, node, visited, k):
        """Post-order traversal of the De Bruijn graph.

        Nodes are (n-1)-digit strings; appending a digit forms an edge
        (an n-digit combination). Each edge's digit is appended to self.ans
        after its subtree is exhausted (Hierholzer-style).
        """
        for num in map(str, range(k)):
            nodeCur = node + num
            if nodeCur not in visited:
                visited.add(nodeCur)
                self.DFS(nodeCur[1:], visited, k)
                self.ans += num

    def crackSafe(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        # Fix: reset accumulated state so a second call on the same instance
        # does not concatenate onto the previous result.
        self.ans = ""
        node = "0" * (n - 1)
        visited = set()
        self.DFS(node, visited, k)
        # Append the start node to close the cycle.
        return self.ans + "0" * (n - 1)
# Demo: print a De Bruijn sequence for 3-digit codes over digits {0, 1}.
solver = Solution()
print(solver.crackSafe(3, 2))
import ast
import json
import logging
import math
import tempfile
import random
import string
from abc import ABC, abstractmethod
from math import inf
import graphviz
import requests
from dulwich import porcelain
# Maps git repo URI -> tempfile.TemporaryDirectory holding its clone.
ccsGitCache = {}
""" Caches all cloned git repositories """
# Maps an attribute/CCS id -> {"className": <name in globals()>, "extendsId": <parent id>}.
# Framework-builtin attributes use their bare class name as id; injected custom
# models use "<gitRepo>@<filePath>@<commit|latest>" ids (see Attribute.inject).
importedClasses = {
    "VMAsAService": {
        "className": "VMAsAService",
        "extendsId": "ServerAsAService"
    },
    "ServerAsAService": {
        "className": "ServerAsAService",
        "extendsId": "IaaS"
    },
    "SaaS": {
        "className": "SaaS",
        "extendsId": "CCS"
    },
    "IaaS": {
        "className": "IaaS",
        "extendsId": "CCS"
    },
    "StorageAsAService": {
        "className": "StorageAsAService",
        "extendsId": "IaaS"
    },
    "NumericAttribute": {
        "className": "NumericAttribute",
        "extendsId": "Attribute"
    },
    "ChoiceAttribute": {
        "className": "ChoiceAttribute",
        "extendsId": "Attribute"
    },
    "BoolAttribute": {
        "className": "BoolAttribute",
        "extendsId": "Attribute"
    },
    "OptionAttribute": {
        "className": "OptionAttribute",
        "extendsId": "Attribute"
    },
    "CCS": {
        "className": "CCS",
        "extendsId": "Attribute"
    },
    "Attribute": {
        "className": "Attribute",
        "extendsId": "object"
    },
    "Region": {
        "className": "Region",
        "extendsId": "ChoiceAttribute"
    },
    "NorthAmerica": {
        "className": "NorthAmerica",
        "extendsId": "OptionAttribute"
    },
    "Australia": {
        "className": "Australia",
        "extendsId": "OptionAttribute"
    },
    "Africa": {
        "className": "Africa",
        "extendsId": "OptionAttribute"
    },
    "EastAsia": {
        "className": "EastAsia",
        "extendsId": "OptionAttribute"
    },
    "SouthAmerica": {
        "className": "SouthAmerica",
        "extendsId": "OptionAttribute"
    },
    "Europe": {
        "className": "Europe",
        "extendsId": "OptionAttribute"
    },
    "Antarctica": {
        "className": "Antarctica",
        "extendsId": "OptionAttribute"
    },
    "Storage": {
        "className": "Storage",
        "extendsId": "NumericAttribute"
    },
    "StorageWriteSpeed": {
        "className": "StorageWriteSpeed",
        "extendsId": "NumericAttribute"
    },
    "StorageReadSpeed": {
        "className": "StorageReadSpeed",
        "extendsId": "NumericAttribute"
    },
    "OperatingSystem": {
        "className": "OperatingSystem",
        "extendsId": "ChoiceAttribute"
    },
    "CpuCores": {
        "className": "CpuCores",
        "extendsId": "NumericAttribute"
    },
    "CpuClockSpeed": {
        "className": "CpuClockSpeed",
        "extendsId": "NumericAttribute"
    },
    "Ram": {
        "className": "Ram",
        "extendsId": "NumericAttribute"
    },
    "RamClockSpeed": {
        "className": "RamClockSpeed",
        "extendsId": "NumericAttribute"
    },
    "RamWriteSpeed": {
        "className": "RamWriteSpeed",
        "extendsId": "NumericAttribute"
    },
    "RamReadSpeed": {
        "className": "RamReadSpeed",
        "extendsId": "NumericAttribute"
    },
    "NetworkCapacity": {
        "className": "NetworkCapacity",
        "extendsId": "NumericAttribute"
    },
    "NetworkUploadSpeed": {
        "className": "NetworkUploadSpeed",
        "extendsId": "NumericAttribute"
    },
    "NetworkDownloadSpeed": {
        "className": "NetworkDownloadSpeed",
        "extendsId": "NumericAttribute"
    },
    "PricingModel": {
        "className": "PricingModel",
        "extendsId": "ChoiceAttribute"
    },
    "PayAndGo": {
        "className": "PayAndGo",
        "extendsId": "OptionAttribute"
    },
    "Subscription": {
        "className": "Subscription",
        "extendsId": "OptionAttribute"
    },
    "Price": {
        "className": "Price",
        "extendsId": "Attribute"
    },
    "DatabaseAsAService": {
        "className": "DatabaseAsAService",
        "extendsId": "SaaS"
    },
    "SQLDatabaseAsAService": {
        "className": "SQLDatabaseAsAService",
        "extendsId": "DatabaseAsAService"
    },
    "NoSQLDatabaseAsAService": {
        "className": "NoSQLDatabaseAsAService",
        "extendsId": "DatabaseAsAService"
    }
}
""" AttributeIds that have already been imported mapped to their class names and extendsIds. Whenever a new Attribute
is added (not imported) to the framework, it needs to be included in here. """
def cleanGitCache():
    """ Delete all temporary directories that were created to clone git repositories into """
    # Remove the clone directories first, then forget all cache entries.
    for tempDir in ccsGitCache.values():
        tempDir.cleanup()
    ccsGitCache.clear()
def randName():
    """ Returns a cryptographically secure 16 digit random string starting with the letter C"""
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return "C" + "".join(rng.choice(alphabet) for _ in range(15))
class Attribute:
    """ Instances of the Attribute class have a unique id that encodes from where they can be loaded. Classes that
    extend the Attribute class can be matched with other Attributes if they are related, which helps with user
    requirement matching and price calculation. Also, classes that have an Attribute type ancestor can be extended
    arbitrarily by other Attribute type instances (however, note that existing duplicate fields will be overwritten.
    """
    def __init__(self):
        super().__init__()
        # Human readable name of this attribute.
        self.name = None
        # Provenance fields, filled in by setId().
        self.gitRepo = None
        self.commit = None
        self.branch = None
        self.filePath = None
        # Unique id; injected models use "<gitRepo>@<filePath>@<commit|latest>".
        self.id = "Attribute"
        self.extendsId = "object"
        # Whether a CCS may change this attribute's value to satisfy a requirement.
        self.mutable = False
        self.searchKeyWords = None
        self.description = None
        self.matched = False
        self.value = None
    def setId(self, gitRepo, filePath, branch=None, commit=None):
        """ Set the id of an Attribute type instance. This id has to be unique and specify from where the Attribute can
        be loaded.
        Args:
            gitRepo (str): The URI to a git repository where the file is stored. Defaults to "local", indicating that it is stored on the local machine.
            filePath (str): The path to the file inside the git repository.
            branch (str): The git branch name, defaults to None (indicates that master should be fetched).
            commit (str): The commit id of the git branch, defaults to None (indicates that the latest commit should be fetched)
        """
        # NOTE(review): `branch` is accepted but never used here — confirm intended.
        self.gitRepo = gitRepo
        self.commit = commit
        self.filePath = filePath
        self.id = gitRepo + "@" + filePath
        if self.commit is not None:
            self.id += "@" + self.commit
            # NOTE(review): "refs/heads/master/<commit>" is not a standard git
            # ref spelling — verify against porcelain.update_head's expectations.
            self.commit = "refs/heads/master/" + self.commit
        else:
            self.id += "@latest"
    def inject(self, gitRepo, filePath, branch=None, commit=None, onlyFetchDependency=False):
        """ Updates self with the fields of the Attribute that are attained from the given filePath.
        Args:
            gitRepo (str): The URI to a git repository where the file is stored. Defaults to "local", indicating that it is stored on the local machine.
            filePath (str): The path to the file inside the git repository.
            branch (str): The git branch name, defaults to None (indicates that master should be fetched).
            commit (str): The commit id of the git branch, defaults to None (indicates that the latest commit should be fetched)
            onlyFetchDependency (bool): Does not inject any fields to self if set to True, defaults to False.
        """
        self.setId(gitRepo, filePath, branch=branch, commit=commit)
        # git clone metamodel repository
        if gitRepo not in ccsGitCache:
            tempDir = tempfile.TemporaryDirectory()
            porcelain.clone(self.gitRepo, tempDir.name)
            ccsGitCache[gitRepo] = tempDir
            if self.commit is not None:
                print("checkout commit " + self.commit)
                porcelain.update_head(ccsGitCache[gitRepo].name, self.commit)
            # TODO else checkout latest commit and branch...
        # inject metamodel into this Attribute
        moduleId = gitRepo + "@" + filePath + "@" + "latest" # TODO make this work with given commit id and branch too
        modulePath = ccsGitCache[gitRepo].name + "/" + filePath
        moduleName = filePath.split("/")[-1].split(".py")[0]
        # moduleDir = ''.join(modulePath.split(moduleName + ".py"))
        # print("modulePath: ", modulePath)
        # print("moduleName: ", moduleName)
        # print("moduleDir: ", moduleDir)
        # parse module for extendsId field
        source = open(modulePath, "r").read()
        root = ast.parse(source)
        # Locate the class whose name equals the file name (the model's main class).
        closestClass = None
        try:
            classes = [node for node in ast.walk(root) if isinstance(node, ast.ClassDef) and node.name == moduleName]
            closestClass = classes[0]
        except Exception as e:
            print(e)
            logging.error("can not inject %s because it does not define a class with the same name as its filename" % modulePath)
        # Locate that class's __init__.
        closestFunction = None
        try:
            functions = [node for node in ast.walk(closestClass) if isinstance(node, ast.FunctionDef) and node.name == "__init__"]
            closestFunction = functions[0]
        except Exception as e:
            print(e)
            logging.error("can not inject %s because it does not define a __init__ function in its main class" % modulePath)
        # Locate the "self.extendsId = ..." assignment inside __init__.
        closestAssign = None
        try:
            assigns = [node for node in ast.walk(closestFunction) if isinstance(node, ast.Assign)]
            assigns = [node for node in assigns if isinstance(node.targets[0], ast.Attribute)]
            assigns = [node for node in assigns if node.targets[0].attr == "extendsId"]
            closestAssign = assigns[0]
        except Exception as e:
            print(e)
            logging.error("can not inject %s because it does set the field extendsId" % modulePath)
        extendsId = closestAssign.value.value
        # print(modulePath, "extendsId: ", extendsId)
        # import extendsId module if it was not imported yet
        if extendsId not in importedClasses:
            #print("fetch dependency first:", extendsId)
            dummy = CCS()
            try:
                dummyGitRepo = extendsId.split("@")[0]
                dummyFilePath = extendsId.split("@")[1]
            except Exception as e:
                print(e)
                logging.error("%s sets an extendsId field that is not formatted correctly. non-common attributes/CCS have \
                to be of the form link/to/repo@file/path.py@(commitID|latest), however this was the given extendsId: '%s'" % (
                    modulePath, extendsId))
                return
            dummy.inject(dummyGitRepo, dummyFilePath, onlyFetchDependency=True)
        # extract class name from extendsId
        extendsClassName = importedClasses[extendsId]["className"]
        # refactor source main class name to a non-colliding class name
        # , as well as the extension class to the class name derived from its extendsId field
        if moduleId not in importedClasses:
            nonCollidingClassName = randName() # this prevents accidental class overwriting when class names of
            # injected custom attributes happen to be identical
            try:
                extensionDigitStart = source.find("class " + moduleName + "(") + len("class ") # NOTE this means that moduleName and the model main class name HAVE to be identical
                extensionDigitEnd = source.find(")", extensionDigitStart)
                source = source[:extensionDigitStart] + nonCollidingClassName + "(" + extendsClassName + source[extensionDigitEnd:]
            except Exception as e:
                print(e)
                logging.error("the injected module has to define a main class with the same name as its file name. the \
                injected ccs/attribute models moduleName was %s, however no main class name that was identical to it was found in %s" % (moduleName, modulePath))
                return
            # debugging
            # print(source)
            # import module directly from refactored source
            # SECURITY NOTE(review): this executes arbitrary code fetched from
            # the cloned repository — only inject models from trusted repos.
            exec(source, globals())
            importedClasses[moduleId] = {}
            importedClasses[moduleId]["className"] = nonCollidingClassName
            importedClasses[moduleId]["extendsId"] = extendsId
            print("imported", moduleName, "with id", moduleId, "as", nonCollidingClassName)
        if not onlyFetchDependency:
            # inject custom class fields (overwrites/overrides existing fields and functions)
            exec("self.__dict__.update(" + importedClasses[moduleId]["className"] + "().__dict__)") # read https://stackoverflow.com/questions/1216356/is-it-safe-to-replace-a-self-object-by-another-object-of-the-same-type-in-a-meth/37658673#37658673
            # print("injected", moduleName, "into", self.extendsId)
        # some asserts for early failure
        if commit is not None:
            assert self.id == gitRepo + "@" + filePath + "@" + commit, "failed to inject CCS model properly. got %s but wanted %s" % (self.id, gitRepo + "@" + filePath + "@" + commit)
        else:
            assert self.id == gitRepo + "@" + filePath + "@latest", "failed to inject CCS model properly. got %s but wanted %s" % (self.id, gitRepo + "@" + filePath + "@latest")
class BoolAttribute(Attribute):
    """Attribute holding a True/False value."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "BoolAttribute", "Attribute"
        self.value = None
class ChoiceAttribute(Attribute):
    """Attribute whose value is one key out of a dict of OptionAttributes.

    Attributes:
        options (dict): maps option keys to OptionAttribute instances
        value: the dictionary key of the chosen option in self.options
    """

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "ChoiceAttribute", "Attribute"
        self.options = None  # dict of Attribute instances
        self.value = None  # key of the chosen Attribute instance
class OptionAttribute(Attribute):
    """A single selectable option of a ChoiceAttribute."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "OptionAttribute", "Attribute"
class NumericAttribute(Attribute):
    """Attribute with a numeric value, optional bounds and step size."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "NumericAttribute", "Attribute"
        self.value = None
        # open interval by default
        self.minVal, self.maxVal = -inf, inf
        self.stepSize = None
        self.makeInt = False
        self.moreIsBetter = True
class PricingModel(ChoiceAttribute):
    """Choice between the pricing schemes supported by a CCS."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "PricingModel", "ChoiceAttribute"
        self.options = {
            "payAndGo": PayAndGo(),
            "subscription": Subscription(),
        }
        self.value = None
class PricingModelInterface(OptionAttribute, ABC):
    """Abstract base every pricing-model option must implement."""

    def __init__(self):
        super().__init__()

    @abstractmethod
    def getPrice(self, req, priceFuncs, currencyConversion=1):
        # Debug output: print each price component in the target currency.
        for pf in priceFuncs:
            print(pf.description, ":", pf.run(req) * currencyConversion)
class PayAndGo(PricingModelInterface):
    """One-off payment: pay the up-front cost once, then use the service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "PayAndGo", "OptionAttribute"
        self.description = "you (pay) an upFrontCost once (and go) on to use the service"
        self.upFrontCost = None

    def getPrice(self, req, priceFuncs, currencyConversion=1):
        # Debug output: print each price component in the target currency.
        for pf in priceFuncs:
            print(pf.description, ":", pf.run(req) * currencyConversion)
        self.upFrontCost = sum(pf.run(req) for pf in priceFuncs) * currencyConversion
        return self.upFrontCost
class Subscription(PricingModelInterface):
    """Recurring payment: a cost per billing period (period unit: hours)."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "Subscription", "OptionAttribute"
        self.description = "you pay a billingPeriodCost per billingPeriod. the unit of billingPeriod is per hour"
        self.upfrontCost = 0
        self.billingPeriodCost = None
        self.billingPeriod = None  # per hour

    def getPrice(self, req, priceFuncs, currencyConversion=1):
        self.billingPeriodCost = sum(pf.run(req) for pf in priceFuncs)
        total = self.billingPeriod * self.billingPeriodCost + self.upfrontCost
        return total * currencyConversion
class Price(Attribute):
    """Bundles everything needed to evaluate the final price: the currency,
    the price functions, and the pricing model enforced by the CCS."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "Price", "Attribute"
        self.currency = None  # expecting ISO 4217 currency code as string
        self.priceFuncs = []
        self.model = PricingModel()
# an interface as per https://stackoverflow.com/questions/2124190/how-do-i-implement-interfaces-in-python
class PriceFunc(ABC):
    """Interface for a single price component of a CCS."""

    def __init__(self):
        super().__init__()
        self.description = None

    @abstractmethod
    def run(self, req):
        """ returns the value of this price function """
def extractConfigurationTree(ccs):
    """ extract the configuration of all CCS and their sub CCS """
    # Returns a nested dict keyed by attribute id; leaves (attributes without
    # Attribute-typed subfields) carry their "value" and "description".
    def helper(attr):
        res = {attr.id: {}}
        fields = vars(attr) # https://stackoverflow.com/a/55320647
        hasAttributes = False
        for key in fields:
            # scan all fields and filter out the attributes
            try:
                if isAncestorOf("Attribute", fields[key].id):
                    # if fields[key] is an attribute ...
                    hasAttributes = True
                    res[attr.id].update(helper(fields[key]))
            except Exception as e:
                # Deliberate best-effort: fields without an .id (plain values)
                # raise here and are simply skipped.
                #print(e)
                pass
        if not hasAttributes:
            res[attr.id]["value"] = fields["value"]
            res[attr.id]["description"] = fields["description"]
        return res
    return helper(ccs)
def estimatePrice(req, ccs, currency="EUR"):
    """ returns a dict containing the total price and a dict of configurations that it resulted from

    Args:
        req (CCS): the requirements (used as input to the price functions)
        ccs (CCS): the CCS whose price is estimated
        currency (str): ISO 4217 code of the currency the result should be in

    Returns:
        dict: {"price": float, "billingPeriod": ..., "config": nested config dict}
    """
    # get prices of all CCS and their subCCS
    allSubCCSPrices = extractPrices(ccs)
    # convert prices to the same currency
    # TODO inject API key using command line flag or environment variable or config file
    # SECURITY: hard-coded API key checked into source; move it to configuration.
    apiKey = "080197719e5c4ef0b73f339e208f1f67"
    # TODO cache this table for at least one day
    ratesRelativeToUSD = json.loads(
        requests.get("https://openexchangerates.org/api/latest.json?app_id=" + apiKey + "&base=USD").content)[
        "rates"]
    # currencyConversion ends up as: requested-currency units per one unit of
    # the CCS's currency (rates[currency] / rates[ccs currency]).
    currencyConversion = 1
    try:
        currencyConversion = ratesRelativeToUSD[currency]  # how many of requirements currency is 1 USD
    except Exception as e:
        # fix: logging.* takes a %-style format string; the original passed
        # print-style extra args, which breaks message formatting at log time.
        logging.error("your requirement uses a currency with a currency code that does not comply with ISO_4217: %s",
                      currency)
        print(e)
    try:
        currencyConversion /= ratesRelativeToUSD[ccs.price.currency]  # how many of CCSs currency is 1 USD
    except Exception as e:
        logging.error("%s uses a currency with a currency code that does not comply with ISO_4217: %s",
                      ccs.price.id, ccs.price.currency)
        print(e)
    if ccs.price.model.value is None:
        logging.error("%s does not provide a pricing model choice", ccs.price.id)
    totalPrice = {"price": 0, "billingPeriod": "None", "config": extractConfigurationTree(ccs)}
    # get cheapest price of all of the subCCS
    for price in allSubCCSPrices:
        # get cheapest pricing method for this subCCS price and add it to the totalPrice
        cheapestPrice = inf
        for choice in price.model.options:
            curPrice = price.model.options[choice].getPrice(req, price.priceFuncs, currencyConversion=currencyConversion)
            if curPrice < cheapestPrice:
                cheapestPrice = curPrice
                if choice == "subscription":
                    totalPrice["billingPeriod"] = price.model.options[choice].billingPeriod
        totalPrice["price"] += cheapestPrice
    return totalPrice
class CCS(Attribute):
    """Base class of every cloud computing service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "CCS", "Attribute"
        self.pageUrl = None
        self.price = Price()
class IaaS(CCS):
    """Infrastructure-as-a-Service; adds a deployment region."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "IaaS", "CCS"
        self.region = Region()
class StorageAsAService(IaaS):
    """IaaS offering storage capacity plus read/write speeds."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "StorageAsAService", "IaaS"
        self.storage = Storage()
        self.storageWriteSpeed = StorageWriteSpeed()
        self.storageReadSpeed = StorageReadSpeed()
class ServerAsAService(IaaS):
    """IaaS offering a server: OS, CPU, RAM, storage and network attributes."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "ServerAsAService", "IaaS"
        self.operatingSystem = OperatingSystem()
        # CPU
        self.cpuCores = CpuCores()
        self.cpuClockSpeed = CpuClockSpeed()
        # RAM
        self.ram = Ram()
        self.ramClockSpeed = RamClockSpeed()
        self.ramWriteSpeed = RamWriteSpeed()
        self.ramReadSpeed = RamReadSpeed()
        # storage
        self.storage = Storage()
        self.storageWriteSpeed = StorageWriteSpeed()
        self.storageReadSpeed = StorageReadSpeed()
        # network
        self.networkCapacity = NetworkCapacity()
        self.networkUploadSpeed = NetworkUploadSpeed()
        self.networkDownloadSpeed = NetworkDownloadSpeed()
class VMAsAService(ServerAsAService):
    """A virtual machine offered as a service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "VMAsAService", "ServerAsAService"
class SaaS(CCS):
    """Software-as-a-Service base class."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "SaaS", "CCS"
class DatabaseAsAService(SaaS):
    """A database offered as a service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "DatabaseAsAService", "SaaS"
class SQLDatabaseAsAService(DatabaseAsAService):
    """A relational (SQL) database offered as a service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "SQLDatabaseAsAService", "DatabaseAsAService"
class NoSQLDatabaseAsAService(DatabaseAsAService):
    """A NoSQL database offered as a service."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "NoSQLDatabaseAsAService", "DatabaseAsAService"
def extractAttributes(attribute):
    """ Recursively get all fields and subfields of an `Attribute` instance that are also of type `Attribute`. Also
    scans the options field of ChoiceAttributes
    Args:
        attribute (Attribute): The CCS of which all fields of type Attribute are to be extracted
    Returns:
        list(Attribute): A list of Attribute instances
    Note:
        CCS also inherits from Attribute
    """
    res = []
    fields = vars(attribute) # https://stackoverflow.com/a/55320647
    for key in fields:
        try:
            if isAncestorOf("Attribute", fields[key].id):
                res += [fields[key]] + extractAttributes(fields[key])
            if isAncestorOf("ChoiceAttribute", fields[key].id):
                # also extract all choices of the choiceAttribute and their Attribute type subfields
                # TODO mark extracted optionattribute subfield as such. only the selected optionattributes of choiceattributes must be considered in matchCCS, otherwise unselected optionattributes will spoof the price estimate
                res += [fields[key].options[choice] for choice in fields[key].options]
                for attrs in [extractAttributes(fields[key].options[choice]) for choice in fields[key].options]:
                    res += attrs
        except Exception as e:
            # Deliberate best-effort: fields without an .id (plain values such
            # as strings/bools) raise here and are skipped.
            pass
    return res
def extractPrices(attribute):
    """ Recursively get all fields and subfields of an `Attribute` instance that are of type `Price`. Also
    scans the options field of ChoiceAttributes
    Args:
        attribute (Attribute): The CCS of which all fields of type Attribute are to be extracted
    Returns:
        list(Attribute): A list of Attribute instances
    Note:
        CCS also inherits from Attribute
    """
    return [attr for attr in extractAttributes(attribute) if isAncestorOf("Price", attr.id)]
def matchAttribute(ccs, *attributeIds):
    """ Recursively get the first field of type `Attribute` of - either the given `CCS` or one if its subfields of type
    `Attribute` - that are related to the given `attributeIds` in the given order. Options of Attributes of type
    ChoiceAttribute will also be scanned for matches
    Args:
        ccs (CCS): The CCS whose fields will be searched
        attributeIds (tuple(str)): The id that a field of type Attribute should match with
    Returns:
        Attribute: The field that matched with attributeId
    Note:
        CCS also inherits from Attribute
    Example:
        >>> # returns the `storage` field of the VMAsAService instance
        >>> matchAttribute(VMAsAService, "Storage")
        >>> # returns the `storage` field of the StorageAsAService instance inside the VMAsAService instance
        >>> matchAttribute(VMAsAService(), "StorageAsAService", "Storage")
        >>> # returns the VMAsAService instance
        >>> matchAttribute(VMAsAService(), "VMAsAService")
    """
    def matchAttributeHelper(_ccs, *_attributeIds):
        if len(_attributeIds) < 1:
            # done
            return None
        # its the extendsId that has to match because requirements (of type CCS) never have ids that relate to anything
        if isAncestorOf(_ccs.extendsId, _attributeIds[0]):
            if len(_attributeIds) == 1:
                # done
                return _ccs
            # continue search for next attributeId
            return matchAttributeHelper(_ccs, *_attributeIds[1:])
        attributes = extractAttributes(_ccs)
        for attr in attributes:
            if isAncestorOf(attr.id, _attributeIds[0]):
                if len(_attributeIds) == 1:
                    # done
                    return attr
                # continue search for next attributeId
                # NOTE(review): greedy — recurses into the FIRST related
                # attribute only; if that subtree lacks the remaining ids,
                # later candidates are never tried. Confirm intended.
                return matchAttributeHelper(attr, *_attributeIds[1:])
        # no match found
        return None
    match = matchAttributeHelper(ccs, *attributeIds)
    if match is None:
        print("WARNING: your requirements did not match", attributeIds,
              "in that order. for better price estimation, you should set those attributes in that order in your "
              "requirements!")
    return match
def getExtendsId(attributeId):
    """ get the `extendsId` field of a class instance that matches with the given `attributeId`
    Args:
        attributeId (str): id of the Attribute whose extendsId field is sought
    Returns:
        str: extendsId field of the Attribute that matches with the given attributeId, or None if that id
        has not been imported
    Note:
        - This function can only be called on Attributes that have already been imported.
        - CCS also inherits from Attribute
    """
    entry = importedClasses.get(attributeId)
    return None if entry is None else entry["extendsId"]
def isAncestorOf(rid, cid):
    """ checks if an Attribute is related to another Attribute
    Args:
        rid (str): id of the first Attribute
        cid (str): id for the second Attribute
    Returns:
        bool: True if the Attributes are related, else False
    Note:
        - two Attributes are related if either their ids match or an extendsId of the second attribute in any depth matches with the first attribute
        - CCS also inherits from Attribute
    """
    if rid is None or cid is None:
        return False
    # Walk the extendsId chain starting at cid itself.
    current = cid
    while current is not None:
        if current == rid:
            return True
        current = getExtendsId(current)
    return False
def matchCCS(req, ccs):
    """ check if a requirement matches with a CCS and return a satisfying configuration.
    Args:
        req (CCS): The requirements
        ccs (CCS): The CCS to check for a match
    Returns:
        bool: True if they match, else False
        dict: configuration, None if unsatisfied
    Note:
        - A requirement matches with a CCS if and only if every single Attribute field of the requirement is satisfied through a related Attribute field in the CCS
        - Attribute fields must be unique. Each instance of CCS may not have multiple Attribute fields with the same id
        - If a requirement or a CCS has an Attribute field whose subfields have an Attribute with a duplicate id, then only the first matching Attribute with that id will be considered.
    """
    print("checking", ccs.name, "for potential match")
    # requirement is a custom attribute ... because "@" in req.id
    if "@" in req.id:
        # if the parent of req is not related to ccs, then it does not matter whether their attributes match or not
        if not isAncestorOf(req.extendsId, ccs.id):
            print("requirement is not in any way related to", ccs.id)
            return False, None
    else:
        # requirement is a framework attribute
        if not isAncestorOf(req.id, ccs.id):
            print("requirement is not in any way related to", ccs.id)
            return False, None
    reqAttributes = extractAttributes(req)
    ccsAttributes = extractAttributes(ccs)
    configuration = {}
    # pair-wise compare attributes and check if they match
    for ra in reqAttributes:
        configuration[ra.id] = ra.value
        for ca in ccsAttributes:
            if isAncestorOf(ra.id, ca.id):
                if isAncestorOf("NumericAttribute", ra.id):
                    if ra.value is not None:  # both requirement and CCS set this attribute
                        if not ca.mutable and ca.value is None:
                            print(ra.id, "is set as a requirement, but", ccs.id, "can not set it")
                            return False, None
                        if ca.moreIsBetter:
                            if not ca.mutable:
                                if ra.value > ca.value:  # value is too small and not mutable
                                    print(ra.id, "is too small and cannot be made large enough:", "got", ca.value, "wanted", ra.value)
                                    return False, None
                            if ca.maxVal is not None:
                                if ca.maxVal < ra.value:  # value cannot be made large enough
                                    print(ra.id, "is too small and cannot be made large enough:", "got", ca.maxVal, "wanted", ra.value)
                                    return False, None
                        else:
                            # NOTE(review): unlike the moreIsBetter branch, this
                            # comparison is not guarded by `if not ca.mutable:`
                            # even though the message says "not mutable" —
                            # confirm whether a mutable guard is missing here.
                            if ra.value < ca.value:  # value is too large and not mutable
                                print(ra.id, "is too large and cannot be made small enough:", "got", ca.value, "wanted", ra.value)
                                # fix: return the (matched, configuration) pair
                                # like every other early exit; the bare
                                # `return False` broke callers that unpack the
                                # documented 2-tuple result.
                                return False, None
                            if ca.minVal is not None:
                                if ca.minVal > ra.value:  # value cannot be made small enough
                                    print(ra.id, "is too large and cannot be made small enough:", "got", ca.minVal, "wanted", ra.value)
                                    return False, None
                        # get configuration
                        if not ca.mutable:
                            configuration[ra.id] = ca.value
                    # ra is satisfied by ca
                elif isAncestorOf("BoolAttribute", ra.id):
                    if not ca.mutable:
                        if ra.value != ca.value:  # value does not match and is not mutable
                            print(ra.id, "does not match and is not mutable:", "wanted", ra.value, "got", ca.value)
                            return False, None
                    # get configuration
                    if not ca.mutable:
                        configuration[ra.id] = ca.value
                    # ra is satisfied by ca
                elif isAncestorOf("ChoiceAttribute", ra.id):
                    if ra.value is not None:
                        if ca.mutable:
                            if not any([isAncestorOf(ra.options[ra.value].id, ca.options[choice].id) for choice in ca.options]):  # value mutable but not available
                                print(ra.id, "option not available:", ra.options[ra.value].id, "not related to any of", [ca.options[choice].id for choice in ca.options])
                                return False, None
                        else:
                            if not isAncestorOf(ra.options[ra.value].id, ca.options[ca.value].id):  # value does not match and is not mutable
                                print(ra.id, "does not match:", ra.options[ra.value].id, "not related to", ca.options[ca.value].id)
                                return False, None
                        # get configuration
                        if not ca.mutable:
                            configuration[ra.id] = ca.value
                    # ra is satisfied by ca
    # req is satisfied by ccs
    return True, configuration
def renderHierarchy():
    """ render the class hierarchy of all imported attributes

    Writes one SVG per imported attribute (its field graph) plus one overall
    hierarchy graph, all under docs/renders/.
    """
    def stripScheme(s):
        # Node names double as output file names, so drop the URL scheme.
        # (Replaces the repeated .replace("https://","").replace("http://","")
        # chains scattered through the original.)
        return s.replace("https://", "").replace("http://", "")
    dot = graphviz.Digraph(comment="Attribute hierarchy", format="svg")
    dot.graph_attr.update({
        "rankdir": "LR"
    })
    def renderFields(d2, attrId):
        glob = globals()
        for className in glob:
            # get globally imported classes
            if className == importedClasses[attrId]["className"]:
                vs = vars(glob[className]())
                # get fields of new class instance
                for field in vs:
                    # check if field is an Attribute
                    try:
                        if isAncestorOf("Attribute", vs[field].id):
                            # field edge
                            d2.edge(stripScheme(attrId), stripScheme(vs[field].id), color="red")
                            renderFields(d2, vs[field].id)
                    except Exception:
                        # best effort: fields without an .id are simply skipped
                        pass
    for attributeId in importedClasses:
        dot2 = graphviz.Digraph(comment=stripScheme(attributeId) + " fields", format="svg")
        dot2.graph_attr.update({
            "rankdir": "LR"
        })
        # recursively create field edges
        renderFields(dot2, attributeId)
        # render individual attributes/ccs and their class fields and their fields' fields
        dot2.render("docs/renders/" + stripScheme(attributeId), view=False)
        # hierarchy edge
        dot.edge(stripScheme(attributeId), stripScheme(importedClasses[attributeId]["extendsId"]), color="black")
    # render the entire attribute/ccs class hierarchy
    dot.render("docs/renders/hierarchy", view=False)
class Region(ChoiceAttribute):
    """Choice of the continent in which the CCS resides."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "Region", "ChoiceAttribute"
        self.description = "The continent in which the CCS resides"
        self.options = {
            "europe": Europe(),
            "northAmerica": NorthAmerica(),
            "southAmerica": SouthAmerica(),
            "eastAsia": EastAsia(),
            "antarctica": Antarctica(),
            "africa": Africa(),
            "australia": Australia(),
        }
        self.value = None
class Europe(OptionAttribute):
    """Region option: Europe."""

    def __init__(self):
        super().__init__()
        self.id, self.extendsId = "Europe", "OptionAttribute"
        self.name = "Europe"
class NorthAmerica(OptionAttribute):
    """Region option: North America."""
    def __init__(self):
        super().__init__()
        self.id = "NorthAmerica"
        self.extendsId = "OptionAttribute"
        self.name = "North America"
class SouthAmerica(OptionAttribute):
    """Region option: South America."""
    def __init__(self):
        super().__init__()
        self.id = "SouthAmerica"
        self.extendsId = "OptionAttribute"
        self.name = "South America"
class EastAsia(OptionAttribute):
    """Region option: East Asia."""
    def __init__(self):
        super().__init__()
        self.id = "EastAsia"
        self.extendsId = "OptionAttribute"
        self.name = "East Asia"
class Antarctica(OptionAttribute):
    """Region option: Antarctica."""
    def __init__(self):
        super().__init__()
        self.id = "Antarctica"
        self.extendsId = "OptionAttribute"
        self.name = "Antarctica"
class Africa(OptionAttribute):
    """Region option: Africa."""
    def __init__(self):
        super().__init__()
        self.id = "Africa"
        self.extendsId = "OptionAttribute"
        self.name = "Africa"
class Australia(OptionAttribute):
    """Region option: Australia."""
    def __init__(self):
        super().__init__()
        self.id = "Australia"
        self.extendsId = "OptionAttribute"
        self.name = "Australia"
class Storage(NumericAttribute):
    """Numeric attribute: storage amount in GB (0..inf, integer, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "Storage"
        self.extendsId = "NumericAttribute"
        self.description = "Storage amount in GB"
        self.value = None  # unset until configured
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class StorageWriteSpeed(NumericAttribute):
    """Numeric attribute: storage write speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "StorageWriteSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "Storage write speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class StorageReadSpeed(NumericAttribute):
    """Numeric attribute: storage read speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "StorageReadSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "Storage read speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class OperatingSystem(ChoiceAttribute):
    """Choice attribute: the operating system a CCS runs on."""
    def __init__(self):
        super().__init__()
        self.id = "OperatingSystem"
        self.extendsId = "ChoiceAttribute"
        self.description = "The operating system a CCS runs on"
        # Selectable OS options.
        self.options = {
            "linux": Linux(),
            "windows": Windows(),
            "mac": Mac()
        }
        self.value = None  # no option chosen yet
class Linux(OptionAttribute):
    """OS option: Linux.

    NOTE(review): the OS options set self.value while the Region options set
    self.name — confirm which attribute consumers actually read.
    """
    def __init__(self):
        super().__init__()
        self.id = "Linux"
        self.extendsId = "OptionAttribute"
        self.value = "Linux (Unix)"
class Windows(OptionAttribute):
    """OS option: Windows."""
    def __init__(self):
        super().__init__()
        self.id = "Windows"
        self.extendsId = "OptionAttribute"
        self.value = "Windows"
class Mac(OptionAttribute):
    """OS option: Mac."""
    def __init__(self):
        super().__init__()
        self.id = "Mac"
        self.extendsId = "OptionAttribute"
        self.value = "Mac (Unix)"
class CpuCores(NumericAttribute):
    """Numeric attribute: number of CPU cores (0..inf, integer, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "CpuCores"
        self.extendsId = "NumericAttribute"
        self.description = "The amount of CPU cores"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class CpuClockSpeed(NumericAttribute):
    """Numeric attribute: CPU clock speed in GHz (0..inf, more is better).

    NOTE(review): makeInt=True on a GHz value would drop fractional clock
    speeds (e.g. 2.4 GHz) — confirm this is intended.
    """
    def __init__(self):
        super().__init__()
        self.id = "CpuClockSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "CPU clock speed in GHz"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class Ram(NumericAttribute):
    """Numeric attribute: amount of RAM in GB (0..inf, integer, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "Ram"
        self.extendsId = "NumericAttribute"
        self.description = "The amount of Ram in GB"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class RamClockSpeed(NumericAttribute):
    """Numeric attribute: RAM clock speed in GHz (0..inf, more is better).

    NOTE(review): makeInt=True would drop fractional GHz values — confirm.
    """
    def __init__(self):
        super().__init__()
        self.id = "RamClockSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "RAM clock speed in GHz"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class RamWriteSpeed(NumericAttribute):
    """Numeric attribute: RAM write speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "RamWriteSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "RAM write speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class RamReadSpeed(NumericAttribute):
    """Numeric attribute: RAM read speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "RamReadSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "RAM read speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class NetworkCapacity(NumericAttribute):
    """Numeric attribute: network capacity in GB (0..inf, integer, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "NetworkCapacity"
        self.extendsId = "NumericAttribute"
        self.description = "Network capacity in GB"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class NetworkUploadSpeed(NumericAttribute):
    """Numeric attribute: network upload speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "NetworkUploadSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "Network upload speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
class NetworkDownloadSpeed(NumericAttribute):
    """Numeric attribute: network download speed in GB/s (0..inf, more is better)."""
    def __init__(self):
        super().__init__()
        self.id = "NetworkDownloadSpeed"
        self.extendsId = "NumericAttribute"
        self.description = "Network download speed in GB/s"
        self.value = None
        self.makeInt = True
        self.minVal = 0
        self.maxVal = inf
        self.moreIsBetter = True
|
import os
from Bio import SeqIO
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
## script to find mean charge of protein region by sliding window
# For every sequence in the FASTA file: compute the mean residue charge over a
# sliding k-mer window, then save a two-panel figure (residue grid + charge
# profile) next to the input file.
fasta_file_path = "/Users/jon/Google Drive (WIBR)/projects/active_repressive/181217_charge_analysis_of_ChIP_factors/HDGF_enriched.fasta"
kmer_len = 5  # sliding-window width in residues
## read in fasta file
identifier = []
seq = []
for seq_record in SeqIO.parse(fasta_file_path, "fasta"):
    identifier.append(seq_record.id)
    seq.append(str(seq_record.seq))
## generate amino acid interaction matrix
# Per-residue charge weights: K/R positive, D/E negative, H half-positive.
pos_charge_weight = 1.0
neg_charge_weight = -1.0
histidine_charge_weight = 0.5
# amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
amino_acids = ['X','G', 'A', 'S', 'T', 'C', 'V', 'L', 'I', 'M', 'P', 'F', 'Y', 'W', 'D', 'E', 'N', 'Q', 'H', 'K', 'R']
# define regular expressions
charge_pos_regex = re.compile('[RK]')
charge_neg_regex = re.compile('[DE]')
histidine_regex = re.compile('[H]')
## retrieve individual IDR sequences
for idx, s in enumerate(seq):
    seq_name = identifier[idx]
    seq_length = len(s)
    print("Protein: " + seq_name)
    print("Length: ", seq_length)
    ## sliding window of k-mers and get score of interaction
    num_of_windows = seq_length - kmer_len + 1
    score = np.zeros(shape=(num_of_windows, 1))
    for i in range(num_of_windows):
        kmer = s[i:(i + kmer_len)]
        kmer_window_score = np.zeros(shape=(kmer_len,1))
        for count, k in enumerate(kmer):
            if re.match(charge_pos_regex, k):
                kmer_window_score[count] = kmer_window_score[count] + pos_charge_weight
            elif re.match(charge_neg_regex, k):
                kmer_window_score[count] = kmer_window_score[count] + neg_charge_weight
            elif re.match(histidine_regex, k):
                kmer_window_score[count] = kmer_window_score[count] + histidine_charge_weight
        score[i] = score[i] + np.mean(kmer_window_score) # calculate mean charge of window
    # make axes
    fig, (ax1, ax2) = plt.subplots(2, 1)
    # plot top sequence grid: one row per amino acid, one column per position
    grid_length = seq_length
    sequence = s
    amino_acid_grid = np.zeros(shape=(len(amino_acids), grid_length))
    count = 0
    for a in sequence:
        aa_index = amino_acids.index(a)
        amino_acid_grid[aa_index, count] = 1
        count = count + 1
    im = ax1.imshow(amino_acid_grid, cmap='binary', vmin=0, vmax=1)
    ax1.set_xlim(left=-5, right=grid_length + 10)
    ax1.set_yticks(range(len(amino_acids)))
    ax1.set_yticklabels(amino_acids, fontsize=6)
    ax1.set_aspect('auto')
    # plot IDR structure lines: highlight strongly positive/negative stretches
    upper_charge = 0.25
    lower_charge = -0.25
    mask_upper = np.ma.masked_where(score < upper_charge, score)
    mask_lower = np.ma.masked_where(score > lower_charge, score)
    # mask_middle = np.ma.masked_where(np.logical_or(score < -0.15, score > 0.15), score)
    x = range(seq_length - kmer_len + 1)
    ax2.plot(x, score, '-', color='0.8')
    ax2.plot(x, mask_upper, '-b')
    ax2.plot(x, mask_lower, '-r')
    ax2.lines[0].set_linewidth(0.5)
    ax2.lines[1].set_linewidth(1.0)
    ax2.lines[2].set_linewidth(1.0)
    ax2.lines[1].set_alpha(0.3)
    ax2.lines[2].set_alpha(0.3)
    ax2.set_ylim(bottom=-1, top=1)
    ax2.set_xlim(left=-5, right=grid_length + 10)
    ax2.set_aspect('auto')
    fig.suptitle(seq_name)
    # save both .eps and .png alongside the input FASTA
    output_dir = os.path.dirname(fasta_file_path)
    plt.savefig(os.path.join(output_dir, seq_name + "_" + str(kmer_len) + "mer" + ".eps"))
    plt.savefig(os.path.join(output_dir, seq_name + "_" + str(kmer_len) + "mer" + ".png"), dpi=300)
    plt.close()
|
# https://blog.csdn.net/caimouse/article/details/79692118
'''
从CAN的通道0向通道1来发送一帧CAN数据
'''
#python3.6 32位
#https://blog.csdn.net/caimouse/article/details/51749579
#开发人员:蔡军生(QQ:9073204) 深圳 2018-3-25
#
import os
from ctypes import *
VCI_USBCAN2A = 4  # device-type code passed as the first argument to every VCI_* call
STATUS_OK = 1     # success return value of the VCI_* API
class VCI_INIT_CONFIG(Structure):
    """ctypes mirror of the ControlCAN VCI_INIT_CONFIG struct
    (acceptance code/mask, filter mode and the two timing registers)."""
    _fields_ = [("AccCode", c_ulong),
                ("AccMask", c_ulong),
                ("Reserved", c_ulong),
                ("Filter", c_ubyte),
                ("Timing0", c_ubyte),
                ("Timing1", c_ubyte),
                ("Mode", c_ubyte)
                ]
class VCI_CAN_OBJ(Structure):
    """ctypes mirror of the ControlCAN VCI_CAN_OBJ struct: one CAN frame
    (ID, timestamp, flags, length and up to 8 data bytes)."""
    _fields_ = [("ID", c_uint),
                ("TimeStamp", c_uint),
                ("TimeFlag", c_ubyte),
                ("SendType", c_ubyte),
                ("RemoteFlag", c_ubyte),
                ("ExternFlag", c_ubyte),
                ("DataLen", c_ubyte),
                ("Data", c_ubyte*8),
                ("Reserved", c_ubyte*3)
                ]
# Demo: send one CAN frame from channel 0 and receive it on channel 1.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CanDLLName = 'ControlCAN.dll'  # the DLL is 32-bit, so a 32-bit Python must be used
canDLL = windll.LoadLibrary(os.path.join(BASE_DIR,CanDLLName))
print(CanDLLName)
# Open the device
ret = canDLL.VCI_OpenDevice(VCI_USBCAN2A, 0, 0)
print(ret)
if ret != STATUS_OK:
    print('调用 VCI_OpenDevice出错\r\n')
# Initialize channel 0
vci_initconfig = VCI_INIT_CONFIG(0x80000008, 0xFFFFFFFF, 0,
                                 2, 0x00, 0x1C, 0)
ret = canDLL.VCI_InitCAN(VCI_USBCAN2A, 0, 0, byref(vci_initconfig))
if ret != STATUS_OK:
    print('调用 VCI_InitCAN出错\r\n')
ret = canDLL.VCI_StartCAN(VCI_USBCAN2A, 0, 0)
if ret != STATUS_OK:
    print('调用 VCI_StartCAN出错\r\n')
# Initialize channel 1 with the same configuration
ret = canDLL.VCI_InitCAN(VCI_USBCAN2A, 0, 1, byref(vci_initconfig))
if ret != STATUS_OK:
    print('调用 VCI_InitCAN 1 出错\r\n')
ret = canDLL.VCI_StartCAN(VCI_USBCAN2A, 0, 1)
if ret != STATUS_OK:
    print('调用 VCI_StartCAN 1 出错\r\n')
# Transmit one 8-byte frame on channel 0
ubyte_array = c_ubyte*8
a = ubyte_array(1,2,3,4, 5, 6, 7, 64)
ubyte_3array = c_ubyte*3
b = ubyte_3array(0, 0 , 0)
vci_can_obj = VCI_CAN_OBJ(0x0, 0, 0, 1, 0, 0, 8, a, b)
ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj), 1)
if ret != STATUS_OK:
    print('调用 VCI_Transmit 出错\r\n')
# Receive on channel 1, retrying while nothing was received yet
a = ubyte_array(0, 0, 0, 0, 0, 0, 0, 0)
vci_can_obj = VCI_CAN_OBJ(0x0, 0, 0, 1, 0, 0, 8, a, b)
ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 1, byref(vci_can_obj), 1, 0)
print(ret)
while ret <= 0:
    print('调用 VCI_Receive 出错\r\n')
    ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 1, byref(vci_can_obj), 1, 0)
if ret > 0:
    print(vci_can_obj.DataLen)
    print(list(vci_can_obj.Data))
# Close the device
canDLL.VCI_CloseDevice(VCI_USBCAN2A, 0)
|
# Generated by Django 2.2 on 2019-04-25 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make userinfo.image optional with 'unknown.jpg' as the default placeholder."""
    dependencies = [
        ('input', '0014_auto_20190425_1530'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userinfo',
            name='image',
            field=models.ImageField(blank=True, default='unknown.jpg', upload_to='profile_image'),
        ),
    ]
|
from django.conf.urls import url
from django.urls import include, path, re_path
from rest_framework.routers import DefaultRouter

from . import views
from .views import *
# URL routes for the movies API.
# django.conf.urls.url() is deprecated and was removed in Django 4.0;
# re_path() is its direct replacement with identical regex semantics.
urlpatterns = [
    re_path(r'^movies$', views.detail_movie_view),
    re_path(r'^movies/one/(?P<pk>[0-9]+)$', views.detail_onemovie_view),
    path('movies/every/<int:pk>/<str:titulo>/<int:calificacion>/<str:pais>/', views.detail_everymovie_view),
    path('movies/ver', views.get_movie_queryset),
    path('movies/summary/<str:pais>/', views.detail_summarymovie_view),
    path('movies/top/', views.detail_topymovie_view),
    re_path(r'^movies/delete/(?P<pk>[0-9]+)$', views.delete_movie_view),
    re_path(r'^movies/edit/(?P<pk>[0-9]+)$', views.edit_movie_view),
    re_path(r'^movies/add$', views.create_movie_view),
]
|
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import csv
import numpy as np
import pandas as pd
import random
from matplotlib import pyplot as plt
from scipy import stats
from bhtsne import tsne
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import hypertools as hyp
from sklearn import decomposition
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import GradientBoostingClassifier #GBM algorithm
from sklearn import tree
import sys
# Redirect stdout to a log file so every print below is captured.
f5 = open("output_leaf-depth.txt", 'w')
sys.stdout = f5
def plot_grid_search(cv_results, grid_param_1, grid_param_2, name_param_1, name_param_2):
    """Plot mean CV score vs. grid_param_1, one curve per grid_param_2 value.

    Saves the figure as '<name_param_1>_<name_param_2>.png'.
    NOTE(review): the reshape assumes cv_results orders its candidates with
    grid_param_1 varying fastest — verify against GridSearchCV's actual
    (alphabetical param name) ordering before trusting the curves.
    """
    # Get Test Scores Mean and std for each grid search
    scores_mean = cv_results['mean_test_score']
    scores_mean = np.array(scores_mean).reshape(len(grid_param_2),len(grid_param_1))
    # Plot Grid search scores
    _, ax = plt.subplots(1,1)
    # Param1 is the X-axis, Param 2 is represented as a different curve (color line)
    for idx, val in enumerate(grid_param_2):
        ax.plot(grid_param_1, scores_mean[idx,:], '-o', label= name_param_2 + ': ' + str(val))
    ax.set_title("Grid Search Scores", fontsize=20, fontweight='bold')
    ax.set_xlabel(name_param_1, fontsize=16)
    ax.set_ylabel('CV Average Score', fontsize=16)
    ax.legend(loc="best", fontsize=15)
    ax.grid('on')
    plt.savefig(name_param_1+'_'+name_param_2+'.png')
# Grid-search max_depth/min_samples_leaf for a GradientBoostingClassifier.
# NOTE: this file is Python 2 (print statements, model.grid_scores_).
train = pd.read_csv('./../../trainDatafinalData.csv')
x = train.drop(['target'], axis=1)
y = np.array(train['target'])
test = pd.read_csv('./../../testDatafinalData.csv')
x_test=test.drop(['id'], axis=1)
# Parameter grid under search.
max_depth=[8, 13, 16]
min_samples_leaf= [20,100,150]
print "max_depth",max_depth
print "min_samples_leaf",min_samples_leaf
parameters={'min_samples_leaf' : min_samples_leaf,'max_depth':max_depth }
model_gb = GradientBoostingClassifier(learning_rate=0.1,subsample=0.8,random_state=10,n_estimators=300,max_features='auto')
# Optimize for ROC AUC with the default CV splitting.
model=GridSearchCV(model_gb,parameters,scoring='roc_auc',)
model.fit(x, y)
print model.best_params_
print model.grid_scores_
print model.cv_results_
plot_grid_search(model.cv_results_,max_depth,min_samples_leaf,'max_depth','min_samples_leaf')
'''
score_sqrt=[]
score_auto=[]
score_log2=[]
for a in model.grid_scores_:
if(a[0]['max_features']=='log2'):
score_log2.append(a[1])
for a in model.grid_scores_:
if(a[0]['max_features']=='auto'):
score_auto.append(a[1])
for a in model.grid_scores_:
if(a[0]['max_features']=='sqrt'):
score_sqrt.append(a[1])
score_sqrt=np.array(score_sqrt).reshape(len(min_samples_leaf),len(max_depth))
score_auto=np.array(score_auto).reshape(len(min_samples_leaf),len(max_depth))
score_log2=np.array(score_log2).reshape(len(min_samples_leaf),len(max_depth))
print "shape",np.shape(x)
print "shape",np.shape(y)
print "leaf shape:",np.shape(min_samples_leaf)
_,ax=plt.subplots(1,1)
for id,val in enumerate(max_depth):
print "loop:",score_sqrt[id,:],"--",str(val)
ax.plot(min_samples_leaf,score_sqrt[id,:],'-o',label='max_depth :'+str(val))
ax.set_title("GS_score(max_features=sqrt)", fontsize=20, fontweight='bold')
ax.set_xlabel('min_sample_split', fontsize=12)
ax.set_ylabel('ROC value', fontsize=12)
ax.legend(loc="best", fontsize=10)
ax.grid('on')
plt.savefig('leaf_vs_depth_vs_sqrt.png')
_,ax=plt.subplots(1,1)
for id,val in enumerate(max_depth):
ax.plot(min_samples_leaf,score_auto[id,:],'-o',label='max_depth :'+str(val))
ax.set_title("GS_score(max_features=auto)", fontsize=20, fontweight='bold')
ax.set_xlabel('min_sample_split', fontsize=12)
ax.set_ylabel('ROC value', fontsize=12)
ax.legend(loc="best", fontsize=10)
ax.grid('on')
plt.savefig('leaf_vs_depth_vs_auto.png')
_,ax=plt.subplots(1,1)
for id,val in enumerate(max_depth):
ax.plot(min_samples_leaf,score_log2[id,:],'-o',label='max_depth :'+str(val))
ax.set_title("GS_score(max_features=log2)", fontsize=20, fontweight='bold')
ax.set_xlabel('min_sample_split', fontsize=12)
ax.set_ylabel('ROC value', fontsize=12)
ax.legend(loc="best", fontsize=10)
ax.grid('on')
plt.savefig('leaf_vs_depth_vs_log2.png')
y_pred = model.predict(x_test)
print "creating output for DTC"
c=0
with open('leaf_vs_depth_output.csv','w') as f:
f.write('id,target\n')
for i in y_pred:
f.write(str(c))
f.write(",")
f.write(str(i))
f.write("\n")
c=c+1
'''
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 18:21:15 2019
@author: exame
"""
def dvu(u, v):
    """ODE right-hand side: dv/du = u*(u/2 + 1)*v^3 + (u + 5/2)*v^2."""
    cubic_term = u * (u / 2 + 1) * v ** 3
    quadratic_term = (u + 5 / 2) * v ** 2
    return cubic_term + quadratic_term
def rk4(u, uf, v, h):
    """Advance v from u to uf with the classic 4th-order Runge-Kutta method.

    :param u: initial value of the independent variable
    :param uf: integration stops once u reaches this value
    :param v: initial value of the dependent variable
    :param h: step size
    :return: approximation of v at the final u
    """
    while u < uf:
        k1 = h * dvu(u, v)
        k2 = h * dvu(u + (h / 2), v + (k1 / 2))
        k3 = h * dvu(u + (h / 2), v + (k2 / 2))
        k4 = h * dvu(u + h, v + k3)
        # Weighted average of the four slope estimates.
        v += (k1 / 6) + (k2 / 3) + (k3 / 3) + (k4 / 6)
        u += h
    return v
# Solve for v(1.8) three times with halved step sizes, then estimate the
# convergence ratio (QC) and the Richardson error estimate (E).
#h=0.08
S=rk4(1,1.8,0.15,0.08)
#h'=0.08/2=0.04
S1=rk4(1,1.8,0.15,0.04)
#h''=0.04/2=0.02
S2=rk4(1,1.8,0.15,0.02)
# QC should approach 2^4 = 16 for a 4th-order method.
QC=(S1-S)/(S2-S1)
# Error estimate: |S2 - S1| / (2^4 - 1).
E=abs((S2-S1)/15)
print("Usando h, v(1,8)= ",S)
print("Usando h', v(1,8)= ",S1)
print("Usando h'', v(1,8)= ",S2)
print("QC= ",QC)
print("Erro= ",E)
|
# -*- coding: utf-8 -*-
"""Top-level package for Bulker.gr Python API Client."""
# Package metadata.
__author__ = """Yiannis Inglessis """
__email__ = 'negtheone@gmail.com'
__version__ = '0.1.4'
from .bulkergr_api import Bulkergr
from .exceptions import AuthKeyMissingError
|
# coding=utf8
class Record:
    """One deal record with every field normalized to a canonical type.

    String-like fields (stkcd, inscode, biddercode) are coerced with str(),
    counters (dealseq, policy_flag) with int(), and monetary/quantity fields
    (price_normal, shares) with float().
    """

    def __init__(self, stkcd, dealseq, inscode, biddercode, price_normal, shares, policy_flag):
        # Identifier fields kept as text.
        self.stkcd = str(stkcd)
        self.inscode = str(inscode)
        self.biddercode = str(biddercode)
        # Integer-valued fields.
        self.dealseq = int(dealseq)
        self.policy_flag = int(policy_flag)
        # Numeric fields.
        self.price_normal = float(price_normal)
        self.shares = float(shares)
|
from selenium.webdriver.common.keys import Keys
from seleniumbase.fixtures.base_case import BaseCase
from common.login_page import MyTest
from common.Element_API import WeiGeLi, ComoonPagee, WangZhanHoumenpage, SuoYouZhuJi, RuoMimaPage
from selenium.common.exceptions import NoSuchElementException
import time
import re
import datetime
class WangZhanHouMen(BaseCase):
    """UI flows for the website-backdoor (webshell) scan policy pages."""
    # Terminal groups selectable on the terminal tab.
    list_fenzu = [ComoonPagee.xuanzeweifenzu_zhongduan, ComoonPagee.xuanzewindowfenzu,
                  ComoonPagee.xuanzewindowzifenzu, ComoonPagee.xuanzelinuxfenzu,
                  ComoonPagee.xuanzelinuxzifenzu]
    # Timestamp of when the policy was issued.
    xiafacelv_time = None

    # Create a scan policy.
    def chuangjian(self, dr, mingcheng, fenzu, path):
        #MyTest().admin_loggin(dr)
        dr.click(ComoonPagee.fengxiangguanl_tab)  # open the risk-management tab
        dr.click(ComoonPagee.wangzhanhoumen_jixian_tab)  # open the website backdoor tab
        dr.click(WangZhanHoumenpage.XinZen_bnt)  # click the "add" button
        dr.send_keys(WangZhanHoumenpage.Jihua_mingcheng_input, mingcheng)  # plan name
        if 'linux' in mingcheng:
            dr.click(WangZhanHoumenpage.caozuoxitong)  # OS selector; defaults to Windows today, should later auto-detect Windows vs Linux
        dr.click(WangZhanHoumenpage.guizeku_select)  # open the rule-library selector
        dr.click(WangZhanHoumenpage.webshell_select)  # choose the webshell rule library
        nowtime = datetime.datetime.now() + datetime.timedelta(hours=1,
                                                               minutes=3)  # now + 1h03m; the dropdown starts at 00, hence the extra hour
        times = nowtime.strftime("%H:%M")  # keep only hours:minutes
        dr.click(WangZhanHoumenpage.zhixingshijian_input)  # focus the execution-time widget
        dr.find_element(RuoMimaPage.zhixingshijian_input).send_keys(Keys.CONTROL, "a")  # select the default time
        dr.find_element(RuoMimaPage.zhixingshijian_input).send_keys(Keys.DELETE)  # clear the default time
        dr.click(WangZhanHoumenpage.zhixingshijian_input)  # re-focus the widget
        # dr.send_keys(RuoMimaPage.zhixingshijian_input,times)  typing is not possible here, only the dropdowns work
        dr.click(
            "//div[contains(text(),'网站后门策略')]/../..//div[2]/div[5]/div[11]/div[2]/div/div[2]/div/div/div/div/div/div[1]")  # open the hour dropdown
        dr.scroll_click(
            "//div[contains(text(),'网站后门策略')]/../..//div[2]/div[5]/div[11]/div[2]/div/div[2]/div/div/div/div/div/div[1]/ul/li[{}]".format(
                times[0:2]))  # pick the hour
        dr.click(
            "//div[contains(text(),'网站后门策略')]/../..//div[2]/div[5]/div[11]/div[2]/div/div[2]/div/div/div/div/div/div[2]")  # open the minute dropdown
        dr.scroll_click(
            "//div[contains(text(),'网站后门策略')]/../..//div[2]/div[5]/div[11]/div[2]/div/div[2]/div/div/div/div/div/div[2]/ul/li[{}]".format(
                times[3:5]))  # pick the minute
        dr.send_keys(WangZhanHoumenpage.wangzhanlujing_input, path)  # enter the website path
        dr.double_click(WangZhanHoumenpage.tianjlujing_bnt, timeout=10)  # click the add-path button
        dr.click(WangZhanHoumenpage.tianjiazhixingmubiao_bnt)  # add execution targets
        dr.click(fenzu)  # pick the terminal group
        dr.click(ComoonPagee.quedingxuanze_bnt)  # confirm the targets
        dr.slow_click(WangZhanHoumenpage.quedingtianj_wangzhanhoumen)  # confirm the backdoor settings
        dr.click(WangZhanHoumenpage.qued_tianjiacelv_bant)  # confirm creating the policy
        loca_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))  # record when the policy was issued
        time.sleep(2)
        dr.xiafacelv_time = datetime.datetime.strptime(loca_time, '%Y-%m-%d %H:%M:%S')  # parse back to datetime

    # Poll the hosts page until the rule versions converge (or one hour passes).
    def check(self, dr):
        # MyTest().admin_loggin(dr)
        dr.click(ComoonPagee.zichanguanl_tab)  # open the asset-management tab
        dr.click(ComoonPagee.suoyouzhuji_tab)  # open "all hosts"
        dr.refresh()
        dr.click(SuoYouZhuJi.xianshilie)  # open the column chooser
        dr.double_click(SuoYouZhuJi.qingchusuoyou)  # double-click to clear all columns
        dr.click(SuoYouZhuJi.wangzhanhouomencelv_tab)  # show the website-backdoor policy column
        dr.click(SuoYouZhuJi.queding_xuanxiang, timeout=10)  # confirm
        loca_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        time.sleep(2)
        dr.xiafacelv_time = datetime.datetime.strptime(loca_time, '%Y-%m-%d %H:%M:%S')
        # Start polling the version information.
        while True:
            check_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))  # current time
            check_timestr = datetime.datetime.strptime(check_time, '%Y-%m-%d %H:%M:%S')  # parse it
            resutlt = (check_timestr - dr.xiafacelv_time).seconds  # elapsed seconds since the policy was issued
            dr.refresh()  # reload the page
            time.sleep(10)  # polling interval
            print(1111)
            if resutlt <= 3600:  # still within the one-hour window
                version_num = dr.find_elements(SuoYouZhuJi.ceilv_zhuangtai_text)  # list of policy-status cells
                for i in version_num:  # read each cell's title attribute
                    version = (i.get_attribute('title'))
                    res = re.findall(r'\d+', version)  # extract current and latest version numbers
                    print(res[0], res[1])
                    if res[0] != res[1]:  # versions differ: stop and poll again
                        # print(res[0] == res[1])
                        break  # leave the for loop, back to the while loop
                    if res[0] == res[1]:
                        if i == version_num[-1]:  # reached the last element => every version matched
                            print("比较完毕,结果一样")
                            return dr.assert_equal(res[0], res[1], msg="比较完毕")
            if resutlt > 3600:
                return dr.assert_equal(1, 2, msg="超时了,比较失败")  # timed out: report failure

    # Collect the text of the backdoor data table cells.
    def data_check(self, dr):
        #MyTest().admin_loggin(dr)  # login
        str=[]
        # dr.click(ComoonPagee.fengxiangguanl_tab)  # open the risk-management tab
        dr.click(ComoonPagee.wangzhanhoumen_jixian_tab)  # open the website backdoor tab
        dr.click(WangZhanHoumenpage.web_data_tab)
        a = dr.find_elements("//td/div")
        for i in a:
            result = i.text
            str.append(result)
            # if u'\u4e00' <= result <= u'\u9fff':  # keep only entries containing Chinese characters
            #     str.append(result)
        print(str)
        return str
|
#HAND GESTURE RECOGNIZATION USING OPENCV
import numpy as np
import cv2
import math
# Capture from the default webcam and show a skin-color threshold of a ROI.
capture=cv2.VideoCapture(0)
while capture.isOpened():
    check,frame=capture.read()
    # ROI marker drawn on the preview.
    # NOTE(review): the rectangle spans (100,100)-(400,400) but the crop below
    # takes [100:350, 100:350] — confirm the mismatch is intentional.
    cv2.rectangle(frame,(100,100),(400,400), (0,255,0),0)
    crop_image=frame[100:350,100:350]
    blur=cv2.GaussianBlur(crop_image,(3,3),0)
    hsv=cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)
    #create a binary image where white will be skin colors and rest is black
    mask2=cv2.inRange(hsv,np.array([2,0,0]),np.array([20,255,255]))
    #kernal for morphological transformation
    kernel=np.ones((5,5), np.uint8)
    #apply morphological transformarions to filter out the background noise
    # NOTE(review): iterations=0 makes dilate/erode no-ops — likely meant >= 1.
    dilated=cv2.dilate(mask2,kernel,iterations=0)
    erosion=cv2.erode(dilated,kernel,iterations=0)
    filtered=cv2.GaussianBlur(erosion,(3,3),0)
    # NOTE(review): maxval 225 looks like a typo for 255 — confirm.
    check,thres=cv2.threshold(filtered, 200, 225, cv2.THRESH_BINARY)
    cv2.imshow("Threshold",thres)
    cv2.imshow("Gesture",frame)
    #close the camera if 'q' is pressed
    if cv2.waitKey(1)==ord('q'):
        break
capture.release()
cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from tina.base.db.models.fields import JSONField
class Webhook(models.Model):
    """A per-project webhook endpoint with its secret key."""
    project = models.ForeignKey("projects.Project", null=False, blank=False,
                                related_name="webhooks")
    name = models.CharField(max_length=250, null=False, blank=False,
                            verbose_name=_("name"))
    url = models.URLField(null=False, blank=False, verbose_name=_("URL"))
    # NOTE(review): presumably used to sign outgoing payloads — confirm
    # against the delivery code.
    key = models.TextField(null=False, blank=False, verbose_name=_("secret key"))

    class Meta:
        ordering = ['name', '-id']
class WebhookLog(models.Model):
    """Audit entry for a single webhook delivery attempt."""
    webhook = models.ForeignKey(Webhook, null=False, blank=False,
                                related_name="logs")
    url = models.URLField(null=False, blank=False, verbose_name=_("URL"))
    status = models.IntegerField(null=False, blank=False, verbose_name=_("status code"))
    request_data = JSONField(null=False, blank=False, verbose_name=_("request data"))
    # default=dict (a callable) instead of a literal {}: Django requires a
    # callable here because a shared mutable default dict would be reused
    # across model instances (fields.E010).
    request_headers = JSONField(null=False, blank=False, verbose_name=_("request headers"), default=dict)
    response_data = models.TextField(null=False, blank=False, verbose_name=_("response data"))
    response_headers = JSONField(null=False, blank=False, verbose_name=_("response headers"), default=dict)
    duration = models.FloatField(null=False, blank=False, verbose_name=_("duration"), default=0)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Newest log entries first.
        ordering = ['-created', '-id']
|
##import itertools
##T = input()
##for _ in range(int(T)):
## entries = list(map(int,input().split()))
## n1,n2,n = entries[0],entries[1],entries[2]
## arr = []
##
## for i in range(1,n+1):
## num,x=[],0
## if i == 1:
## arr.append(i)
## continue
## l = [j for j in range(1,i) if j<i]
##
## j_k_list = list(itertools.product(l,repeat = 2))
## for j_k in j_k_list:
## j = j_k[0]
## k = j_k[1]
## num.append((n1 * arr[k-1]) - (n2 * arr[j-1]))
## x = arr[i-2]+1
## while(x in num):
## x = x + 1
## arr.append(x)
##
## print(" ".join(list(map(str,arr))))
##http://practice.geeksforgeeks.org/problems/arithmetic-progression/0
##Construct the sequence arr[1], arr[2], ... by the following rules. For i=1 we put arr[1]=1. Now let i >= 2. Then arr[i] is the least positive integer such that the following two conditions hold
##(i) arr[i] > arr[i - 1];
##(ii) for all k, j < i we have arr[i] is not equal to n1 * arr[k] - n2 * arr[j].
##Find the first n terms of this sequence.
def addrestricted(n, out, n1, n2, newadded):
    """Record values ruled out by the newly appended term.

    For every existing term x in `out`, both n1*newadded - n2*x and
    n1*x - n2*newadded become forbidden future terms (only values larger
    than the newest term matter, since the sequence is increasing).
    """
    global restrict
    for existing in out:
        banned = n1 * newadded - n2 * existing
        if banned > newadded:
            restrict[banned] = True
        banned = n1 * existing - n2 * newadded
        if banned > newadded:
            restrict[banned] = True
# Driver (Python 2): for each test case read n1, n2, n and print the first n
# terms of the sequence, always picking the least value not yet restricted.
t = int(raw_input().strip())
while(t>0):
    t = t-1
    [n1,n2,n] = [int(x) for x in (raw_input().strip()).split(' ')]
    out = [1]  # arr[1] = 1 by definition
    restrict = {}  # forbidden values, maintained by addrestricted()
    restrict[n1-n2] = True
    for i in range(0, n-1):
        # Smallest candidate is one past the current last term.
        newpossible = out[len(out)-1] + 1
        while(True):
            if(newpossible in restrict):
                newpossible = newpossible + 1
            else:
                out = out + [newpossible]
                break
        newadded = newpossible
        addrestricted(n,out,n1,n2,newadded)
    for j in out:
        print j,
    print ""
|
import boto3
import StringIO
import zipfile
import mimetypes
def lambda_handler(event, context):
    """Deploy a built React site: download the build zip from the build
    bucket, unzip it into the website bucket with guessed MIME types, make
    each object public, and publish the outcome to an SNS topic.

    Always returns 'Done from Lambda!'; failures are reported via SNS only.
    """
    sns = boto3.resource('sns')
    topic = sns.Topic('arn:aws:sns:us-east-1:418230580652:SLReactS3Website')
    try:
        s3 = boto3.resource('s3')
        website_build_bucket = s3.Bucket('sl-react-website-build')
        website_bucket = s3.Bucket('sl-react-website')
        # Buffer the zip entirely in memory.
        website_zip = StringIO.StringIO()
        website_build_bucket.download_fileobj('slReactBuild.zip', website_zip)
        with zipfile.ZipFile(website_zip) as buildZip:
            for name in buildZip.namelist():
                print(name)
                obj = buildZip.open(name)
                # Guess the MIME type so browsers render assets correctly.
                website_bucket.upload_fileobj(obj, name,
                    ExtraArgs={'ContentType': mimetypes.guess_type(name)[0]})
                website_bucket.Object(name).Acl().put(ACL='public-read')
        topic.publish(Subject='Build Run Success', Message="Build deployed successfully")
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt) and surface the actual failure reason.
        topic.publish(Subject='Build Run Failed',
                      Message="Build deployed failed: %s" % e)
    return 'Done from Lambda!'
|
"""
These are used for data-aware declare deviance mining.
Each template gives back: locations activations, which were fulfilled and activations, which were violated.
No short-circuiting of conditions, which could previously be done in other templates
"""
def template_not_responded_existence(trace, event_set):
    """Not-responded-existence: if A occurs and Cond holds, B can never occur.

    :param trace: mapping event name -> list of positions in the trace
    :param event_set: pair (A, B)
    :return: (fulfilled-activation count, or -1 on violation; vacuity flag)
    """
    assert len(event_set) == 2
    activation, forbidden = event_set[0], event_set[1]
    # No activation at all -> vacuously satisfied.
    if activation not in trace:
        return 0, True
    # B present anywhere -> every activation of A is violated.
    if forbidden in trace:
        return -1, False
    # B absent -> every activation of A is fulfilled.
    return len(trace[activation]), False
def template_not_responded_existence_data(trace, event_set):
    """Data-aware not-responded-existence: if A occurs and Cond holds, B can never occur.

    :param trace: mapping event name -> list of positions in the trace
    :param event_set: pair (A, B)
    :return: (count or -1, vacuity flag, fulfilled positions, violated positions)
    """
    assert len(event_set) == 2
    activation, forbidden = event_set[0], event_set[1]
    if activation not in trace:
        # Vacuously satisfied: nothing activated, nothing violated.
        return 0, True, [], []
    if forbidden in trace:
        # Any occurrence of B violates every activation of A.
        return -1, False, [], trace[forbidden]
    # All activations fulfilled; report their positions.
    return len(trace[activation]), False, trace[activation], []
def template_not_precedence(trace, event_set):
    """Not-precedence: if B occurs and Cond holds, A cannot have occurred before.

    :param trace: mapping event name -> list of positions in the trace
    :param event_set: pair (A, B)
    :return: (fulfilled-activation count, or -1 on violation; vacuity flag)
    """
    assert len(event_set) == 2
    blocker, activation = event_set[0], event_set[1]
    if activation not in trace:
        # Vacuously satisfied without any B.
        return 0, True
    occurrences = trace[activation]
    if blocker not in trace:
        # A never occurs: every B is fulfilled.
        return len(occurrences), False
    # The constraint holds only when even the last B precedes the first A.
    if occurrences[-1] < trace[blocker][0]:
        return len(occurrences), False
    return -1, False
def template_not_precedence_data(trace, event_set):
    """Data-aware not-precedence: if B occurs and Cond holds, A cannot have occurred before.

    :param trace: mapping event name -> list of positions in the trace
    :param event_set: pair (A, B)
    :return: (count or -1, vacuity flag, fulfilled B positions, violated B positions)
    """
    assert len(event_set) == 2
    blocker, activation = event_set[0], event_set[1]
    if activation not in trace:
        # Vacuously satisfied without any B.
        return 0, True, [], []
    occurrences = trace[activation]
    if blocker not in trace:
        # No A at all: every B is a fulfilment.
        return len(occurrences), False, occurrences, []
    first_blocker = trace[blocker][0]
    # B's strictly before the first A are fulfilled, the rest are violated.
    fulfilled = [pos for pos in occurrences if pos < first_blocker]
    violated = [pos for pos in occurrences if pos >= first_blocker]
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_not_chain_response(trace, event_set):
    """Not-chain-response: if A occurs and Cond holds, B cannot be executed next.

    :param trace: mapping event name -> list of positions in the trace
    :param event_set: pair (A, B)
    :return: (fulfilled-activation count, or -1 on violation; vacuity flag)
    """
    assert len(event_set) == 2
    activation, follower = event_set[0], event_set[1]
    if activation not in trace:
        return 0, True  # todo, vacuity
    if follower not in trace:
        # B never occurs, so no A can be chained to it.
        return len(trace[activation]), False
    activation_positions = set(trace[activation])
    follower_positions = set(trace[follower])
    # A single A immediately followed by B violates the template.
    if any(pos + 1 in follower_positions for pos in activation_positions):
        return -1, False
    return len(activation_positions), False
def template_not_chain_response_data(trace, event_set):
    """
    not chain response(A, B): if A occurs and Cond holds, B cannot be executed next.
    Every A is an activation: fulfilled when no B follows directly, violated otherwise.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # todo, vacuity
    if event_b not in trace:
        # No B at all: every A is fulfilled.
        return len(trace[event_a]), False, trace[event_a], []
    b_positions = set(trace[event_b])
    fulfilled, violated = [], []
    for pos in set(trace[event_a]):
        (violated if pos + 1 in b_positions else fulfilled).append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_not_chain_precedence(trace, event_set):
    """
    not chain precedence(A, B): if B occurs and Cond holds, A cannot have
    occurred immediately before.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        return 0, True  # todo, vacuity
    if event_a not in trace:
        # No A at all, so no B can be directly preceded by one.
        return len(trace[event_b]), False
    a_positions = set(trace[event_a])
    b_positions = set(trace[event_b])
    # A single B directly preceded by A breaks the whole constraint.
    if any(pos - 1 in a_positions for pos in b_positions):
        return -1, False
    return len(b_positions), False
def template_not_chain_precedence_data(trace, event_set):
    """
    not chain precedence(A, B): if B occurs and Cond holds, A cannot have
    occurred immediately before. Every B is an activation.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        return 0, True, [], []  # todo, vacuity
    if event_a not in trace:
        # No A anywhere: every B is fulfilled.
        return len(trace[event_b]), False, trace[event_b], []
    a_positions = set(trace[event_a])
    fulfilled, violated = [], []
    for pos in set(trace[event_b]):
        (violated if pos - 1 in a_positions else fulfilled).append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_alternate_precedence(trace, event_set):
    """
    precedence(A, B) template indicates that event B
    should occur only if event A has occurred before.
    Alternate condition:
    "events must alternate without repetitions of these events in between"
    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied); count is -1 on violation
    """
    # exactly 2 event
    assert (len(event_set) == 2)
    event_1 = event_set[0]
    event_2 = event_set[1]
    if event_2 in trace:
        if event_1 in trace:
            # Go through two lists, one by one
            # first events pos must be before 2nd lists first pos etc...
            # A -> A -> B -> A -> B
            # efficiency check
            event_1_count = len(trace[event_1])
            event_2_count = len(trace[event_2])
            # There has to be more or same amount of event A's compared to B's
            if event_2_count > event_1_count:
                return -1, False
            event_1_positions = trace[event_1]
            event_2_positions = trace[event_2]
            # Go through all event 2's, check that there is respective event 1.
            # Find largest event 1 position, which is smaller than event 2 position
            # implementation
            # Check 1-forward, the 1-forward has to be greater than event 2 and current one has to be smaller than event2
            event_1_ind = 0
            for i, pos2 in enumerate(event_2_positions):
                # find first in event_2_positions, it has to be before next in event_1_positions
                while True:
                    if event_1_ind >= len(event_1_positions):
                        # out of preceding events, but there are still event 2's remaining.
                        return -1, False
                    next_event_1_pos = None
                    if event_1_ind < len(event_1_positions) - 1:
                        next_event_1_pos = event_1_positions[event_1_ind + 1]
                    event_1_pos = event_1_positions[event_1_ind]
                    # NOTE(review): truthiness check, not an explicit "is not None" --
                    # safe only because a *next* position is always greater than an
                    # earlier >=0 position and therefore never 0; confirm positions
                    # are non-negative and strictly increasing.
                    if next_event_1_pos:
                        if event_1_pos < pos2 and next_event_1_pos > pos2:
                            # found the largest preceding event
                            event_1_ind += 1
                            break
                        elif event_1_pos > pos2 and next_event_1_pos > pos2:
                            # no event larger
                            return -1, False
                        else:
                            # both candidates are before pos2: advance to a later A
                            event_1_ind += 1
                    else:
                        # if no next event, check if current is smaller
                        if event_1_pos < pos2:
                            event_1_ind += 1
                            break
                        else:
                            return -1, False  # since there is no smaller remaining event
            count = len(event_2_positions)
            return count, False
        else:
            # impossible because there has to be at least one event1 with event2
            return -1, False
    return 0, True  # todo: vacuity condition!!
def template_alternate_precedence_data(trace, event_set):
    """
    alternate precedence(A, B): event B should occur only if event A occurred
    before, and the events must alternate (no second B without a fresh A).
    Every B is an activation.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        return 0, True, [], []  # todo: vacuity condition!!
    if event_a not in trace:
        # There has to be at least one A whenever B occurs: all B's violated.
        return -1, False, [], trace[event_b]
    fulfilled, violated = [], []
    # Interleave both position lists into one timeline tagged with the event kind
    # (ties, which cannot normally happen, order tag 2 first like the original merge).
    timeline = [(1, p) for p in trace[event_a]] + [(2, p) for p in trace[event_b]]
    timeline.sort(key=lambda entry: (entry[1], -entry[0]))
    # A 'B' entry is fulfilled exactly when the entry right before it is an 'A'.
    for idx, (kind, pos) in enumerate(timeline):
        if kind != 2:
            continue
        if idx > 0 and timeline[idx - 1][0] == 1:
            fulfilled.append(pos)
        else:
            violated.append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_alternate_response(trace, event_set):
    """
    If there is A, it has to be eventually followed by B.
    Alternate: there cant be any further A until first next B
    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied); count is -1 on violation
    """
    # exactly 2 event
    assert (len(event_set) == 2)
    event_1 = event_set[0]
    event_2 = event_set[1]
    if event_1 in trace:
        if event_2 in trace:
            event_2_ind = 0
            event_1_positions = trace[event_1]
            event_2_positions = trace[event_2]
            for i, pos1 in enumerate(event_1_positions):
                # find first in event_2_positions, it has to be before next in event_1_positions
                next_event_1_pos = None
                if i < len(event_1_positions) - 1:
                    next_event_1_pos = event_1_positions[i + 1]
                while True:
                    if event_2_ind >= len(event_2_positions):
                        # out of response events
                        return -1, False
                    if event_2_positions[event_2_ind] > pos1:
                        # found first greater than event 1 pos
                        # check if it is smaller than next event 1
                        # NOTE(review): truthiness check on next_event_1_pos -- safe only
                        # because a *next* position is always greater than an earlier
                        # >=0 position and therefore never 0; confirm positions are
                        # non-negative and strictly increasing.
                        if next_event_1_pos and event_2_positions[event_2_ind] > next_event_1_pos:
                            # next event 2 is after next event 1..
                            return -1, False
                        else:
                            # consume event 2 and break out to next event 1
                            event_2_ind += 1
                            break
                    # this B is at/before the current A: skip it
                    event_2_ind += 1
            count = len(event_1_positions)
            return count, False
        # every event 2 position has to be after respective event 1 position and before next event 2 position
        else:
            return -1, False
    # Vacuously
    return 0, True
def template_alternate_response_data(trace, event_set):
    """
    alternate response(A, B): every A must eventually be followed by a B, with
    no further A before that B. Every A is an activation.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # todo: vacuity condition!!
    if event_b not in trace:
        # A occurs without any B: every A is violated.
        return -1, False, [], trace[event_a]
    fulfilled, violated = [], []
    # Interleave both position lists into one timeline tagged with the event kind
    # (ties, which cannot normally happen, order tag 2 first like the original merge).
    timeline = [(1, p) for p in trace[event_a]] + [(2, p) for p in trace[event_b]]
    timeline.sort(key=lambda entry: (entry[1], -entry[0]))
    # An 'A' entry is fulfilled exactly when the entry right after it is a 'B'.
    last = len(timeline) - 1
    for idx, (kind, pos) in enumerate(timeline):
        if kind != 1:
            continue
        if idx < last and timeline[idx + 1][0] == 2:
            fulfilled.append(pos)
        else:
            violated.append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_chain_precedence(trace, event_set):
    """
    chain precedence(A, B): if B occurs, A must have occurred immediately before.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B), exactly 2 events
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        return 0, True  # todo, vacuity
    if event_a not in trace:
        return -1, False  # no A to precede anything
    a_positions = set(trace[event_a])
    b_positions = set(trace[event_b])
    # Cheap impossibility check: fewer A's than B's can never satisfy the chain.
    if len(a_positions) < len(b_positions):
        return -1, False
    # Each B needs an A at the directly preceding position.
    if any(pos - 1 not in a_positions for pos in b_positions):
        return -1, False
    return len(b_positions), False
def template_chain_precedence_data(trace, event_set):
    """
    chain precedence(A, B): if B occurs and Cond holds, A must have occurred
    immediately before. Every B is an activation.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B), exactly 2 events
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        return 0, True, [], []  # todo, vacuity
    if event_a not in trace:
        # No A anywhere: every B is violated.
        return -1, False, [], trace[event_b]
    a_positions = set(trace[event_a])
    fulfilled, violated = [], []
    for pos in set(trace[event_b]):
        (fulfilled if pos - 1 in a_positions else violated).append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_chain_response(trace, event_set):
    """
    chain response(A, B): if A occurs, B must be executed immediately after.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B), exactly 2 events
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True  # todo, vacuity
    if event_b not in trace:
        return -1, False  # no B to respond with
    a_positions = set(trace[event_a])
    b_positions = set(trace[event_b])
    # Cheap impossibility check: more A's than B's can never satisfy the chain.
    if len(a_positions) > len(b_positions):
        return -1, False
    # Each A needs a B at the directly following position.
    if any(pos + 1 not in b_positions for pos in a_positions):
        return -1, False
    return len(a_positions), False
def template_chain_response_data(trace, event_set):
    """
    chain response(A, B): if A occurs, B must be executed immediately after.
    Every A is an activation.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B), exactly 2 events
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # todo, vacuity
    if event_b not in trace:
        # No B anywhere: every A is violated.
        return -1, False, [], trace[event_a]
    b_positions = set(trace[event_b])
    fulfilled, violated = [], []
    for pos in set(trace[event_a]):
        (fulfilled if pos + 1 in b_positions else violated).append(pos)
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_precedence(trace, event_set):
    """
    precedence(A, B) template indicates that event B
    should occur only if event A has occurred before.
    Satisfied iff the first A sits before the first B.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied); count is -1 on violation
    """
    # exactly 2 event
    assert (len(event_set) == 2)
    event_1 = event_set[0]
    event_2 = event_set[1]
    if event_2 in trace:
        if event_1 in trace:
            first_pos_event_1 = trace[event_1][0]
            first_pos_event_2 = trace[event_2][0]
            if first_pos_event_1 < first_pos_event_2:
                # todo: check frequency condition
                return len(trace[event_2]), False
            # first position of event 2 is before first event 1
            return -1, False
        # impossible because there has to be at least one event1 with event2
        return -1, False
    # Vacuously fulfilled
    return 0, True
def template_precedence_data(trace, event_set):
    """
    precedence(A, B): event B should occur only if event A has occurred before.
    Every B is an activation: fulfilled when some A precedes it.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_b not in trace:
        # Vacuously fulfilled when B never occurs.
        return 0, True, [], []
    if event_a not in trace:
        # impossible: there has to be at least one A with B
        return -1, False, [], trace[event_b]
    first_a = trace[event_a][0]
    violated = [p for p in trace[event_b] if p < first_a]
    fulfilled = [p for p in trace[event_b] if p >= first_a]
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_not_response(trace, event_set):
    """
    not response(A, B): if A occurs and Cond holds, B can never occur afterwards.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True  # not vacuity atm..
    if event_b not in trace:
        # impossible for B to follow A when there is no B at all
        return len(trace[event_a]), False
    # The last B has to sit before the first A.
    if trace[event_b][-1] < trace[event_a][0]:
        # todo: check frequency counting How to count fulfillments? min of A and B?
        return len(trace[event_a]), False
    return -1, False
def template_not_response_data(trace, event_set):
    """
    not response(A, B): if A occurs and Cond holds, B can never occur afterwards.
    Every A is an activation: fulfilled when it comes after the last B.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # not vacuity atm..
    if event_b not in trace:
        # impossible for B to follow A when there is no B at all
        return len(trace[event_a]), False, trace[event_a], []
    last_b = trace[event_b][-1]
    violated = [p for p in trace[event_a] if p < last_b]
    fulfilled = [p for p in trace[event_a] if p >= last_b]
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_response(trace, event_set):
    """
    response(A, B): when event A occurs, event B should eventually occur after it.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True  # not vacuity atm..
    if event_b not in trace:
        # impossible for B to come after A when there is no B at all
        return -1, False
    # The last B must come after the last A.
    if trace[event_b][-1] > trace[event_a][-1]:
        # todo: check frequency counting How to count fulfillments? min of A and B?
        return len(trace[event_a]), False
    return -1, False
def template_response_data(trace, event_set):
    """
    response(A, B): when event A occurs, event B should eventually occur after it.
    Every A is an activation: fulfilled when some B comes after it.

    :param trace: dict mapping event name -> sorted list of positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # not vacuity atm..
    if event_b not in trace:
        # impossible for B to come after A when there is no B at all
        return -1, False, [], trace[event_a]
    last_b = trace[event_b][-1]
    fulfilled = [p for p in trace[event_a] if p < last_b]
    violated = [p for p in trace[event_a] if p >= last_b]
    if violated:
        return -1, False, fulfilled, violated
    return len(fulfilled), False, fulfilled, violated
def template_responded_existence(trace, event_set):
    """
    responded existence(A, B): if event A occurs, event B should also occur
    (either before or after event A).

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True  # 0, if vacuity condition
    return (len(trace[event_a]), False) if event_b in trace else (-1, False)
def template_responded_existence_data(trace, event_set):
    """
    responded existence(A, B): if event A occurs, event B should also occur
    (either before or after event A).
    All activations are either fulfilled together or violated together.

    :param trace: dict mapping event name -> positions
    :param event_set: pair (A, B)
    :return: (count, vacuously_satisfied, fulfillments, violations)
    """
    assert len(event_set) == 2
    event_a, event_b = event_set
    if event_a not in trace:
        return 0, True, [], []  # 0, if vacuity condition
    if event_b in trace:
        return len(trace[event_a]), False, trace[event_a], []
    return -1, False, [], trace[event_a]
# Does order matter in template?
# True -> the pair (A, B) is a different constraint than (B, A) for this template.
template_order = {
    "alternate_precedence": True,
    "alternate_response": True,
    "chain_precedence": True,
    "chain_response": True,
    "responded_existence": True,
    "response": True,
    "precedence": True,
    "not_responded_existence": True,
    "not_precedence": True,
    "not_response": True,
    "not_chain_response": True,
    "not_chain_precedence": True
}
# Negative ("not_") Declare templates handled by this module.
not_templates = ["not_responded_existence",
                 "not_precedence",
                 "not_response",
                 "not_chain_response",
                 "not_chain_precedence"]
# Positive Declare templates handled by this module.
templates = ["alternate_precedence", "alternate_response", "chain_precedence", "chain_response",
             "responded_existence", "response", "precedence"]
# Number of events each template relates (all templates here are binary).
template_sizes = {"alternate_precedence": 2,
                  "alternate_response": 2,
                  "chain_precedence": 2,
                  "chain_response": 2,
                  "responded_existence": 2,
                  "response": 2,
                  "precedence": 2,
                  "not_responded_existence": 2,
                  "not_precedence": 2,
                  "not_response": 2,
                  "not_chain_response": 2,
                  "not_chain_precedence": 2
                  }
def apply_data_template(template_str, trace, event_set):
    """
    Dispatch to the *_data variant of the given Declare template.

    :param template_str: template name, matched case-insensitively
    :param trace: trace dict whose "events" entry maps event name -> positions
    :param event_set: pair of event names
    :return: the template's (count, vacuity, fulfillments, violations) tuple
    :raises Exception: when the template name is unknown
    """
    template_map = {
        "alternate_precedence": template_alternate_precedence_data,
        "alternate_response": template_alternate_response_data,
        "chain_precedence": template_chain_precedence_data,
        "chain_response": template_chain_response_data,
        "responded_existence": template_responded_existence_data,
        "response": template_response_data,
        "precedence": template_precedence_data,
        "not_responded_existence": template_not_responded_existence_data,
        "not_precedence": template_not_precedence_data,
        "not_response": template_not_response_data,
        "not_chain_response": template_not_chain_response_data,
        "not_chain_precedence": template_not_chain_precedence_data,
    }
    handler = template_map.get(template_str.lower())
    if handler is None:
        raise Exception("Template not found")
    return handler(trace["events"], event_set)
def apply_template(template_str, trace, event_set):
    """
    Dispatch to the plain (non-data) variant of the given Declare template.

    :param template_str: template name, matched case-insensitively
    :param trace: trace dict whose "events" entry maps event name -> positions
    :param event_set: pair of event names
    :return: the template's (count, vacuity) tuple
    :raises Exception: when the template name is unknown
    """
    template_map = {
        "alternate_precedence": template_alternate_precedence,
        "alternate_response": template_alternate_response,
        "chain_precedence": template_chain_precedence,
        "chain_response": template_chain_response,
        "responded_existence": template_responded_existence,
        "response": template_response,
        "precedence": template_precedence,
        "not_responded_existence": template_not_responded_existence,
        "not_precedence": template_not_precedence,
        "not_response": template_not_response,
        "not_chain_response": template_not_chain_response,
        "not_chain_precedence": template_not_chain_precedence,
    }
    handler = template_map.get(template_str.lower())
    if handler is None:
        raise Exception("Template not found")
    return handler(trace["events"], event_set)
|
""" display menu
get choice
while choice != quit option
if choice == first option
do first task
else if choice == <second option>
do second task
...
else if choice == <n-th option>
do n-th task
else
display invalid input error message
display menu
get choice
do final thing, if needed"""
MENU = """Menu:
1 - Option 1
2 - Option 2
Q - Quit"""
print(MENU)
menu_input = input()
while menu_input != "Q":
if menu_input == "1":
print("You have chosen option 1")
elif menu_input == "2":
print("You have chosen option 2")
else:
print("Invalid Input")
print(MENU)
menu_input = input() |
import pandas as pd
import numpy as np
from numpy import array
import psycopg2
import math
import csv
import traceback
import matplotlib.pyplot as plt
from collections import defaultdict
import time
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
from sklearn import cross_validation
from sklearn.decomposition import PCA
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
# connect to database and get cursor
try:
    conn = psycopg2.connect(database = 'complex features', user = 'postgres', host = 'localhost', port = '5432', password = '6371')
except psycopg2.Error as e:
    print("I am unable to connect to the database")
    print(e)
    print(e.pgcode)
    print(e.pgerror)
    print(traceback.format_exc())
# NOTE(review): if the connect fails, `conn` is undefined and the next line
# raises NameError -- consider sys.exit() in the except branch.
cur = conn.cursor()
# merchant x merchant co-purchase count matrix.
# np.int was deprecated in NumPy 1.20 and later removed; the builtin int is
# the documented replacement and yields the same dtype.
merchant_matrix = np.zeros((4995, 4995), dtype=int)
cur.execute('select cast(um_purchase_num.user_id as int), cast(um_purchase_num.merchant_id as int) as merchant_id from um_purchase_num order by merchant_id')
um_purchase_num = pd.DataFrame(cur.fetchall(), columns=[i[0] for i in cur.description])
def extract_PCA_feature():
    """Build the merchant co-occurrence matrix and reduce it to 10 principal components.

    For every user, increments ``merchant_matrix[i-1, j-1]`` for each ordered
    pair (i, j) of merchants that user purchased from, writes the raw matrix
    to merchant_matrix.csv and its PCA projection to pca.csv.
    Reads the module-level globals ``um_purchase_num`` and ``merchant_matrix``.
    """
    # user ids run 1..424170 -- TODO confirm against the database contents
    for user_id in range(1, 424171):
        purchases = um_purchase_num[um_purchase_num['user_id'] == user_id]
        # distinct names for the inner loops: the original reused the outer
        # loop variable `i`, which obscured the intent
        for mer_i in purchases['merchant_id']:
            for mer_j in purchases['merchant_id']:
                merchant_matrix[mer_i - 1, mer_j - 1] += 1
    with open("merchant_matrix.csv", "w") as f:
        csv.writer(f).writerows(merchant_matrix)
    pca = PCA(n_components=10)
    prin_comp = pca.fit_transform(merchant_matrix)
    with open("pca.csv", "w") as f:
        csv.writer(f).writerows(prin_comp)
# Script entry point: build the co-purchase matrix and dump the PCA features.
if __name__ == '__main__':
    extract_PCA_feature()
|
import requests, csv
from bs4 import BeautifulSoup

# Scrape TV-show titles from page 2 of the mtime top-100 list into Top10.csv.
url = 'http://www.mtime.com/top/tv/top100/index-2.html'
req = requests.get(url)
soup = BeautifulSoup(req.text, 'html.parser')
h2_list = soup.find_all("h2", class_ = "px14 pb6")
# newline='' keeps the csv module from emitting blank rows on Windows;
# the with-block guarantees the file is closed even on errors.
with open("Top10.csv", 'w', newline='', encoding='utf-8') as csv_file:
    writer = csv.writer(csv_file)
    for h2 in h2_list:
        name = h2.find('a').text
        # writerow expects a sequence of fields -- passing the bare string
        # would write one character per column
        writer.writerow([name])
import cv2
import os
import numpy as np
import moments_calc
import clf_finder
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn import svm
INPUT_DIR = "input_images"
OUTPUT_DIR = "output"
CATEGORIES = np.array(["walking", "jogging", "running", "boxing", "handwaving", "handclapping"])
def parse_sequence():
    """Read input_sequence_multi.txt and map each video name to its frame ranges.

    Each tab-separated line's first field is the video name and the last field
    is a comma-separated list kept as "start-end" strings.
    """
    seq_path = os.path.join(INPUT_DIR, "input_sequence_multi.txt")
    mapping = {}
    with open(seq_path) as fh:
        for raw in fh:
            fields = raw.strip('\n').split("\t")
            mapping[fields[0].strip()] = fields[-1].split(",")
    return mapping
# video name -> list of "start-end" frame ranges, loaded once at import time
frame_dict = parse_sequence()
def video_frame_generator(filename):
    """Yield the frames of *filename* one by one; a final None marks the end."""
    capture = cv2.VideoCapture(filename)
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        yield frame
    capture.release()
    yield None
def mp4_video_writer(filename, frame_size, fps=20):
    """Create a cv2.VideoWriter for *filename* using the DIVX codec.

    :param filename: output path
    :param frame_size: (width, height) tuple
    :param fps: frames per second of the output
    """
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # pass the prepared fourcc: the original passed -1, which ignores the
    # codec computed above and pops up a codec-chooser dialog on Windows
    return cv2.VideoWriter(filename, fourcc, fps, frame_size)
def analyze(vid_dir, video, theta, clf, scaler, action_num, tau=20, full_frame=False):
    """Classify the action(s) in one video and write an annotated copy under output/.

    Maintains a motion-history image and derives a binary motion-energy image
    per frame, concatenates moment features from both (28 values total) and
    feeds them to *clf*.

    :param vid_dir: directory containing the video file
    :param video: video file name (expects the "*_uncomp.avi" naming scheme)
    :param theta: threshold passed to moments_calc.bg_subtraction
    :param clf: fitted classifier exposing predict / predict_proba
    :param scaler: fitted feature scaler matching the training data
    :param action_num: unused in this function -- TODO confirm it can be dropped
    :param tau: motion-history decay window (overwritten per segment when
        full_frame is True)
    :param full_frame: True -> classify once at the end of each labelled frame
        range; False -> classify continuously once frame_num exceeds tau
    """
    my_image_gen = video_frame_generator(os.path.join(vid_dir,video))
    img = my_image_gen.__next__()
    h, w, d = img.shape
    fps = 20
    video_name = video.replace("_uncomp.avi","")
    t_minus_img = img
    frame_num = 1
    history = np.zeros((h, w))
    # labelled action segments for this video, e.g. "12-95" -> start 12, end 95
    s_frames = []
    e_frames = []
    for f in frame_dict[video_name]:
        s_frames.append(int(f.split('-')[0]))
        e_frames.append(int(f.split('-')[1]))
    i = 0
    if full_frame:
        video_name = "ff_" + video_name
    else:
        video_name = "tau_" + video_name
    out_path = "output/" + video_name + "_.mp4"
    video_out = mp4_video_writer(out_path, (w, h), fps)
    # per-category tally of predictions
    res = np.zeros(6)
    full_motion = np.zeros(6)
    color = (0, 50, 255)
    font = cv2.FONT_HERSHEY_SIMPLEX
    while img is not None:
        myImg = np.copy(img)
        if full_frame:
            # decay window spans the whole labelled segment
            tau = int(e_frames[i]) - int(s_frames[i])
        motion = moments_calc.bg_subtraction(t_minus_img, img, theta=theta)
        history = moments_calc.motion_img(history, motion, tau)
        # binary motion-energy image derived from the history
        energy = cv2.threshold(history, 0, 1, cv2.THRESH_BINARY)[1]
        if full_frame:
            if frame_num == e_frames[i]:
                # segment finished: classify once on the accumulated images
                i += 1
                moments = np.concatenate([moments_calc.get_hu_moment(history), moments_calc.get_hu_moment(energy)])
                res_i = clf.predict(scaler.transform(moments.reshape(1, 28)))
                res[int(res_i)] += 1
                if i >= len(s_frames):
                    break
                cv2.putText(myImg, CATEGORIES[int(res_i)], (50, 50), font, 0.5, color, 1)
        else:
            if np.sum(motion) < 5:
                # essentially no motion in this frame: skip classification
                pass
            elif frame_num > tau:
                moments = np.concatenate([moments_calc.get_hu_moment(history), moments_calc.get_hu_moment(energy)])
                # tally every category whose probability clears 0.7
                res[(clf.predict_proba(scaler.transform(moments.reshape(1, 28))) > 0.7).flatten()] += 1
                if (clf.predict_proba(scaler.transform(moments.reshape(1, 28))) > 0.7).any():
                    res_i = clf.predict(scaler.transform(moments.reshape(1, 28)))
                    cv2.putText(myImg, CATEGORIES[int(res_i)], (50, 50), font, 0.5, color, 1)
        video_out.write(myImg)
        t_minus_img = img
        img = my_image_gen.__next__()
        frame_num += 1
    video_out.release()
    print("The occurrences of each action:")
    print(res)
def run(full_frame=False):
    """Train an SVM on precomputed features and analyze every .avi in INPUT_DIR.

    :param full_frame: True -> full-frame dataset/classifier variant,
        False -> the fixed-tau (20) variant.
    """
    print("################")
    print("The categories are numbered in order:")
    print(CATEGORIES)
    print("################")
    videos = sorted(f for f in os.listdir(INPUT_DIR)
                    if f[0] != '.' and f.endswith('.avi'))
    theta = 10
    # hyper-parameters differ per dataset variant
    if full_frame:
        clf = svm.SVC(C=1000000, gamma=1e-05, kernel='rbf', probability=True)
        datafile = "train_data_10th_full_frame.npy"
    else:
        clf = svm.SVC(C=10000, gamma=0.0001, kernel='rbf', probability=True)
        datafile = "train_data_10th_20tau.npy"
    X_train, X_test, y_train, y_test, scaler = clf_finder.split_data(datafile)
    clf.fit(X_train, y_train)
    print(accuracy_score(y_test, clf.predict(X_test)))
    for vid in videos:
        print(vid)
        analyze(INPUT_DIR, vid, theta, clf, scaler, 3, full_frame=full_frame)
if __name__ == "__main__":
run()
run(True)
|
from setuptools import setup

# Packaging metadata for img2pdf.
setup (
    name='img2pdf',
    version='0.1.3',
    author = "Johannes 'josch' Schauer",
    author_email = 'j.schauer@email.de',
    description = "Convert images to PDF via direct JPEG inclusion.",
    long_description = open('README.md').read(),
    license = "LGPL",
    keywords = "jpeg pdf converter",
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        # 'Programming Language :: Python' was previously listed twice;
        # it is kept exactly once here
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
        'Natural Language :: English',
        'Operating System :: OS Independent'],
    url = 'https://github.com/josch/img2pdf',
    download_url = 'https://github.com/josch/img2pdf/archive/0.1.3.tar.gz',
    package_dir={"": "src"},
    py_modules=['img2pdf', 'jp2'],
    include_package_data = True,
    test_suite = 'tests.test_suite',
    zip_safe = True,
    install_requires=(
        'Pillow',
    ),
    entry_points='''
    [console_scripts]
    img2pdf = img2pdf:main
    ''',
)
|
#!/usr/bin/env python
import sys,HTSeq,time,re,math,gc
#
# Test HTSeq's GTF parsing speed against doing it manually

# garbage collection disabled for the benchmark
gc.disable()

# Sanity-check that the GTF file named on the command line can be opened.
# "except E as e" (instead of the Python-2-only "except E, e") is valid from
# Python 2.6 on and required by Python 3.
try:
    fin = open(sys.argv[1])
except IOError as e:
    print(e)
else:
    fin.close()

# indices into the per-feature lists built by the parsers below
_CHROM = 0
_START = 1
_END = 2
_STRAND = 3
_TYPE = 4
_ATTR = 5
#==============================================================================
# gtfToDict
#
# Parse a GTF file into a dict keyed by sz_key.
#==============================================================================
def gtfToDict(sz_file, sz_key="transcript_id"):
    """
    Parse a GTF file into a dict keyed by the attribute *sz_key*
    (default "transcript_id"); only the first feature per key is kept.

    Each value is a list:
    [chrom, start (0-based), end, strand, type, attributes dict]

    :param sz_file: path to the GTF file
    :param sz_key: attribute name used as the dict key
    :return: dict of parsed features
    """
    d_out = {}
    # "as" form of the except clause: valid in Python 2.6+ and required by Python 3
    try:
        fin = open(sz_file, "r")
    except IOError as e:
        sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
        sys.exit()
    for szl in fin:
        ll = szl.strip().split("\t")
        # parse the attribute column: semicolon-separated 'name "value"' pairs
        lattr = ll[8].split(";")
        dattr = {}
        i = 0
        while i < len(lattr) - 1:
            ltmp = lattr[i].split("\"")
            dattr[ltmp[0].strip()] = ltmp[1].strip()
            i += 1
        lnew = [
            ll[0],            # chrom
            int(ll[3]) - 1,   # start, converted to 0-based
            int(ll[4]),       # end
            ll[6],            # strand
            ll[2],            # type
            dattr.copy(),     # attributes dict
        ]
        szId = dattr[sz_key]
        if szId not in d_out:
            d_out[szId] = lnew
    fin.close()
    return d_out
#==============================================================================
# gtfToList
#
# Parse a GTF file into a list with one entry per feature
#==============================================================================
def gtfToList(sz_file, sz_type="exon"):
    """
    Parse a GTF file into a list with one entry per feature of type *sz_type*.

    Each entry is a list:
    [chrom, start (0-based), end, type, strand, attributes dict]
    (NOTE(review): type/strand order differs from gtfToDict -- confirm this
    asymmetry is intentional before unifying.)

    :param sz_file: path to the GTF file
    :param sz_type: feature type to keep (GTF column 3), default "exon"
    :return: list of parsed features
    """
    l_out = []
    # "as" form of the except clause: valid in Python 2.6+ and required by Python 3
    try:
        fin = open(sz_file, "r")
    except IOError as e:
        sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
        sys.exit()
    for szl in fin:
        ll = szl.strip().split("\t")
        if ll[2] != sz_type:
            continue
        # parse the attribute column: semicolon-separated 'name "value"' pairs
        lattr = ll[8].split(";")
        dattr = {}
        i = 0
        while i < len(lattr) - 1:
            ltmp = lattr[i].split("\"")
            dattr[ltmp[0].strip()] = ltmp[1].strip()
            i += 1
        l_out.append([
            ll[0],            # chrom
            int(ll[3]) - 1,   # start, converted to 0-based
            int(ll[4]),       # end
            ll[2],            # type
            ll[6],            # strand
            dattr.copy(),     # attributes dict
        ])
    fin.close()
    return l_out
#==============================================================================
# indexSortedGTFList
# Build a dict index of a sorted (by chrom and start position) list of
# GTF features. Index holds start and end indices for each chromosome of
# features in lgtf.
#==============================================================================
def indexSortedGTFList(lgtf):
    """Build a per-chromosome index over a chrom/start sorted feature list.

    Returns {chrom: [first_index, end_index]} where the pair delimits the
    half-open range of that chromosome's features in lgtf.
    """
    d_index = {}
    sz_prev = ""
    for n_pos, lf in enumerate(lgtf):
        sz_chrom = lf[_CHROM]
        if sz_chrom not in d_index:
            # first feature of a new chromosome opens its range ...
            d_index[sz_chrom] = [n_pos]
            # ... and closes the range of the previous chromosome
            if sz_prev != "":
                d_index[sz_prev].append(n_pos)
            sz_prev = sz_chrom
    # close out the final chromosome at the end of the list
    d_index[sz_prev].append(len(lgtf))
    return d_index
#==============================================================================
# ivMergeSort
# Mergesort a bunch of parsed GTF features in a list
#==============================================================================
def ivMergeSort(lv):
    """Mergesort a list of parsed GTF features; returns a sorted list.

    Fixes relative to the original:
    - base case covers the empty list too (the 'len == 1' test recursed
      forever on [] because splitting an empty list yields two empty halves)
    - floor division keeps the midpoint an int under Python 3 as well
    """
    if len(lv) <= 1:
        return lv
    # split the list in two and sort each half recursively
    n_mid = len(lv) // 2
    l_left = ivMergeSort(lv[:n_mid])
    l_right = ivMergeSort(lv[n_mid:])
    # merge the two sorted halves
    return ivMerge(l_left, l_right)
#==============================================================================
# ivMerge
# Merge segment of mergesort function
#==============================================================================
def ivMerge(ll, lr):
    """Merge two feature lists sorted by (chromosome, start) into one."""
    l_result = []
    n_ll = len(ll)
    n_lr = len(lr)
    nil = 0
    nir = 0
    # consume both lists while each still has elements left
    while nil < n_ll and nir < n_lr:
        lf_l = ll[nil]
        lf_r = lr[nir]
        # order by chromosome first, then start position; ties take the
        # left element first (stable merge)
        if (lf_l[_CHROM], lf_l[_START]) <= (lf_r[_CHROM], lf_r[_START]):
            l_result.append(lf_l)
            nil += 1
        else:
            l_result.append(lf_r)
            nir += 1
    # at most one of the two tails is non-empty; append whatever is left
    l_result.extend(ll[nil:])
    l_result.extend(lr[nir:])
    return l_result
def copyFeature(lf):
    """Copy a feature list, duplicating its attributes dict so the copy
    can be modified without touching the original's attributes."""
    l_new = lf[:-1]
    l_new.append(lf[_ATTR].copy())
    return l_new
def featureOverlap(lf1, lf2):
    """True when the intervals of the two features intersect
    (half-open comparison: touching endpoints do not count)."""
    return lf1[_START] < lf2[_END] and lf1[_END] > lf2[_START]
#==============================================================================
# findOverlap
# Finds overlaps of feature in lfind within parsed GTF list lgtf with the help
# of a list of unique merged gtf features in lcomp and an index dict that points
# to the start and end of each chromosome of features in lcomp. This function uses a
# ping-pong kind of search method where the range of features in the chromosome
# is reduced iterativly by checking lfind's position relative the middle element
# of the chromosome in lcomp. The iteration continues to whichever side of the
# mid-point lfind is on or it finds an overlap. The overlap is then explored
# further by scanning the entire remaining range of points. Typically this
# yields 15 or 20 iterations per search regardless of whether there is an
# overlap found or not.
#==============================================================================
def findOverlap(lfind,lcomp,lgtf,dindex,asSet=True,szId="transcript_id"):
    """Find features in lgtf that overlap lfind.

    lcomp is the merged/unique feature list; dindex maps each chromosome to
    its [start, end) range in lcomp.  A bisection-style search narrows the
    window, then the remainder is scanned linearly and hits are resolved
    back into lgtf via the index stored as each lcomp entry's last element.
    Returns a set of attribute values (szId) of the overlapping features.
    NOTE(review): the asSet parameter is accepted but never used here.
    """
    if lfind[_CHROM] not in dindex:
        #raise Exception("invlaid chromosome request! {} not in Index".format(lfind[_CHROM]))
        sys.stderr.write("invalid chromosome request. {} not in index.".format(lfind[_CHROM]))
        print dindex.keys()
        return set([])
    # jump to start of chromosome
    sz_chrom = lfind[_CHROM]
    n_start = dindex[sz_chrom][0]
    n_end = dindex[sz_chrom][1]
    # see if we can narrow the iteration window a little
    n_lastStart = n_start
    n_lastEnd = n_end
    b_done = False
    # if start is a hit we don't need to loop
    if featureOverlap(lcomp[n_start],lfind):
        b_done = True
    while not b_done:
        # bisect on the start coordinate of the midpoint feature
        # (Python 2 integer division; '/' would yield a float index in py3)
        n_mid = (n_start+n_end)/2
        if lcomp[n_mid][_START] > lfind[_END]:
            # too far to the right, so go left
            # print "right",n_start,n_mid,n_end
            n_end = n_mid
        elif lcomp[n_mid][_START] < lfind[_END]:
            # start of feature is left of end of alignment, is it a hit?
            if lcomp[n_mid][_END] > lfind[_START]:
                # mid is a hit, is the start a hit too?
                if featureOverlap(lcomp[n_start],lfind):
                    sys.stderr.write("warning: wtf encountered\n")
                    n_start = n_lastStart
                    #n_end = n_mid
                # mid is a hit so we're finished
                b_done = True
            else:
                # not a hit, go right
                n_start = n_mid
        if n_lastStart == n_start and n_lastEnd == n_end:
            # no more changes have been made so we're done - this is probably what happens
            # when there isn't an overlap
            b_done = True
        n_lastStart = n_start
        n_lastEnd = n_end
    #print i,n_end
    l_hits = []
    i = n_start
    ni = 0
    # loop: scan the remaining window for true overlaps
    while i < dindex[sz_chrom][1] and lcomp[i][_START] < lfind[_END]:
        # check for overlap
        #if lcomp[i][_START] < lfind[_END] and lcomp[i][_END] > lfind[_START]:
        #l_hits.append(lcomp[i][_ATTR][szId])
        #print lgtf[i][_ATTR][szId]
        if featureOverlap(lcomp[i],lfind):
            # l_comp points to a position in lgtf where the original elements live
            # so we have to scan that to see which of those elements are a hit
            ni = lcomp[i][-1]
            # NOTE(review): 'ni < len(lgtf)' is evaluated last, so lgtf[ni]
            # could raise IndexError at the end of the list - confirm inputs.
            while lgtf[ni][_START] < lfind[_END] and lgtf[ni][_CHROM] == sz_chrom and ni < len(lgtf):
                if featureOverlap(lgtf[ni],lfind):
                    l_hits.append(lgtf[ni][_ATTR][szId])
                ni += 1
        i += 1
    return set(l_hits)
#==============================================================================
# mergeList
# Merge overlapping adjacent features to produce a list of uniqe non overlapping
# features.
#==============================================================================
def mergeList(ll):
    """Collapse a sorted feature list into unique, non-overlapping features.

    Adjacent overlapping features are merged into one spanning feature whose
    attribute values are the comma-joined union of both.  Each output feature
    carries, as its last element, the index of its first source feature in ll
    (used by findOverlap to jump back into the original list).
    """
    l_out = []
    l_new = None
    n_index = 0
    for lf in ll:
        if l_new is None:
            # first feature starts the initial merged feature
            l_new = copyFeature(lf)
            l_new.append(n_index)
        else:
            # check for overlap
            if l_new[_START] < lf[_END] and l_new[_END] > lf[_START]:
                # found overlap
                # modify l_new's boundaries to encompass both features
                l_new[_START] = min(l_new[_START],lf[_START])
                l_new[_END] = max(l_new[_END],lf[_END])
                # modify l_new's attributes to include both
                # NOTE(review): iterates only keys already in l_new - a key
                # present in lf but not in l_new is dropped, and a key missing
                # from lf would raise KeyError; presumably all features share
                # the same attribute keys - confirm upstream.
                for szKey in l_new[_ATTR]:
                    s_new = set(l_new[_ATTR][szKey].split(","))
                    s_new.update(set(lf[_ATTR][szKey].split(",")))
                    l_new[_ATTR][szKey] = ",".join(list(s_new))
            else:
                # no overlap, append to output and start a new feature
                l_out.append(l_new)
                l_new = copyFeature(lf)
                l_new.append(n_index)
        n_index += 1
    # append final feature
    l_out.append(l_new)
    return l_out
def findOverlap_o2(lfind,lgtf,dindex,asSet=True,szId="transcript_id"):
    """Earlier overlap search operating directly on lgtf.

    Kept alongside findOverlap, which supersedes it; same bisect-then-scan
    strategy but without the merged-list indirection.
    NOTE(review): asSet is accepted but never used.
    """
    if lfind[_CHROM] not in dindex:
        raise Exception("invlaid chromosome request!")
    # jump to start of chromosome
    sz_chrom = lfind[_CHROM]
    n_start = dindex[sz_chrom][0]
    n_end = dindex[sz_chrom][1]
    # see if we can narrow the iteration window a little
    n_lastStart = n_start
    n_lastEnd = n_end
    b_done = False
    while not b_done:
        # Python 2 integer division
        n_mid = (n_start+n_end)/2
        if lgtf[n_mid][_START] > lfind[_END]:
            # too far to the right, so go left
            # print "right",n_start,n_mid,n_end
            n_end = n_mid
        elif lgtf[n_mid][_START] < lfind[_END]:
            # start of feature is left of end of alignment, is it a hit?
            if lgtf[n_mid][_END] > lfind[_START]:
                # hit, go left
                n_end = n_mid
                b_done = True
            else:
                # not a hit, go right
                n_start = n_mid
        if n_lastStart == n_start and n_lastEnd == n_end:
            # no more changes have been made so we're done - this is probably what happens
            # when there isn't an overlap
            b_done = True
        n_lastStart = n_start
        n_lastEnd = n_end
    #print i,n_end
    l_hits = []
    i = n_start
    # loop: linear scan of the narrowed window
    while i < dindex[sz_chrom][1] and lgtf[i][_START] <= lfind[_END]:
        # check for overlap
        if lgtf[i][_START] < lfind[_END] and lgtf[i][_END] > lfind[_START]:
            l_hits += lgtf[i][_ATTR][szId].split(",")
            #print lgtf[i][_ATTR][szId]
        i += 1
    return set(l_hits)
def findOverlap_oo(lfind,lgtf,dindex,asSet=True,szId="transcript_id"):
    """Oldest overlap-search variant (superseded by findOverlap).

    Bisects until a hit is found, rewinds to the leftmost overlapping
    feature, then scans forward collecting szId attribute values.
    NOTE(review): asSet is accepted but never used, and the print below
    looks like leftover debug output.
    """
    if lfind[_CHROM] not in dindex:
        raise Exception("invlaid chromosome request!")
    # jump to start of chromosome
    sz_chrom = lfind[_CHROM]
    i = dindex[sz_chrom][0]
    n_end = dindex[sz_chrom][1]
    # see if we can narrow the iteration window a little
    b_done = False
    while not b_done:
        if n_end-i == 1:
            # window narrowed to a single candidate
            b_done = True
        else:
            # Python 2 integer division
            n_mid = (n_end-i)/2 + i
            # overlap?
            if lgtf[n_mid][_START] < lfind[_END] and lgtf[n_mid][_END] > lfind[_START]:
                b_done = True
                # rewind until no overlap
                i = n_mid
                while lgtf[i][_START] < lfind[_END] and lgtf[i][_END] > lfind[_START]:
                    i -= 1
            else:
                # no overlap - which way should we go?
                if lgtf[n_mid][_START] < lfind[_START]:
                    # gtf is left so we go right
                    #print "right",i,n_mid,n_end
                    i = n_mid
                else:
                    # gtf is right so go left
                    #print "left",i,n_mid,n_end
                    n_end = n_mid
    #print i,n_end
    l_hits = []
    # loop: collect all overlaps in the remaining window
    while i < n_end and lgtf[i][_START] <= lfind[_END]:
        # check for overlap
        if lgtf[i][_START] < lfind[_END] and lgtf[i][_END] > lfind[_START]:
            l_hits += lgtf[i][_ATTR][szId].split(",")
            print lgtf[i][_ATTR][szId]
        i += 1
    return set(l_hits)
# here we go
# Driver: parse the GTF named on the command line, sort/merge/index it,
# then time a few hard-coded overlap queries against chr1.
#n_ts = time.time()
#fin = HTSeq.GFF_Reader(sys.argv[1])
#for feature in fin:
# convert to bed
# sys.stderr.write("{0}\t{1}\t{2}\t{3}\n".format(feature.iv.chrom,feature.iv.start,feature.iv.end,feature.name))
#n_te = time.time()
#print "HTSeq time: {:.4f} seconds".format(n_te-n_ts)
#n_ts = time.time()
#d_gtf = gtfToDict(sys.argv[1])
#n_te = time.time()
#print "Manual time: {:.4f} seconds".format(n_te-n_ts)
# parse to a flat feature list and report elapsed time
n_ts = time.time()
l_gtf = gtfToList(sys.argv[1])
n_te = time.time()
print "List time: {:.4f} seconds".format(n_te-n_ts)
# sort by (chromosome, start)
n_ts = time.time()
l_gtfSorted = ivMergeSort(l_gtf)
n_te = time.time()
print "Sort time: {:.4f} seconds".format(n_te-n_ts)
# compress list
l_comp = mergeList(l_gtfSorted)
# index
d_index = indexSortedGTFList(l_comp)
# loop up overlaps
# chr1:4,483,121-4,483,703
# query intervals are [chrom, start, end]
lfind = []
lfind.append(["chr1",6483278,6483378])
lfind.append(["chr1",106830754,106830856]) # 106,830,754-106,830,856
lfind.append(["chr1",4483238,4483338])
lfind.append(["chr1",4482500,4483338])
for lf in lfind:
    n_ts = time.time()
    l_hits = findOverlap(lf,l_comp,l_gtfSorted,d_index)
    n_te = time.time()
    print "search completed in {:.8f}\n".format(n_te-n_ts),l_hits
|
#!/usr/bin/python3
import gi
gi.require_version('Gtk', '3.0')
import sys, os, threading, re, gettext
from gi.repository import Gio, Gtk, GLib, GObject, Pango, GdkPixbuf
gettext.install("placesCenter@scollins", os.environ['HOME'] + "/.local/share/locale")
def launch(path):
    """Open *path* with the user's default application for its type."""
    uri = Gio.File.new_for_path(path).get_uri()
    Gio.app_info_launch_default_for_uri(uri, Gio.AppLaunchContext())
class SearchBox(Gtk.Entry):
    """Entry with a find icon (runs the search) and a clear icon.

    searchCommand is invoked on Enter and when the find icon is clicked
    with a non-empty entry; the clear icon empties the entry.
    """
    def __init__(self, searchCommand):
        Gtk.Entry.__init__(self)
        self.searchCommand = searchCommand
        self.set_icon_from_icon_name(Gtk.EntryIconPosition.PRIMARY, "edit-find-symbolic")
        self.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "edit-clear-symbolic")
        self.connect("icon-press", self.onClick)
        self.connect("activate", self.searchCommand)

    def onClick(self, target, pos, event):
        # BUG FIX: the original condition was 'not self.get_text == ""',
        # which compares the bound method object itself to "" (never equal),
        # so the guard was always True.  The intent is to skip empty queries.
        if pos == Gtk.EntryIconPosition.PRIMARY and self.get_text() != "":
            self.searchCommand()
        if pos == Gtk.EntryIconPosition.SECONDARY:
            self.set_text("")
class Context(Gtk.Menu):
    """Context menu for the results tree: open item / open containing folder."""

    def __init__(self, parent):
        Gtk.Menu.__init__(self)
        self.parent = parent
        self.attach_to_widget(parent, None)
        # renamed from 'launch' so it no longer shadows the module-level
        # launch() helper used by launchItem/openFolder
        openItem = Gtk.MenuItem()
        openItem.set_label(_("Open..."))
        openItem.connect("activate", self.launchItem)
        self.append(openItem)
        openFolder = Gtk.MenuItem()
        openFolder.set_label(_("Open Containing Folder..."))
        openFolder.connect("activate", self.openFolder)
        self.append(openFolder)
        parent.connect("button-press-event", self.onButtonPress)
        parent.connect("popup-menu", self.openContext)
        self.show_all()

    def onButtonPress(self, target, event):
        # right-click: select the row under the pointer and pop up the menu
        if event.button == 3:
            selection = target.get_selection()
            info = target.get_path_at_pos(int(event.x), int(event.y))
            if info is None:
                return True
            selection.select_path(info[0])
            self.openContext()
            return True
        return False

    def openContext(self, *args):
        # BUG FIX: also connected to the "popup-menu" signal, which calls the
        # handler with the widget argument; the original zero-extra-arg
        # signature raised TypeError when the menu was opened via keyboard.
        self.popup(None, None, None, None, 0, Gtk.get_current_event_time())

    def launchItem(self, a):
        # open the selected file ('treeiter' avoids shadowing builtin iter)
        model, treeiter = self.parent.get_selection().get_selected()
        launch(os.path.join(model[treeiter][2], model[treeiter][1]))

    def openFolder(self, a):
        model, treeiter = self.parent.get_selection().get_selected()
        launch(model[treeiter][2])
class SearchWindow(Gtk.Window):
    """Main window: search options on the left, results tree on the right.

    The filesystem walk runs on a background thread; all UI updates from
    that thread are marshalled to the GTK main loop via GObject.idle_add.
    """
    # polled by the worker thread; setting it False aborts the walk
    searching = False

    def __init__(self, basePath):
        Gtk.Window.__init__(self, title=(_("Search")), icon_name="edit-find", default_height=400, default_width=650)
        self.connect("destroy", self.quit)
        self.search = None
        # result rows: (icon name, file name, containing directory)
        self.results = Gtk.ListStore(str, str, str)
        mainBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, border_width = 10)
        self.add(mainBox)
        contentBox = Gtk.Box(border_width=0)
        mainBox.pack_start(contentBox, True, True, 0)
        ## left pane
        leftPane = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, border_width = 10)
        contentBox.pack_start(leftPane, False, False, 0)
        # search box
        self.searchBox = SearchBox(self.startSearch)
        leftPane.pack_start(self.searchBox, False, False, 5)
        # location selector
        leftPane.pack_start(Gtk.Label(_("Start in"), halign=Gtk.Align.START), False, False, 5)
        self.location = Gtk.FileChooserButton.new(_("Select a folder"), Gtk.FileChooserAction.SELECT_FOLDER)
        leftPane.add(self.location)
        if not basePath is None:
            self.location.set_filename(basePath)
        # follow symlinks
        self.symlinks = Gtk.CheckButton.new_with_label(_("Follow symlinks"))
        leftPane.add(self.symlinks)
        # display hidden files/folders
        self.hidden = Gtk.CheckButton.new_with_label(_("Search hidden"))
        leftPane.add(self.hidden)
        # use regex
        self.regex = Gtk.CheckButton.new_with_label(_("Use regular expressions"))
        leftPane.add(self.regex)
        # stop button
        self.stopButton = Gtk.Button(label = (_("Stop")), sensitive=False)
        leftPane.pack_end(self.stopButton, False, False, 5)
        self.stopButton.connect("clicked", self.stopSearch)
        ## right pane
        rightPane = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, border_width = 10)
        contentBox.pack_start(rightPane, True, True, 0)
        # results display tree
        scrollBox = Gtk.ScrolledWindow()
        rightPane.pack_start(scrollBox, True, True, 5)
        tree = Gtk.TreeView(self.results)
        scrollBox.add(tree)
        fileNameColumn = Gtk.TreeViewColumn()
        fileNameColumn.set_title(_("File"))
        fileNameColumn.set_resizable(True)
        fileNameColumn.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        fileNameColumn.set_fixed_width(200)
        fileNameColumn.set_min_width(200)
        tree.append_column(fileNameColumn)
        iconRenderer = Gtk.CellRendererPixbuf()
        fileNameColumn.pack_start(iconRenderer, expand=False)
        fileNameColumn.add_attribute(iconRenderer, "icon-name", 0)
        fileNameRenderer = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.END)
        fileNameColumn.pack_start(fileNameRenderer, expand=False)
        fileNameColumn.add_attribute(fileNameRenderer, "text", 1)
        pathRenderer = Gtk.CellRendererText()
        pathColumn = Gtk.TreeViewColumn(_("Path"), pathRenderer, text=2)
        pathColumn.set_resizable(True)
        tree.append_column(pathColumn)
        # context menu
        Context(tree)
        tree.connect("row-activated", self.launchItem)
        # status text
        hbox = Gtk.Box()
        mainBox.pack_start(hbox, False, False, 0)
        self.currentLabel = Gtk.Label()
        self.currentLabel.set_ellipsize(Pango.EllipsizeMode.END)
        hbox.pack_start(self.currentLabel, False, False, 5)
        self.show_all()

    def quit(self, widget):
        # stop the worker thread before tearing down the main loop
        self.stopSearch()
        Gtk.main_quit()

    def startSearch(self, a=None):
        """Begin a new search; restarts cleanly if one is already running."""
        if self.searching:
            self.stopSearch()
            # re-enter once the current worker has been joined
            GObject.idle_add(self.startSearch)
            return
        self.results.clear()
        self.setStatusText(_("Searching..."))
        self.searching = True
        self.dirs = []
        self.search = threading.Thread(target=self.searchDirectory, args=[self.location.get_filename(), self.searchBox.get_text(), True])
        self.stopButton.set_sensitive(True)
        self.search.start()

    def stopSearch(self, a=None):
        """Signal the worker thread to stop and wait for it to exit."""
        self.searching = False
        if not self.search is None:
            self.search.join();
            self.search = None
        GObject.idle_add(self.setStatusText, (_("Search Stopped")))
        # NOTE(review): this direct call duplicates the idle_add just above -
        # confirm whether both status updates are intended.
        self.setStatusText(_("Search Stopped"))
        self.stopButton.set_sensitive(False)

    def launchItem(self, event, path, column):
        # row-activated handler: open the double-clicked result
        row = self.results[path]
        launch(os.path.join(row[2], row[1]))

    def addResult(self, fileName, path, icon):
        # invoked via GObject.idle_add from the worker thread
        self.results.append([icon, fileName, path])

    def setStatusText(self, text):
        self.currentLabel.set_text(text)

    def searchDirectory(self, directory, key, firstRun=False):
        """Recursively scan *directory* for names matching *key*.

        Runs on the worker thread; firstRun marks the top-level invocation
        so completion status is only reported once.
        """
        if not self.searching:
            return
        GObject.idle_add(self.setStatusText, (_("Searching: ") + directory))
        try:
            children = os.listdir(directory)
        except:
            GObject.idle_add(self.setStatusText, (_("Error: insufficient permissions to read folder ") + directory))
            return
        subdirs = []
        for child in children:
            if child[0] != "." or self.hidden.get_active():
                path = os.path.join(directory, child)
                if self.isMatch(key, child):
                    # resolve a themed icon name for the result row
                    fileObj = Gio.File.new_for_path(path)
                    info = fileObj.query_info("standard::icon", 0, None)
                    iconNames = info.get_icon().get_names()
                    # NOTE(review): if no candidate is in the theme the last
                    # name is used; an empty iconNames would leave 'icon'
                    # unbound - presumably query_info always yields names.
                    for icon in iconNames:
                        if Gtk.IconTheme.get_default().has_icon(icon):
                            break
                    if not self.searching:
                        return
                    GObject.idle_add(self.addResult, child, directory, icon)
                try:
                    # queue subdirectories; skip symlinks unless requested,
                    # and skip symlink targets that were already walked
                    if os.path.isdir(path) and (self.symlinks.get_active() or not os.path.islink(path)) and not self.isRedundant(path):
                        subdirs.append(path)
                        self.dirs.append(os.path.realpath(path))
                except:
                    pass
        for directory in subdirs:
            if not self.searching:
                return
            self.searchDirectory(directory, key)
        if firstRun and self.searching:
            # the top-level call has finished the entire walk
            self.searching = False
            self.stopButton.set_sensitive(False)
            GObject.idle_add(self.setStatusText, (_("Search Completed")))

    def isRedundant(self, path):
        """True when *path* is a symlinked dir whose target was already searched."""
        if not self.symlinks.get_active() or not os.path.islink(path) or not os.path.isdir(path):
            return False
        else:
            if os.path.realpath(path) in self.dirs:
                GObject.idle_add(self.setStatusText, _("Skipping") + ' ' + path + (_(" - directory already searched")))
                return True

    def isMatch(self, key, child):
        """Match *child* against *key*: regex when enabled, else substring."""
        if self.regex.get_active():
            if re.search(key, child) == None:
                return False
            else:
                return True
        else:
            if key in child:
                return True
            else:
                return False
if __name__ == "__main__":
    GLib.threads_init()
    # start at the folder given on the command line, else the filesystem root
    start_path = sys.argv[1] if len(sys.argv) > 1 else "/"
    SearchWindow(start_path)
    Gtk.main()
|
"""
single root plots - soil potentials of 1-3 soils, with fancy background
from xls result files (in results/)
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Configuration -------------------------------------------------------------
fancy = False  # when True, also draw every time step as a faint grey line
ls = ['-']
# add_str = "_dry"
# fnames = ["results/sink_" + "small_sra" + add_str + ".xls",
#           "results/sink_" + "small_sra" + add_str + ".xls"]
# days = 7.1
# titles = ["steady rate", "aggregated"] # "steady rate", "aggregated", "rhizosphere",
# scenario selection: one result file (and panel title) per model variant
add_str = "_dry0"
fnames = ["results/sink_" + "small_cyl" + add_str + ".xls",
          "results/sink_" + "small_sra" + add_str + ".xls",
          "results/sink_" + "small_agg" + add_str + ".xls"]
days = 7.1  # simulated time span [days]
titles = ["rhizosphere", "steady rate", "aggregated"] # "steady rate", "aggregated", "rhizosphere",
plot_times = range(0, 7)
L = 110 # soil depth
# global matplotlib font sizes
SMALL_SIZE = 16
MEDIUM_SIZE = 16
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# one panel per result file
fig, ax = plt.subplots(1, len(fnames), figsize=(14, 14))
if len(fnames) == 1:
    ax = [ax]
for i in range(0, len(fnames)):
    ax[i].set_xlabel("sink [cm$^3$/day]")
    ax[i].set_title(titles[i] + " (" + add_str[1:] + ")")
    ax[i].plot([0, 0], [-L, 0.], "k:")  # zero-sink reference line
ax[0].set_ylabel("depth [cm]")
for i in range(0, len(fnames)):
    df2 = pd.read_excel(fnames[i], header=None) # open file
    # rows = time steps, columns = depth segments; negate so uptake is positive
    sink_ = -df2.to_numpy()
    z_ = np.linspace(-L + 0.25, -0.25, sink_.shape[1]) # single root 100 segments, 0 - (-50) cm, segment mids
    # fade line colour from full intensity (early) to 0.2 (late)
    color_intensity = np.ones((sink_.shape[0]),) * 0.2 + np.linspace(1., 0., sink_.shape[0]) * 0.8
    # row indices of daily transpiration peaks (midday) ...
    peak_id = np.round(sink_.shape[0] / days * np.array([0.5 + i for i in range(0, 7)]))
    peak_id = peak_id.astype(int)
    # ... and of nightly redistribution (day boundaries)
    redistribution_id = np.round(sink_.shape[0] / days * np.array([i for i in range(0, 7)]))
    redistribution_id = redistribution_id.astype(int)
    for j in range(0, sink_.shape[0]):
        if fancy:
            ax[i].plot(sink_[j,:], z_, 'k', alpha=0.01)
    for j in range(0, sink_.shape[0]):
        if j == peak_id[0]:
            ax[i].plot(sink_[j,:], z_, color=[color_intensity[j], 0., 0.], linestyle=ls[0], label="peak")
        if j in peak_id[1:]:
            ax[i].plot(sink_[j,:], z_, color=[color_intensity[j], 0., 0.], linestyle=ls[0])
        if j == redistribution_id[0]:
            ax[i].plot(sink_[j,:], z_, 'b:', label="initial")
        if j == redistribution_id[1]:
            ax[i].plot(sink_[j,:], z_, color=[0., color_intensity[j], 0.], linestyle=ls[0], label="redistribution")
        if j in redistribution_id[2:]:
            ax[i].plot(sink_[j,:], z_, color=[0., color_intensity[j], 0.], linestyle=ls[0])
    # minx = min(minx, np.min(sink_))
    # maxx = min(maxx, np.max(sink_))
# shared legend and per-scenario x limits
for i in range(0, len(fnames)):
    ax[i].legend()
    if add_str == "_dry":
        ax[i].set_xlim(-1., 0.75)
    if add_str == "_dry0":
        ax[i].set_xlim(-0.7, 0.7)
plt.tight_layout()
plt.show()
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
import copy
import re
import time
from oslo_config import cfg
from oslo_log import log
from vitrage.common.constants import EdgeProperties as EProps
from vitrage.common.constants import VertexProperties as VProps
from vitrage.common.utils import md5
from vitrage.common.utils import recursive_keypairs
from vitrage.entity_graph.mappings.datasource_info_mapper \
import DatasourceInfoMapper
from vitrage.evaluator.actions.action_executor import ActionExecutor
from vitrage.evaluator.actions.base import ActionMode
from vitrage.evaluator.actions.base import ActionType
import vitrage.evaluator.actions.priority_tools as pt
from vitrage.evaluator.template_data import ActionSpecs
from vitrage.evaluator.template_data import EdgeDescription
from vitrage.evaluator.template_functions.function_resolver import is_function
from vitrage.evaluator.template_schema_factory import TemplateSchemaFactory
from vitrage.graph.algo_driver.algorithm import Mapping
from vitrage.graph.algo_driver.sub_graph_matching import \
NEG_CONDITION
from vitrage.graph.driver import Vertex
from vitrage import storage
from vitrage.storage.sqlalchemy import models
from vitrage.utils.datetime import utcnow
CONF = cfg.CONF
# module-level logger
LOG = log.getLogger(__name__)
# Entry containing action info.
# specs - ActionSpecs
# mode - DO or UNDO (the action)
# action_id - the action id in scenario_repository
# Trigger_id - a unique identifier per match in graph (i.e., the subgraph
# that matched the action in the spec) for the specific action.
ActionInfo = \
    namedtuple('ActionInfo', ['specs', 'mode', 'action_id', 'trigger_id'])
# keys used in ActionSpecs.targets to name the acted-upon vertices
TARGET = 'target'
SOURCE = 'source'
class ScenarioEvaluator(object):
    def __init__(self,
                 e_graph,
                 scenario_repo,
                 actions_callback,
                 enabled=False):
        """Wire the evaluator to the entity graph and the action executor.

        :param e_graph: the entity graph to evaluate scenarios against
        :param scenario_repo: repository of scenario templates
        :param actions_callback: callback passed to the ActionExecutor
        :param enabled: when False, process_event ignores notifications
        """
        self._entity_graph = e_graph
        self._db = storage.get_connection_from_config()
        self._scenario_repo = scenario_repo
        self._action_executor = ActionExecutor(actions_callback)
        # evaluate scenarios on every change notification from the graph
        self._entity_graph.subscribe(self.process_event)
        self.enabled = enabled
        # memoization: {id(subgraph): {id(target): connected component}}
        self.connected_component_cache = defaultdict(dict)
    @property
    def scenario_repo(self):
        """The scenario repository used to match graph elements."""
        return self._scenario_repo

    @scenario_repo.setter
    def scenario_repo(self, scenario_repo):
        self._scenario_repo = scenario_repo
def run_evaluator(self, action_mode=ActionMode.DO):
self.enabled = True
vertices = self._entity_graph.get_vertices()
start_time = time.time()
for vertex in vertices:
if action_mode == ActionMode.DO:
self.process_event(None, vertex, True)
elif action_mode == ActionMode.UNDO:
self.process_event(vertex, None, True)
LOG.info(
'Run %s Evaluator on %s items - took %s',
action_mode, len(vertices), (time.time() - start_time))
    def process_event(self, before, current, is_vertex, *args, **kwargs):
        """Notification of a change in the entity graph.

        Scenarios matching the 'before' element are processed in UNDO mode
        and scenarios matching the 'current' element in DO mode; scenarios
        relevant to both are dropped (no state change for them).

        :param is_vertex: True if the element is a vertex, False for an edge
        :param before: The graph element (vertex or edge) prior to the
        change that happened. None if the element was just created.
        :param current: The graph element (vertex or edge) after the
        change that happened. Deleted elements should arrive with the
        vitrage_is_deleted property set to True
        """
        if not self.enabled:
            LOG.debug("Process event disabled")
            return
        LOG.debug('Process event - starting')
        LOG.debug("Element before event: %s, Current element: %s",
                  before,
                  current)
        before_scenarios = self._get_element_scenarios(before, is_vertex)
        current_scenarios = self._get_element_scenarios(current, is_vertex)
        before_scenarios, current_scenarios = \
            self._remove_overlap_scenarios(before_scenarios, current_scenarios)
        if len(before_scenarios) + len(current_scenarios):
            LOG.debug("Number of relevant scenarios found: undo = %s, do = %s",
                      len(before_scenarios),
                      len(current_scenarios))
        actions = self._process_and_get_actions(before,
                                                before_scenarios,
                                                ActionMode.UNDO)
        actions.extend(self._process_and_get_actions(current,
                                                     current_scenarios,
                                                     ActionMode.DO))
        actions_to_preform = []
        try:
            actions_to_preform = self._analyze_and_filter_actions(actions)
        except Exception:
            # an evaluator failure must not propagate into the graph driver
            LOG.exception("Evaluator error, will not execute actions %s",
                          actions)
        self._action_executor.execute(actions_to_preform)
        LOG.debug('Process event - completed')
def _get_element_scenarios(self, element, is_vertex):
if not element \
or element.get(VProps.VITRAGE_IS_DELETED) \
or element.get(EProps.VITRAGE_IS_DELETED):
return []
elif is_vertex:
return self._scenario_repo.get_scenarios_by_vertex(element)
else: # is edge
edge_desc = self._get_edge_description(element)
return self._scenario_repo.get_scenarios_by_edge(edge_desc)
def _get_edge_description(self, element):
source = self._entity_graph.get_vertex(element.source_id)
target = self._entity_graph.get_vertex(element.target_id)
edge_desc = EdgeDescription(element, source, target)
return edge_desc
@staticmethod
def _remove_overlap_scenarios(before, current):
intersection = list(filter(lambda x: x in before, current))
before = list(filter(lambda x: x not in intersection, before))
current = list(filter(lambda x: x not in intersection, current))
return before, current
def _process_and_get_actions(self, element, triggered_scenarios, mode):
actions = []
for triggered_scenario in triggered_scenarios:
LOG.debug("Processing: %s", triggered_scenario)
scenario_element = triggered_scenario[0]
scenario = triggered_scenario[1]
actions.extend(self._process_scenario(element,
scenario,
scenario_element,
mode))
return actions
def _process_scenario(self, element, scenario, scenario_elements, mode):
if not isinstance(scenario_elements, list):
scenario_elements = [scenario_elements]
actions = []
for action in scenario.actions:
for scenario_element in scenario_elements:
matches = self._evaluate_subgraphs(scenario.subgraphs,
element,
scenario_element,
action.targets[TARGET])
actions.extend(self._get_actions_from_matches(scenario.version,
matches,
mode,
action))
return actions
def _evaluate_subgraphs(self,
subgraphs,
element,
scenario_element,
action_target):
if isinstance(element, Vertex):
return self._find_vertex_subgraph_matching(subgraphs,
action_target,
element,
scenario_element)
else:
return self._find_edge_subgraph_matching(subgraphs,
action_target,
element,
scenario_element)
    def _get_actions_from_matches(self,
                                  scenario_version,
                                  combined_matches,
                                  mode,
                                  action_spec):
        """Convert subgraph matches into ActionInfo entries.

        combined_matches holds (is_switch_mode, matches) pairs; a switched
        match inverts DO <-> UNDO (used for negated-condition subgraphs).
        """
        actions = []
        for is_switch_mode, matches in combined_matches:
            new_mode = mode
            if is_switch_mode:
                new_mode = ActionMode.UNDO \
                    if mode == ActionMode.DO else ActionMode.DO
            template_schema = \
                TemplateSchemaFactory().template_schema(scenario_version)
            for match in matches:
                # resolve target ids to the matched vertices
                match_action_spec = self._get_action_spec(action_spec, match)
                # stable per-match identifier: hash of the sorted vertex ids
                items_ids = \
                    [match_item[1].vertex_id for match_item in match.items()]
                match_hash = md5(tuple(sorted(items_ids)))
                self._evaluate_property_functions(template_schema, match,
                                                  match_action_spec.properties)
                actions.append(ActionInfo(match_action_spec, new_mode,
                                          match_action_spec.id, match_hash))
        return actions
    def _evaluate_property_functions(self, template_schema, match,
                                     action_props):
        """Evaluate the action properties, in case they contain functions

        In template version 2 we introduced functions, and specifically the
        get_attr function. This method evaluate its value and updates the
        action properties, before the action is being executed.
        Example:
        - action:
            action_type: execute_mistral
            properties:
              workflow: evacuate_vm
              input:
                vm_name: get_attr(instance1,name)
                force: false
        In this example, the method will iterate over 'properties', and then
        recursively over 'input', and for 'vm_name' it will replace the
        call for get_attr with the actual name of the VM. The input for the
        Mistral workflow will then be:
        vm_name: vm_1
        force: false
        """
        for key, value in action_props.items():
            if isinstance(value, dict):
                # Recursive call for a dictionary
                self._evaluate_property_functions(template_schema,
                                                  match, value)
            elif value is not None and is_function(value):
                # The value is a function, e.g. get_attr(instance1,name):
                # split into the function name and its argument list
                func_and_args = re.split('[(),]', value)
                func_name = func_and_args.pop(0)
                args = [arg.strip() for arg in func_and_args if len(arg) > 0]
                # Get the function, execute it and update the property value
                func = template_schema.functions.get(func_name)
                action_props[key] = func(match, *args)
                LOG.debug('Changed property %s value from %s to %s', key,
                          value, action_props[key])
@staticmethod
def _get_action_spec(action_spec, match):
targets = action_spec.targets
real_items = {
target: match[target_id] for target, target_id in targets.items()
}
return ActionSpecs(action_spec.id,
action_spec.type,
real_items,
copy.deepcopy(action_spec.properties))
@staticmethod
def _generate_action_id(action_spec):
"""Generate a unique action id for the action
BEWARE: The value created here should not be stored in database,
as in python3, the hash function seed changes after program restart
"""
targets = [(k, v.vertex_id) for k, v in action_spec.targets.items()]
return hash(
(action_spec.type,
tuple(sorted(targets)),
tuple(sorted(recursive_keypairs(action_spec.properties))))
)
    def _analyze_and_filter_actions(self, actions):
        """Resolve DO/UNDO interactions against the active actions in the DB.

        Returns the de-duplicated actions (in order) that should actually
        be executed.
        """
        LOG.debug("Actions before filtering: %s", actions)
        if not actions:
            return []
        active_actions = ActiveActionsTracker(self._db, actions)
        for action_info in actions:
            if action_info.mode == ActionMode.DO:
                active_actions.calc_do_action(action_info)
            elif action_info.mode == ActionMode.UNDO:
                active_actions.calc_undo_action(action_info)
        active_actions.flush_db_updates()
        unique_ordered_actions = OrderedDict()
        for action in active_actions.actions_to_perform:
            if isinstance(action, models.ActiveAction):
                # stored rows are converted back into ActionInfo entries
                action = self._db_action_to_action_info(action)
            # keyed by generated id so a later duplicate replaces an earlier one
            id_ = self._generate_action_id(action.specs)
            unique_ordered_actions[id_] = action
        return unique_ordered_actions.values()
    def _find_vertex_subgraph_matching(self,
                                       subgraphs,
                                       action_target,
                                       vertex,
                                       scenario_vertex):
        """calculates subgraph matching for vertex

        iterates over all the subgraphs, and checks if the triggered vertex is
        in the same connected component as the action then run subgraph
        matching on the vertex and return its result, otherwise return an
        empty list of matches.
        """
        matches = []
        for subgraph in subgraphs:
            connected_component = self.get_connected_component(subgraph,
                                                               action_target)
            # reuse the component lookup result as the membership test
            is_switch_mode = \
                connected_component.get_vertex(scenario_vertex.vertex_id)
            if is_switch_mode:
                initial_map = Mapping(scenario_vertex, vertex, True)
                mat = self._entity_graph.algo.sub_graph_matching(subgraph,
                                                                 initial_map)
                matches.append((False, mat))
            else:
                # vertex lies outside the component: report a switched,
                # empty match set for this subgraph
                matches.append((True, []))
        return matches
    def _find_edge_subgraph_matching(self,
                                     subgraphs,
                                     action_target,
                                     edge,
                                     scenario_edge):
        """calculates subgraph matching for edge

        iterates over all the subgraphs, and checks if the triggered edge is a
        negative edge then mark it as deleted=false and negative=false so that
        subgraph matching on that edge will work correctly. after running
        subgraph matching, we need to remove the negative vertices that were
        added due to the change above.

        :param subgraphs: scenario subgraphs to evaluate
        :param action_target: vertex the action operates on
        :param edge: the entity-graph edge that triggered the evaluation
        :param scenario_edge: the template edge matched by the trigger
        :return: list of (is_switch_mode, matches) tuples, one per subgraph
        """
        matches = []
        for subgraph in subgraphs:
            subgraph_edge = subgraph.get_edge(scenario_edge.source.vertex_id,
                                              scenario_edge.target.vertex_id,
                                              scenario_edge.edge.label)
            # Subgraphs that do not contain this template edge are skipped.
            if not subgraph_edge:
                continue
            is_switch_mode = subgraph_edge.get(NEG_CONDITION, False)
            connected_component = self.get_connected_component(subgraph,
                                                               action_target)
            # change the vitrage_is_deleted and negative_condition props to
            # false when is_switch_mode=true so that when we have an event on a
            # negative_condition=true edge it will find the correct subgraph
            self._switch_edge_negative_props(is_switch_mode, scenario_edge,
                                             subgraph, False)
            initial_map = Mapping(scenario_edge.edge, edge, False)
            curr_matches = \
                self._entity_graph.algo.sub_graph_matching(subgraph,
                                                           initial_map)
            # switch back to the original values
            self._switch_edge_negative_props(is_switch_mode, scenario_edge,
                                             subgraph, True)
            # Drop the negative vertices that matched only because the flags
            # were temporarily switched above.
            self._remove_negative_vertices_from_matches(curr_matches,
                                                        connected_component)
            matches.append((is_switch_mode, curr_matches))
        return matches
    def get_connected_component(self, subgraph, target):
        """Return the connected component of `target` inside `subgraph`.

        Traverses only edges that do NOT carry a negative condition, and
        memoizes the result per (subgraph, target) object identity.

        NOTE(review): the cache write assumes
        self.connected_component_cache[id(subgraph)] already yields a dict
        (e.g. the cache is a defaultdict) — confirm, otherwise the first
        lookup for a new subgraph would raise KeyError.
        """
        connected_component = self.connected_component_cache.get(
            id(subgraph), {}).get(id(target))
        if not connected_component:
            # Exclude negative-condition edges from the traversal.
            connected_component = subgraph.algo.graph_query_vertices(
                root_id=target,
                edge_query_dict={'!=': {NEG_CONDITION: True}})
            self.connected_component_cache[id(subgraph)][id(target)] = \
                connected_component
        return connected_component
    def _db_action_to_action_info(self, db_action):
        """Rebuild an in-memory ActionInfo from an active_actions DB row.

        Target (and optional source) vertices are resolved from the entity
        graph; the action properties come from the scenario repository.

        :param db_action: models.ActiveAction row
        :return: ActionInfo in DO mode
        """
        target = self._entity_graph.get_vertex(db_action.target_vertex_id)
        targets = {TARGET: target}
        if db_action.source_vertex_id:
            source = self._entity_graph.get_vertex(db_action.source_vertex_id)
            targets[SOURCE] = source
        scenario_action = self._scenario_repo.actions.get(db_action.action_id)
        # Shallow copy so callers can tweak properties without mutating the
        # scenario repository's copy.
        properties = copy.copy(scenario_action.properties)
        action_specs = ActionSpecs(
            id=db_action.action_id,
            type=db_action.action_type,
            targets=targets,
            properties=properties,
        )
        action_info = ActionInfo(
            specs=action_specs,
            mode=ActionMode.DO,
            action_id=db_action.action_id,
            trigger_id=db_action.trigger,
        )
        return action_info
@staticmethod
def _switch_edge_negative_props(is_switch_mode,
scenario_edge,
subgraph,
status):
if is_switch_mode:
scenario_edge.edge[NEG_CONDITION] = status
scenario_edge.edge[EProps.VITRAGE_IS_DELETED] = status
subgraph.update_edge(scenario_edge.edge)
@staticmethod
def _remove_negative_vertices_from_matches(matches, connected_component):
for match in matches:
ver_ids = [v.vertex_id for v in connected_component.get_vertices()]
ver_to_remove = [id for id in match.keys() if id not in ver_ids]
for v_id in ver_to_remove:
del match[v_id]
class ActiveActionsTracker(object):
    """Keeps track of all active actions and relative dominance/priority.

    Actions are organized according to resource-id and action details.
    Examples:
    - all set_state actions on a given resource are considered similar action
      regardless of state
    - all raise_alarm of type alarm_name on a given resource are considered
      similar action, regardless of severity

    Each action is assigned a score by mapping the value property to the
    priority defined in datasource values config.
    - Alarm: severity
    - Resource: state

    The score is used to determine which action in each group of similar
    actions to be executed next.
    """
    # Shared, lazily initialized mapping of action type -> scoring helper.
    action_tools = None

    def __init__(self, db, actions):
        """Load existing similar actions from the DB and index them.

        :param db: storage handle exposing the active_actions table
        :param actions: the batch of ActionInfo objects about to be processed
        """
        self.db = db
        # (source_id, target_id, extra_info, action_type) -> set of DB rows
        self.data = defaultdict(set)
        self.actions_to_create = {}
        self.actions_to_remove = set()
        self.actions_to_perform = []  # use a list to keep the insertion order
        self._init_action_tools()
        # Query DB for all actions with same properties
        actions_keys = set([self._get_key(action) for action in actions])
        db_rows = self.db.active_actions.query_similar(actions_keys) or []
        for db_row in db_rows:
            self.data[(db_row.source_vertex_id, db_row.target_vertex_id,
                       db_row.extra_info, db_row.action_type)].add(db_row)

    @classmethod
    def _init_action_tools(cls):
        """Build the class-level action-type -> scoring-tool map once."""
        if cls.action_tools:
            return
        info_mapper = DatasourceInfoMapper()
        alarms_score = info_mapper.get_datasource_priorities('vitrage')
        all_scores = info_mapper.get_datasource_priorities()
        cls.action_tools = {
            ActionType.SET_STATE: pt.SetStateTools(all_scores),
            ActionType.RAISE_ALARM: pt.RaiseAlarmTools(alarms_score),
            ActionType.ADD_CAUSAL_RELATIONSHIP: pt.BaselineTools,
            ActionType.MARK_DOWN: pt.BaselineTools,
            ActionType.EXECUTE_MISTRAL: pt.BaselineTools
        }

    def calc_do_action(self, action_info):
        """Add this action to active_actions table, if not exists

        The action is queued for execution only when it is new AND holds the
        highest score among its similar actions — only a top scored action
        that is new should be performed.
        """
        similar_actions = self._get_similar(action_info)
        exists = any(
            a.action_id == action_info.action_id and
            a.trigger == action_info.trigger_id for a in similar_actions)
        if not exists:
            self._add(action_info)
        if not exists and self._is_highest_score(similar_actions, action_info):
            self.actions_to_perform.append(action_info)

    def calc_undo_action(self, action_info):
        """Delete this action from active_actions table, if exists

        decide if action should be performed
        A top scored action should be 'undone' if there is not a second action.
        If there is a second, it should now be 'done' and become the dominant

        :param action_info: action to delete
        """
        similar_actions = self._get_similar(action_info)
        # A non-dominant action can be dropped silently.
        if not self._is_highest_score(similar_actions, action_info):
            self._remove(action_info)
            return
        second_highest = self._sort_db_actions(similar_actions)[1]\
            if len(similar_actions) > 1 else None
        # We should 'DO' the Second highest scored action so
        # to override the existing dominant action.
        # or, if there is no second highest scored action
        # So we just 'UNDO' the existing dominant action
        if second_highest:
            self.actions_to_perform.append(second_highest)
        else:
            self.actions_to_perform.append(action_info)
        self._remove(action_info)

    def flush_db_updates(self):
        """Persist all pending creations/deletions in two bulk DB calls."""
        self.db.active_actions.bulk_create(self.actions_to_create.values())
        self.db.active_actions.bulk_delete(self.actions_to_remove)

    def _add(self, action_info):
        """Index a new DB row for the action and queue it for creation."""
        db_row = self._to_db_row(action_info)
        self._get_similar(action_info).add(db_row)
        # De-duplicate queued creations by the logical action id.
        id_ = ScenarioEvaluator._generate_action_id(action_info.specs)
        if id_ not in self.actions_to_create:
            self.actions_to_create[id_] = db_row

    def _remove(self, action_info):
        """Drop the matching row from the index and queue its deletion."""
        similar_actions = self._get_similar(action_info)
        for action in similar_actions:
            if action.trigger == action_info.trigger_id and \
                    action.action_id == action_info.action_id:
                similar_actions.remove(action)
                break
        self.actions_to_remove.add(
            (action_info.trigger_id, action_info.action_id))

    def _get_similar(self, action_info):
        """Return the set of known DB rows similar to `action_info`.

        NOTE(review): dict.get with a default bypasses the defaultdict
        factory — for an unseen key the returned set is NOT stored in
        self.data, so rows added via _add under a brand-new key are not
        visible to later lookups in the same batch. Confirm this is intended.
        """
        return self.data.get(self._get_key(action_info), set())

    def _get_key(self, action_info):
        """Return the similarity key: (source id, target id, extra, type)."""
        src = action_info.specs.targets.get(SOURCE, {}).get(VProps.VITRAGE_ID)
        trg = action_info.specs.targets.get(TARGET, {}).get(VProps.VITRAGE_ID)
        extra_info = self.action_tools[action_info.specs.type].get_extra_info(
            action_info.specs)
        action_type = action_info.specs.type
        return src, trg, extra_info, action_type

    def _to_db_row(self, action_info):
        """Build an (unsaved) ActiveAction DB row for the action."""
        source = action_info.specs.targets.get(SOURCE, {})
        target = action_info.specs.targets.get(TARGET, {})
        action_score = self.action_tools[action_info.specs.type]. \
            get_score(action_info)
        extra_info = self.action_tools[action_info.specs.type]. \
            get_extra_info(action_info.specs)
        return storage.sqlalchemy.models.ActiveAction(
            action_type=action_info.specs.type,
            extra_info=extra_info,
            source_vertex_id=source.get(VProps.VITRAGE_ID),
            target_vertex_id=target.get(VProps.VITRAGE_ID),
            action_id=action_info.action_id,
            trigger=action_info.trigger_id,
            score=action_score)

    @classmethod
    def _is_highest_score(cls, db_actions, action_info):
        """Get the top action from the list and compare to action_info

        Actions are sorted according to:
        score - primary, ascending
        created_at - secondary, descending
        """
        if not db_actions:
            return True
        # min over (-score, created_at): highest score wins, newest breaks
        # ties; rows not yet persisted (created_at is None) count as "now".
        highest_score_action = min(
            db_actions, key=lambda action: (-action.score, action.created_at
                                            or utcnow(False)))
        return highest_score_action.trigger == action_info.trigger_id and \
            highest_score_action.action_id == action_info.action_id

    @staticmethod
    def _sort_db_actions(db_actions):
        """Sort ActiveAction items by two fields

        score - primary, ascending
        created_at - secondary, descending
        """
        return sorted(
            db_actions,
            key=lambda action: (-action.score, action.created_at),
            reverse=False)
|
#!/usr/bin/env python
# coding=utf-8
'''
> File Name: database.py
> Author: vassago
> Mail: f811194414@gmail.com
> Created Time: 五 8/10 18:57:22 2018
'''
from contextlib import contextmanager
import logging
from alembic import command
from alembic.config import Config
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import create_database
from app.config.common import config
from app.models.base import Base
from app.storage.dbAlchemy import SQLAlchemy
LOG = logging.getLogger(__name__)
db = SQLAlchemy()
@contextmanager
def global_sqlalchemy_session():
    '''
    Yield a brand-new, independent session for each call
    (expire_on_commit=False). Commits on normal exit, rolls back and
    re-raises on any error, and always expunges and closes the session.
    :return:
    '''
    session = sessionmaker(bind=db.engine, expire_on_commit=False)()
    try:
        yield session
        session.commit()
    except:  # noqa: E722 - re-raised below, so nothing is swallowed
        session.rollback()
        raise
    finally:
        session.expunge_all()
        session.close()
def sqlalchemy_session():
    """
    Return the shared scoped_session managed by `db`.
    Usage:
        1. Each `with` opens a sub-transaction; the commit happens when the
           outermost `with` exits. Inside, use session.flush() to push
           changes to the database (visible only within this transaction);
           never call commit() inside a nested block.
        Example:
            with sqlalchemy_session as session:
                do1
                with sqlalchemy_session as session2:
                    do2()
                with sqlalchemy_session as session2:
                    do3()
    :return: the scoped session from db.get_session()
    """
    return db.get_session()
def init_app_webhook(app):
    """Hook the DB session lifecycle into the Flask request cycle."""
    # Automatically commit the session after every request completes.
    @app.after_request
    def after_clean(resp, *args, **kwargs):
        db.session.commit()
        return resp

    # Remove the scoped session when the app context tears down.
    @app.teardown_appcontext
    def shutdown_session(exception=None):
        db.session.remove()
def create_database(db_url, encoding):
    """Create the database at `db_url` if it does not exist yet.

    NOTE: this function shadows the module-level
    `sqlalchemy_utils.create_database` import; the library function is
    re-imported locally under another name.

    :param db_url: SQLAlchemy database URL to create
    :param encoding: charset passed through for MySQL databases
    """
    from sqlalchemy_utils import database_exists, create_database as origin_create_database
    # Bug fix: check existence of the URL we were asked to create, not
    # unconditionally config.DATABASE_URL — the two can differ.
    if database_exists(db_url):
        return
    if 'mysql' in db_url:
        # Only MySQL takes an explicit encoding argument here.
        origin_create_database(db_url, encoding)
    else:
        origin_create_database(db_url)
def create_db(app=None):
    """Ensure the configured database exists and bind the global engine.

    :param app: optional Flask app; when given, session lifecycle hooks
        are installed via init_app_webhook.
    """
    create_database(config.DATABASE_URL, config.DATABASE_URL_ENCODING)
    db.update_engine(config.DATABASE_URL, echo=config.SHOW_SQL)
    if app:
        init_app_webhook(app)
def init_db():
    """Create or migrate the schema.

    Without DB_MIGRATION, tables are created directly from the declarative
    models; otherwise alembic upgrades to head and stamps the revision.
    """
    if not config.DB_MIGRATION:
        Base.metadata.create_all(db.engine)
    else:
        alembic_cfg = Config(config.ALEMBIC_CONFIG)
        alembic_cfg.set_main_option('script_location', config.ALEMBIC_SCRIPT_LOCATION)
        command.upgrade(alembic_cfg, "head")
        # Record "head" as the current revision after the upgrade.
        command.stamp(alembic_cfg, "head")
|
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from django.core.validators import MaxValueValidator, MinValueValidator
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db.models import Sum
from users.models import My_custom_user
from users.models import My_custom_user
import decimal
# Create your models here.
class User_point_input_model(models.Model):
    """Health Data in the total value of each specific category.

    attributes:
        user(obj): the current user object
        date(datefield): the date of the health actions
        Hours_of_sleep(float): the hours of sleep
        Water_100oz(Boolean): True, if 100oz of water was drunk
        clean_eating(Boolean): True, if the user ate clean
        workout_intensity(int): value of 0-4 based on intensity of workout
        workout_amount_of_time(int): value in minutes of workout time
        steps(int): amount of total steps
    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        null=True, blank=True)
    date = models.DateField(
        default=now, editable=True,
        help_text='yyyy-mm-dd')
    Hours_of_sleep = models.FloatField(default=0)  # accepts decimals
    Water_100oz = models.BooleanField(default=False)
    clean_eating = models.BooleanField(default=False)
    workout_intensity = models.PositiveIntegerField(
        default=0,
        validators=[MaxValueValidator(4), MinValueValidator(0)],
        help_text="""input 0-4, None, light, moderate,
                    intense, super intense""")
    workout_amount_of_time = models.PositiveIntegerField(
        default=0, verbose_name='Workout time (in minutes)')
    steps = models.PositiveIntegerField(default=0)

    def __str__(self):
        """Return the health input object's date as the representative string."""
        return str(self.date)

    def get_absolute_url(self):
        """Return the user to the homepage once the health input object is created."""
        return reverse('home')

    @staticmethod
    def _bool_points(goal_met):
        """Return 10 points when a boolean health goal (water / clean eating) was met.

        :param goal_met: bool flag from the input row
        :return: int, 10 if the goal was met else 0
        """
        return 10 if goal_met else 0

    def _calculate_points(self):
        """Compute every point value derived from this input row.

        Shared by create_point_object and update_points (the original
        duplicated the boolean-points helper and the arithmetic in both).

        :return: dict of keyword arguments accepted by Point_model
            create()/update().
        """
        sleep_points = self.Hours_of_sleep * 3.3
        workout_points = self.workout_intensity * (self.workout_amount_of_time * .2)
        clean_eating_points = self._bool_points(self.clean_eating)
        water_points = self._bool_points(self.Water_100oz)
        step_points = self.steps * .001
        return {
            'sleep_points': sleep_points,
            'workout_points': workout_points,
            'clean_eating_points': clean_eating_points,
            'water_points': water_points,
            'step_points': step_points,
            'total_points': (water_points + workout_points + sleep_points +
                             clean_eating_points + step_points),
        }

    def _point_goal_for_date(self):
        """Return the daily point goal of this user's goal covering self.date.

        When several goals overlap, the last one returned by the queryset
        wins (same behavior as the original loop). Returns 0 if no goal
        covers the date.
        """
        point_goal = 0
        # Only consider goals set by the user who entered the health data.
        for goal in Point_goals.objects.filter(user=self.user):
            if goal.goal_start_date <= self.date <= goal.goal_end_date:
                point_goal = int(goal.point_goal)
        return point_goal

    def create_point_object(self):
        """Create the corresponding Point_model object from this input's data."""
        # objects.create() already saves the row; the original's extra
        # save() call re-triggered Point_model.save() a second time.
        Point_model.objects.create(
            date=self.date,
            user=self.user,
            one_to_one_workout=self,
            daily_point_goal=self._point_goal_for_date(),
            **self._calculate_points())

    def update_points(self):
        """Sync the existing Point_model row for this date with current values."""
        # Bug fix: restrict the update to this user's row. Filtering on date
        # alone overwrote every user's points for that day.
        Point_model.objects.filter(user=self.user, date=self.date).update(
            date=self.date,
            user=self.user,
            one_to_one_workout=self,
            **self._calculate_points())

    def save(self, *args, **kwargs):
        """Save the health input object, then create a corresponding point obj."""
        is_new = True if not self.id else False  # https://stackoverflow.com/questions/28264653/how-create-new-object-automatically-after-creating-other-object-in-django
        super(User_point_input_model, self).save(*args, **kwargs)
        if is_new:
            self.create_point_object()
class Point_model(models.Model):
    """Point obj relating to a corresponding User_point_input_model object.

    This object is generated automatically when the User_point_input_model
    object is created by the user.
    Attributes:
        sleep_points(float): sleep hrs * 3.3
        date(datefield): date of points
        water_points(int): 10 if the user drank 100oz of water
        workout_points(int): based off of workout time * workout intensity
        one_to_one_workout(obj): corresponding User_point_input_model
        total_points(int): sum of sleep_points, water_points,
            workout_points, step points, and clean eating points
        clean_eating_points(int): 10 if the user ate clean
        step_points(int): total steps * .001
        user(obj): user that input the User_point_input_model
        daily_point_goal(int): point total relating to a corresponding
            Point_goals object
        up_to_date_total_points_accumulated(int): total of all this user's
            point models dated on or before this date
    """
    sleep_points = models.FloatField(default=0) #max_digits=5, decimal_places=2 try
    date = models.DateField(default=now, editable=True)
    water_points = models.PositiveIntegerField(default=0)
    workout_points = models.PositiveIntegerField(default=0)
    one_to_one_workout = models.ForeignKey(
        User_point_input_model,on_delete=models.CASCADE ,
        null=True, blank=True )
    total_points = models.PositiveIntegerField(
        default=0, null=True, blank=True)
    clean_eating_points = models.PositiveIntegerField(default=0)
    step_points = models.PositiveIntegerField(default=0)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        null=True, blank=True )
    daily_point_goal = models.PositiveIntegerField(
        default=0, null=True, blank=True)
    up_to_date_total_points_accumulated = models.PositiveIntegerField(
        default=0, null=True, blank=True)

    def update_total_points_for_goal(self):
        """Add this row's total_points to the covering goal's running total."""
        all_point_goals = Point_goals.objects.filter(user=self.user)
        # get all the goals for this user
        for obj in all_point_goals: # each start date obj
            if self.date >= obj.goal_start_date and self.date <= obj.goal_end_date:
                current_sum = int(obj.current_point_total_input)
                new_current_sum = current_sum + int(self.total_points)
                my_set_obj = all_point_goals.filter(id = obj.id)
                # get a queryset so i can update
                my_set_obj.update(current_point_total_input = new_current_sum)
                # Only the first covering goal is credited.
                break
            else:
                pass

    def update_total_points_for_user(self):
        '''Add the points just created to the user's running total.'''
        current_user_updateable_form = My_custom_user.objects.filter(id=self.user_id)
        # filter for the current user (queryset form so .update() works)
        current_user = My_custom_user.objects.get(id=self.user_id)
        current_point_sum = current_user.total_points
        new_sum = current_point_sum + self.total_points
        current_user_updateable_form.update(total_points=new_sum)

    def total_points_accumulated(self):
        '''Recompute the running point total up to and including this date.

        Also refreshes the running totals of every later-dated row, since a
        back-dated insert would otherwise leave them stale.
        NOTE(review): the inner refresh issues one aggregate query per later
        row — O(n) queries; acceptable only for small per-user row counts.
        '''
        this_user_point_models = Point_model.objects.filter(user=self.user).filter(date__lt = self.date).aggregate(Sum('total_points'))
        if this_user_point_models['total_points__sum'] == None: # if this is the lowest date
            self.up_to_date_total_points_accumulated = self.total_points
        else:
            sum_point_totals_including_this_date = this_user_point_models['total_points__sum'] + self.total_points
            self.up_to_date_total_points_accumulated = sum_point_totals_including_this_date
        def update_all_others_that_are_effected():
            '''any date that is created while dates later than
            it exist, then those later dates will have incorrect
            versions of there accumulated up to date total because it will
            not account for the new point input this function will reset those
            later than this models date to their new correct accumulated total '''
            # get all the models of this user
            # then only get the models that have a higher date than this one
            this_users_models = Point_model.objects.filter(user=self.user)
            # now get the ones that have a higher date than this one
            above_this_models_date = this_users_models.filter(date__gt=self.date)
            for obj in above_this_models_date:
                obj_user_point_models = Point_model.objects.filter(user=self.user).filter(date__lte = obj.date).aggregate(Sum('total_points'))
                updateable_obj = Point_model.objects.filter(id=obj.id)
                updateable_obj.update(
                    up_to_date_total_points_accumulated=obj_user_point_models['total_points__sum'])
        update_all_others_that_are_effected()

    # when a point thing is made, update the goals point total
    def save(self, *args, **kwargs):
        """Refresh running totals, save, then credit goal/user on first save."""
        self.total_points_accumulated()
        is_new = True if not self.id else False # https://stackoverflow.com/questions/28264653/how-create-new-object-automatically-after-creating-other-object-in-django
        super(Point_model, self).save(*args, **kwargs)
        if is_new:
            self.update_total_points_for_goal()
            self.update_total_points_for_user()
class Point_goals(models.Model):
    """A daily point total goal for a spanned time, start date - end date.

    Attributes:
        user(obj): the user who set the goal
        goal_start_date(datefield): the first day the goal starts
        goal_end_date(datefield): the last day the goal exists
        point_goal(int): the total point goal for each day in the date range
        goal_accomplished(str): a str representing if the goal has been
            reached for the day
        points_needed_for_goal_achieved(int): the total points needed based
            off of number of days of the goal * point_goal
        current_point_total_input(int): sum total of every single point obj
            that exists between goal_start_date and goal_end_date
    """
    goal_health_field_choices = [
        ('sleep_points', 'Sleep'),('water_points', 'Water'),
        ('clean_eating_points', 'Clean Eating'), ('step_points', 'Steps'),
        ('total_points', 'Total Points'),('workout_points', 'Workout')]
    goal_metric_choices = [
        ('activityMetric', 'Activity Metric'), ('total_points', 'Points')
    ]
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete = models.CASCADE,
        null = True, blank=True )
    goal_start_date = models.DateField(
        default=now, editable=True, help_text='year-month-day')
    goal_end_date = models.DateField(
        default=now, editable=True, help_text='year-month-day')
    point_goal = models.PositiveIntegerField(
        default=0)
    goal_accomplished = models.TextField(
        default='no', null=True, blank=True)
    points_needed_for_goal_achieved = models.PositiveIntegerField(
        default=1, null=True, blank=True)
    current_point_total_input = models.PositiveIntegerField(
        default=0, null=True, blank=True)
    goal_health_field = models.CharField(
        max_length=10000, choices=goal_health_field_choices, default='total_points')
    goal_metric_field = models.CharField(
        max_length=10000, choices=goal_metric_choices, default='total_points',
        help_text='Activity Metrics: Sleep = Hours, Water = 100oz, Clean Eating = 24hrs, Steps = Steps, Workout = Minutes'
    )

    def points_needed_to_reach_goal(self):
        """Calculate and update the points needed to accomplish this goal.

        points needed to accomplish goal = days in goal * points per day
        """
        #date_format = "%Y/%m/%d"
        date_time_start = self.goal_start_date
        date_time_end = self.goal_end_date
        number_of_days = self.goal_end_date - self.goal_start_date
        number_of_days = number_of_days.days
        days_times_daily_points = int(number_of_days) * self.point_goal
        current_goal_obj = Point_goals.objects.filter(id=self.id)
        current_goal_obj.update(
            points_needed_for_goal_achieved=days_times_daily_points)

    def remove_goal_from_individual_point_inputs(self):
        """Reset the daily goal of all point objects in this goal's range to 0."""
        # find all individual days that this goal was set for
        # update their goal count to 0
        # get a date range, can do with a filter, then can update an entire filter
        point_obj_in_goal_date_range = Point_model.objects.filter(date__range=[self.goal_start_date, self.goal_end_date])
        point_obj_in_goal_date_range.update(daily_point_goal=0.0)

    def add_goal_field_to_point_object(self):
        """Add the daily point goal to each point model that falls between the goal's start and end date."""
        for obj in Point_model.objects.filter(user=self.user): # only get the point_input related to the user that set the goal
            if obj.date >= self.goal_start_date and obj.date <= self.goal_end_date:
                updatable_point_model_filter = Point_model.objects.filter(id=obj.id)
                updatable_point_model_filter.update(daily_point_goal=self.point_goal)

    def add_up_current_points_towards_goal(self):
        """Add up the points already earned inside this goal's date range."""
        current_sum_points_in_goal_date_range = 0
        point_obj_for_user = Point_model.objects.filter(user=self.user)
        point_obj_in_goal_date_range = point_obj_for_user.filter(date__range=[self.goal_start_date,self.goal_end_date])
        for obj in point_obj_in_goal_date_range:
            current_sum_points_in_goal_date_range += obj.total_points
        # now update it
        current_goal_obj = Point_goals.objects.filter(id=self.id)
        current_goal_obj.update(
            current_point_total_input=int(current_sum_points_in_goal_date_range) )

    def convert_field_name_to_readable_field_name(self):
        """Return a more readable version of the goal_health_field."""
        return(dict(self.goal_health_field_choices).get(self.goal_health_field))

    def save(self, *args, **kwargs):
        """Save the current goal and afterwards call corresponding functions.

        corresponding functions: add_goal_field_to_point_object,
        points_needed_to_reach_goal, add_up_current_points_towards_goal

        NOTE(review): a goal whose start date overlaps an existing goal with
        the same health and metric fields is silently discarded — no error
        is raised and nothing is saved. Confirm callers expect this.
        """
        date_conflict = False
        for obj in Point_goals.objects.filter(user=self.user):
            if (self.goal_start_date >= obj.goal_start_date and self.goal_start_date <= obj.goal_end_date
            and self.goal_metric_field == obj.goal_metric_field and self.goal_health_field == obj.goal_health_field ): # if the start date with same heatlh and metric field is inside preexisting goal
                date_conflict = True
            else:
                pass
        if date_conflict:
            pass
        else:
            is_new = True if not self.id else False # https://stackoverflow.com/questions/28264653/how-create-new-object-automatically-after-creating-other-object-in-django
            super(Point_goals, self).save(*args, **kwargs)
            if is_new:
                self.add_goal_field_to_point_object()
                self.points_needed_to_reach_goal()
                self.add_up_current_points_towards_goal()

    def delete(self, *args, **kwargs):
        """Clear this goal from its point objects, then delete it."""
        self.remove_goal_from_individual_point_inputs()
        super(Point_goals, self).delete(*args, **kwargs)
|
from django.contrib import admin
from ChatBot.models import UserMessage  # model to be exposed in the admin site
# Register your models here.
admin.site.register(UserMessage)  # make UserMessage editable via the Django admin
|
# -*- coding: utf-8 -*-
{
'name': 'Leave Management',
'version': '10.0.2.0.0',
'summary': 'Manage Leave',
'description': """
Helps you to manage Leave.\n
HR Leave extension functionality\n
""",
'category': 'Human Resources',
'author': 'Al Kidhma Group',
'website': "",
'company': '',
'depends': [
'base', 'hr_holidays', 'hr_payroll'
],
'data': [
'data/salary_rule_unpaid.xml',
'wizard/leave_extension.xml',
'wizard/salary_report_wizard.xml',
'views/reports.xml',
'views/salary_report.xml',
'views/report_payslip_templates.xml',
'views/hr_holidays_status.xml',
'views/leave.xml',
],
'demo': [],
'images': [],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
'application': False,
}
|
import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import UserRegister
from resources.user import UserList
from resources.item import Item, ItemList
from resources.store import Store, StoreList
from resources.dbinit import DBinit
from flask import render_template
app = Flask(__name__)
# DATABASE_URL falls back to a local postgres instance for development.
# NOTE(review): SQLAlchemy >= 1.4 rejects the 'postgres://' scheme
# ('postgresql://' is required) — confirm the pinned SQLAlchemy version.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'postgres://user:littleredflower@localhost:5432/user')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# SECURITY: hard-coded secret key signs the JWT tokens; load it from the
# environment in production.
app.secret_key = 'EiEiO'
api = Api(app)
jwt = JWT(app, authenticate, identity)  # /auth
# @app.before_first_request
# def create_tables():
#     db.create_all()
@app.route('/')
def home():
    """Serve the landing page."""
    return render_template('index.html')
api.add_resource(DBinit, '/createdb')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(StoreList, '/stores')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')
api.add_resource(UserList, '/users')
if __name__ == '__main__':
    from db import db
    db.init_app(app)
    app.run(host='0.0.0.0', port=80, debug=True)
|
def q_04():
    """Read three integers, then print their sum and truncated average.

    Re-prompts until the user supplies three whitespace-separated integers.
    (Spec: the average discards the decimal part and prints the integer
    part only, which the %d conversion of the float does.)
    """
    while True:
        try:
            # split() without an argument tolerates repeated/odd whitespace,
            # unlike the original split(' ') which broke on double spaces.
            num1, num2, num3 = map(int, input("정수 세 개 입력: ").split())
            total = num1 + num2 + num3
            print("합계: %d" % total)
            # %d truncates the float toward zero, discarding the decimals.
            print("평균: %d" % (total / 3))
            return
        except Exception as e:
            print("올바른 값을 입력하세요.", e)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.6
# Email : muyanru345@163.com
###################################################################
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import third-party modules
from Qt import QtCore
from Qt import QtWidgets
class MSizeGrip(QtWidgets.QSizeGrip):
    """Size grip used by MTextEdit; subclassed so it can be styled/extended."""
    def __init__(self, parent=None):
        super(MSizeGrip, self).__init__(parent)
class MTextEdit(QtWidgets.QTextEdit):
    """QTextEdit with an optional bottom-right size grip and an autosize hook."""
    def __init__(self, parent=None):
        super(MTextEdit, self).__init__(parent)
        # SubWindow flag is required for the child QSizeGrip to resize us.
        self.setWindowFlags(QtCore.Qt.SubWindow)
        self._size_grip = MSizeGrip(self)
        layout = QtWidgets.QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        # Pin the grip to the bottom-right corner.
        layout.addWidget(
            self._size_grip, 0, 0, QtCore.Qt.AlignBottom | QtCore.Qt.AlignRight
        )
        self.setLayout(layout)
        # Hidden until resizeable() is called.
        self._size_grip.setVisible(False)
    def autosize(self):
        """Recompute size on every text change. Returns self (fluent API)."""
        self.textChanged.connect(self._autosize_text_edit)
        return self
    def _autosize_text_edit(self):
        # NOTE(review): unfinished — only prints document metrics; no actual
        # resizing happens yet.
        # w = self.width()
        doc = self.document()
        print(self.width(), doc.lineCount(), doc.idealWidth())
    def resizeable(self):
        """Show the size grip on bottom right. User can use it to resize MTextEdit"""
        self._size_grip.setVisible(True)
        return self
|
"""
LeetCode 189. Rotate Array (Easy)
blog : https://daimhada.tistory.com/113
problem : https://leetcode.com/problems/rotate-array
"""
class Solution:
    """In-place array rotation (LeetCode 189)."""

    def rotate(self, nums: list, k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Rotates `nums` right by `k` positions using the reversal trick:
        reverse the whole list, then un-reverse the first k % n elements
        and the remaining elements separately.

        Fixes vs. original: the `List[int]` annotation raised NameError
        (typing was never imported), and an empty list raised
        ZeroDivisionError on `k % len(nums)`.
        """
        if not nums:
            return
        # if len(nums) < k, rotating by k equals rotating by k % len(nums)
        n = k % len(nums)
        nums.reverse()
        # Slice assignment keeps the mutation in-place, as required.
        nums[:n] = reversed(nums[:n])
        nums[n:] = reversed(nums[n:])
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Count root-independent downward paths summing to a target (LeetCode 437)."""

    def pathSum(self, root, sum):
        """Return the number of downward paths whose node values add to `sum`.

        Tries every node as a potential path start (outer recursion) and, for
        each start, counts paths extending downward (inner recursion) —
        the classic O(n^2) two-level approach.
        """
        def count_from(node, remaining):
            # Paths that start at `node` and add up to `remaining`.
            if node is None:
                return 0
            found_here = 1 if node.val == remaining else 0
            rest = remaining - node.val
            return (found_here
                    + count_from(node.left, rest)
                    + count_from(node.right, rest))

        if root is None:
            return 0
        return (count_from(root, sum)
                + self.pathSum(root.left, sum)
                + self.pathSum(root.right, sum))
from decimal import Decimal
from BaseController import BaseController
import tornado.ioloop
import tornado.web
import re
class InfoController(BaseController):
    """Tornado handler that serves Redis server info with humanized fields."""

    def get(self):
        """Serves a GET request.

        Query args:
            server: name of the Redis server to fetch info for.
        Writes the raw info dict augmented with per-database entries,
        humanized key/uptime/command counters.
        """
        server = self.get_argument("server")
        redis_info = self.stats_provider.get_info(server)
        # Collect the per-database sections ("db0", "db1", ...) in order.
        databases = []
        for key in sorted(redis_info.keys()):
            if key.startswith("db"):
                database = redis_info[key]
                database['name'] = key
                databases.append(database)
        total_keys = 0
        for database in databases:
            total_keys += database.get("keys")
        # An empty server still reports one (empty) database for the UI.
        if total_keys == 0:
            databases = [{"name": "db0", "keys": "0", "expires": "0"}]
        redis_info['databases'] = databases
        redis_info['total_keys'] = self.shorten_number(total_keys)
        uptime_seconds = redis_info['uptime_in_seconds']
        redis_info['uptime'] = self.shorten_time(uptime_seconds)
        commands_processed = redis_info['total_commands_processed']
        commands_processed = self.shorten_number(commands_processed)
        redis_info['total_commands_processed_human'] = commands_processed
        self.write(redis_info)

    def shorten_time(self, seconds):
        """Takes an integer number of seconds and rounds it to a human readable
        format.

        Args:
            seconds (int): Number of seconds to convert.
        Returns:
            str like "42 sec", "12.5m", "3h" or "2d".
        """
        if seconds < 60:
            # less than 1 minute
            val = str(seconds) + " sec"
        elif seconds < 3600:
            # if the seconds is less than 1hr
            num = self.rounded_number(seconds, 60)
            # 59.x minutes can round up to "60"; present that as one hour.
            if num == "60":
                val = '1h'
            else:
                val = num + "m"
        elif (seconds < 60*60*24):
            # if the number is less than 1 day
            num = self.rounded_number(seconds, 60 * 60)
            if num == "24":
                val = "1d"
            else:
                val = num + "h"
        else:
            num = self.rounded_number(seconds, 60*60*24)
            val = num + "d"
        return val

    def shorten_number(self, number):
        """Shortens a number to a human readable format.

        Args:
            number (int): Number to convert.
        Returns:
            the int itself below 1000, otherwise a str with a K/M/B/T suffix.
        """
        if number < 1000:
            return number
        elif number >= 1000 and number < 1000000:
            num = self.rounded_number(number, 1000)
            # 999.x K can round up to "1000"; bump it to the next unit.
            val = "1M" if num == "1000" else num + "K"
            return val
        elif number >= 1000000 and number < 1000000000:
            num = self.rounded_number(number, 1000000)
            val = "1B" if num == "1000" else num + "M"
            return val
        elif number >= 1000000000 and number < 1000000000000:
            num = self.rounded_number(number, 1000000000)
            val = "1T" if num == "1000" else num + "B"
            return val
        else:
            num = self.rounded_number(number, 1000000000000)
            return num + "T"

    def rounded_number(self, number, denominator):
        """Rounds a number to one decimal and strips trailing ".0".

        Args:
            number (int|float): The number to round.
            denominator (int): The denominator.
        Returns:
            str such as "1.5" or "12".
        """
        rounded = str(round(Decimal(number)/Decimal(denominator), 1))
        # Fix: use raw strings for the regex patterns — '\.' in a plain
        # string is an invalid escape (DeprecationWarning, SyntaxError in
        # future Python versions). Behavior is unchanged.
        replace_trailing_zero = re.compile(r'0$')
        no_trailing_zeros = replace_trailing_zero.sub('', rounded)
        replace_trailing_period = re.compile(r'\.$')
        final_number = replace_trailing_period.sub('', no_trailing_zeros)
        return final_number
|
'''
@package: dc
@author igor
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2014 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
'''
import glob
import os
import base64
from datetime import datetime
import dc.EventObjects
import dc.Constants as DC_CONSTANTS
import dc_db.Constants as Constants
from dc_db.URLFetchTask import URLFetchTask
from dc_db.StatisticLogManager import StatisticLogManager
from dc_db.ProcessedContentInternalStruct import ProcessedContentInternalStruct
from dc_db.AttrFetchTask import AttrFetchTask
from app.Utils import PathMaker
import app.Utils as Utils # pylint: disable=F0401
logger = Utils.MPLogger().getLogger()
# #process urlContent event
class URLContentTask(object):
  """Assembles URLContentResponse objects for incoming URLContentRequests.

  Content is gathered from the raw-data file storage (and its companion
  headers/requests/meta/cookies files), the key-value DB (processed
  content) and the per-site MySQL URL tables, selected by a bit mask.
  """
  # #constructor
  #
  # @param keyValueStorageDir path to keyValue storage work dir
  # @param rawDataDir path to raw data dir
  # @param dBDataTask instance of DBDataTask module
  def __init__(self, keyValueStorageDir, rawDataDir, dBDataTask, dcSiteTemplate, keyValueDefaultFile, dcStatTemplate,
               dcLogTemplate):
    self.keyValueStorageDir = keyValueStorageDir
    self.rawDataDir = rawDataDir
    self.processedContents = []
    self.rawContents = []
    self.headers = []
    self.requests = []
    self.meta = []
    self.cookies = []
    self.contentMask = None
    self.dbDataTask = dBDataTask
    self.urlFetchTask = URLFetchTask(keyValueStorageDir, rawDataDir, dBDataTask, dcSiteTemplate,
                                     keyValueDefaultFile, dcStatTemplate, dcLogTemplate,
                                     Constants.DEFAULT_LOCK_TTL)
  # #method clears main contants
  def clearContents(self):
    """Reset all per-request content accumulator lists."""
    self.processedContents = []
    self.rawContents = []
    self.headers = []
    self.requests = []
    self.meta = []
    self.cookies = []
  # #make all necessary actions to get urls content data from storages
  #
  # @param urlContentRequests list of URLContentRequest objects
  # @param queryCallback function for queries execution
  # @return list of URLContentResponse objects
  def process(self, urlContentRequests, queryCallback): # pylint: disable=W0613
    """Resolve every request to a URLContentResponse (None stays None).

    A request carrying an embedded `urlFetch` is first expanded to concrete
    site/url values via URLFetchTask; plain requests are served directly.
    """
    urlContentResponses = []
    # @todo add more complex case, implemented only from rawDataDir
    for urlContentRequest in urlContentRequests:
      if urlContentRequest is None:
        urlContentResponses.append(None)
      elif hasattr(urlContentRequest, "urlFetch") and urlContentRequest.urlFetch is not None:
        urlFetches = []
        urlFetches.append(urlContentRequest.urlFetch)
        urls = self.urlFetchTask.process(urlFetches, queryCallback)
        for url in urls:
          # reuse the incoming request object for every fetched url
          urlContentRequest.urlMd5 = ""
          urlContentRequest.siteId = url.siteId
          urlContentRequest.url = url.url
          self.calcEmptyFields(urlContentRequest)
          StatisticLogManager.logUpdate(queryCallback, "LOG_URL_CONTENT", urlContentRequest, urlContentRequest.siteId,
                                        urlContentRequest.urlMd5)
          urlContentResponses.append(self.getURLContent(urlContentRequest, queryCallback))
      else:
        self.calcEmptyFields(urlContentRequest)
        StatisticLogManager.logUpdate(queryCallback, "LOG_URL_CONTENT", urlContentRequest, urlContentRequest.siteId,
                                      urlContentRequest.urlMd5)
        urlContentResponses.append(self.getURLContent(urlContentRequest, queryCallback))
    logger.debug(">>> urlContentResponses len = %s", str(len(urlContentResponses)))
    # logger.debug("!!! urlContentResponses: %s", Utils.varDump(urlContentResponses, stringifyType=0, strTypeMaxLen=5000))
    return urlContentResponses
  # #calcEmptyFields method calculate values of empty fields
  #
  # @param urlContentRequest object of URLContentRequest type
  def calcEmptyFields(self, urlContentRequest):
    """Default siteId to "0" and derive urlMd5 from the URL when missing."""
    if urlContentRequest.siteId == "":
      urlContentRequest.siteId = "0"
    if urlContentRequest.urlMd5 is None or urlContentRequest.urlMd5 == "":
      urlContentRequest.urlMd5 = urlContentRequest.fillMD5(urlContentRequest.url)
  # #generates and returns dbFieldsDict
  #
  # @param dbFieldsList - list of db fields names
  # @param dbFieldsListDefaultValues - dict with default values for DB fields names
  # @param row db row, not None
  # @return just generated dbFieldsDict dict
  def genDBFields(self, dbFieldsList, dbFieldsListDefaultValues, row):
    """Map requested DB field names to values from `row`.

    Defaults are applied first, then overwritten by row values; date/time
    columns are stringified; fields absent from the row become None.
    """
    ret = {}
    for fName in dbFieldsList:
      if fName in dbFieldsListDefaultValues:
        ret[fName] = dbFieldsListDefaultValues[fName]
    for fName in dbFieldsList:
      if fName is not None:
        if fName in row:
          if fName in ["UDate", "CDate", "LastModified", "TcDate", "PDate"]:
            # date/time columns are serialized as strings
            ret[str(fName)] = str(row[fName])
          else:
            ret[str(fName)] = row[fName]
        else:
          ret[str(fName)] = None
    return ret
  # #read content from KVDB if CONTENT_TYPE_PROCESSED have setted
  #
  # @param urlContentRequest object of URLContentRequest type
  # @param dataDir - contains file directory
  # @return list of Content objects
  def contentProcessed(self, dataDir, urlContentRequest, contentMask, queryCallback): # pylint: disable=W0613
    """Fetch processed content from the key-value DB and parse it."""
    ret = []
    dataFetchRequest = dc.EventObjects.DataFetchRequest(urlContentRequest.siteId, urlContentRequest.urlMd5)
    dataFetchResponse = self.dbDataTask.process(dataFetchRequest, queryCallback)
    if dataFetchResponse is not None and len(dataFetchResponse.resultDict) > 0:
      if ProcessedContentInternalStruct.DATA_FIELD in dataFetchResponse.resultDict and \
      dataFetchResponse.resultDict[ProcessedContentInternalStruct.DATA_FIELD] is not None and \
      ProcessedContentInternalStruct.CDATE_FIELD in dataFetchResponse.resultDict and \
      dataFetchResponse.resultDict[ProcessedContentInternalStruct.CDATE_FIELD] is not None:
        ret = ProcessedContentInternalStruct.parseProcessedBuf(\
        dataFetchResponse.resultDict[ProcessedContentInternalStruct.DATA_FIELD], \
        dataFetchResponse.resultDict[ProcessedContentInternalStruct.CDATE_FIELD], contentMask)
    logger.debug(">>> ret_content == " + str(ret))
    # NOTE(review): dataFetchResponse may be None here; .__dict__ would then
    # raise AttributeError inside the debug call -- confirm upstream guarantees.
    logger.debug(">>> UrlContent result = " + str(dataFetchResponse.__dict__))
    return ret
  # #extract url fields from Database
  #
  # @param siteId site Id
  # @param urlMD5 urls urlMD5
  # @return first row of SQL request
  def selectURLFromMySQL(self, siteId, urlMD5, queryCallback):
    """Return the first row for `urlMD5` from the per-site URL table, or None."""
    row = None
    tableName = Constants.DC_URLS_TABLE_NAME_TEMPLATE % siteId
    # NOTE(review): urlMD5 is interpolated into the SQL string directly --
    # safe only if it is always a hex digest; confirm upstream validation.
    SELECT_URL_QUERY = "SELECT * FROM %s WHERE `URLMd5` = '%s'"
    query = SELECT_URL_QUERY % (tableName, urlMD5)
    res = queryCallback(query, Constants.SECONDARY_DB_ID, Constants.EXEC_NAME)
    if hasattr(res, '__iter__') and len(res) >= 1:
      row = res[0]
    return row
  # #fillLists - fills incoming list of file's content
  #
  # @param filePath - path to the content file
  # @param elemList - incoming filled list
  def fillLists(self, filePath, elemList, typeId=dc.EventObjects.Content.CONTENT_RAW_CONTENT):
    """Append a base64-wrapped Content built from filePath (None on failure)."""
    if os.path.isfile(filePath):
      try:
        fd = open(filePath)
        raw_content = fd.read()
        localDate = datetime.fromtimestamp(os.path.getctime(filePath))
        # NOTE(review): b64encode(raw_content.decode('utf-8')) and err.message
        # below are Python 2 idioms (b64encode requires bytes on Python 3) --
        # confirm the target runtime.
        elemList.append(dc.EventObjects.Content(base64.b64encode(raw_content.decode('utf-8')), localDate.isoformat(' '), typeId))
        fd.close()
      except IOError as err:
        elemList.append(None)
        logger.debug(">>> IOError with file = %s MSG = %s", str(filePath), str(err.message))
    else:
      elemList.append(None)
      logger.debug(">>> No file = %s", str(filePath))
  # #contentRaw - content reader
  #
  # @param fList - incoming file list
  # @param isBreak - break after firs element or not
  def contentRaw(self, fList, isBreak, contentTypeId, parseAdditionType):
    """Read raw content files and, if requested, their companion
    headers/requests/meta/cookies files into the instance lists."""
    fd = None
    wasOpen = False
    for filePath in fList:
      if os.path.isfile(filePath):
        try:
          fd = open(filePath)
          raw_content = fd.read()
          localDate = datetime.fromtimestamp(os.path.getctime(filePath))
          self.rawContents.append(dc.EventObjects.Content(base64.b64encode(raw_content), localDate.isoformat(' '),
                                                          contentTypeId))
          wasOpen = True
          fd.close()
        except IOError as err:
          logger.debug(">>> IOError with file = %s MSG = %s", str(filePath), str(err.message))
      if wasOpen and parseAdditionType:
        # companion files share the base name; strip one suffix, append the next
        filePath = filePath[0: len(DC_CONSTANTS.RAW_DATA_SUFF) * -1]
        filePath += DC_CONSTANTS.RAW_DATA_HEADERS_SUFF
        if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_HEADERS:
          self.fillLists(filePath, self.headers, dc.EventObjects.Content.CONTENT_HEADERS_CONTENT)
        filePath = filePath[0: len(DC_CONSTANTS.RAW_DATA_HEADERS_SUFF) * -1]
        filePath += DC_CONSTANTS.RAW_DATA_REQESTS_SUFF
        if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_REQUESTS:
          self.fillLists(filePath, self.requests, dc.EventObjects.Content.CONTENT_REQUESTS_CONTENT)
        filePath = filePath[0: len(DC_CONSTANTS.RAW_DATA_REQESTS_SUFF) * -1]
        filePath += DC_CONSTANTS.RAW_DATA_META_SUFF
        if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_META:
          self.fillLists(filePath, self.meta, dc.EventObjects.Content.CONTENT_META_CONTENT)
        filePath = filePath[0: len(DC_CONSTANTS.RAW_DATA_META_SUFF) * -1]
        filePath += DC_CONSTANTS.RAW_DATA_COOKIES_SUFF
        if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_COOKIES:
          self.fillLists(filePath, self.cookies, dc.EventObjects.Content.CONTENT_COOKIES_CONTENT)
      if isBreak:
        break
  # #contentRawCommon - common content reader
  #
  # @param dataDir - contains file directory
  # @param localReverse - file reverse sorting (boolean)
  # @param allFiles - all files read or not (boolean)
  def contentRawCommon(self, dataDir, localReverse=False, allFiles=False, rawDataSuff=DC_CONSTANTS.RAW_DATA_SUFF,
                       contentTypeId=dc.EventObjects.Content.CONTENT_RAW_CONTENT, parseAdditionType=True):
    """Glob dataDir for *rawDataSuff files (ctime-sorted) and read them."""
    fileMask = (dataDir + "/*" + rawDataSuff)
    logger.debug(">>> contentRaw fList = " + str(fileMask))
    fList = sorted(glob.glob(fileMask), key=os.path.getctime, reverse=localReverse)
    self.contentRaw(fList, (not allFiles), contentTypeId, parseAdditionType)
  # #fillAdditionContentTypes fills result with contents of addition raw content types.
  #
  # @param typeMask - typeMask of supported content type
  # @param typeId - content type id, needs for filling Content obj
  # @param suff - raw data file sufffix
  # @param dataDir - raw data file storage dir
  def fillAdditionContentTypes(self, typeMask, typeId, suff, dataDir):
    """Read tidy/dynamic/chain-style content when `typeMask` is selected."""
    if self.contentMask & typeMask:
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_LAST:
        self.contentRawCommon(dataDir, True, False, suff, typeId, False)
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_FIRST:
        self.contentRawCommon(dataDir, False, False, suff, typeId, False)
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_ALL:
        self.contentRawCommon(dataDir, False, True, suff, typeId, False)
  # #extract url content from mandatory storage - implemented RAW!!
  #
  # @param urlContentRequest instance of URLContentRequest objects
  # @param queryCallback function for queries execution
  # @return list of URLContentResponse objects
  def getURLContent(self, urlContentRequest, queryCallback):
    """Build one URLContentResponse for the request's content-type mask.

    Gathers processed/raw/tidy/dynamic/chain content, then enriches the
    response with row fields from the per-site URL table and, optionally,
    URL attributes.
    """
    dataDir = self.rawDataDir + "/" + urlContentRequest.siteId + "/" + PathMaker(urlContentRequest.urlMd5).getDir()
    self.clearContents()
    self.contentMask = urlContentRequest.contentTypeMask
    if self.contentMask & (dc.EventObjects.URLContentRequest.CONTENT_TYPE_PROCESSED | \
                           dc.EventObjects.URLContentRequest.CONTENT_TYPE_PROCESSED_INTERNAL | \
                           dc.EventObjects.URLContentRequest.CONTENT_TYPE_PROCESSED_CUSTOM):
      self.processedContents.extend(self.contentProcessed(dataDir, urlContentRequest, self.contentMask, queryCallback))
    if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW:
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_LAST:
        self.contentRawCommon(dataDir, True, False)
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_FIRST:
        self.contentRawCommon(dataDir, False, False)
      if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_RAW_ALL:
        self.contentRawCommon(dataDir, False, True)
    self.fillAdditionContentTypes(dc.EventObjects.URLContentRequest.CONTENT_TYPE_TIDY,
                                  dc.EventObjects.Content.CONTENT_TIDY_CONTENT, DC_CONSTANTS.RAW_DATA_TIDY_SUFF,
                                  dataDir)
    self.fillAdditionContentTypes(dc.EventObjects.URLContentRequest.CONTENT_TYPE_DYNAMIC,
                                  dc.EventObjects.Content.CONTENT_DYNAMIC_CONTENT, DC_CONSTANTS.RAW_DATA_DYNAMIC_SUFF,
                                  dataDir)
    self.fillAdditionContentTypes(dc.EventObjects.URLContentRequest.CONTENT_TYPE_CHAIN,
                                  dc.EventObjects.Content.CONTENT_CHAIN_PARTS, DC_CONSTANTS.RAW_DATA_CHAIN_SUFF,
                                  dataDir)
    logger.debug("!!!!! self.processedContents: %s", Utils.varDump(self.processedContents, stringifyType=0, ensure_ascii=False, strTypeMaxLen=5000))
    ret = dc.EventObjects.URLContentResponse(urlContentRequest.url, self.rawContents, self.processedContents)
    ret.headers = self.headers
    ret.requests = self.requests
    ret.meta = self.meta
    ret.cookies = self.cookies
    row = self.selectURLFromMySQL(urlContentRequest.siteId, urlContentRequest.urlMd5, queryCallback)
    if row is not None:
      if "Status" in row:
        ret.status = row["Status"]
      if "URL" in row:
        ret.url = row["URL"]
      if "URLMd5" in row:
        ret.urlMd5 = row["URLMd5"]
      if "RawContentMd5" in row:
        ret.rawContentMd5 = row["RawContentMd5"]
      if "ContentURLMd5" in row:
        ret.contentURLMd5 = row["ContentURLMd5"]
      if "Site_Id" in row:
        ret.siteId = row["Site_Id"]
      if hasattr(urlContentRequest.dbFieldsList, '__iter__') and len(urlContentRequest.dbFieldsList) > 0:
        ret.dbFields = self.genDBFields(urlContentRequest.dbFieldsList, \
                                        urlContentRequest.dbFieldsListDefaultValues, \
                                        row)
    if self.contentMask & dc.EventObjects.URLContentRequest.CONTENT_TYPE_ATTRIBUTES:
      if ret.urlMd5 is not None and ret.urlMd5 != "" and ret.siteId is not None:
        ret.attributes = AttrFetchTask.fetchUrlsAttributesByNames(ret.siteId,
                                                                  ret.urlMd5,
                                                                  queryCallback,
                                                                  urlContentRequest.attributeNames)
    return ret
|
from django.test import TestCase
from tarefas.models import Tarefa
from django.db.utils import IntegrityError
from django.contrib.auth.models import User
class CriaTarefaTestCase(TestCase):
    """Model-level tests for creating Tarefa instances."""
    def test_cria_tarefa_vazia(self):
        # Creating with all defaults must succeed (nome presumably has a
        # default or blank=True -- confirm against the model definition).
        self.assertIsNotNone(Tarefa.objects.create())
    def test_cria_tarefa_sem_nome(self):
        # An explicit nome=None must violate the NOT NULL constraint.
        with self.assertRaises(IntegrityError):
            Tarefa.objects.create(nome=None)
    def test_cria_tarefa_com_nome(self):
        self.assertIsNotNone(Tarefa.objects.create(nome='tarefa'))
class HomeTestCase(TestCase):
    """Integration tests for the home page and its login link."""
    def setUp(self):
        # One known user so the login-related tests can authenticate.
        User.objects.create_user(username='admin', password='123')
    def test_home_access(self):
        response = self.client.get('/')
        self.assertEquals(response.status_code, 200)
        self.assertContains(response, 'Olá')
    def test_home_tem_link_login(self):
        # The Login link must be visible to anonymous users only.
        response = self.client.get('/')
        self.assertContains(response, 'Login')
        self.client.login(username='admin', password='123')
        response = self.client.get('/')
        self.assertNotContains(response, 'Login')
    def test_login_volta_pra_home(self):
        # NOTE(review): `is_authenticated()` is callable only on Django < 1.10;
        # on newer versions it is a property and this call raises TypeError --
        # confirm the project's Django version. The assertion is also
        # conditional, so it silently passes for authenticated users.
        response = self.client.get('/')
        if not response.context['user'].is_authenticated():
            self.assertContains(response,
                                '<a href="admin/login/?next=/">Login</a>',
                                html=True)
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourceStrava
__all__ = ["SourceStrava"]
|
# create strong scaling plot data
import sys
from math import sqrt
# refinement level used for speedup study
r = 9
# list of number of processes
plist = [1, 2, 4, 8, 16, 20]
# list of timed sections
sectionlist = ["assemble", "initialize", "output", "setup", "solve"]
# get lines from input file ("input" renamed: it shadowed the builtin)
with open("results.out", 'r') as inputfile:
    result_lines = inputfile.readlines()
# loop over sections
for section in sectionlist:
    # open plot data output file
    with open("strong_" + section + ".gpl", 'w') as output:
        # p = 1 statistics used as the serial baseline for speedup
        mean_base = 0.0
        std_base = 0.0
        N_base = 0
        # BUG FIX: this flag was initialized as `found_base` but later set and
        # tested as `base_found`; the script only worked because p == 1 is
        # first in plist. One consistent name is used now.
        base_found = False
        # loop over number of processes
        for p in plist:
            case = "p" + format(p, "02d") + "_r" + format(r, "02d") + " " + section
            case_found = False
            for line in result_lines:
                if case in line:
                    words = line.split()
                    mean = float(words[2])
                    std = float(words[3])
                    N = int(words[4])
                    # use p = 1 as base
                    if p == 1:
                        mean_base = mean
                        std_base = std
                        N_base = N
                        base_found = True
                    # ensure that base was already found
                    if not base_found:
                        sys.stderr.write("Base has not yet been found\n")
                        raise SystemExit(1)
                    # check that the same number of runs were used for serial and parallel
                    if N != N_base:
                        sys.stderr.write("Different numbers of runs for case " + case + "\n")
                        raise SystemExit(1)
                    case_found = True
            if not case_found:
                sys.stderr.write("Data for case " + case + " was not found\n")
                raise SystemExit(1)
            # compute speedup and 95% uncertainty bounds (Gaussian error propagation)
            strong = mean_base / mean
            std = strong * sqrt((std / mean) ** 2 + (std_base / mean_base) ** 2)
            k = 1.96 * std / sqrt(N)
            # write line to output file
            output.write(str(p) + " " + str(strong) + " " + str(k) + "\n")
|
from .inference import (convert_SyncBN, inference_detector,
inference_multi_modality_detector, init_detector,
show_result_meshlab)
from .test import single_gpu_test
__all__ = [
'inference_detector', 'init_detector', 'single_gpu_test',
'show_result_meshlab', 'convert_SyncBN',
'inference_multi_modality_detector'
]
|
import datetime
import date_fun
import numpy as np
import mygis
from bunch import Bunch
def stats(data):
    """Calculate the rate of melt from peak to 0.

    Assumes that data starts at peak and decreases from there.
    Takes the first point data dips below peak as the onset of melt;
    takes the first day data get to 0 as meltout.
    Returns a Bunch with peak SCA, melt time [days] and melt date [DOY].
    """
    melt_date = np.zeros(data.shape[1:]) + 999   # 999 == "not melted out yet"
    peak_date = np.zeros(data.shape[1:]) - 1     # -1 == "still at peak"
    # find the maximum SCA in the first half of the year (so a peak in Dec
    # doesn't screw up melt "rates"); // keeps the index an int on Python 3
    peak_sca = max_sca(data[:data.shape[0] // 2, :, :])
    # loop through time finding times that have reached "peak" or that have
    # melted; record first melt date and last peak date.
    for i in range(data.shape[0]):
        # BUG FIX: the original chained fancy indexing
        # (melt_date[nosnow][newmelt] = i + 1) assigned into a temporary copy
        # and never updated the arrays; boolean-mask assignment writes in place.
        newmelt = (data[i, :, :] == 0) & (melt_date > i)
        melt_date[newmelt] = i + 1
        melt_start = (data[i, :, :] < peak_sca) & (peak_date == -1)
        peak_date[melt_start] = i
    # not sure what to do if it predicts that SCA never gets to zero...
    melt_date[melt_date == 999] = data.shape[0]
    melt_time = melt_date - peak_date
    # if meltdate is before peak date just set it to 1
    melt_time[melt_time <= 0] = 1
    return Bunch(peak=peak_sca, melt_time=melt_time, melt_date=melt_date)
def max_sca(data):
    """Return the per-pixel maximum SCA over the time axis (axis 0)."""
    return np.max(data, axis=0)
def fill(data):
    """Fill missing MODSCAG values by linear interpolation in time.
    Some data in MODSCAG are missing (-9999 or >100 (various flags, water, cloud, ...))

    `data` is modified in place (time, y, x); nothing is returned.
    """
    def bad_data(datavalue):
        # A valid SCA fraction lies in [0, 1]; anything else is a flag/fill value.
        return (datavalue<0) or (datavalue>1)
    # loop through time
    for i in range(data.shape[0]):
        # find all missing values in the current time step
        tmp=np.where((data[i,:,:]<0)|(data[i,:,:]>1))
        # loop through missing values
        for j in range(len(tmp[0])):
            # current spatial position to test
            x=tmp[0][j]
            y=tmp[1][j]
            # backwards search index starting point
            test_i=i-1
            # note, after i=0 test_i should always = i-1
            if (i==0):
                # search backwards for a good datapoint
                # (negative indices deliberately wrap to the end of the year)
                while bad_data(data[test_i,x,y]):test_i-=1
            i_start=test_i
            # forward search index starting point
            test_i=i+1
            # search forwards for a good datapoint
            # NOTE(review): if the series ends with bad values this walks past
            # the last index and raises IndexError -- confirm trailing data is
            # always valid.
            while bad_data(data[test_i,x,y]):test_i+=1
            # linear interpolation between last good point and next good point
            linterp=(float(test_i)-i)/(float(test_i)-i_start)
            data[i,x,y]=data[test_i,x,y]*(1-linterp)+data[i_start,x,y]*linterp
    # return data
def month_str2num(month):
    """convert a 3char month name to the number
    e.g. JAN = 1, FEB = 2, etc. (case-insensitive; unknown names raise KeyError)
    """
    lookup = {name: num for num, name in enumerate(
        ("JAN", "FEB", "MAR", "APR", "MAY", "JUN",
         "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"), start=1)}
    return lookup[month.upper()]
def read_good_days(filename="dateselect.txt"):
    """Read dates QCed as good from filename (MODSCAGdir/dateselect.txt)
    dateselect.txt has one line per date in the format DDMMMYYYY (e.g. 01JAN2008).

    Returns a Bunch with the parsed datetimes and their day-of-year indices.
    """
    with open(filename) as f:
        dates=[]
        indices=[]
        for l in f:
            # fixed-width parse: chars 0-1 day, 2-4 month abbrev, 5.. year
            day=int(l[0:2])
            month=month_str2num(l[2:5])
            year=int(l[5:])
            dates.append(datetime.datetime(year,month,day))
            # index = MJD of this date minus MJD of Jan 1 (0-based day of year)
            indices.append(int(np.round(date_fun.date2mjd(year,month,day,0,0)
                                    -date_fun.date2mjd(year,1,1,0,0))))
        return Bunch(dates=dates,indices=indices)
# Assumes the following .ctl file
# DSET ^fsca.dat
# UNDEF -9999
# XDEF 1950 LINEAR -112.247916666666667 0.004166666666667
# YDEF 2580 LINEAR 33.002083333333333 0.004166666666667
# ZDEF 1 LEVELS 0
# TDEF 366 LINEAR 00Z01JAN200X 1dy
# VARS 1
# SCA 0 1 SCA []
# ENDVARS
def load(filename, startyear=2008, all_days=False):
    """Load a MODSCAG SCA file from disk.

    Assumes a flat binary file as described in the comments above
    (GeoTIFF files are read via mygis instead).
    Returns a Bunch with data, lat, lon, dates (based on startyear) and
    the QCed good dates.
    """
    import glob
    files = glob.glob(filename)
    d = []
    for f in files:
        if f[-4:] == ".tif":
            d.append(mygis.read_tiff(f))
        else:
            # BUG FIX: read the matched file `f`, not the glob pattern
            # `filename` (the original re-read the same path for every match
            # and failed outright for wildcard patterns).
            d.append(np.fromfile(f, np.float32))
    d = np.array(d)
    startlon = -112.247916666666667
    nlon = 1950
    dlon = 0.004166666666667
    lon = np.array([startlon + dlon * i for i in range(nlon)])
    startlat = 33.002083333333
    nlat = 2580
    dlat = dlon
    lat = np.array([startlat + dlat * i for i in range(nlat)])
    startdate = datetime.datetime(startyear, 1, 1, 0)
    # integer division keeps ntimes usable as a reshape dimension on Python 3
    ntimes = d.size // (nlon * nlat)  # 366
    dates = [startdate + datetime.timedelta(i) for i in range(int(ntimes))]
    dateselectfile = "/".join(filename.split("/")[:-1]) + "/dateselect.txt"
    if all_days:
        gooddates = np.arange(ntimes)
    else:
        # NOTE(review): this branch yields a Bunch(dates=..., indices=...)
        # while the all_days branch yields a plain index array -- confirm
        # callers handle both shapes.
        gooddates = read_good_days(dateselectfile)
    # fill(data)
    return Bunch(data=d.reshape((ntimes, nlat, nlon)), lat=lat, lon=lon, dates=dates, gooddates=gooddates)
|
import sys
import tensorflow as tf
from pyspark.sql import SparkSession
from recsys_tf_237.recsystf_model import TfrsModelMaker
NUM_TRAIN_EPOCHS = 3
items_path = "./input-data/items"
users_path = "./input-data/users"
events_path = "./input-data/events"
num_items = 494433
num_users = 3827078
num_events = 6757870
def main(args):
    """Entry point: build, train and evaluate the TFRS recommender.

    Args:
        args: command-line argument list (currently unused).
    """
    # Using allow_soft_placement=True allows TF to fall back to CPU when no GPU implementation is available.
    tf.config.set_soft_device_placement(True)
    spark = SparkSession.builder.appName("Recsys-TFRS").getOrCreate()
    # Load the intermediary parquet data into TF datasets
    model_maker = TfrsModelMaker(items_path, users_path, events_path, num_items, num_users, num_events)
    # Build the model
    model = model_maker.create_model()
    # Train and evaluate the model
    model_maker.train_and_evaluate(model, NUM_TRAIN_EPOCHS)
    # Generate recomms...
    spark.stop()
if __name__ == "__main__":
main(sys.argv)
|
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import plotly
import pandas as pd
import pymysql as mysql
import urllib.parse as urllib
from datetime import datetime as dt
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
def serve_layout():
    """Build the page layout.

    Assigned to `app.layout` as a callable so it is re-evaluated on every
    page load (keeps the DatePickerRange's max date / end date current).
    """
    return (
        html.Div([
            html.H2('Fotonaponska elektrana FN1-Riteh: Podaci o proizvodnji',
                    style={'color': 'rgb(255,255,255)',
                           'text-align': 'center',
                           'text-shadow':'1px 1px #000000'}),
            html.Div(id='live-update-text'),
            dcc.Graph(id='live-update-graph'),
            dcc.Tabs(id="my-tabs", value='tab-1-example', children=[
                dcc.Tab(label='Grafički prikaz prethodne proizvodnje', value='tab-1'),
                dcc.Tab(label='Tablica s podacima o proizvodnji', value='tab-2')
            ]),
            html.Div([
                html.H5('Odaberi vremenski raspon:', style={'color': 'rgb(255,255,255)'}),
                dcc.DatePickerRange(
                    id='my-date-picker-range',
                    min_date_allowed=dt(2019, 3, 6),
                    max_date_allowed=dt.now(),
                    initial_visible_month=dt(2019, 3, 2),
                    start_date='2019-03-06',
                    end_date='{}'.format(dt.now().date())
                ),
                html.Div([html.A('Preuzmi podatke',
                                 id='download-link',
                                 download="Podaci.csv",
                                 href="",
                                 target="_blank",
                                 style={'color':'#FFFFFF'})])
            ], style={'text-align':'center',
                      'margin-right': 20}),
            html.Div(id='tabs-content'),
            # 45-second polling interval drives the live text and graph callbacks
            dcc.Interval(id='my-interval',
                         interval=45*1000,
                         n_intervals=0),
            html.P('Izradio: Leon Kvež za Tehnički fakultet u Rijeci, veljača 2019.', style={'text-align':'right'})
        ], style={'background':'#4c4c4a',
                  'font-family':"Verdana"})
    )
app.layout = serve_layout
@app.callback(Output('live-update-text', 'children'),
              [Input('my-interval', 'n_intervals')])
def update_text(n):
    """Render the live-readings panel (power, energy, weather).

    Args:
        n: tick counter from dcc.Interval; used only as a refresh trigger.

    Returns:
        html.Div built from the most recent row of the `elektrana` table.
    """
    # NOTE(review): DB credentials are hard-coded (in every callback) and the
    # connection is never closed, leaking one connection per 45 s tick --
    # move credentials to config and close or pool the connection.
    conn = mysql.connect(host='pfw0ltdr46khxib3.cbetxkdyhwsb.us-east-1.rds.amazonaws.com',
                         user='kfd7pprqwrvy9uep',
                         password='zvg9opaacxqy4mmu',
                         db='oha3los99548olek')
    # NOTE(review): SELECT * pulls the whole table although only the last row
    # is used; an ORDER BY ... LIMIT 1 would do.
    query = "SELECT * FROM elektrana"
    df = pd.read_sql(query, conn)
    style={'color':'rgb(255,255,255)', 'text-align':'center'}
    # Column access below is positional (iloc); it silently breaks if the
    # table's column order ever changes.
    return html.Div([html.H4('Live podaci - korak 45 sek.:', style=style),
                     html.Div([
                         html.Div([
                             html.P("Vrijeme: {}".format(df.iloc[-1,0]), style=style),
                             html.P("Snaga AC: {} W".format(df.iloc[-1,2]), style=style),
                             html.P("Snaga DC: {} W".format(df.iloc[-1,1]), style=style),
                             html.P("Učinkovitost: {}%".format(df.iloc[-1,3]), style=style),
                             html.P("Max. snaga danas: {} W".format(df.iloc[-1,4]), style=style),
                             html.P("Frekvencija mreže: {} Hz".format(df.iloc[-1,5]), style=style),
                             html.P("Temp. konvertera: {}°C".format(df.iloc[-1,6]), style=style)],className='four columns'),
                         html.Div([
                             html.P('Proizvedeno:', style=style),
                             html.P("Energije danas: {} kWh".format(df.iloc[-1,7]), style=style),
                             html.P("Energije u tjednu: {} kWh".format(df.iloc[-1,8]), style=style),
                             html.P("Energije u mjesecu: {} kWh".format(df.iloc[-1,9]), style=style),
                             html.P("Energije u godini: {} kWh".format(df.iloc[-1,10]), style=style),
                             html.P("Energije ukupno: {} kWh".format(df.iloc[-1,11]), style=style),
                             html.P("Ušteda emisija CO2: {} T CO2".format(df.iloc[-1,11]*0.024), style=style)], className='four columns'),
                         html.Div([
                             html.P('Meteorološki podaci:', style=style),
                             html.P("Temperatura: {} °C".format(df.iloc[-1,12]), style=style),
                             html.P("Smjer i brzina vjetra: {} m/s".format(df.iloc[-1,13]), style=style),
                             html.P("Stanje vremena: {} ".format(df.iloc[-1,14]), style=style)], className='four columns')
                     ], className='row')
                     ])
@app.callback(Output('live-update-graph', 'figure'),
              [Input('my-interval', 'n_intervals')])
def update_graph(n):
    """Redraw the live AC/DC power and converter-temperature subplots.

    Triggered by the 45 s dcc.Interval; plots the 80 most recent samples.
    """
    # NOTE(review): hard-coded credentials + unclosed connection, same as the
    # other callbacks; SELECT * also fetches the whole table to use 80 rows.
    conn = mysql.connect(host='pfw0ltdr46khxib3.cbetxkdyhwsb.us-east-1.rds.amazonaws.com',
                         user='kfd7pprqwrvy9uep',
                         password='zvg9opaacxqy4mmu',
                         db='oha3los99548olek')
    query = "SELECT * FROM elektrana"
    df1 = pd.read_sql(query, conn)
    data = {'Vrijeme':[],
            'Snaga_AC':[],
            'Snaga_DC':[],
            'Temp_kon':[]}
    # Collect the last 80 samples, newest first (columns accessed by position).
    for i in range(80):
        vrijeme = df1.iloc[-(1+i),0]
        snaga_ac = df1.iloc[-(1+i),2]
        snaga_dc = df1.iloc[-(1+i),1]
        temp_kon = df1.iloc[-(1+i),6]
        data['Vrijeme'].append(vrijeme)
        data['Snaga_AC'].append(snaga_ac)
        data['Snaga_DC'].append(snaga_dc)
        data['Temp_kon'].append(temp_kon)
    # Two stacked subplots sharing the time axis: power on top, temp below.
    fig = plotly.tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
    fig['layout']['legend'] = {'x': 1, 'y': 1, 'xanchor': 'left'}
    fig['layout']['xaxis1'].update(title='Vrijeme')
    fig['layout']['yaxis1'].update(title='Snaga [W]')
    fig['layout']['yaxis2'].update(title='Temperatura [°C]')
    fig['layout']['height'] = 600
    fig['layout']['title'] = {'text':'Snaga AC/DC, Temperatura konvertera'}
    fig['layout']['plot_bgcolor'] = '#4c4c4a'
    fig['layout']['paper_bgcolor'] = '#4c4c4a'
    fig['layout']['font'] = {'color':'#e98400'}
    fig.append_trace({
        'x': data['Vrijeme'],
        'y': data['Snaga_AC'],
        'name': 'Snaga AC',
        'mode': 'lines+markers',
        'type': 'scatter',
    }, 1, 1)
    fig.append_trace({
        'x': data['Vrijeme'],
        'y': data['Snaga_DC'],
        'name': 'Snaga DC',
        'mode': 'lines+markers',
        'type': 'scatter',
    }, 1, 1)
    fig.append_trace({
        'x': data['Vrijeme'],
        'y': data['Temp_kon'],
        'name': 'Temperatura konvertera',
        'mode': 'lines+markers',
        'type': 'scatter',
        'marker': {'color':'green'}
    }, 2, 1)
    return fig
@app.callback(Output('tabs-content', 'children'),
              [Input('my-tabs', 'value'),
               Input('my-date-picker-range', 'start_date'),
               Input('my-date-picker-range', 'end_date')])
def render_content(tab, start_date, end_date):
    """Render the selected tab (history graph or data table) for a date range.

    Args:
        tab: active dcc.Tabs value ('tab-1' graph, 'tab-2' table).
        start_date, end_date: ISO date strings from the date picker.

    Returns:
        html.Div for the selected tab; implicitly None for any other tab
        value (including the initial 'tab-1-example' default).
    """
    # NOTE(review): hard-coded credentials + unclosed connection, and the
    # picker strings are interpolated into the SQL -- dates come from the UI,
    # so prefer a parameterized query.
    conn = mysql.connect(host='pfw0ltdr46khxib3.cbetxkdyhwsb.us-east-1.rds.amazonaws.com',
                         user='kfd7pprqwrvy9uep',
                         password='zvg9opaacxqy4mmu',
                         db='oha3los99548olek')
    query = "SELECT * FROM elektrana WHERE Vrijeme >= '{} 05:00:00' AND Vrijeme <= '{} 21:00:00'".format(start_date, end_date)
    df2 = pd.read_sql(query, conn)
    if tab == 'tab-1':
        return html.Div([
            html.H3('Prikaz sveukupne proizvodnje',
                    style={'color':'rgb(255,255,255)',
                           'text-shadow':'1px 1px #000000',
                           'text-align': 'center',
                           'font-family':"Verdana"}),
            dcc.Graph(
                figure=go.Figure(
                    data=[
                        go.Scatter(
                            x=df2['Vrijeme'],
                            y=df2['Snaga_DC'],
                            name='Snaga DC'
                        ),
                        go.Scatter(
                            x=df2['Vrijeme'],
                            y=df2['Snaga_AC'],
                            name='Snaga AC'
                        )
                    ],
                    layout=go.Layout(
                        title='Snaga (DC, AC)',
                        font=dict(color='#e98400'),
                        showlegend=True,
                        yaxis=dict(title='Snaga u W'),
                        xaxis=dict(title='Vrijeme'),
                        plot_bgcolor='#4c4c4a',
                        paper_bgcolor='#4c4c4a'
                    )
                ),
                style={'height': 550},
                id='my-graph'
            )
        ])
    elif tab == 'tab-2':
        return html.Div([
            html.H3('Podaci o prethodnoj proizvodnji',
                    style={'color':'rgb(255,255,255)',
                           'text-shadow':'1px 1px #000000',
                           'text-align': 'center',
                           'font-family':"Verdana"}),
            dash_table.DataTable(
                id='table',
                columns=[{"name": i, "id": i} for i in df2.columns],
                data=df2.to_dict("rows"),
                sorting=True,
                style_cell={'textAlign': 'right',
                            'color':'rgb(255,255,255)',
                            'backgroundColor': '#4c4c4a'},
                style_cell_conditional=[{'if': {'row_index': 'odd'},
                                         'backgroundColor': '#5c5c5a'},
                                        {'if': {'column_id': 'Vrijeme'},
                                         'textAlign': 'center'},
                                        ],
                style_header={'backgroundColor': '#4c4c4a',
                              'fontWeight': 'bold',
                              'textAlign':'center'},
                style_data={'whiteSpace': 'normal'},
                css=[{'selector': '.dash-cell div.dash-cell-value',
                      'rule': 'display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;'}]
            )
        ])
@app.callback(dash.dependencies.Output('download-link', 'href'),
              [Input('my-date-picker-range', 'start_date'),
               Input('my-date-picker-range', 'end_date')])
def update_download_link(start_date, end_date):
    """Build a data: URI exposing the selected date range as CSV.

    Args:
        start_date, end_date: ISO date strings from the date picker.

    Returns:
        str: "data:text/csv;charset=utf-8,..." href for the download anchor.
    """
    # NOTE(review): credentials are hard-coded here (and in the other
    # callbacks) -- move them to environment variables / config.
    conn = mysql.connect(host='pfw0ltdr46khxib3.cbetxkdyhwsb.us-east-1.rds.amazonaws.com',
                         user='kfd7pprqwrvy9uep',
                         password='zvg9opaacxqy4mmu',
                         db='oha3los99548olek')
    try:
        # Parameterized query: the dates come from the client-side picker,
        # so they must not be interpolated into the SQL string.
        query = "SELECT * FROM elektrana WHERE Vrijeme >= %s AND Vrijeme <= %s"
        dff = pd.read_sql(query, conn,
                          params=('{} 04:00:00'.format(start_date),
                                  '{} 22:00:00'.format(end_date)))
    finally:
        # BUG FIX: the connection was opened on every callback invocation and
        # never closed, leaking one MySQL connection per request.
        conn.close()
    csv_string = dff.to_csv(encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
    csv_string = "data:text/csv;charset=utf-8," + urllib.quote(csv_string)
    return csv_string
# Same stylesheet as external_stylesheets above; kept for older dash versions.
app.css.append_css({'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
if __name__ == '__main__':
    # NOTE(review): debug=True (auto-reloader) combined with processes=4 is a
    # dev-only setup -- confirm this is not used for production serving.
    app.run_server(debug=True, processes=4)
from ..forms import UniversityForm
from ..models import University
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
# Просмотр списка университетов
class UniversityListView(generic.ListView):
    """Paginated list of universities, alphabetized by name."""
    model = University
    # NOTE(review): a '../'-prefixed template path is unusual for Django's
    # template loaders -- confirm it resolves ('catalog/university_list.html'
    # is the conventional form).
    template_name = '../catalog/university_list.html'
    paginate_by = 10
    def get_queryset(self):
        # Stable alphabetical ordering keeps pagination deterministic.
        return University.objects.order_by('name_of_university')
class UniversityDetailView(generic.DetailView):
    """Detail page for a single University."""
    model = University
    # NOTE(review): '../templates/...' differs from the list view's path style;
    # confirm both resolve with the configured template dirs.
    template_name = '../templates/catalog/university_detail.html'
class UniversityCreate(CreateView):
    """Create a University via UniversityForm.

    No success_url is set, so Django presumably redirects to
    University.get_absolute_url() -- confirm the model defines it.
    """
    model = University
    form_class = UniversityForm
class UniversityUpdate(UpdateView):
    """Edit an existing University via UniversityForm.

    Like UniversityCreate, relies on the model's get_absolute_url() for the
    post-save redirect -- confirm it is defined.
    """
    model = University
    form_class = UniversityForm
class UniversityDelete(DeleteView):
    """Delete a University after confirmation."""
    model = University
    # NOTE(review): redirects to the 'departments' URL name, not a university
    # list -- confirm this is intentional.
    success_url = reverse_lazy('departments')
|
from django.conf.urls import patterns, include, url
from django.views import generic
from django.contrib import admin
admin.autodiscover()

# URL routes.  NOTE(review): mounting 'stories.urls' under r'^$' only matches
# the empty path, which makes non-empty included patterns unreachable --
# confirm whether r'' was intended.
urlpatterns = patterns('',
    url(r'^$', include('stories.urls')),
    # Fixed: the template/name pairs for /success and /about were swapped
    # (the success URL rendered about.html under the name 'about' and vice
    # versa).
    url(r'^success$', generic.TemplateView.as_view(template_name="success.html"),
        name='story_success'),
    url(r'^about$', generic.TemplateView.as_view(template_name="about.html"),
        name='about'),
    # Wrapped in url() for consistency with the other entries (patterns()
    # accepts either form).
    url(r'^ckeditor/', include('ckeditor.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from orders.products.views import ListProductsView
admin.autodiscover()

urlpatterns = patterns('',
    # NOTE(review): the string view reference is the pre-Django-1.10 style
    # that patterns() still resolves.
    url(r'^test/', 'Prometheus.views.test', name='test'),
    # Admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Order matters: the exact /orders/new selector must precede the broader
    # /orders/new/ and /orders/ includes.
    url(r'^orders/new/?$', ListProductsView.as_view(), name='product_selector'),
    url(r'^orders/new/', include('orders.products.urls')),
    url(r'^orders/', include('orders.urls')),)
|
# coding: utf-8
from math import sin, cos, sqrt
from six.moves import xrange
import h5py
import numpy as np
import scipy.io as sio
import tensorflow as tf
class Id:
    """Simple container pairing two identifiers, A and B."""

    def __init__(self, Ia, Ib):
        self.Ia = Ia
        self.Ib = Ib

    def show(self):
        # One line per identifier, same format as always.
        print('A: {}\nB: {}'.format(self.Ia, self.Ib))
def convlayer_pooling(input_feature, input_dim, output_dim, nb, cotw, name='meshconv', training=True,
                      special_activation=True,
                      no_activation=False, bn=True):
    """Mesh convolution layer that creates its own weight variables.

    Gathers each vertex's one-ring neighbour features (index table ``nb``,
    where index 0 addresses a zero-padding row), weights them by ``cotw``,
    applies learned neighbour and edge (self) transforms, then batch norm and
    an activation.

    Args:
        input_feature: (batch, points, input_dim) float32 tensor -- assumed
            from the padding/tensordot usage; confirm with callers.
        nb: per-vertex neighbour index table; 0 means "no neighbour".
        cotw: per-neighbour cotangent weights.
        special_activation: 'sigmoid', 'l_relu', 'softsign' or 'softplusplus'
            select that activation; any other value (including the default
            True) falls through to tanh.
        no_activation: if True, skip the activation entirely.
        bn: if False, skip batch normalization.

    Returns:
        (batch, points, output_dim) float32 tensor.
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- presumably a
        # project-patched variable_scope; confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        # Row 0 is a zero pad so neighbour index 0 contributes nothing.
        padding_feature = tf.zeros([tf.shape(input_feature)[0], 1, input_dim], tf.float32)
        padded_input = tf.concat([padding_feature, input_feature], 1)

        def compute_nb_feature(input_f):
            # Gather all neighbour features for one sample, cotangent-weighted.
            return tf.gather(input_f, nb) * cotw

        total_nb_feature = tf.map_fn(compute_nb_feature, padded_input)
        # Despite the name this is a weighted SUM over the neighbour axis.
        mean_nb_feature = tf.reduce_sum(total_nb_feature, axis=2)
        nb_weights = tf.get_variable("nb_weights", [input_dim, output_dim], tf.float32,
                                     tf.random_normal_initializer(stddev=0.02))
        nb_bias = tf.get_variable("nb_bias", [output_dim], tf.float32, initializer=tf.constant_initializer(0.0))
        nb_feature = tf.tensordot(mean_nb_feature, nb_weights, [[2], [0]]) + nb_bias
        edge_weights = tf.get_variable("edge_weights", [input_dim, output_dim], tf.float32,
                                       tf.random_normal_initializer(stddev=0.02))
        edge_bias = tf.get_variable("edge_bias", [output_dim], tf.float32, initializer=tf.constant_initializer(0.0))
        edge_feature = tf.tensordot(input_feature, edge_weights, [[2], [0]]) + edge_bias
        total_feature = edge_feature + nb_feature
        if not bn:
            fb = total_feature
        else:
            fb = batch_norm_wrapper(total_feature, is_training=training)
        # Activation dispatch; each branch logs which activation fired.
        if no_activation:
            fa = fb
            print('no activation')
        elif special_activation == 'sigmoid':
            # Rescaled sigmoid with range (-1, 1).
            fa = 2.0*tf.nn.sigmoid(fb)-1.0
            print('sigmoid')
        elif special_activation == 'l_relu':
            fa = leaky_relu(fb)
            print('l_relu')
        elif special_activation == 'softsign':
            fa = tf.nn.softsign(fb)
            print('softsign')
        elif special_activation == 'softplusplus':
            fa = softplusplus(fb)
            print('softplusplus')
        else:
            fa = tf.nn.tanh(fb)
            print('tanh')
        return fa
def newconvlayer_pooling(input_feature, input_dim, output_dim, nb_weights, edge_weights, nb, cotw,
                         name='meshconvpooling',
                         training=True, special_activation='tanh', no_activation=False, bn=True):
    """Mesh convolution layer using externally supplied weight matrices.

    Same computation as convlayer_pooling, but ``nb_weights`` and
    ``edge_weights`` are shared matrices provided by the caller (see
    get_conv_weights); only the biases are created here.

    Args:
        input_feature: (batch, points, input_dim) float32 tensor -- assumed
            from the padding/tensordot usage; confirm with callers.
        nb_weights / edge_weights: shared (input_dim, output_dim) matrices.
        nb: per-vertex neighbour index table; 0 means "no neighbour".
        cotw: per-neighbour cotangent weights.
        special_activation: 'sigmoid', 'l_relu', 'softsign' or 'softplusplus'
            select that activation; anything else falls through to tanh.
            (The previous default was a profane placeholder string that also
            hit the tanh fallback -- behaviour is unchanged.)
        no_activation: if True, skip the activation entirely.
        bn: if False, skip batch normalization.

    Returns:
        (batch, points, output_dim) float32 tensor.
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- presumably a
        # project-patched variable_scope; confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        # Row 0 is a zero pad so neighbour index 0 contributes nothing.
        padding_feature = tf.zeros([tf.shape(input_feature)[0], 1, input_dim], tf.float32)
        padded_input = tf.concat([padding_feature, input_feature], 1)

        def compute_nb_feature(input_f):
            # Gather all neighbour features for one sample, cotangent-weighted.
            return tf.gather(input_f, nb) * cotw

        total_nb_feature = tf.map_fn(compute_nb_feature, padded_input)
        # Despite the name this is a weighted SUM over the neighbour axis.
        mean_nb_feature = tf.reduce_sum(total_nb_feature, axis=2)
        nb_bias = tf.get_variable("nb_bias", [output_dim], tf.float32, initializer=tf.constant_initializer(0.0))
        nb_feature = tf.tensordot(mean_nb_feature, nb_weights, [[2], [0]]) + nb_bias
        edge_bias = tf.get_variable("edge_bias", [output_dim], tf.float32, initializer=tf.constant_initializer(0.0))
        edge_feature = tf.tensordot(input_feature, edge_weights, [[2], [0]]) + edge_bias
        total_feature = edge_feature + nb_feature
        if not bn:
            fb = total_feature
        else:
            fb = batch_norm_wrapper(total_feature, is_training=training)
        # Activation dispatch; each branch logs which activation fired.
        if no_activation:
            fa = fb
            print('no activation')
        elif special_activation == 'sigmoid':
            # Rescaled sigmoid with range (-1, 1).
            fa = 2.0*tf.nn.sigmoid(fb)-1.0
            print('sigmoid')
        elif special_activation == 'l_relu':
            fa = leaky_relu(fb)
            print('l_relu')
        elif special_activation == 'softsign':
            fa = tf.nn.softsign(fb)
            print('softsign')
        elif special_activation == 'softplusplus':
            fa = softplusplus(fb)
            print('softplusplus')
        else:
            fa = tf.nn.tanh(fb)
            print('tanh')
        return fa
def get_conv_weights(input_dim, output_dim, name='convweight'):
    """Create the shared neighbour/edge weight matrices for a mesh conv layer.

    Returns a (nb_weights, edge_weights) pair, each (input_dim, output_dim),
    initialized from N(0, 0.02) and L2-regularized.
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        nb_w = tf.get_variable("nb_weights", [input_dim, output_dim], tf.float32,
                               tf.random_normal_initializer(stddev=0.02))
        edge_w = tf.get_variable("edge_weights", [input_dim, output_dim], tf.float32,
                                 tf.random_normal_initializer(stddev=0.02))
        return nb_w, edge_w
def get_conv_weights_diag(input_dim, output_dim, name='convweight'):
    """Create per-channel (diagonal) neighbour/edge weights.

    The learned variables are length-input_dim vectors expanded into diagonal
    matrices; ``output_dim`` is unused beyond signature symmetry with
    get_conv_weights.
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        nb_vec = tf.get_variable("nb_weights", [input_dim], tf.float32,
                                 tf.random_normal_initializer(stddev=0.02))
        edge_vec = tf.get_variable("edge_weights", [input_dim], tf.float32,
                                   tf.random_normal_initializer(stddev=0.02))
        return tf.diag(nb_vec), tf.diag(edge_vec)
def load_data(path, resultmin, resultmax, useS=True, graphconv=False):
    """Load mesh deformation features and connectivity from an HDF5 file.

    Reads log-R rotation features ('FLOGRNEW'), scale features ('FS') and the
    one-ring neighbour table / cotangent weights, then min-max normalizes the
    features into [resultmin, resultmax].

    Args:
        path: HDF5 file path.
        resultmin, resultmax: target range of the normalized features.
        useS: if True, concatenate the scale features after the log-R ones.
        graphconv: unused; kept for signature compatibility.

    Returns:
        (feature, neighbour, degree, logrmin, logrmax, smin, smax, modelnum,
         pointnum, maxdegree, L1, cotweight) where L1 is an all-zero
        (pointnum, pointnum) placeholder matrix.
    """
    data = h5py.File(path, 'r')  # read-only: nothing is written here
    logr = np.transpose(data['FLOGRNEW'], (2, 1, 0))
    s = np.transpose(data['FS'], (2, 1, 0))
    neighbour1 = np.transpose(data['neighbour1'])
    cotweight1 = np.transpose(data['cotweight1'])
    data.close()  # everything above was materialized as numpy arrays
    pointnum1 = neighbour1.shape[0]
    maxdegree1 = neighbour1.shape[1]
    modelnum = len(logr)
    # Pad the ranges by a small epsilon so the normalized values stay
    # strictly inside (resultmin, resultmax).
    logrmin = logr.min() - 1e-6
    logrmax = logr.max() + 1e-6
    smin = s.min() - 1e-6
    smax = s.max() + 1e-6
    rnew = (resultmax - resultmin) * (logr - logrmin) / (logrmax - logrmin) + resultmin
    snew = (resultmax - resultmin) * (s - smin) / (smax - smin) + resultmin
    if useS:
        feature = np.concatenate((rnew, snew), axis=2)
    else:
        feature = rnew
    f = feature
    nb1 = neighbour1
    # Placeholder Laplacian, kept so the return tuple stays unchanged.
    L1 = np.zeros((pointnum1, pointnum1)).astype('float32')
    cotw1 = np.zeros((cotweight1.shape[0], cotweight1.shape[1], 1)).astype('float32')
    cotw1[:, :, 0] = cotweight1
    # Vertex degree = number of non-zero neighbour indices in each row.
    degree1 = np.zeros((neighbour1.shape[0], 1)).astype('float32')
    for i in range(neighbour1.shape[0]):
        degree1[i] = np.count_nonzero(nb1[i])
    return f, nb1, degree1, logrmin, logrmax, smin, smax, modelnum, pointnum1, maxdegree1, L1, cotw1
def bce(o, t):
    """Binary cross-entropy of predictions *o* against targets *t*.

    Predictions are clipped away from 0 and 1 so the logs stay finite.
    """
    clipped = tf.clip_by_value(o, 1e-7, 1. - 1e-7)
    log_likelihood = t * tf.log(clipped) + (1. - t) * tf.log(1. - clipped)
    return tf.reduce_mean(-log_likelihood)
def loss_abs(a, b):
    """Mean absolute error between a and b."""
    diff = tf.subtract(a, b)
    return tf.reduce_mean(tf.abs(diff))
def loss_mse(a, b):
    """Root-mean-square error between a and b.

    Note: despite the name this returns the square ROOT of the MSE.
    """
    squared = tf.square(tf.subtract(a, b))
    return tf.pow(tf.reduce_mean(squared), 0.5)
def load_data_old(path, result_max, result_min):
    """Load a distance matrix from a .mat file, min-max normalized.

    Args:
        path: .mat file containing at least 'dismat'.
        result_max, result_min: target range of the normalized matrix.

    Returns:
        (x, x_min, x_max, metric_lz_a, metric_lz_b).  x_min/x_max are the
        epsilon-padded raw extrema needed by recover_data_old.  The metric
        arrays are empty unless BOTH 'lz_a' and 'lz_b' are present; when
        present they are zero arrays of the stored shapes (preserved from the
        original implementation, which zeroed them after loading).
    """
    data = sio.loadmat(path)
    datalist = data.keys()
    dismat = data['dismat']
    if 'lz_a' in datalist:
        metric_lz_a = np.zeros_like(data['lz_a']).astype('float32')
    if 'lz_b' in datalist:
        metric_lz_b = np.zeros_like(data['lz_b']).astype('float32')
    x = dismat.astype('float32')
    # Epsilon padding keeps normalized values strictly inside the range.
    x_min = x.min() - 1e-6
    x_max = x.max() + 1e-6
    x = (result_max - result_min) * (x - x_min) / (x_max - x_min) + result_min
    if not ('lz_a' in datalist and 'lz_b' in datalist):
        metric_lz_a = np.array([])
        metric_lz_b = np.array([])
    return x, x_min, x_max, metric_lz_a, metric_lz_b
def load_lfd(path, std=100.0):
    """Load a light-field-descriptor distance matrix, mean/std standardized.

    Args:
        path: .mat file containing at least 'dismat'.
        std: divisor applied after mean-centering.

    Returns:
        (x, x_mean, std, metric_lz_a, metric_lz_b) so the caller can undo the
        transform with recover_lfd(x, x_mean, std).  (The original returned
        undefined x_min/x_max names and raised NameError unconditionally.)
        The metric arrays are empty unless BOTH 'lz_a' and 'lz_b' are
        present; when present they are zeroed, as in load_data_old.
    """
    data = sio.loadmat(path)
    datalist = data.keys()
    dismat = data['dismat']
    if 'lz_a' in datalist:
        metric_lz_a = np.zeros_like(data['lz_a']).astype('float32')
    if 'lz_b' in datalist:
        metric_lz_b = np.zeros_like(data['lz_b']).astype('float32')
    x = dismat.astype('float32')
    x_mean = x.mean()
    x = (x - x_mean) / std
    if not ('lz_a' in datalist and 'lz_b' in datalist):
        metric_lz_a = np.array([])
        metric_lz_b = np.array([])
    return x, x_mean, std, metric_lz_a, metric_lz_b
def recover_lfd(dis, mean, std):
    """Undo the (x - mean) / std standardization applied by load_lfd."""
    return dis * std + mean
def recover_data_old(dis, dismin, dismax, resultmin, resultmax):
    """Map *dis* from [resultmin, resultmax] back onto [dismin, dismax]."""
    scale = (dismax - dismin) / (resultmax - resultmin)
    return (dis - resultmin) * scale + dismin
def recover_data(recover_feature, logrmin, logrmax, smin, smax, resultmin, resultmax, useS=True):
    """Map normalized features back to their original value ranges.

    Channels 0:3 (log-R) are rescaled into [logrmin, logrmax]; when useS is
    True, channels 3:9 (scale) are rescaled into [smin, smax] and appended.
    """
    span = resultmax - resultmin
    logr = recover_feature[:, :, 0:3]
    logr = (logrmax - logrmin) * (logr - resultmin) / span + logrmin
    if useS:
        s = recover_feature[:, :, 3:9]
        s = (smax - smin) * (s - resultmin) / span + smin
        logr = np.concatenate((logr, s), axis=2)
    return logr
# -----------------------------------------------------------conv operation
def leaky_relu(input_, alpha=0.02):
    """Leaky ReLU: identity for positive inputs, slope *alpha* otherwise."""
    return tf.maximum(alpha * input_, input_)
def softplusplus(input_, alpha=0.02):
    """Soft-plus-plus activation: log(1 + e^{(1-a)x}) + a*x - log(2)."""
    exp_term = tf.exp(input_ * (1.0 - alpha))
    return tf.log(1.0 + exp_term) + alpha * input_ - tf.log(2.0)
def linear_l2(input_, input_size, output_size, name='Linear', stddev=0.02, bias_start=0.0):
    """Fully connected layer (input_ @ W + b) that also returns W.

    Variables live in an L2-regularized scope; the weight matrix is returned
    so callers can share it elsewhere (see linear1).
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        weights = tf.get_variable("weights", [input_size, output_size], tf.float32,
                                  tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size], tf.float32,
                               initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, weights) + bias, weights
def linear(input_, input_size, output_size, name='Linear', stddev=0.02, bias_start=0.0):
    """Fully connected layer: input_ @ W + b, variables L2-regularized."""
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        weights = tf.get_variable("weights", [input_size, output_size], tf.float32,
                                  tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size], tf.float32,
                               initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, weights) + bias
def linear1(input_, matrix, output_size, name='Linear', stddev=0.02, bias_start=0.0):
    """Affine layer using an externally supplied weight matrix.

    Only the bias variable is created here; *matrix* is provided by the
    caller (typically the weights returned by linear_l2, enabling weight
    sharing).  *stddev* is unused but kept for signature compatibility with
    linear().
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        bias = tf.get_variable("bias", [output_size], tf.float32,
                               initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, matrix) + bias
def batch_norm_wrapper(inputs, name='batch_norm', is_training=False, decay=0.9, epsilon=1e-5):
    """Batch normalization with manually managed population statistics.

    Training mode creates the scale/shift and running-statistic variables,
    normalizes by the batch moments, and updates the running mean/variance
    with exponential decay.  Inference mode reuses the stored variables and
    normalizes by the population statistics.
    """
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- presumably a
        # project-patched variable_scope; confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        if is_training == True:
            # Create trainable scale/shift plus non-trainable running stats.
            scale = tf.get_variable('scale', dtype=tf.float32, trainable=True,
                                    initializer=tf.ones([inputs.get_shape()[-1]], dtype=tf.float32))
            beta = tf.get_variable('beta', dtype=tf.float32, trainable=True,
                                   initializer=tf.zeros([inputs.get_shape()[-1]], dtype=tf.float32))
            pop_mean = tf.get_variable('overallmean', dtype=tf.float32, trainable=False,
                                       initializer=tf.zeros([inputs.get_shape()[-1]], dtype=tf.float32))
            pop_var = tf.get_variable('overallvar', dtype=tf.float32, trainable=False,
                                      initializer=tf.ones([inputs.get_shape()[-1]], dtype=tf.float32))
        else:
            # Inference: reuse the variables created during training.
            scope.reuse_variables()
            scale = tf.get_variable('scale', dtype=tf.float32, trainable=True)
            beta = tf.get_variable('beta', dtype=tf.float32, trainable=True)
            pop_mean = tf.get_variable('overallmean', dtype=tf.float32, trainable=False)
            pop_var = tf.get_variable('overallvar', dtype=tf.float32, trainable=False)
        if is_training == True:
            # Normalize over every axis except the last (feature) axis and
            # update the running statistics before returning.
            axis = list(range(len(inputs.get_shape()) - 1))
            batch_mean, batch_var = tf.nn.moments(inputs, axis)
            train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
            train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, scale, epsilon)
        else:
            return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, epsilon)
def fclayer(inputs, input_dim, output_dim, name='fclayer', training=True):
    """Fully connected block: linear -> batch norm -> leaky ReLU."""
    with tf.variable_scope(name) as scope:
        # NOTE(review): set_regularizer is not a stock TF API -- confirm.
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        pre_act = linear(inputs, input_dim, output_dim, 'fclinear')
        normed = batch_norm_wrapper(pre_act, name='fcbn', is_training=training, decay=0.9)
        return leaky_relu(normed)
# ------------------------------------------------------prior_factory.py------------------------
def onehot_categorical(batch_size, n_labels):
    """Sample *batch_size* one-hot rows with uniformly random hot positions."""
    y = np.zeros((batch_size, n_labels), dtype=np.float32)
    indices = np.random.randint(0, n_labels, batch_size)
    # Vectorized equivalent of setting y[b, indices[b]] = 1 in a loop.
    y[np.arange(batch_size), indices] = 1
    return y
def uniform(batch_size, n_dim, n_labels=10, minv=-1, maxv=1, label_indices=None):
    """Sample from U(minv, maxv); with labels, sample per-label grid cells.

    When label_indices is given (n_dim must be 2 per coordinate pair), the
    [minv, maxv] square is split into ceil(sqrt(n_labels))^2 cells and each
    sample is drawn uniformly inside the cell of its label.

    Returns:
        (batch_size, n_dim) float32 array.
    """
    if label_indices is not None:
        if n_dim != 2:
            raise Exception("n_dim must be 2.")

        def sample(label, n_labels):
            num = int(np.ceil(np.sqrt(n_labels)))  # cells per side
            size = (maxv - minv) * 1.0 / num       # edge length of one cell
            x, y = np.random.uniform(-size / 2, size / 2, (2,))
            # Integer grid coordinates of the label's cell.  Floor division
            # keeps the row index an int on Python 3 as well (the previous
            # plain / relied on Python 2 integer division).
            i = label // num
            j = label % num
            x += j * size + minv + 0.5 * size
            y += i * size + minv + 0.5 * size
            return np.array([x, y]).reshape((2,))

        z = np.empty((batch_size, n_dim), dtype=np.float32)
        for batch in range(batch_size):
            for zi in range(int(n_dim / 2)):
                z[batch, zi * 2:zi * 2 + 2] = sample(label_indices[batch], n_labels)
    else:
        z = np.random.uniform(minv, maxv, (batch_size, n_dim)).astype(np.float32)
    return z
def gaussian(batch_size, n_dim, mean=0, var=1, n_labels=10, use_label_info=False):
    """Sample from N(mean, var); optionally return an angle-derived label.

    With use_label_info (n_dim must be 2 per pair), each 2-D sample is also
    assigned a label computed from its angle around (mean, mean).

    Returns:
        z of shape (batch_size, n_dim); additionally z_id of shape
        (batch_size, 1) when use_label_info is True.
    """
    if use_label_info:
        if n_dim != 2:
            raise Exception("n_dim must be 2.")

        def sample(n_labels):
            x, y = np.random.normal(mean, var, (2,))
            # Angle of the sample around the mean, in degrees (-180, 180].
            angle = np.angle((x - mean) + 1j * (y - mean), deg=True)
            # NOTE(review): (int(n_labels * angle)) // 360 truncates BEFORE
            # dividing; possibly int(n_labels * angle / 360) was intended --
            # the two differ for negative angles.  Confirm.
            label = (int(n_labels * angle)) // 360
            if label < 0:
                label += n_labels
            return np.array([x, y]).reshape((2,)), label

        z = np.empty((batch_size, n_dim), dtype=np.float32)
        z_id = np.empty((batch_size, 1), dtype=np.int32)
        for batch in range(batch_size):
            for zi in range(int(n_dim / 2)):
                # NOTE(review): z_id keeps only the LAST pair's label per row.
                a_sample, a_label = sample(n_labels)
                z[batch, zi * 2:zi * 2 + 2] = a_sample
                z_id[batch] = a_label
        return z, z_id
    else:
        z = np.random.normal(mean, var, (batch_size, n_dim)).astype(np.float32)
        return z
def gaussian_mixture(batch_size, n_dim=2, n_labels=10, x_var=0.5, y_var=0.1, label_indices=None):
    """Sample from a 2-D Gaussian mixture whose modes ring the origin.

    Each label selects one of n_labels anisotropic Gaussians rotated evenly
    around a circle of radius 1.4.  Without label_indices, modes are chosen
    uniformly at random.

    Returns:
        (batch_size, n_dim) float32 array (n_dim must be 2 per pair).
    """
    if n_dim != 2:
        raise Exception("n_dim must be 2.")

    def sample(x, y, label, n_labels):
        shift = 1.4
        # Rotate the (x, y) sample by the label's angle and shift it onto
        # the ring of mode centers.
        r = 2.0 * np.pi / float(n_labels) * float(label)
        new_x = x * cos(r) - y * sin(r)
        new_y = x * sin(r) + y * cos(r)
        new_x += shift * cos(r)
        new_y += shift * sin(r)
        return np.array([new_x, new_y]).reshape((2,))

    x = np.random.normal(0, x_var, (batch_size, int(n_dim / 2)))
    y = np.random.normal(0, y_var, (batch_size, int(n_dim / 2)))
    z = np.empty((batch_size, n_dim), dtype=np.float32)
    for batch in range(batch_size):
        for zi in range(int(n_dim / 2)):
            if label_indices is not None:
                z[batch, zi * 2:zi * 2 + 2] = sample(x[batch, zi], y[batch, zi], label_indices[batch], n_labels)
            else:
                z[batch, zi * 2:zi * 2 + 2] = sample(x[batch, zi], y[batch, zi], np.random.randint(0, n_labels),
                                                     n_labels)
    return z
|
import math
# Code Jam-style solver (Python 2: uses print statements and integer /).
def solve(k, c, s):
    """Return space-separated 1-based combined indices covering k symbols,
    or "IMPOSSIBLE" when s tries are too few (each try encodes c symbols)."""
    tries = int(math.ceil(k / float(c)))
    if s < tries:
        return "IMPOSSIBLE"
    indexes = []
    curK = 0
    for i in range(0, tries):
        ind = 0
        # Place value of the most significant of c base-k digits.
        mult = k**(c-1)
        for j in range(0, c):
            ind += curK * mult
            mult /= k  # integer division under Python 2
            curK += 1
            if curK >= k:
                break
        oldInd = ind
        # Clamp to the largest valid index (last try may be partial).
        ind = min(ind, k**c - 1)
        if oldInd != ind:
            print "##############" + str(oldInd - ind) + "############"
        indexes.append(str(ind + 1))
    return " ".join(indexes)
# I/O driver (Python 2).  Reads one test case per line: "k c s".
name = "storage/emulated/0/codejam/D-large"
fi = open(name + ".in", "r")
fout = open(name + ".out", "w")
numTestCases = int(fi.readline())
print "#TestCases: ", numTestCases
for i in range(0, numTestCases):
    line = fi.readline().strip().split(" ")
    line = map(int, line)
    k = line[0]
    c = line[1]
    s = line[2]
    # NOTE(review): solve() runs twice per case (file write + console echo).
    fout.write("Case #" + str(i + 1) + ": " + solve(k, c, s) + "\n")
    print "Case #" + str(i + 1) + ": " + solve(k, c, s)
fi.close()
fout.close()
import pandas as pd, numpy as np
import re, sys, os
from matplotlib import pyplot as plt
def parse(file_path, pattern="Accuracy Score"):
    """Extract float scores from lines of a log file.

    Each line matching ``<pattern>[spaces][colons] <float>`` contributes one
    float to the returned list, in file order.

    Args:
        file_path: path of the log file to scan.
        pattern: literal prefix preceding the numeric score.

    Returns:
        List of floats (empty if nothing matched).
    """
    scores = []
    print(file_path)
    # Raw string with an escaped dot: the previous "\d*.\d*" let '.' match
    # any character, so strings like "1x5" could parse as scores.  Compiled
    # once instead of per line.
    regex = re.compile(pattern + r"\s*:* (\d*\.\d*)")
    with open(file_path, 'r') as f:
        for line in f:
            match = regex.search(line)
            if match:
                scores.append(float(match.group(1)))
    return scores
def plot():
    """Render and save box plots of model accuracy and MSE scores.

    Reads the per-model log files under ./results/, writes two figures under
    ./graphs/, and shows each interactively.  The usermedians values are
    hard-coded overrides -- presumably precomputed medians; confirm they
    still match the current result files.
    """
    # Accuracy box plot across the three classifiers.
    xgboost_scores = parse("./results/XGBoost/xgboost_output.txt")
    svc_scores = parse("./results/svc/svc_output_ec2.txt")
    neural_network_scores = parse("./results/neural_network/neural_network_output.txt")
    fig = plt.figure(figsize=[15,10])
    ax = plt.axes()
    plt.boxplot([xgboost_scores, svc_scores, neural_network_scores], whis='range',\
        usermedians=[0.024210444278385725, 0.03926197570479028, 0.06511743051130295])
    plt.ylim(-0.1, 1)
    plt.title("Box Plot of Accuracy Score", fontsize=20)
    ax.set_xticklabels(['xgboost', 'svc', 'neural network'], fontsize=20)
    fig.savefig('./graphs/box_plot_accuracy.png')
    plt.show()
    # Error box plot; each log uses a different metric label.
    xgboost_mse = parse("./results/XGBoost/xgboost_output.txt", "Mean Absolute Error")
    prophet_mse = parse("./results/prophet/prophet_output.txt", "Metric mse")
    multi_regression_mse = parse("./results/multi_regression/multi_regression_output.txt", "The MSE of prediction is")
    fig = plt.figure(figsize=[20,20])
    ax = plt.axes()
    plt.boxplot([xgboost_mse, prophet_mse, multi_regression_mse], whis='range',\
        usermedians=[1.1064039558425396, 0.29742817421625595, 43.75182238704884])
    #plt.ylim(-0.1, 2)
    plt.title("Box Plot of Mean Square Error", fontsize=20)
    ax.set_xticklabels(['xgboost', 'prophet', 'multivariate regression'], fontsize=20)
    fig.savefig("./graphs/box_plot_mse.png")
    plt.show()
if __name__ == "__main__":
    # Sanity check on one log before rendering all plots.
    scores = parse("./results/neural_network/neural_network_output.txt", "Accuracy Score")
    print (min(scores))
    plot()
'''
Get python modules
'''
import cPickle
import os
import csv
'''
Get third-party modules
'''
from fisher import pvalue as fisher
'''
Get MOCA modules
'''
from MOCA.DataHandler import get_path, get_supervised_dataset
from MOCA.Statistics import Performance, contingency_table, EffectSize
from MOCA.Setworks import assemble_setwork
from DataHandler import load_results_file
def validate_markers(Arguments):
    '''
    Re-evaluate previously selected setwork markers on a validation dataset
    (Python 2 module: csv file opened in "wb").  For every phenotype and
    every marker row loaded from the ValidateMarkers results file, rebuilds
    the setwork predictor, recomputes the contingency table against the
    phenotype response, and writes performance/effect-size statistics to
    <Arguments.Filename>.csv.  Markers whose setwork cannot be assembled
    (ValueError) get an "NA" row instead.
    '''
    Labels, Features, Variates, Phenotypes, Markers = get_supervised_dataset(Arguments)
    Header, Results = load_results_file(get_path("ValidateMarkers"))
    CSVfile = open(Arguments.Filename + ".csv", "wb")
    CSVwriter = csv.writer(CSVfile, dialect='excel')
    CSVwriter.writerow(["Union","Intersection","Difference","Interaction", "Phenotype", "P-value", "Odds Ratio", "Effect Size",
                        "Sensitivity","Specificity","PPV","NPV","Accuracy", "MCC", "Sample Count", "Case Count"])
    for Phenotype in Phenotypes:
        # Response vector for this phenotype.
        Response = Variates[Features.index(Phenotype)]
        for Marker in Results:
            try:
                # Rebuild the predictor from the union/intersection/difference
                # feature lists stored in the results file.
                Predictor = assemble_setwork(Features, Variates,
                                             filter(None, Marker[Header.index("Union")].split(", ")),
                                             filter(None, Marker[Header.index("Intersection")].split(", ")),
                                             filter(None, Marker[Header.index("Difference")].split(", ")), Arguments)
                TP,FP,FN,TN = contingency_table(Predictor, Response, NA=Arguments.NA)
                performance = Performance(Marker[Header.index("Interaction")], TP,FP,FN,TN)
                effect_size = EffectSize(Marker[Header.index("Interaction")], TP,FP,FN,TN)
                CSVwriter.writerow([Marker[Header.index("Union")], Marker[Header.index("Intersection")], Marker[Header.index("Difference")],
                                    Marker[Header.index("Interaction")], Phenotype[:Phenotype.index(":")], "%0.2e" %fisher(TP,FP,FN,TN).two_tail,
                                    "%0.2f" %effect_size.odds_ratio, "%0.2f" %effect_size.difference_of_proportions, "%0.2f" %performance.sensitivity,
                                    "%0.2f" %performance.specificity, "%0.2f" %performance.PPV, "%0.2f" %performance.NPV,
                                    "%0.2f" %performance.accuracy, "%0.2f" %performance.MCC, TP+FP+FN+TN, TP+FN])
            except ValueError:
                # Setwork could not be assembled for this marker on this data.
                CSVwriter.writerow([Marker[Header.index("Union")], Marker[Header.index("Intersection")], Marker[Header.index("Difference")], "NA"])
    CSVfile.close()
    return
|
import asyncio
import json
from asyncio import CancelledError
from copy import copy
from decimal import Decimal
from unittest import TestCase
from unittest.mock import patch
from aioresponses import aioresponses
import hummingbot.connector.parrot as parrot
class ParrotConnectorUnitTest(TestCase):
# logging.Level required to receive logs from the data source logger
level = 0
    @classmethod
    def setUpClass(cls):
        # Shared event loop used to drive the async parrot API helpers.
        cls.ev_loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
    def setUp(self) -> None:
        """Install self as a log handler and build canned API fixtures."""
        super().setUp()
        self.log_records = []
        # Route the parrot module's logs through this TestCase (see handle()).
        parrot.logger().setLevel(1)
        parrot.logger().addHandler(self)
        # Canned /campaigns response.
        self.campaigns_get_resp = {"status": "success", "campaigns": [
            {"id": 1, "campaign_name": "zilliqa", "link": "https://zilliqa.com/index.html", "markets": [
                {"market_id": 1, "exchange_name": "binance", "base_asset": "ZIL", "quote_asset": "USDT",
                 "base_asset_full_name": "zilliqa", "quote_asset_full_name": "tether", "trading_pair": "ZILUSDT",
                 "return": 1.4600818845692998, "last_snapshot_ts": 1592263560000,
                 "last_snapshot_volume": 7294.967867187001, "trailing_1h_volume": 525955.48182515,
                 "hourly_payout_usd": 1.488095238095238, "bots": 10, "last_hour_bots": 14, "filled_24h_volume": 0.0,
                 "market_24h_usd_volume": 0.0},
            ]}]}
        # Expected CampaignSummary objects for the various scenarios.
        self.expected_campaign_no_markets = parrot.CampaignSummary(market_id=1, trading_pair='ZIL-USDT',
                                                                   exchange_name='binance', spread_max=Decimal('0'),
                                                                   payout_asset='', liquidity=Decimal('0'),
                                                                   liquidity_usd=Decimal('0'), active_bots=0,
                                                                   reward_per_wk=Decimal('0'), apy=Decimal('0'))
        self.expected_campaign_w_markets = parrot.CampaignSummary(market_id=1, trading_pair='ZIL-USDT',
                                                                  exchange_name='binance', spread_max=Decimal('0.02'),
                                                                  payout_asset='ZIL', liquidity=Decimal('0'),
                                                                  liquidity_usd=Decimal('0'), active_bots=15,
                                                                  reward_per_wk=Decimal('205930.0'), apy=Decimal('0'))
        self.expected_campaign_32_markets = {
            32: parrot.CampaignSummary(market_id=32, trading_pair='ALGO-USDT', exchange_name='binance',
                                       spread_max=Decimal('0.015'), payout_asset='ALGO', liquidity=Decimal('0'),
                                       liquidity_usd=Decimal('0'), active_bots=18, reward_per_wk=Decimal('341.0'),
                                       apy=Decimal('0'))}
        # Canned /markets response (one active bounty period for ZIL/USDT).
        self.markets_get_resp = {"status": "success", "markets": [
            {"base_asset": "ZIL",
             "base_asset_full_name": "zilliqa", "exchange_name": "binance",
             "market_id": 1, "quote_asset": "USDT", "quote_asset_full_name": "tether", "trading_pair": "ZIL/USDT",
             "base_asset_address": "", "quote_asset_address": "",
             "active_bounty_periods": [
                 {"bounty_period_id": 2396, "bounty_campaign_id": 38, "bounty_campaign_name": "dafi",
                  "bounty_campaign_link": "https://zilliqa.com/index.html", "start_timestamp": 1657584000000,
                  "end_timestamp": 1658188800000, "budget": {"bid": 102965.0, "ask": 102965.0}, "spread_max": 2.0,
                  "payout_asset": "ZIL"}], "return": 8.694721275945772, "last_snapshot_ts": 1657812180000,
             "last_snapshot_volume": 3678.5291375, "trailing_1h_volume": 261185.66037849995,
             "hourly_payout_usd": 4.317788244047619, "bots": 15, "last_hour_bots": 18, "filled_24h_volume": 6816.23476,
             "weekly_reward_in_usd": 751.8118232323908, "weekly_reward": {"ZIL": 205735.9191468253},
             "has_user_bots": 'false', "market_24h_usd_volume": 0.0}]}
        # Generic error body used for failure-path tests.
        self.get_fail = {"status": "error", "message": "ERROR message"}
        # Canned /user/single_snapshot response for market 32.
        self.snapshot_get_resp = {"status": "success", "market_snapshot": {"market_id": 32, "timestamp": 1657747860000,
                                                                           "last_snapshot_ts": 1657747864000,
                                                                           "annualized_return": 0.4026136989303596,
                                                                           "payout_summary": {"open_volume": {
                                                                               "reward": {
                                                                                   "ask": {"ALGO": 0.01691468253968254},
                                                                                   "bid": {
                                                                                       "ALGO": 0.01691468253968254}},
                                                                               "reward_profoma": {
                                                                                   "ask": {"ALGO": 0.01691468253968254},
                                                                                   "bid": {
                                                                                       "ALGO": 0.01691468253968254}},
                                                                               "payout_asset_usd_rate": {
                                                                                   "ALGO": 0.30415},
                                                                               "total_hourly_payout_usd": 0.6173520833333332},
                                                                               "filled_volume": {}},
                                                                           "summary_stats": {"open_volume": {
                                                                               "ask": {"accumulated_roll_over": 0},
                                                                               "bid": {"accumulated_roll_over": 0},
                                                                               "bots": 17, "oov_ask": 16274,
                                                                               "oov_bid": 31099, "bots_ask": 14,
                                                                               "bots_bid": 9,
                                                                               "spread_ask": 0.29605111465204803,
                                                                               "spread_bid": 0.33684674006707405,
                                                                               "last_hour_bots": 19,
                                                                               "oov_eligible_ask": 16156,
                                                                               "oov_eligible_bid": 26059,
                                                                               "last_hour_bots_ask": 16,
                                                                               "last_hour_bots_bid": 15,
                                                                               "base_asset_usd_rate": 0.30415,
                                                                               "quote_asset_usd_rate": 1},
                                                                               "filled_volume": {}}},
                                  "user_snapshot": {"timestamp": 1657747860000, "is_default": True,
                                                    "rewards_summary": {"ask": {}, "bid": {}},
                                                    "summary_stats": {"oov_ask": 0, "oov_bid": 0, "reward_pct": 0,
                                                                      "spread_ask": -1, "spread_bid": -1,
                                                                      "reward": {"ask": {}, "bid": {}},
                                                                      "reward_profoma": {"ask": {}, "bid": {}},
                                                                      "open_volume_pct": 0, "oov_eligible_ask": 0,
                                                                      "oov_eligible_bid": 0}},
                                  "market_mid_price": 0.30415}
        # Error bodies and the expected aggregated summary.
        self.expected_snapshots_bad_timestamp = {"status": "error",
                                                 "message": "Data not available for timestamp 1657747860000."}
        self.expected_snapshots_error = {"status": "error", "message": "404: Not Found"}
        self.expected_summary = {
            'ALGO-USDT': parrot.CampaignSummary(market_id=32, trading_pair='ALGO-USDT', exchange_name='binance',
                                                spread_max=Decimal('0.015'), payout_asset='ALGO',
                                                liquidity=Decimal('42215'),
                                                liquidity_usd=Decimal('12839.69224999999898390035113'), active_bots=17,
                                                reward_per_wk=Decimal('341.0'),
                                                apy=Decimal('0.40261369893035958700266974119585938751697540283203125'))}
    def handle(self, record):
        # Acts as a logging handler: captures every record for _is_logged().
        self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage().startswith(message)
for record in self.log_records)
    @aioresponses()
    def test_get_active_campaigns_empty_markets(self, mocked_http):
        """An empty /markets body leaves campaigns unenriched and logs a warning."""
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}campaigns", body=json.dumps(self.campaigns_get_resp))
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(""))
        campaigns = self.ev_loop.run_until_complete(parrot.get_active_campaigns("binance"))
        self.assertEqual({1: self.expected_campaign_no_markets}, campaigns)
        self.assertTrue(self._is_logged("WARNING",
                                        "Could not get active markets from Hummingbot API"
                                        " (returned response '')."))
    @aioresponses()
    def test_get_active_campaigns_failed_markets(self, mocked_http):
        """An error-status /markets body leaves campaigns unenriched and logs it."""
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}campaigns", body=json.dumps(self.campaigns_get_resp))
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.get_fail))
        campaigns = self.ev_loop.run_until_complete(parrot.get_active_campaigns("binance"))
        self.assertEqual({1: self.expected_campaign_no_markets}, campaigns)
        self.assertTrue(self._is_logged("WARNING",
                                        "Could not get active markets from Hummingbot API"
                                        f" (returned response '{self.get_fail}')."))
    @aioresponses()
    def test_get_active_campaigns_markets_wrong_id(self, mocked_http):
        """A market whose id matches no campaign is ignored without a warning."""
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}campaigns", body=json.dumps(self.campaigns_get_resp))
        # Mutate the shared fixture so its market_id no longer matches.
        market_wrong_id = self.markets_get_resp
        market_wrong_id["markets"][0]["market_id"] = 10
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(market_wrong_id))
        campaigns = self.ev_loop.run_until_complete(parrot.get_active_campaigns("binance"))
        self.assertEqual({1: self.expected_campaign_no_markets}, campaigns)
        self.assertFalse(self._is_logged("WARNING",
                                         "Could not get active markets from Hummingbot API"
                                         f" (returned response '{self.get_fail}')."))
    @aioresponses()
    def test_get_active_campaigns_markets(self, mocked_http):
        """Valid /campaigns and /markets responses yield the enriched summary."""
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}campaigns", body=json.dumps(self.campaigns_get_resp))
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.markets_get_resp))
        campaigns = self.ev_loop.run_until_complete(parrot.get_active_campaigns("binance", ["ZIL-USDT"]))
        self.assertEqual({1: self.expected_campaign_w_markets}, campaigns)

    # Live variant kept for manual runs (hits the real API):
    # def test_get_active_campaigns_markets_live(self):
    #     campaigns = self.ev_loop.run_until_complete(parrot.get_active_campaigns("binance", ["ALGO-USDT"]))
    #     self.assertEqual({1: self.expected_campaign_w_markets}, campaigns)
    @aioresponses()
    def test_get_active_markets(self, mocked_http):
        """get_active_markets enriches bare campaign summaries in place."""
        mocked_http.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.markets_get_resp))
        campaigns = {1: copy(self.expected_campaign_no_markets)}
        campaigns = self.ev_loop.run_until_complete(parrot.get_active_markets(campaigns))
        self.assertNotEqual({1: self.expected_campaign_no_markets}, campaigns)
        self.assertEqual({1: self.expected_campaign_w_markets}, campaigns)
    @aioresponses()
    def test_get_market_snapshots(self, mocked_http):
        """A successful market_band response is returned verbatim."""
        market_id = 32
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}charts/market_band?chart_interval=1&market_id={market_id}",
            body=json.dumps({"status": "success", "data": [
                {"timestamp": 1662589860000, "price": 0.30005, "ask": 0.301145, "bid": 0.298362,
                 "spread_ask": 0.3647958323482506, "spread_bid": 0.5624147716913023, "liquidity": 32932.5255}]}))
        snapshot = self.ev_loop.run_until_complete(parrot.get_market_snapshots(market_id))
        self.assertEqual({'data': [{'ask': 0.301145,
                                    'bid': 0.298362,
                                    'liquidity': 32932.5255,
                                    'price': 0.30005,
                                    'spread_ask': 0.3647958323482506,
                                    'spread_bid': 0.5624147716913023,
                                    'timestamp': 1662589860000}],
                          'status': 'success'}, snapshot)
    @aioresponses()
    def test_get_market_snapshots_returns_none(self, mocked_http):
        """Error status, missing status, or a null body all yield None."""
        market_id = 32
        # 'status' == "error"
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}charts/market_band?chart_interval=1&market_id={market_id}",
            body=json.dumps({"status": "error", "data": []}))
        snapshot = self.ev_loop.run_until_complete(parrot.get_market_snapshots(market_id))
        self.assertEqual(None, snapshot)
        # No 'status' field
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}charts/market_band?chart_interval=1&market_id={market_id}",
            body=json.dumps({"data": []}))
        snapshot = self.ev_loop.run_until_complete(parrot.get_market_snapshots(market_id))
        self.assertEqual(None, snapshot)
        # JSON resp is None
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}charts/market_band?chart_interval=1&market_id={market_id}",
            body=json.dumps(None))
        snapshot = self.ev_loop.run_until_complete(parrot.get_market_snapshots(market_id))
        self.assertEqual(None, snapshot)
    @aioresponses()
    def test_get_market_last_snapshot(self, mocked_http):
        """The timestamp from get_market_snapshots selects the single snapshot."""
        market_id = 32
        timestamp = 1662589860000
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}user/single_snapshot?aggregate_period=1m&market_id={market_id}&timestamp={timestamp}",
            body=json.dumps(self.snapshot_get_resp))
        with patch("hummingbot.connector.parrot.get_market_snapshots") as mocked_snapshots:
            mocked_snapshots.return_value = {"status": "success", "data": [{"timestamp": timestamp}]}
            snapshot = self.ev_loop.run_until_complete(parrot.get_market_last_snapshot(market_id))
        self.assertEqual(self.snapshot_get_resp, snapshot)

    # Live variant kept for manual runs; likely to drift as real data changes.
    # def test_get_market_last_snapshot_live(self):
    #     market_id = 32
    #     snapshot = self.ev_loop.run_until_complete(parrot.get_market_last_snapshot(market_id))
    #     self.assertEqual(self.snapshot_get_resp, snapshot)
    @aioresponses()
    def test_get_campaign_summary(self, mocked_http):
        """Happy path: campaigns plus a snapshot produce the expected summary."""
        timestamp = 16577478600000
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}user/single_snapshot?aggregate_period=1m&market_id={32}&timestamp={timestamp}",
            body=json.dumps(self.snapshot_get_resp))
        with patch("hummingbot.connector.parrot.get_market_snapshots") as mocked_snapshots:
            mocked_snapshots.return_value = {"status": "success", "data": [{"timestamp": timestamp}]}
            with patch('hummingbot.connector.parrot.get_active_campaigns') as mocked_ac:
                mocked_ac.return_value = self.expected_campaign_32_markets
                summary = self.ev_loop.run_until_complete(parrot.get_campaign_summary("binance", ["ALGO-USDT"]))
                self.assertEqual(self.expected_summary, summary)
    @aioresponses()
    def test_get_campaign_summary_http_error(self, mocked_http):
        """An error snapshot response yields an empty summary and an ERROR log entry."""
        timestamp = 16577478600000
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}user/single_snapshot?market_id={32}&timestamp={timestamp}&aggregate_period=1m",
            body=json.dumps(self.snapshot_get_resp))
        with patch('hummingbot.connector.parrot.get_active_campaigns') as mocked_ac:
            with patch("hummingbot.connector.parrot.get_market_snapshots") as mocked_snapshots:
                mocked_snapshots.return_value = {"status": "success", "data": [{"timestamp": timestamp}]}
                # NOTE(review): get_market_snapshots is patched twice; the
                # innermost patch (mocked_ss) is the one in effect for the call.
                with patch('hummingbot.connector.parrot.get_market_snapshots') as mocked_ss:
                    mocked_ac.return_value = self.expected_campaign_32_markets
                    mocked_ss.return_value = self.expected_snapshots_error
                    summary = self.ev_loop.run_until_complete(parrot.get_campaign_summary("binance", ["ALGO-USDT"]))
                    # No snapshot, just dict re-arrangement
                    self.assertEqual({}, summary)
                    self.assertTrue(self._is_logged("ERROR", "Unexpected error while requesting data from Hummingbot API."))
    @aioresponses()
    def test_get_campaign_summary_exception(self, mocked_http):
        """CancelledError from either dependency must propagate out of get_campaign_summary."""
        mocked_http.get(
            f"{parrot.PARROT_MINER_BASE_URL}user/single_snapshot?market_id={32}&timestamp={-1}&aggregate_period=1m",
            body=json.dumps(self.snapshot_get_resp))
        with patch('hummingbot.connector.parrot.get_active_campaigns') as mocked_ac:
            with patch('hummingbot.connector.parrot.get_market_snapshots') as mocked_ss:
                # Cancellation while fetching the active campaigns
                with self.assertRaises(CancelledError):
                    mocked_ac.side_effect = asyncio.CancelledError
                    mocked_ss.return_value = self.expected_campaign_32_markets
                    self.ev_loop.run_until_complete(parrot.get_campaign_summary("binance", ["ALGO-USDT"]))
                    # NOTE(review): unreachable — the line above raises, so this
                    # assertion never runs.
                    self.assertTrue(
                        self._is_logged("ERROR", "Unexpected error while requesting data from Hummingbot API."))
                # Cancellation while fetching the market snapshots
                with self.assertRaises(CancelledError):
                    mocked_ac.return_value = self.expected_campaign_32_markets
                    mocked_ss.side_effect = asyncio.CancelledError
                    self.ev_loop.run_until_complete(parrot.get_campaign_summary("binance", ["ALGO-USDT"]))
                    # NOTE(review): unreachable for the same reason.
                    self.assertTrue(
                        self._is_logged("ERROR", "Unexpected error while requesting data from Hummingbot API."))
    @aioresponses()
    def test_retrieve_active_campaigns_error_is_logged(self, mock_api):
        """An error status from the campaigns/markets endpoints yields no campaigns and a WARNING log."""
        resp = {"status": "error", "message": "Rate limit exceeded: 10 per 1 minute"}
        mock_api.get(f"{parrot.PARROT_MINER_BASE_URL}campaigns", body=json.dumps(resp))
        mock_api.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(resp))
        campaigns = asyncio.get_event_loop().run_until_complete(
            parrot.get_active_campaigns(
                exchange="binance",
                trading_pairs=["COINALPHA-HBOT"]))
        self.assertEqual(0, len(campaigns))
        self.assertTrue(self._is_logged("WARNING",
                                        "Could not get active campaigns from Hummingbot API"
                                        f" (returned response '{resp}')."))
    @aioresponses()
    def test_active_campaigns_are_filtered_by_token_pair(self, mock_api):
        """Only campaigns whose market trades one of the requested pairs are kept."""
        url = f"{parrot.PARROT_MINER_BASE_URL}campaigns"
        # Two campaigns: only the second one trades COINALPHA-HBOT.
        resp = {
            "status": "success",
            "campaigns": [{
                "id": 26,
                "campaign_name": "xym",
                "link": "https://symbolplatform.com/",
                "markets": [{
                    "market_id": 62,
                    "trading_pair": "XYM-BTC",
                    "exchange_name": "kucoin",
                    "base_asset": "XYM",
                    "base_asset_full_name": "symbol",
                    "quote_asset": "BTC",
                    "quote_asset_full_name": "bitcoin"}]},
                {
                    "id": 27,
                    "campaign_name": "test",
                    "link": "https://symbolplatform.com/",
                    "markets": [{
                        "market_id": 63,
                        "trading_pair": "COINALPHA-HBOT",
                        "exchange_name": "kucoin",
                        "base_asset": "COINALPHA",
                        "base_asset_full_name": "coinalpha",
                        "quote_asset": "HBOT",
                        "quote_asset_full_name": "hbot"}]}]}
        mock_api.get(url, body=json.dumps(resp))
        mock_api.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.markets_get_resp))
        campaigns = asyncio.get_event_loop().run_until_complete(
            parrot.get_active_campaigns(
                exchange="kucoin",
                trading_pairs=["COINALPHA-HBOT"]))
        # Only market 63 survives the filter.
        self.assertEqual(1, len(campaigns))
        campaign_summary: parrot.CampaignSummary = campaigns[63]
        self.assertEqual("COINALPHA-HBOT", campaign_summary.trading_pair)
        self.assertEqual("kucoin", campaign_summary.exchange_name)
        self.assertEqual(Decimal("0"), campaign_summary.spread_max)
    @aioresponses()
    def test_active_campaigns_are_filtered_by_exchange_name(self, mock_api):
        """Campaigns on other exchanges are dropped; name variants (ascend_ex/ascendex) match."""
        url = f"{parrot.PARROT_MINER_BASE_URL}campaigns"
        resp = {
            "status": "success",
            "campaigns": [{
                "id": 26,
                "campaign_name": "xym",
                "link": "https://symbolplatform.com/",
                "markets": [{
                    "market_id": 62,
                    "trading_pair": "XYM-BTC",
                    "exchange_name": "ascendex",
                    "base_asset": "XYM",
                    "base_asset_full_name": "symbol",
                    "quote_asset": "BTC",
                    "quote_asset_full_name": "bitcoin"}],
                "bounty_periods": [{
                    "id": 823,
                    "start_datetime": "2021-10-05T00:00:00",
                    "end_datetime": "2021-10-12T00:00:00",
                    "payout_parameters": [{
                        "id": 2212,
                        "market_id": 62,
                        "bid_budget": 1371.5,
                        "ask_budget": 1371.5,
                        "exponential_decay_function_factor": 8.0,
                        "spread_max": 1.5,
                        "payout_asset": "XYM"}]}]}]}
        # A non-matching exchange name filters the campaign out.
        mock_api.get(url, body=json.dumps(resp))
        mock_api.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.markets_get_resp))
        campaigns = asyncio.get_event_loop().run_until_complete(
            parrot.get_active_campaigns(
                exchange="test_exchange",
                trading_pairs=["XYM-BTC"]))
        self.assertEqual(0, len(campaigns))
        # "ascend_ex" is treated as the same entity as "ascendex".
        mock_api.get(url, body=json.dumps(resp))
        mock_api.get(f"{parrot.PARROT_MINER_BASE_URL}markets", body=json.dumps(self.markets_get_resp))
        campaigns = asyncio.get_event_loop().run_until_complete(
            parrot.get_active_campaigns(
                exchange="ascend_ex",
                trading_pairs=["XYM-BTC"]))
        self.assertEqual(1, len(campaigns))
    @aioresponses()
    def test_get_campaign_summary_logs_error_if_exception_happens(self, mock_api):
        """An HTTP-layer exception yields an empty summary and an ERROR log entry."""
        url = f"{parrot.PARROT_MINER_BASE_URL}campaigns"
        mock_api.get(url, exception=Exception("Test error description"))
        campaigns = asyncio.get_event_loop().run_until_complete(
            parrot.get_campaign_summary(
                exchange="test_exchange",
                trading_pairs=["XYM-BTC"]))
        self.assertEqual(0, len(campaigns))
        self.assertTrue(self._is_logged("ERROR",
                                        "Unexpected error while requesting data from Hummingbot API."))
def test_are_same_entity(self):
self.assertTrue(parrot.are_same_entity("ascend_ex", "ascendex"))
self.assertTrue(parrot.are_same_entity("ascend_ex", "ascend_ex"))
self.assertTrue(parrot.are_same_entity("gate_io", "gateio"))
self.assertFalse(parrot.are_same_entity("gate_io", "gateios"))
|
from assistance.smart_assistant import SmartAssistant
def main():
    """Entry point: greet the user, then run command cycles until the assistant stops."""
    helper = SmartAssistant()
    helper.hello()
    while helper.ready:
        helper.execute_cycle()
# Run the assistant only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2016 Taylor C. Richberger <taywee@gmx.com>
# This code is released under the license described in the LICENSE file
from __future__ import division, absolute_import, print_function, unicode_literals
from datetime import datetime, timedelta
from ssllabs.chaincert import ChainCert
from ssllabs.object import Object
from ssllabs.util import objectornone
class Chain(Object):
    '''object that can be used to access the chain of an endpoint, accessed from :meth:`ssllabs.endpointdetails.EndpointDetails.chain`'''

    def __init__(self, data):
        # Wrap every raw cert dict; an absent 'certs' key yields an empty list.
        self.__certs = [ChainCert(raw) for raw in data.get('certs', list())]
        # issues may be absent from the payload (objectornone handles that).
        self.__issues = objectornone(Issues, data, 'issues')

    @property
    def certs(self):
        '''a list of :class:`ssllabs.chaincert.ChainCert` objects,
        representing the chain certificates in the order in which they were
        retrieved from the server'''
        return self.__certs

    @property
    def issues(self):
        '''list of chain issues as an :class:`Issues` object'''
        return self.__issues
class Issues(object):
    '''Issues that may be present, from :meth:`Chain.issues`'''

    # (private attribute name, bitmask) pairs decoded from the API bitfield.
    _FLAG_BITS = (
        ('addedexternal', 1),
        ('incompletechain', 2),
        ('unrelated', 4),
        ('wrongorder', 8),
        ('selfsignedroot', 16),
        ('couldnotvalidate', 32),
    )

    def __init__(self, data):
        # *data* is an integer bitmask; each bit becomes a boolean attribute.
        for name, bit in self._FLAG_BITS:
            setattr(self, '_Issues__' + name, bool(data & bit))

    @property
    def addedexternal(self):
        '''if we added external certificates'''
        return self.__addedexternal

    @property
    def incompletechain(self):
        '''incomplete chain (set only when we were able to build a chain by
        adding missing intermediate certificates from external sources)'''
        return self.__incompletechain

    @property
    def unrelated(self):
        '''chain contains unrelated or duplicate certificates (i.e.,
        certificates that are not part of the same chain)'''
        return self.__unrelated

    @property
    def wrongorder(self):
        '''the certificates form a chain (trusted or not), but the order is
        incorrect'''
        return self.__wrongorder

    @property
    def selfsignedroot(self):
        '''contains a self-signed root certificate (not set for self-signed
        leafs)'''
        return self.__selfsignedroot

    @property
    def couldnotvalidate(self):
        '''the certificates form a chain (if we added external certificates,
        :meth:`addedexternal` will be set), but we could not validate it. If
        the leaf was trusted, that means that we built a different chain we
        trusted.'''
        return self.__couldnotvalidate
|
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from . import CameraConfig
__all__ = ['GaussianCameraConfig']
class GaussianCameraConfig(CameraConfig):
    """Configuration holder for the Gaussian camera.

    Attributes
    ----------
    doSpotOscillation : bool
        Flag to make the generated spot oscillate.
    xAmplitude : int
        The amplitude of the x component of the spot oscillation.
    xFrequency : float
        The frequency of the x component of the spot oscillation.
    yAmplitude : int
        The amplitude of the y component of the spot oscillation.
    yFrequency : float
        The frequency of the y component of the spot oscillation.
    """

    def __init__(self):
        """Initialize the configuration with its default oscillation values."""
        super().__init__()
        self.doSpotOscillation = False
        self.xAmplitude = 10
        self.xFrequency = 5.0
        self.yAmplitude = 5
        self.yFrequency = 10.0

    def fromDict(self, config):
        """Translate config to class attributes.

        Parameters
        ----------
        config : dict
            The configuration to translate.
        """
        super().fromDict(config)
        oscillation = config["spotOscillation"]
        self.doSpotOscillation = oscillation["do"]
        self.xAmplitude = oscillation["x"]["amplitude"]
        self.xFrequency = oscillation["x"]["frequency"]
        self.yAmplitude = oscillation["y"]["amplitude"]
        self.yFrequency = oscillation["y"]["frequency"]

    def toDict(self, writeEmpty=False):
        """Translate class attributes to configuration dict.

        Parameters
        ----------
        writeEmpty : bool
            Flag to write parameters with None as values.

        Returns
        -------
        dict
            The currently stored configuration.
        """
        config = super().toDict(writeEmpty)
        # Key insertion order ("x", "y", then "do") matches the original
        # implementation so serialized output is unchanged.
        config["spotOscillation"] = {
            "x": {"amplitude": self.xAmplitude, "frequency": self.xFrequency},
            "y": {"amplitude": self.yAmplitude, "frequency": self.yFrequency},
        }
        config["spotOscillation"]["do"] = self.doSpotOscillation
        return config
|
"""
Programa para realizar un filtro de datos
"""
DATA = [
{
'name': 'Facundo',
'age': 72,
'organization': 'Platzi',
'position': 'Technical Coach',
'language': 'python',
},
{
'name': 'Luisana',
'age': 33,
'organization': 'Globant',
'position': 'UX Designer',
'language': 'javascript',
},
{
'name': 'Héctor',
'age': 19,
'organization': 'Platzi',
'position': 'Associate',
'language': 'ruby',
},
{
'name': 'Gabriel',
'age': 20,
'organization': 'Platzi',
'position': 'Associate',
'language': 'javascript',
},
{
'name': 'Isabella',
'age': 30,
'organization': 'Platzi',
'position': 'QA Manager',
'language': 'java',
},
{
'name': 'Karo',
'age': 23,
'organization': 'Everis',
'position': 'Backend Developer',
'language': 'python',
},
{
'name': 'Ariel',
'age': 32,
'organization': 'Rappi',
'position': 'Support',
'language': '',
},
{
'name': 'Juan',
'age': 17,
'organization': '',
'position': 'Student',
'language': 'go',
},
{
'name': 'Pablo',
'age': 32,
'organization': 'Master',
'position': 'Human Resources Manager',
'language': 'python',
},
{
'name': 'Lorena',
'age': 56,
'organization': 'Python Organization',
'position': 'Language Maker',
'language': 'python',
},
]
def run():
    """Build several filtered views of DATA and print a selection of them."""
    # Comprehension-based selections.
    python_dev_names = [person['name'] for person in DATA if person['language'] == 'python']
    over_thirty_names = [person['name'] for person in DATA if person['age'] > 30]
    under_eighteen_names = [person['name'] for person in DATA if person['age'] < 18]
    # Equivalent selections kept from the original demo; only the Platzi
    # list below is actually printed.
    all_python_devs = [person["name"] for person in DATA if person["language"] == "python"]
    platzi_worker_names = [person["name"] for person in DATA if person["organization"] == "Platzi"]
    adults = [person["name"] for person in DATA if person["age"] > 18]
    old_people = [person | {"old": person["age"] > 70} for person in DATA]

    for name in platzi_worker_names:
        print(name)
    print('solo personas que saben python:')
    for name in python_dev_names:
        print(name)
    print('mayores de 30 años: ')
    for name in over_thirty_names:
        print(name)
    print('menores de 18 años: ')
    for name in under_eighteen_names:
        print(name)
# Execute the demo only when run as a script.
if __name__ == '__main__':
    run()
"""Project settings. Configure connection to database."""
db_name = 'company'
password = ''
user = 'root'
host = 'localhost'
auto_commit = True
|
#!/usr/bin/env python3
"""
Extract putative promoter regions from a ctab expression table.

For '+' strand transcripts the promoter window is start-500..start+500;
for '-' strand transcripts it is end-500..end+500. Starts below position 1
are clamped to 1. The output is a tab-separated file (no header) with
columns promoter_start, promoter_end, t_name, indexed by chromosome.

Usage: ./promoter_finder.py <ctab> <output_file>
"""
import sys

import pandas as pd

df = pd.read_csv(sys.argv[1], sep='\t', index_col='chr')
relevant_data = df.loc[:, ['t_name', 'start', 'end', 'strand']]
# Seed the promoter columns; they are overwritten per strand below.
relevant_data['promoter_start'] = relevant_data.loc[:, 'start']
relevant_data['promoter_end'] = relevant_data.loc[:, 'start']
# Forward-strand promoters are centered on 'start'.
forward_strand_filter = relevant_data.loc[:, 'strand'] == '+'
relevant_data.loc[forward_strand_filter, 'promoter_start'] = relevant_data.loc[forward_strand_filter, 'start'] - 500
relevant_data.loc[forward_strand_filter, 'promoter_end'] = relevant_data.loc[forward_strand_filter, 'start'] + 500
# Reverse-strand promoters are centered on 'end'.
reverse_strand_filter = relevant_data.loc[:, 'strand'] == '-'
relevant_data.loc[reverse_strand_filter, 'promoter_start'] = relevant_data.loc[reverse_strand_filter, 'end'] - 500
relevant_data.loc[reverse_strand_filter, 'promoter_end'] = relevant_data.loc[reverse_strand_filter, 'end'] + 500
relevant_data = relevant_data.drop(columns=['start', 'end', 'strand'])
# Clamp windows that would start before position 1.
low_start_filter = relevant_data.loc[:, 'promoter_start'] < 1
relevant_data.loc[low_start_filter, 'promoter_start'] = 1
# Single column reorder (the original performed this twice).
relevant_data = relevant_data[['promoter_start', 'promoter_end', 't_name']]
relevant_data.to_csv(sys.argv[2], sep='\t', header=False)
from models.db_models.models import ActionLogTypes
from db.db import session
from flask import Flask, jsonify, request
from flask_restful import Resource, fields, marshal_with, abort,reqparse
# Marshalling schema for ActionLogTypes rows returned by the resources below.
action_log_type_fields = {
    'id': fields.Integer,
    'message': fields.String,
    'code': fields.Integer
}
# NOTE(review): this parser is created but never used in this module.
parser = reqparse.RequestParser()
class ActionLogTypeResource(Resource):
    """CRUD operations on a single ActionLogTypes row addressed by primary key."""

    @marshal_with(action_log_type_fields)
    def get(self, id):
        """Return the row with the given id, or 404 when it does not exist."""
        action_log_type = session.query(ActionLogTypes).filter(ActionLogTypes.id == id).first()
        if not action_log_type:
            abort(404, message="Action log type {} doesn't exist".format(id))
        return action_log_type

    def delete(self, id):
        """Delete the row with the given id; 404 when missing, 400 on DB failure."""
        # Look up (and 404) before the try block: the original aborted inside
        # the try, so the HTTPException raised by abort() was caught by the
        # broad except and converted into a 400.
        action_log_type = session.query(ActionLogTypes).filter(ActionLogTypes.id == id).first()
        if not action_log_type:
            abort(404, message="Action log type {} doesn't exist".format(id))
        try:
            session.delete(action_log_type)
            session.commit()
            return {}, 204
        except Exception:
            session.rollback()
            abort(400, message="Error while remove Action Log Type")

    @marshal_with(action_log_type_fields)
    def put(self, id):
        """Update message/code of the row with the given id from the JSON body."""
        json_data = request.get_json(force=True)
        action_log_type = session.query(ActionLogTypes).filter(ActionLogTypes.id == id).first()
        if not action_log_type:
            # The original crashed with AttributeError here (reported as a
            # generic 400); report a proper 404 instead.
            abort(404, message="Action log type {} doesn't exist".format(id))
        try:
            action_log_type.message = json_data['message']
            action_log_type.code = json_data['code']
            session.add(action_log_type)
            session.commit()
            return action_log_type, 201
        except Exception:
            session.rollback()
            abort(400, message="Error while update Action Log Type")
class ActionLogTypeListResource(Resource):
    """Collection endpoint: list all ActionLogTypes rows and create new ones."""

    @marshal_with(action_log_type_fields)
    def get(self):
        """Return every ActionLogTypes row."""
        action_log_types = session.query(ActionLogTypes).all()
        return action_log_types

    @marshal_with(action_log_type_fields)
    def post(self):
        """Create a row from the JSON body; 400 on any failure."""
        try:
            json_data = request.get_json(force=True)
            # Also persist the optional 'code' field for consistency with the
            # PUT handler and the marshalled fields; previously it was ignored.
            action_log_type = ActionLogTypes(message=json_data["message"],
                                             code=json_data.get("code"))
            session.add(action_log_type)
            session.commit()
            return action_log_type, 201
        except Exception:
            session.rollback()
            abort(400, message="Error while adding record Action Log Type")
from talkback_accessible import talkback_focus
from node_checker import Node_Checker
class Node:
# raw_properties are the dictionary of properties associated with the node (e.g. "focusable" "cont_desc")
# characteristics are determined with heuristic tests (e.g. "is speakable" "is visible")
# parent, pointer to parent node
# children, empty list of children
#test
    def __init__(self, properties, parent, level_arg):
        """Wrap one UI-hierarchy node.

        properties: dict of raw attributes parsed from the view dump
            (e.g. 'class', 'bounds', 'clickable').
        parent: the parent Node, or None for the root.
        level_arg: depth of this node from the root.
        """
        self.raw_properties = properties
        # depth from root
        self.level = level_arg
        # lazily computed heuristic results (e.g. 'talkback_accessible')
        self.characteristics = {}
        self.parent = parent
        self.children = []
        # log to track decisions about talkback accessibility and checks
        self.log = {'talkback_accessible': [], 'checks': []}
        # collects results of check results for individual nodes
        self.checker = Node_Checker(self)
# checks if coords are within this node's boundries
def contains_coords(self,coords):
bounds = self.get_bounds()
#print("coords: "+str(coords))
#print("bounds: "+str(bounds))
if coords['x'] >= bounds['left'] and coords['x'] <= bounds['right'] and \
coords['y'] >= bounds['top'] and coords['y'] <= bounds['bottom']:
return True
else:
return False
@staticmethod
def print_header(table_type,fd):
if table_type=="BY_NODE":
fd.write("app_id,trace_id,view_id,node_id,class,is_clickable,android_widget,ad,")
if table_type == "IMAGE_NODE":
fd.write("app_id,trace_id,view_id,node_id,class,is_clickable,android_widget,ad,text,cont_desc,label,")
    def print_table(self, table_type, fd):
        """Append this node's CSV row (matching print_header's columns) to fd.

        "BY_NODE" writes id/class/clickable/widget/ad plus the checker's
        columns; "IMAGE_NODE" additionally writes text, content description
        and speakable label (quoted, newlines replaced by '::', quotes by '*').
        """
        if table_type == "BY_NODE":
            k = self.raw_properties.keys()
            # ID column
            if 'resource-id' in k:
                fd.write(str(self.raw_properties['resource-id']))
            else:
                fd.write("None")
            # ,class_name,android_widget?,ad_widget?,checks
            fd.write(","+str(self.raw_properties['class'])+"," +str(self.is_clickable())+","+\
                str(self.is_android_default_widget())+","+str(self.is_ads_widget())+",")
            self.checker.print_table(table_type,fd)
            fd.write("\n")
        if table_type == "IMAGE_NODE":
            k = self.raw_properties.keys()
            # ID column
            if 'resource-id' in k:
                fd.write(str(self.raw_properties['resource-id']))
            else:
                fd.write("None")
            # ,class_name,android_widget?,ad_widget?,text,cont_desc,label,checks
            # apparently only the first line of the label prints, which is fine
            fd.write(","+str(self.raw_properties['class'])+"," +str(self.is_clickable())+","+\
                str(self.is_android_default_widget())+","+str(self.is_ads_widget())+"," +\
                "\""+str(self.get_textfield()).replace("\n","::").replace("\"","*")+"\","+\
                "\""+str(self.get_cont_desc()).replace("\n","::").replace("\"","*")+"\","+\
                "\""+str(self.get_speakable_text()).replace("\n","::").replace("\"","*")+"\",")
            self.checker.print_table(table_type,fd)
            fd.write("\n")
    def print(self, fd):
        """Write a human-readable, depth-indented report of this node to fd.

        The report covers resource id, class, ad/widget status, bounds, text,
        label, the talkback-accessibility verdict with its logged reasons,
        the checks log, and the checker's BY_NODE row.
        """
        k = self.raw_properties.keys()
        self.__print_level(fd)
        fd.write("##########\n")
        # resource id
        self.__print_level(fd)
        if 'resource-id' in k:
            fd.write("id: " + str(self.raw_properties['resource-id'] +"\n"))
        else:
            fd.write("no resource id\n")
        # class
        self.__print_level(fd)
        if 'class' in k:
            fd.write("class: "+str(self.raw_properties['class'])+"\n")
        else:
            fd.write('no class\n')
        self.__print_level(fd)
        fd.write("ad: "+str(self.is_ads_widget())+"\n")
        self.__print_level(fd)
        fd.write("widget: "+str(self.is_android_default_widget())+"\n")
        # bounds
        self.__print_level(fd)
        fd.write("bounds: "+str(self.get_bounds())+"\n")
        # text, if applicable, to help identify
        if 'text' in self.raw_properties.keys():
            self.__print_level(fd)
            try:
                fd.write("text: " + str(self.raw_properties['text'])+"\n")
            except UnicodeEncodeError:
                fd.write("text: undefined unicode\n")
        self.__print_level(fd)
        try:
            fd.write("label: " + str(self.get_speakable_text())+"\n")
        except UnicodeEncodeError:
            fd.write("label: undefined unicode\n")
        # talkback accessible criteria
        self.__print_level(fd)
        fd.write("talkback_accessible: " + str(self.is_talkback_accessible())+"\n")
        # print talkback accessible log (deduplicated, order not preserved)
        for entry in set(self.log['talkback_accessible']):
            self.__print_level(fd)
            try:
                fd.write("- "+str(entry)+"\n")
            except UnicodeEncodeError:
                fd.write("-: undefined unicode\n")
        # print checks log (deduplicated, order not preserved)
        for entry in set(self.log['checks']):
            self.__print_level(fd)
            fd.write("- "+str(entry)+"\n")
        self.checker.print_header(fd)
        fd.write("\n")
        self.checker.print_table("BY_NODE",fd)
        fd.write('\n\n')
def __print_children(self):
for c in self.children:
if len(c.children) > 0:
c.__print_children()
self.__print_level()
try:
print("- child: "+str(c.get_resource_id()) +" "+str(c.get_speakable_text()))
except UnicodeEncodeError:
print("- child: "+str(c.get_resource_id()) + " undefined unicode")
def __print_level(self,fd):
for i in range(0,self.level):
fd.write("\t ")
fd.write("++ ")
##############
#### Getters and Setters
##############
def get_resource_id(self):
k=self.raw_properties.keys()
if 'resource-id' in k:
return self.raw_properties['resource-id']
else:
return None
    def add_child(self, child):
        """Append *child* (a Node) to this node's children list."""
        self.children.append(child)
# so don't have to remember what order they are in in the json
def get_bounds(self):
bounds = self.raw_properties['bounds']
# the coordinates start in upper left of screen so 0,0 is left, upper-most point
return {"left":bounds[0], "top":bounds[1], "right":bounds[2], "bottom":bounds[3]}
def is_talkback_accessible(self):
if not 'talkback_accessible' in self.characteristics.keys():
self.characteristics['talkback_accessible'] = talkback_focus(self)
return self.characteristics['talkback_accessible']
def is_android_default_widget(self):
if not 'android_default_widget' in self.characteristics.keys():
self.characteristics['android_default_widget'] = self.__is_android_supported_widget()
return self.characteristics['android_default_widget']
def is_webview(self):
if self.raw_properties['class'] == "android.webkit.WebView":
return True
return False
# defined as having a class from the "android.widget", "android.appwidget",
# "android.inputmethodservice", "android.support", "android.view", "android.webkit"
# library as chosen from the
# https://github.com/google/Accessibility-Test-Framework-for-Android/blob/master/src/main/java/com/google/android/apps/common/testing/accessibility/framework/checks/ClassNameCheck.java
# V3.0 accessed 3_23_2018
# TODO
def __is_android_supported_widget(self):
node_class = self.raw_properties['class']
# classes appear to be android.widget.<widget>.<name>....
valid_ui_package_names = [
"android.app", "android.appwidget", "android.inputmethodservice",
"android.support", "android.view", "android.webkit", "android.widget"
]
for package in valid_ui_package_names:
if node_class.startswith(package):
return True
return False
def is_ads_widget(self):
if not 'ads_widget' in self.characteristics.keys():
self.characteristics['ads_widget'] = self.__is_ads_widget()
return self.characteristics['ads_widget']
def __is_ads_widget(self):
# appears the ads interface comes from library com.google.android.gms.ads
ads_library = "com.google.android.gms.ads"
node_class = self.raw_properties['class']
# multiple types of views/widgets for ads? so just check for library at beginning
# e.g. a class of com.google.android.gms.com.AdView should return true
if node_class[:len(ads_library)] == ads_library:
return True
return False
#################
##### TODO
###############
#TODO
# don't know what fields to look for
    def is_checkable(self):
        """Placeholder: checkable-state detection is not implemented (unknown
        source fields), so this always returns False."""
        return False
# best guess for now of catching webviews
# if it is or inherits from android.webkit.WebView
def has_webAction(self):
web_class = "android.webkit.WebView"
if self.raw_properties['class'] == web_class:
return True
if web_class in self.raw_properties['ancestors']:
return True
return False
###############
##### CHARACTERISTICS
###############
def is_editable_textview(self):
ancestors = self.raw_properties['ancestors']
if "android.widget.EditText" in ancestors:
return True
return False
def is_clickable(self):
#print("is clickable test")
k = self.raw_properties.keys()
has_clickable ='clickable' in k
has_long_clickable = 'content-desc' in k
# is clickable
if('clickable' in k):
if(self.raw_properties["clickable"]):
self.log['talkback_accessible'].append("node is clickable")
return True
# long clickable
if('long-clickable' in k):
if(self.raw_properties["long-clickable"]):
self.log['talkback_accessible'].append("node is long clickable")
return True
return False
# return if node is visible
# OLD: currently defined as the "visibile to user" property is "True"
# defined as visibility == VISIBLE based on Accessibility-test-framework...ViewAccessibilityUtils...405
def is_visible(self):
#print("is visible test")
k = self.raw_properties.keys()
# if ('visible-to-user' in k):
# TODO: figure out all the meaning of the different
# possible values of 'visibility'
# self.log['talkback_accessible'].append('visible-to-user: ' + str(self.raw_properties['visible-to-user']))
# if (self.raw_properties['visible-to-user']):
# return True
if ('visibility' in k):
# TODO: figure out all the meaning of the different
# possible values of 'visibility'
self.log['talkback_accessible'].append('visibility: ' + str(self.raw_properties['visibility']))
if (self.raw_properties['visibility'] == 'visible'):
return True
# if the node doesn't have a visibility property
# or if that property is not set to "visible"
# it is not a visible node
else:
self.log['talkback_accessible'].append('no visibility tag')
return False
def has_non_zero_dimensions(self):
bounds = self.get_bounds()
# not sure of case where it's only zero in one dimension
if bounds['left'] == bounds['right'] and bounds['top'] == bounds['bottom']:
self.log['talkback_accessible'].append("zero dimensions")
return False
return True
def is_actionable(self):
#print("is actionable test")
if self.is_clickable():
self.log['talkback_accessible'].append("is clickable")
return True
elif self.is_focusable():
self.log['talkback_accessible'].append("is focusable")
return True
# TODO
# elif web thing
self.log['talkback_accessible'].append("not actionable")
return False
def is_focusable(self):
#print("is focusable test")
result = self.raw_properties["focusable"]
if result:
self.log['talkback_accessible'].append("focusable")
else:
self.log['talkback_accessible'].append("not focusable")
return result
    def is_top_level_scrollable(self):
        """Heuristic port of TalkBack's "top-level scrollable item" test.

        True when the node itself scrolls, when its grandparent is a pager,
        or when its direct parent has a list/grid/scroll-view role. The
        parent's role is appended to the 'talkback_accessible' log.
        """
        if self == None:
            return False
        if self.parent == None:
            # not a child of anything
            return False
        if self.is_scrollable():
            return True
        # top-level items in a scrolling pager are actually two levels down since the first
        # level items in pagers are the pages themselves
        grandparent = self.parent.parent
        if (grandparent != None):
            if grandparent.get_role() == "ROLE_PAGER":
                return True
        parent_role = self.parent.get_role()
        # Note that ROLE_DROP_DOWN_LIST(Spinner) is not accepted.
        # TODO: RecyclerView should be classified as a list or grid based on its CollectionInfo.
        result = (parent_role == "ROLE_LIST" or parent_role == "ROLE_GRID" or parent_role =="ROLE_SCROLL_VIEW" \
            or parent_role == "ROLE_HORIZONTAL_SCROLL_VIEW")
        self.log['talkback_accessible'].append("role: "+str(parent_role))
        return result
# don't know if right definition since in talkback specifies as "scroll forward" and "scroll backward"
# but in data only have "horizontal" and "vertical" scroll
def is_scrollable(self):
k = self.raw_properties.keys()
if "scrollable-vertical" in k:
if self.raw_properties["scrollable-vertical"]:
self.log['talkback_accessible'].append("scrollable-vertical")
return True
if "scrollable-horizontal" in k:
if self.raw_properties["scrollable-horizontal"]:
self.log['talkback_accessible'].append("scrollable-horizontal")
return True
return False
#
def has_visible_children(self):
#print("has visible children test")
for child in self.children:
if child.is_visible():
return True
return False
# returns if the node is talkback speaking
# based on if it's speakable text is null
# this includes speakable text from children
def is_speaking(self):
return (not self.get_speakable_text() == None)
def has_focusable_ancestors(self):
#print("has focusable ancestor test")
ancestor = self.parent
while ancestor:
if talkback_focus(ancestor, check_children=False):
return True
ancestor = ancestor.parent
return False
###########
## Getters
##########
'''
A node is speakable if:
1. has text or content description
2. is checkable (checkbox)
3. has web content
4. has non-actionable speaking children
# will return text either from self or children, or None if no text
# was is _speaking
'''
    def get_speakable_text(self):
        """Return the text TalkBack would speak for this node, or None.

        Prefers the node's own text/content description; otherwise falls back
        to text contributed by non-actionable speaking children.
        TODO: checkable state and web content are not handled yet.
        """
        text = self.get_self_text()
        if text != None:
            return text
        # No own label: look for a non-actionable speaking child.
        text = self.get_non_actionable_speaking_children_text()
        return text
# defined as having none Null or zero length Text or Content Description
# TODO:
# * For the purposes of this check, any node with a CollectionInfo is considered to not have
# * text since its text and content description are used only for collection transitions.
# returns a node's text, or cont-descr, if available, else, returns None
    def get_self_text(self):
        """Return this node's own text, falling back to its content
        description; None when neither is present. Logs whether the node has
        its own label."""
        text = None
        text = self.get_textfield()
        if text == None:
            text = self.get_cont_desc()
        self.log['talkback_accessible'].append("has own label: "+str(not text == None))
        return text
# need for other check so make public
    def get_cont_desc(self):
        """Return the node's first content-description entry, or None when
        absent or empty. Logs the raw value when the property is present.

        Public because other checks need it too.
        """
        cont_desc = None
        k = self.raw_properties.keys()
        has_content_desc = 'content-desc' in k
        if has_content_desc:
            self.log['talkback_accessible'].append("has cont desc: "+str(self.raw_properties['content-desc'][0]))
            # content-desc is stored as a list; only the first entry is used
            has_content_desc = not self.__is_empty(self.raw_properties["content-desc"][0])
        if has_content_desc:
            cont_desc = self.raw_properties["content-desc"][0]
        return cont_desc
## based on talkback
    def get_non_actionable_speaking_children_text(self):
        """Return speakable text from the first child that is visible, not
        independently focusable, and speaking; None when no child qualifies.

        Based on TalkBack's behavior.
        """
        for child in self.children:
            # focusable children speak for themselves; skip them
            if child.is_focusable():
                continue
            # invisible children contribute nothing
            if not child.is_visible():
                continue
            # recursive: a child's text may in turn come from its own children
            child_text = child.get_speakable_text()
            if child_text != None:
                self.log['talkback_accessible'].append("has nonactionable speaking children")
                return child_text
        # leaf node, or no qualifying child
        return None
# based on Accessibility-Test_Framework-For-Android
# def get_non_actionable_speaking_children_ATF(self):
# for child in self.children:
# if (not child.is_visible()) or child.is_accessibility_focusable_ATF():
#Role.java l213 2.14.2018 Talkback
## SHOULD BE LOOKING AT JUST CLASS OR INHERITANCE!?
def get_role(self):
    """Map this node's widget class name to a Talkback role constant.

    Mirrors Role.java from Talkback (2018-02-14). The original Java
    checks subclasses before superclasses, but since every comparison
    here is exact string equality on the 'class' property, an exact-match
    table is behavior-identical and order-independent.

    Returns:
        A "ROLE_*" string; "ROLE_NONE" when the node is None, has no
        'class' property, or the class is unrecognized.
    """
    if self is None:
        return "ROLE_NONE"
    if 'class' not in self.raw_properties:
        return "ROLE_NONE"
    node_class = self.raw_properties['class']
    # ImageView's role depends on clickability, so it can't live in the table.
    if node_class == "android.widget.ImageView":
        return "ROLE_IMAGE_BUTTON" if self.is_clickable() else "ROLE_IMAGE"
    role_by_class = {
        # Subclasses of TextView. Inheritance: View->TextView->Button->CompoundButton->Switch
        "android.widget.Switch": "ROLE_SWITCH",
        "android.widget.ToggleButton": "ROLE_TOGGLE_BUTTON",
        "android.widget.RadioButton": "ROLE_RADIO_BUTTON",
        "android.widget.CompoundButton": "ROLE_CHECKBOX",
        "android.widget.Button": "ROLE_BUTTON",
        "android.widget.CheckedTextView": "ROLE_CHECKED_TEXT_VIEW",
        "android.widget.EditText": "ROLE_EDIT_TEXT",
        # Subclasses of ProgressBar
        "android.widget.SeekBar": "ROLE_SEEK_CONTROL",
        "android.widget.ProgressBar": "ROLE_PROGRESS_BAR",
        "android.inputmethodservice.Keyboard.Key": "ROLE_KEYBOARD_KEY",
        # Subclasses of ViewGroup
        "android.webkit.WebView": "ROLE_WEB_VIEW",
        "android.widget.TabWidget": "ROLE_TAB_BAR",
        "android.widget.HorizontalScrollView": "ROLE_HORIZONTAL_SCROLL_VIEW",
        "android.widget.ScrollView": "ROLE_SCROLL_VIEW",
        # Inheritance: View->ViewGroup->ViewPager
        "android.support.v4.view.ViewPager": "ROLE_PAGER",
        "android.widget.Spinner": "ROLE_DROP_DOWN_LIST",
        "android.widget.GridView": "ROLE_GRID",
        "android.widget.AbsListView": "ROLE_LIST",
        "android.view.ViewGroup": "ROLE_VIEW_GROUP",
    }
    # TODO (Role.java l339): a node with a CollectionInfo should be
    # classified ROLE_GRID when rowCount > 1 and columnCount > 1,
    # otherwise ROLE_LIST (covers RecyclerView). Not implemented here
    # because CollectionInfo is not exposed by raw_properties.
    return role_by_class.get(node_class, "ROLE_NONE")
#### Private Getters
def get_textfield(self):
    """Return the node's 'text' property, or None when the property is
    absent, None, or zero-length.

    Logs the raw value whenever the property exists; logs "no text"
    whenever no usable text is returned.
    """
    props = self.raw_properties
    if 'text' in props:
        value = props['text']
        self.log['talkback_accessible'].append("has text: " + value)
        # Emptiness test inlined from the private __is_empty helper.
        if not (value is None or len(value) == 0):
            return value
    self.log['talkback_accessible'].append("no text")
    return None
######################
##### HELPER
######################
def __is_empty(self, value):
    """Return True when *value* is None or has zero length.

    The parameter was renamed from ``str`` — it shadowed the builtin.
    Both visible call sites pass the argument positionally, so the
    rename is interface-compatible.
    """
    return value is None or len(value) == 0
# NOTE(review): removed trailing dataset-viewer residue ("Subsets and Splits"
# table text) that was accidentally pasted here — it was not valid Python and
# would raise a SyntaxError on import.