code
stringlengths 1
199k
|
|---|
import argparse
import codonpdx.calc
import codonpdx.clearresults
import codonpdx.count
import codonpdx.insert
import codonpdx.mirror
import codonpdx.queueJobs
import sys
# ---------------------------------------------------------------------------
# Command-line interface for codonpdx.
#
# One sub-parser is built per sub-command; each sub-command registers its
# implementation through set_defaults(func=...) and the selected function is
# invoked with the parsed argument namespace at the bottom of the script.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(prog='codonpdx',
                                 description='Codonpdx command line utility.')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
subparsers = parser.add_subparsers(help='Sub-command descriptions:')

# -- count: codon-count a sequence file and emit JSON -----------------------
parserCount = subparsers.add_parser(
    'count',
    help='Count the codons of a file and produce JSON '
         'output containing the results.'
)
parserCount.add_argument(
    '-i',
    '--infile',
    nargs='?',
    # 'rU' (universal newlines) was deprecated and removed in Python 3.11;
    # text-mode 'r' already performs universal newline translation.
    type=argparse.FileType('r'),
    default=sys.stdin,
    help='A file containing sequence data.'
)
parserCount.add_argument(
    '-g',
    '--gzip',
    action='store_true',
    default=False,
    help='Indicates the input is gzipped.'
)
parserCount.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
         'the results table.'
)
parserCount.add_argument(
    '-f',
    '--format',
    choices=['fasta', 'genbank'],
    help='The file format.'
)
parserCount.add_argument(
    '-p',
    '--pretty',
    action='store_true',
    help='Print the JSON in a pretty, more human-readable way.'
)
parserCount.add_argument(
    '-o',
    '--output',
    nargs='?',
    type=argparse.FileType('w'),
    default=sys.stdout,
    help='Where to place the output JSON.'
)
parserCount.add_argument(
    '-s',
    '--shuffle',
    action='store_true',
    default=False,
    help='Indicates whether to generate a shuffled codon count'
)
parserCount.set_defaults(
    func=codonpdx.count.count
)

# -- insert: load codon-count JSON into a sequence database -----------------
parserLoadDB = subparsers.add_parser(
    'insert',
    help='Insert organism codon count JSON information into a sequence '
         'database.'
)
parserLoadDB.add_argument(
    '-d',
    '--dbname',
    choices=['refseq', 'genbank'],
    help='The database table to store the count information in.'
)
parserLoadDB.add_argument(
    '-i',
    '--infile',
    nargs='?',
    type=argparse.FileType('r'),
    default=sys.stdin,
    help='The file to read the JSON data from. Defaults to standard input.'
)
parserLoadDB.set_defaults(
    func=codonpdx.insert.insert
)

# -- insertInput: load codon-count JSON into the input table ----------------
# NOTE: previously this reused the parserLoadDB variable; a distinct name
# keeps the 'insert' and 'insertInput' sub-parsers from shadowing each other.
parserLoadInput = subparsers.add_parser(
    'insertInput',
    help='Insert organism codon count JSON information into the input.'
)
parserLoadInput.add_argument(
    '-i',
    '--infile',
    nargs='?',
    type=argparse.FileType('r'),
    default=sys.stdin,
    help='The file to read the JSON data from. Defaults to standard input.'
)
parserLoadInput.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
         'the results table.'
)
parserLoadInput.set_defaults(
    func=codonpdx.insert.insertinput
)

# -- calc: score one organism against a sequence database -------------------
parserCalcScore = subparsers.add_parser(
    'calc',
    help='Compare an organism to all other organisms in a given sequence '
         'database.'
)
parserCalcScore.add_argument(
    '-d',
    '--dbname',
    choices=['refseq', 'genbank'],
    help='The sequence database to compare the organism to.'
)
parserCalcScore.add_argument(
    '-v',
    '--virus',
    required=True,
    help='The accession.version number of the organism to compare.'
)
parserCalcScore.add_argument(
    '-w',
    '--virusdb',
    choices=['input', 'refseq', 'genbank'],
    default='input',
    help='The database table where the input virus resides.'
)
parserCalcScore.add_argument(
    '-o',
    '--output',
    action='store_true',
    help='Output scores to stdout instead of storing in the results table.'
)
parserCalcScore.add_argument(
    '-j',
    '--job',
    required=True,
    help='The UUID for the job if this process is placing its results into '
         'the results table.'
)
parserCalcScore.set_defaults(
    func=codonpdx.calc.calc
)

# -- mirror: sync a remote codon repository ---------------------------------
parserMirror = subparsers.add_parser(
    'mirror',
    help='Mirror remote codon repository'
)
parserMirror.add_argument(
    '-d',
    '--dbname',
    required=True,
    choices=['refseq', 'genbank'],
    help='The repository to mirror'
)
parserMirror.set_defaults(
    func=codonpdx.mirror.mirror
)

# -- queueJobs: batch count/load of a repository ----------------------------
parserQueueJobs = subparsers.add_parser(
    'queueJobs',
    help='Count and load repository codon counts into postgres'
)
parserQueueJobs.add_argument(
    '-d',
    '--dbname',
    required=True,
    choices=['refseq', 'genbank'],
    help='The repository to parse'
)
parserQueueJobs.add_argument(
    '-f',
    '--format',
    choices=['fasta', 'genbank'],
    help='The file format.'
)
parserQueueJobs.set_defaults(
    func=codonpdx.queueJobs.queueJobs
)

# -- clearResults: prune old result rows ------------------------------------
parserClearResults = subparsers.add_parser(
    'clearResults',
    help='clear the results table of data older than a week'
)
parserClearResults.add_argument(
    '-d',
    '--days',
    type=int,
    default=7,
    help='Number of days to remove, default: 7'
)
parserClearResults.set_defaults(
    func=codonpdx.clearresults.clear
)

args = parser.parse_args()
# On Python 3, sub-commands are optional by default; without this guard a
# bare `codonpdx` invocation would crash with AttributeError on args.func.
if not hasattr(args, 'func'):
    parser.error('a sub-command is required')
args.func(args)
|
import os
from libs import constants
from _ctypes import Array
def BuildLogfile(h):
    """Build and return the path of a log file for handle *h*.

    Creates constants.LOGDIR on first use.  The file-name component is
    chosen by constants.LOGFILE_NAME: a random 12-digit hex string
    ('RANDOM_STRING') or a timestamp ('DATE').

    Raises SystemExit when constants.LOGFILE_NAME holds any other value.
    """
    if not os.path.isdir(constants.LOGDIR):
        print('INFO: create a folder ' + constants.LOGDIR + ' to store logging files')
        os.makedirs(constants.LOGDIR)
    if constants.LOGFILE_NAME == 'RANDOM_STRING':
        import random
        # %012x zero-pads instead of space-padding, so the name is always
        # exactly 12 characters (the old '%12x'+strip() produced shorter,
        # variable-length names for small random values).
        logname = '%012x' % random.randrange(16 ** 12)
    elif constants.LOGFILE_NAME == 'DATE':
        import datetime
        logname = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    else:
        # raise SystemExit rather than exit(): exit() is injected by the
        # site module and is not guaranteed to exist in every interpreter.
        raise SystemExit('ERROR: ' + constants.LOGFILE_NAME +
                         ' is not an expected value for constant "LOGFILE_NAME"...')
    logfile = constants.LOGDIR + '/' + h + '_' + logname + constants.LOGFILE_EXTENSION
    return logfile
def CleanComments(array):
    """Return *array* with comment lines and blank entries removed.

    An entry is a comment when its FIRST character is '#' (leading spaces
    are not skipped — this matches the original behaviour).  Surviving
    entries have surrounding spaces stripped; entries that become empty
    after stripping are dropped too.

    Rewritten as a single pass: the original used repeated
    list.remove('') inside bare try/except loops (quadratic, and the
    bare except could swallow unrelated errors) plus an unused
    'cleaning' flag.
    """
    cleaned = []
    for item in array:
        # Skip entries that are already empty and whole-line comments.
        if item == '' or item.startswith('#'):
            continue
        stripped = item.strip(' ')
        # Entries made only of spaces vanish after stripping.
        if stripped:
            cleaned.append(stripped)
    return cleaned
|
"""A WebSocket handler for Treadmill state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from treadmill import schema
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)
class IdentityGroupAPI(object):
    """Handler for /identity-groups topic.
    """

    def __init__(self):
        # Subscription messages are validated against the JSON schema
        # before a watch filter is derived from them.
        @schema.schema({'$ref': 'websocket/identity_group.json#/message'})
        def subscribe(message):
            """Return filter based on message payload.

            Watches /identity-groups/<group> with a '*' pattern; when the
            message names no group, every group ('*') is watched.
            """
            identity_group = message.get('identity-group', '*')
            return [(os.path.join('/identity-groups', identity_group), '*')]

        def on_event(filename, operation, content):
            """Event handler.

            Translates a watched-file event into a websocket message dict,
            or returns None for paths outside /identity-groups/.
            """
            if not filename.startswith('/identity-groups/'):
                return
            # A None operation marks a state-of-world (sow) replay event.
            sow = operation is None
            # Path layout is /identity-groups/<group>/<identity>.
            full_identity = filename[len('/identity-groups/'):]
            identity_group, identity = full_identity.rsplit('/', 1)
            message = {
                'topic': '/identity-groups',
                'identity-group': identity_group,
                'identity': int(identity),
                'app': None,
                'host': None,
                'sow': sow
            }
            # Any YAML payload (presumably app/host assignment — confirm
            # against the writer) is merged over the defaults above.
            if content:
                message.update(yaml.load(content))
            return message

        # Expose the closures as this handler's public interface.
        self.subscribe = subscribe
        self.on_event = on_event
def init():
    """API module init.

    Registers one handler instance for the /identity-groups topic.
    """
    handler = IdentityGroupAPI()
    return [('/identity-groups', handler, [])]
|
import logging
from airflow.configuration import conf
# When verbose logging is enabled for the Google providers, mirror the root
# logger's "task"/"console" handlers onto the low-level HTTP client loggers
# and raise them to DEBUG so wire traffic shows up in the task logs.
PROVIDERS_GOOGLE_VERBOSE_LOGGING: bool = conf.getboolean('providers_google',
                                                         'VERBOSE_LOGGING', fallback=False)
if PROVIDERS_GOOGLE_VERBOSE_LOGGING:
    # Hoisted once: the relevant root handlers are the same for every logger.
    _shared_handlers = [handler for handler in logging.getLogger().handlers
                        if handler.name in ["task", "console"]]
    for logger_name in ["google_auth_httplib2", "httplib2", "googleapiclient"]:
        logger = logging.getLogger(logger_name)
        logger.handlers += _shared_handlers
        logger.level = logging.DEBUG
        # Handlers are attached directly, so stop propagation to the root
        # to avoid duplicate log lines.
        logger.propagate = False
    import httplib2
    httplib2.debuglevel = 4
|
class Solution:
    def maxDistToClosest(self, seats: List[int]) -> int:
        """Return the largest possible distance to the closest occupied seat.

        seats is a non-empty 0/1 list containing at least one 1.  A leading
        or trailing run of empty seats contributes its full length (sit at
        the end); an interior gap contributes half its length, rounded
        down (sit at its midpoint).

        Fixes the original, which also fed the FULL length of interior
        gaps into the maximum (e.g. [1,0,0,0,1] returned 3 instead of 2).
        """
        best = 0
        prev = -1  # index of the last occupied seat seen; -1 before any
        for i, seat in enumerate(seats):
            if seat == 1:
                if prev == -1:
                    # Leading empty run: best seat is index 0.
                    best = max(best, i)
                else:
                    # Interior gap: best seat is the midpoint.
                    best = max(best, (i - prev) // 2)
                prev = i
        # Trailing empty run: best seat is the last index.
        best = max(best, len(seats) - 1 - prev)
        return best
|
"""Cutting stock problem with the objective to minimize wasted space."""
import argparse
import collections
import time
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
# Command-line options: pick the solving back-end and optionally dump the
# CP-SAT model proto to a file.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    '--solver', default='sat', help='Method used to solve: sat, mip.')
PARSER.add_argument(
    '--output_proto_file',
    default='',
    help='Output file to write the cp_model proto to.')
# Requested cut lengths, one entry per piece (duplicates allowed; the
# solver later regroups them into a (size, count) multiset).
DESIRED_LENGTHS = [
    2490, 3980, 2490, 3980, 2391, 2391, 2391, 596, 596, 596, 2456, 2456, 3018,
    938, 3018, 938, 943, 3018, 943, 3018, 2490, 3980, 2490, 3980, 2391, 2391,
    2391, 596, 596, 596, 2456, 2456, 3018, 938, 3018, 938, 943, 3018, 943,
    3018, 2890, 3980, 2890, 3980, 2391, 2391, 2391, 596, 596, 596, 2856, 2856,
    3018, 938, 3018, 938, 943, 3018, 943, 3018, 3290, 3980, 3290, 3980, 2391,
    2391, 2391, 596, 596, 596, 3256, 3256, 3018, 938, 3018, 938, 943, 3018,
    943, 3018, 3690, 3980, 3690, 3980, 2391, 2391, 2391, 596, 596, 596, 3656,
    3656, 3018, 938, 3018, 938, 943, 3018, 943, 3018, 2790, 3980, 2790, 3980,
    2391, 2391, 2391, 596, 596, 596, 2756, 2756, 3018, 938, 3018, 938, 943,
    3018, 943, 3018, 2790, 3980, 2790, 3980, 2391, 2391, 2391, 596, 596, 596,
    2756, 2756, 3018, 938, 3018, 938, 943
]
# Stock (bin) lengths that can be purchased.
POSSIBLE_CAPACITIES = [4000, 5000, 6000, 7000, 8000]
def regroup_and_count(raw_input):
    """Regroup all equal capacities in a multiset.

    Returns a list of [size, count] pairs sorted by increasing size.
    Uses collections.Counter instead of the hand-rolled defaultdict +
    append + sort sequence.
    """
    grouped = collections.Counter(raw_input)
    return sorted([size, count] for size, count in grouped.items())
def price_usage(usage, capacities):
    """Compute the best price for a given usage and possible capacities.

    The price is the smallest waste (capacity - usage) over capacities
    that can hold the usage; max(capacities) is the seed value, so a
    usage exceeding every capacity yields max(capacities) unchanged.
    """
    wastes = [capacity - usage for capacity in capacities if capacity >= usage]
    return min([max(capacities)] + wastes)
def create_state_graph(items, max_capacity):
    """Create a state graph from a multiset of items, and a maximum capacity.

    States are reachable partial loads (state 0 is the empty load);
    transitions are [from_index, to_index, item_index, multiplicity]
    entries meaning "add `multiplicity` copies of item `item_index`".
    States and transitions are emitted in discovery order.
    """
    states = [0]
    state_to_index = {0: 0}
    transitions = []
    for item_index, (size, count) in enumerate(items):
        # range(len(states)) is evaluated once, so states discovered while
        # processing this item are not revisited for the same item.
        for from_index in range(len(states)):
            base_load = states[from_index]
            for multiplicity in range(1, count + 1):
                new_state = base_load + size * multiplicity
                if new_state > max_capacity:
                    break
                if new_state in state_to_index:
                    to_index = state_to_index[new_state]
                else:
                    # First time we reach this load: register a new state.
                    to_index = len(states)
                    states.append(new_state)
                    state_to_index[new_state] = to_index
                transitions.append([from_index, to_index, item_index, multiplicity])
    return states, transitions
def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file):
    """Solve the cutting stock with arc-flow and the CP-SAT solver.

    Builds a flow network whose states are reachable partial bin loads;
    each unit of flow from the source (state 0) to the sink is one bin,
    priced by the wasted space of its final load.

    Args:
      output_proto_file: optional path; when non-empty the cp_model proto
        is written there before solving.
    """
    items = regroup_and_count(DESIRED_LENGTHS)
    print('Items:', items)
    num_items = len(DESIRED_LENGTHS)
    max_capacity = max(POSSIBLE_CAPACITIES)
    states, transitions = create_state_graph(items, max_capacity)
    print('Dynamic programming has generated', len(states), 'states and',
          len(transitions), 'transitions')
    incoming_vars = collections.defaultdict(list)
    outgoing_vars = collections.defaultdict(list)
    incoming_sink_vars = []
    item_vars = collections.defaultdict(list)
    item_coeffs = collections.defaultdict(list)
    model = cp_model.CpModel()
    objective_vars = []
    objective_coeffs = []
    # One integer variable per arc: how many bins use this transition.
    # (The original also kept a transition_vars list that was never read.)
    for outgoing, incoming, item_index, card in transitions:
        count = items[item_index][1]
        max_count = count // card
        count_var = model.NewIntVar(
            0, max_count,
            'i%i_f%i_t%i_C%s' % (item_index, incoming, outgoing, card))
        incoming_vars[incoming].append(count_var)
        outgoing_vars[outgoing].append(count_var)
        item_vars[item_index].append(count_var)
        item_coeffs[item_index].append(card)
    # Exit arcs: closing a bin at this state costs its wasted space.
    for state_index, state in enumerate(states):
        if state_index == 0:
            continue
        exit_var = model.NewIntVar(0, num_items, 'e%i' % state_index)
        outgoing_vars[state_index].append(exit_var)
        incoming_sink_vars.append(exit_var)
        price = price_usage(state, POSSIBLE_CAPACITIES)
        objective_vars.append(exit_var)
        objective_coeffs.append(price)
    # Flow conservation
    for state_index in range(1, len(states)):
        model.Add(
            sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index]))
    # Flow going out of the source must go in the sink
    model.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars))
    # Items must be placed
    for item_index, size_and_count in enumerate(items):
        num_arcs = len(item_vars[item_index])
        model.Add(
            sum(item_vars[item_index][i] * item_coeffs[item_index][i]
                for i in range(num_arcs)) == size_and_count[1])
    # Objective is the sum of waste
    model.Minimize(
        sum(objective_vars[i] * objective_coeffs[i]
            for i in range(len(objective_vars))))
    # Output model proto to file.  'with' guarantees the handle is closed
    # even if the write raises (the original leaked it on error).
    if output_proto_file:
        with open(output_proto_file, 'w') as output_file:
            output_file.write(str(model.Proto()))
    # Solve model.
    solver = cp_model.CpSolver()
    solver.parameters.log_search_progress = True
    solver.parameters.num_search_workers = 8
    solver.Solve(model)
    print(solver.ResponseStats())
def solve_cutting_stock_with_arc_flow_and_mip():
    """Solve the cutting stock with arc-flow and a MIP solver.

    Same arc-flow formulation as the CP-SAT variant: one integer arc
    variable per transition, exit arcs priced by wasted space, flow
    conservation at every state, and demand constraints per item.
    """
    items = regroup_and_count(DESIRED_LENGTHS)
    print('Items:', items)
    num_items = len(DESIRED_LENGTHS)
    max_capacity = max(POSSIBLE_CAPACITIES)
    states, transitions = create_state_graph(items, max_capacity)
    print('Dynamic programming has generated', len(states), 'states and',
          len(transitions), 'transitions')
    incoming_vars = collections.defaultdict(list)
    outgoing_vars = collections.defaultdict(list)
    incoming_sink_vars = []
    item_vars = collections.defaultdict(list)
    item_coeffs = collections.defaultdict(list)
    start_time = time.time()
    solver = pywraplp.Solver('Steel',
                             pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
    objective_vars = []
    objective_coeffs = []
    # One integer arc variable per transition: how many bins follow it.
    for var_index, (outgoing, incoming, item_index, card) in enumerate(transitions):
        count = items[item_index][1]
        count_var = solver.IntVar(
            0, count, 'a%i_i%i_f%i_t%i_c%i' % (var_index, item_index, incoming,
                                               outgoing, card))
        incoming_vars[incoming].append(count_var)
        outgoing_vars[outgoing].append(count_var)
        item_vars[item_index].append(count_var)
        item_coeffs[item_index].append(card)
    # Exit arcs: closing a bin at a state costs that load's wasted space.
    for state_index, state in enumerate(states):
        if state_index:
            exit_var = solver.IntVar(0, num_items, 'e%i' % state_index)
            outgoing_vars[state_index].append(exit_var)
            incoming_sink_vars.append(exit_var)
            objective_vars.append(exit_var)
            objective_coeffs.append(price_usage(state, POSSIBLE_CAPACITIES))
    # Conservation of flow at every interior state.
    for state_index in range(1, len(states)):
        solver.Add(
            sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index]))
    # Everything leaving the source must end up at the sink.
    solver.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars))
    # Each item's demand must be fully placed.
    for item_index, (_, demand) in enumerate(items):
        solver.Add(
            sum(var * coeff
                for var, coeff in zip(item_vars[item_index],
                                      item_coeffs[item_index])) == demand)
    # Minimize total wasted space.
    solver.Minimize(
        sum(var * coeff
            for var, coeff in zip(objective_vars, objective_coeffs)))
    solver.EnableOutput()
    status = solver.Solve()
    ### Output the solution.
    if status == pywraplp.Solver.OPTIMAL:
        print('Objective value = %f found in %.2f s' %
              (solver.Objective().Value(), time.time() - start_time))
    else:
        print('No solution')
def main(args):
    """Main function: dispatch to the back-end chosen by --solver."""
    if args.solver == 'sat':
        solve_cutting_stock_with_arc_flow_and_sat(args.output_proto_file)
        return
    # Any other value falls through to the MIP back-end ('mip').
    solve_cutting_stock_with_arc_flow_and_mip()


if __name__ == '__main__':
    main(PARSER.parse_args())
|
# Minimal CGI web server: serves scripts from ./cgi on port 8080.
# BaseHTTPServer/CGIHTTPServer exist only on Python 2; on Python 3 the same
# classes live in http.server, so import whichever is available.
try:
    from http.server import HTTPServer, CGIHTTPRequestHandler  # Python 3
except ImportError:  # Python 2 fallback
    from BaseHTTPServer import HTTPServer
    from CGIHTTPServer import CGIHTTPRequestHandler
import cgitb
cgitb.enable()  # used for error log outputs

server = HTTPServer
handler = CGIHTTPRequestHandler
server_address = ("", 8080)
handler.cgi_directories = ["/cgi"]
httpd = server(server_address, handler)
# Parenthesized single-argument print works identically on Python 2 and 3.
print("Starting server....")
httpd.serve_forever()
|
'''
Created on Jun 4, 2016
@author: Debanjan.Mahata
'''
import requests
import json
import sys
import config
import yagmail
import csv
from time import sleep
from config import DEVICE_PLANT_MAPPING
from config import PLANT_DEVICE_MAPPING
from config import DEVICE_STALE_DATA_MINS
from config import CAM_STALE_DATA_MINS
from config import max_ph, min_ph
from config import max_ec, min_ec
from config import max_tfw, min_tfw
from config import max_tbw, min_tbw
from config import proxy
from config import DEVICE_CONN_NO_TRIES
from config import DEVICE_CONN_WAIT_TIME
from config import GROWTH_URL
from config import proxies
from dateutil import parser
from datetime import datetime
from utility import time_diff
from utility import to_datetime
from utility import variance
from datetime import timedelta
from config import report_emails
from config import admin_email, admin_passwd
class Plant:
    """Class representing individual plants monitored by the system.

    Holds the latest sensor readings (pH, EC, watering times), camera
    measurements (height/growth), their timestamps, and staleness /
    abnormality flags, with simple accessor methods for each.
    """

    def __init__(self, plant_id):
        #id allocated to the plant
        self.plant_id = plant_id
        #id of the photon device monitoring the plant
        self.plant_device_id = None
        #name of the photon device monitoring the plant
        self.plant_device_name = None
        #pH value of the solution containing the plant
        self.ph = None
        #Electrical Conductivity value of the solution containing the plant
        self.ec = None
        #Time between watering set for the plant
        self.tbw = None
        #Time for watering set for the plant
        self.tfw = None
        #Current height of the plant as captured by the camera
        self.current_height = None
        #Current growth of the plant as captured by the camera
        self.current_growth = None
        #Growth of the plant as captured on the previous day
        self.last_day_growth = None
        #Height of the plant as captured on the previous day
        self.last_day_height = None
        #Hourly growth difference
        self.hourly_growth_diff = None
        #Hourly height difference
        self.hourly_height_diff = None
        #Daily growth difference
        self.daily_growth_diff = None
        #Daily height difference
        self.daily_height_diff = None
        #Time when the factors of the system were manipulated
        self.last_time_manipulated = None
        #Time when the reading of the measurements were taken
        self.read_time = None
        #Time when the plant was last heard
        self.last_time_heard = None
        #Time when the camera shot was taken for measuring height and growth
        self.cam_shot_time = None
        #Indicates whether the reading currently recorded is stale due to
        #loss in connection
        self.stale_read_flag = False
        #Indicates whether the reading currently recorded is stale due to loss
        #in connection with the camera
        self.stale_cam_flag = False
        #Flag indicating whether the pH value recorded is abnormal
        self.abnormal_ph_val_flag = False
        #Flag indicating whether the EC value recorded is abnormal
        self.abnormal_ec_val_flag = False
        #Flag indicating whether the time between watering is abnormal
        self.abnormal_tbw_flag = False
        #Flag indicating whether the time for watering is abnormal
        self.abnormal_tfw_flag = False

    def get_plant_id(self):
        """Gets the plant id"""
        return self.plant_id

    def get_plant_device_id(self):
        """Gets the id of the photon device to which the plant is connected"""
        # Fixed: the original returned self.plant_device.get_device_id(),
        # but no plant_device attribute is ever set — __init__ stores the
        # id directly in plant_device_id, so reading it always raised
        # AttributeError.
        return self.plant_device_id

    def get_plant_device_name(self):
        """Gets the name of the device to which the plant is connected"""
        # Fixed for the same reason as get_plant_device_id.
        return self.plant_device_name

    def get_ph(self):
        """Gets the pH value of the solution for the plant"""
        return self.ph

    def set_ph(self, ph):
        """Sets the pH value of the solution for the plant"""
        self.ph = ph

    def get_ec(self):
        """Gets the EC value of the solution for the plant"""
        return self.ec

    def set_ec(self, ec):
        """Sets the EC value of the solution for the plant"""
        self.ec = ec

    def get_tbw(self):
        """Gets the time between water value for the plant"""
        return self.tbw

    def set_tbw(self, tbw):
        """Sets the time between water value for the plant"""
        self.tbw = tbw

    def get_tfw(self):
        """Gets the time for water value for the plant"""
        return self.tfw

    def set_tfw(self, tfw):
        """Sets the time for water value for the plant"""
        self.tfw = tfw

    def get_current_height(self):
        """Gets the current height for the plant"""
        return self.current_height

    def set_current_height(self, curr_height):
        """Sets the current height for the plant"""
        self.current_height = curr_height

    def get_current_growth(self):
        """Gets the current growth for the plant"""
        return self.current_growth

    def set_current_growth(self, curr_growth):
        """Sets the current growth for the plant"""
        self.current_growth = curr_growth

    def get_last_day_growth(self):
        """Gets the Growth of the plant as captured on the previous day """
        return self.last_day_growth

    def set_last_day_growth(self, last_day_growth):
        """Sets the Growth of the plant as captured on the previous day """
        self.last_day_growth = last_day_growth

    def get_last_day_height(self):
        """Gets the Height of the plant as captured on the previous day"""
        return self.last_day_height

    def set_last_day_height(self, last_day_height):
        """Sets the Height of the plant as captured on the previous day"""
        self.last_day_height = last_day_height

    def get_daily_growth_diff(self):
        """Gets the Daily growth difference"""
        return self.daily_growth_diff

    def set_daily_growth_diff(self, daily_growth_diff):
        """Sets the Daily growth difference"""
        self.daily_growth_diff = daily_growth_diff

    def get_daily_height_diff(self):
        """Gets the Daily height difference"""
        return self.daily_height_diff

    def set_daily_height_diff(self, daily_height_diff):
        """Sets the Daily height difference"""
        self.daily_height_diff = daily_height_diff

    def get_hourly_height_diff(self):
        """Gets the Hourly height difference"""
        return self.hourly_height_diff

    def set_hourly_height_diff(self, diff):
        """Sets the Hourly height difference"""
        self.hourly_height_diff = diff

    def get_hourly_growth_diff(self):
        """Gets the Hourly growth difference"""
        return self.hourly_growth_diff

    def set_hourly_growth_diff(self, diff):
        """Sets the Hourly growth difference"""
        self.hourly_growth_diff = diff

    def get_last_time_manipulated(self):
        """Gets the Time when the factors of the system were manipulated"""
        return self.last_time_manipulated

    def set_last_time_manipulated(self, date_time):
        """Sets the Time when the factors of the system were manipulated"""
        self.last_time_manipulated = date_time

    def get_read_time(self):
        """Gets the Time when the reading of the measurements were taken"""
        return self.read_time

    def set_read_time(self, last_time_read):
        """Sets the Time when the reading of the measurements were taken"""
        self.read_time = last_time_read

    def get_cam_shot_time(self):
        """Gets the Time when the camera shot was taken for measuring height
        and growth"""
        return self.cam_shot_time

    def set_cam_shot_time(self, date_time):
        """Sets the Time when the camera shot was taken for measuring height
        and growth"""
        self.cam_shot_time = date_time

    def get_last_time_heard(self):
        """Gets the Time when the plant was last heard"""
        return self.last_time_heard

    def set_last_time_heard(self, last_time_heard):
        """Sets the Time when the plant was last heard"""
        self.last_time_heard = last_time_heard

    def get_stale_read_flag(self):
        """Gets the flag indicating whether the reading currently recorded is
        stale due to loss in connection"""
        return self.stale_read_flag

    def set_stale_read_flag(self, flag):
        """Sets the flag indicating whether the reading currently recorded is
        stale due to loss in connection"""
        self.stale_read_flag = flag

    def get_stale_cam_flag(self):
        """Gets the flag indicating whether the reading currently recorded
        is stale due to loss in connectivity with the camera"""
        return self.stale_cam_flag

    def set_stale_cam_flag(self, flag):
        """Sets the flag indicating whether the reading currently recorded
        is stale due to loss in connectivity with the camera"""
        self.stale_cam_flag = flag

    def get_abnormal_ph_val_flag(self):
        """Gets the Flag indicating whether the pH value is abnormal"""
        return self.abnormal_ph_val_flag

    def set_abnormal_ph_val_flag(self, flag):
        """Sets the Flag indicating whether the pH value is abnormal"""
        self.abnormal_ph_val_flag = flag

    def get_abnormal_ec_val_flag(self):
        """Gets the Flag indicating whether the EC value is abnormal"""
        return self.abnormal_ec_val_flag

    def set_abnormal_ec_val_flag(self, flag):
        """Sets the Flag indicating whether the EC value is abnormal"""
        self.abnormal_ec_val_flag = flag

    def get_abnormal_tbw_flag(self):
        """Gets the Flag indicating whether the time between watering is abnormal"""
        return self.abnormal_tbw_flag

    def set_abnormal_tbw_flag(self, flag):
        """Sets the Flag indicating whether the time between watering is abnormal"""
        self.abnormal_tbw_flag = flag

    def set_abnormal_tfw_flag(self, flag):
        """Sets the Flag indicating whether the time for watering is abnormal"""
        self.abnormal_tfw_flag = flag

    def get_abnormal_tfw_flag(self):
        """Gets the Flag indicating whether the time for watering is abnormal"""
        return self.abnormal_tfw_flag
class MonitorPlantData:
"""Class representing and performing the regular monitoring of the plants
in the system"""
def __init__(self, devices, plants):
    #list of photon device objects connected to the system
    self.devices = devices
    #List of plant objects in the system
    self.plants = plants
    #device -> plants mapping copied from the configuration file
    self.DEVICE_PLANT_MAPPING = DEVICE_PLANT_MAPPING
    #plant -> device mapping copied from the configuration file
    self.PLANT_DEVICE_MAPPING = PLANT_DEVICE_MAPPING
    #list of photon devices connected to the cloud
    #(the four lists below are populated by plant_connectivity_check)
    self.connected_devices = []
    #list of photon device disconnected from the cloud
    self.disconnected_devices = []
    #list of plants connected to the cloud
    self.connected_plants = []
    #list of plants disconnected to the cloud
    self.disconnected_plants = []
def plant_connectivity_check(self):
    """Performs the connectivity check of the photon devices and the plants
    connected to them at a given instance. Groups the connected and
    disconnected plants and devices into separate lists"""
    for device in self.DEVICE_PLANT_MAPPING:
        reachable = self.check_device_connection(device)
        # Pick the bucket pair matching the probe result, then file the
        # device and all of its mapped plants there.
        if reachable:
            device_bucket = self.connected_devices
            plant_bucket = self.connected_plants
        else:
            device_bucket = self.disconnected_devices
            plant_bucket = self.disconnected_plants
        device_bucket.append(device)
        plant_bucket.extend(self.DEVICE_PLANT_MAPPING[device])
def hourly_monitor(self):
    """Performs the hourly monitoring and readings of the plants.

    Connected plants are read immediately.  Disconnected plants are
    retried with DEVICE_CONN_WAIT_TIME seconds between probes, up to
    DEVICE_CONN_NO_TRIES attempts, before being reported as unreachable.
    """
    #performs connectivity check
    self.plant_connectivity_check()
    #monitors and reads the plant data connected at that instant
    for plant in self.connected_plants:
        self.read_plant_data(plant)
    #Returns back to the disconnected plants in order to check for the
    #current connectivity. Tries for a set number of attempts and then
    #breaks, notifying the failure to connect and asking for checking the
    #connectivity.
    #
    # Iterate over a snapshot: the loop body removes reconnected plants
    # from self.disconnected_plants, and mutating a list while iterating
    # it skips the element after every removal (the original bug).
    for plant in list(self.disconnected_plants):
        device_name = self.PLANT_DEVICE_MAPPING[plant]["device_name"]
        status = self.check_device_connection(device_name)
        no_tries = 0
        while status == False:
            sleep(DEVICE_CONN_WAIT_TIME)
            status = self.check_device_connection(device_name)
            no_tries += 1
            if no_tries > DEVICE_CONN_NO_TRIES:
                # Parenthesized single-argument print is valid on both
                # Python 2 and Python 3.
                print("The readings for "+plant+" could not be recorded. Please check the connectivity")
                break
        if status == True:
            self.disconnected_plants.remove(plant)
            self.read_plant_data(plant)
def _print_plant_details(self, plant):
    """Print the latest recorded readings and flags for a single plant.

    Extracted helper: the original repeated this 16-line print sequence
    verbatim for both the connected and the disconnected plant lists.
    """
    print("Plant Id: ", plant.get_plant_id())
    print("pH value: ", plant.get_ph())
    print("EC value: ", plant.get_ec())
    print("Time for watering: ", plant.get_tfw())
    print("Time between watering: ", plant.get_tbw())
    print("Growth: ", plant.get_current_growth())
    print("Height: ", plant.get_current_height())
    print("Latest Cam Shot Time (EST): ", plant.get_cam_shot_time())
    print("Last time the plant was heard: ", plant.get_last_time_heard())
    print("Is the latest data recorded from device stale? ", plant.get_stale_read_flag())
    print("Is the latest cam shot data recorded stale? ", plant.get_stale_cam_flag())
    print("Is the latest ph Value recorded abnormal? ", plant.get_abnormal_ph_val_flag())
    print("Is the latest EC value recorded abnormal? ", plant.get_abnormal_ec_val_flag())
    print("Is the latest time between watering recorded abnormal? ", plant.get_abnormal_tbw_flag())
    print("Is the latest time for watering recorded abnormal? ", plant.get_abnormal_tfw_flag())
    print("\n------------------------\n")

def status_check(self):
    """A utility method for checking and printing the status of the
    photon devices and plants in the system connected to the cloud.
    Used for report generation"""
    print("Report Generated at: ", datetime.now())
    print("\n")
    print("Connected Devices:", self.connected_devices)
    print("Disconnected Devices:", self.disconnected_devices)
    print("\n")
    print("Connected Plants:", self.connected_plants)
    print("Disconnected Plants:", self.disconnected_plants)
    print("\n\n")
    print("Latest values for connected plants:")
    print("\n")
    connected_plants = self.get_connected_plants()
    for plant in connected_plants:
        self._print_plant_details(plant)
    print("Latest values for disconnected plants:")
    disconnected_plants = self.get_disconnected_plants()
    for plant in disconnected_plants:
        self._print_plant_details(plant)
    print("\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
def create_summary_report(self, coll):
    """Method for generating a summarized report of the plants from the
    mongodb collections used for storing the plant data."""
    print("\nSummary of pH, EC and Growth values for the past 24 hours.\n")
    plant_keys = PLANT_DEVICE_MAPPING.keys()
    plant_dict = {}
    # NOTE(review): `times` is shared across the plant loop below and is
    # never reset per plant, so get_plant_summary sees read times from
    # ALL plants processed so far — confirm whether that is intended.
    times = []
    for plant_id in plant_keys:
        print("Plant Id:", plant_id)
        current_time = datetime.now()
        # Query window: the trailing 24 hours for this plant.
        past24hr = current_time - timedelta(hours=24)
        last_observations = coll.find({"plant_id" : plant_id,
                                       "read_time" : {"$gte" : past24hr ,
                                                      "$lt" : current_time}})
        ph_values = []
        ec_values = []
        growth_values = []
        height_values = []
        for entries in last_observations:
            #print("Ph Value of the plant", entries["phVal"])
            times.append(entries["read_time"])
            # Readings are stored as strings; non-numeric readings
            # (e.g. sentinel/error values) are silently skipped.
            try:
                ph_values.append((float(str(entries["phVal"])), entries["read_time"]))
                growth_values.append((float(str(entries["growth"])), entries["read_time"]))
                height_values.append((float(str(entries["height"])), entries["read_time"]))
                ec_val = float(str(entries["nutrient_conductivity"]))
                ec_values.append((ec_val, entries["read_time"]))
            except ValueError:
                pass
        # Each reading is kept as a (value, read_time) pair per metric.
        plant_dict[plant_id] = {"phVals": ph_values, "ecVals": ec_values,
                                "growthVals" : growth_values,
                                "heightVals": height_values}
        #print(plant_dict)
        self.get_plant_summary(plant_dict, plant_id, times, coll)
def get_plant_summary(self, plant_dict, plant_id, times, data_collection):
"""Method for getting the plant summary information from the plant
data stored in the mongodb collections"""
ph_vals = [entries[0] for entries in plant_dict[plant_id]["phVals"]]
if ph_vals == []:
print("No pH values were recorded in the given time period")
else:
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for ph between "+str(min_time)+" and "+str(max_time))
ph_variance = variance(ph_vals)
min_ph = min(ph_vals)
max_ph = max(ph_vals)
erratic_ph_vals = [entries[0] for entries in plant_dict[plant_id]["phVals"]
if entries[0] > config.max_ph or entries[0] < config.min_ph]
print("The minimum ph value recorded: "+ str(min_ph))
print("The maximum ph value recorded: "+ str(max_ph))
print("Variance in ph values recorded: "+ str(ph_variance))
print("Unique ph values: "+ str(list(set(ph_vals))))
print("Erratic ph values: "+ str(erratic_ph_vals))
print("\nph values and their respective timings:\n")
for entries in plant_dict[plant_id]["phVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
ec_vals = [entries[0] for entries in plant_dict[plant_id]["ecVals"]]
if ec_vals == []:
print("No EC values were recorded in the given period of time")
else:
ec_variance = variance(ec_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for ec between "+str(min_time)+" and "+str(max_time))
min_ec = min(ec_vals)
max_ec = max(ec_vals)
erratic_ec_vals = [entries[0] for entries in plant_dict[plant_id]["ecVals"]
if entries[0] > config.max_ec or entries[0] < config.min_ec]
print("The minimum ec value recorded: "+ str(min_ec))
print("The maximum ec value recorded: "+ str(max_ec))
print("Variance in ec values recorded: "+ str(ec_variance))
print("Unique ec values: "+ str(list(set(ec_vals))))
print("Erratic ec values: "+ str(erratic_ec_vals))
print("\nec values and their respective timings:\n")
for entries in plant_dict[plant_id]["ecVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
growth_vals = [entries[0] for entries in plant_dict[plant_id]["growthVals"]]
if growth_vals == []:
print("No growth values were recorded in the given period of time")
else:
growth_variance = variance(growth_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for growth between "+str(min_time)+" and "+str(max_time))
min_growth = min(growth_vals)
max_growth = max(growth_vals)
print("The minimum growth value recorded: "+ str(min_growth))
print("The maximum growth value recorded: "+ str(max_growth))
print("Variance in growth values recorded: "+ str(growth_variance))
print("Unique growth values: "+ str(list(set(growth_vals))))
print("\ngrowth values and their respective timings:\n")
for entries in plant_dict[plant_id]["growthVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
height_vals = [entries[0] for entries in plant_dict[plant_id]["heightVals"]]
if height_vals == []:
print("No height values were recorded in the given period of time")
else:
height_variance = variance(height_vals)
min_time = min(times)
max_time = max(times)
print("Summary of recorded readings for height between "+str(min_time)+" and "+str(max_time))
min_height = min(height_vals)
max_height = max(height_vals)
print("The minimum height value recorded: "+ str(min_height))
print("The maximum height value recorded: "+ str(max_height))
print("Variance in height values recorded: "+ str(height_variance))
print("Unique height values: "+ str(list(set(height_vals))))
print("\nheight values and their respective timings:\n")
for entries in plant_dict[plant_id]["heightVals"]:
print(str(entries[1])+" : "+str(entries[0]))
print("\n----------------------\n")
print("\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n")
def read_plant_data(self, plant):
"""Method used for reading all the data points related to a plant
connected to the system"""
plant_obj = self.plants[plant]
plant_device_name = self.PLANT_DEVICE_MAPPING[plant]["device_name"]
plant_device = self.devices[plant_device_name]
#get the time when the plant was last heard
plant_last_heard = to_datetime(self.devices[plant_device_name].last_heard)
plant_obj.set_last_time_heard(plant_last_heard)
#get the current read time
curr_read_time = datetime.now()
plant_obj.set_read_time(curr_read_time)
#get the stale device flag
STALE_PLANT_DATA_FLAG = self.is_data_read_stale(curr_read_time,
plant_last_heard)
plant_obj.set_stale_read_flag(STALE_PLANT_DATA_FLAG)
#get camera data for the plant
camera_data_obj = PlantGrowth(plant)
if proxy == False:
pass
else:
camera_data_obj.set_proxy()
plant_growth_tuple = camera_data_obj.get_growth_data()
#set the camera data for the plant instance
if plant_growth_tuple == None:
growth = None
STALE_CAM_DATA = True
shot_time = None
height = None
last_plant_height = plant_obj.get_current_height()
plant_obj.set_current_height(last_plant_height)
plant_obj.set_hourly_height_diff(0.0)
last_plant_growth = plant_obj.get_current_growth()
plant_obj.set_current_growth(last_plant_growth)
plant_obj.set_hourly_growth_diff(0.0)
plant_obj.set_stale_cam_flag(STALE_CAM_DATA)
plant_obj.set_cam_shot_time(plant_obj.get_cam_shot_time())
else:
growth = plant_growth_tuple[0]
STALE_CAM_DATA = plant_growth_tuple[1]
shot_time = plant_growth_tuple[2]
height = plant_growth_tuple[3]
last_plant_height = plant_obj.get_current_height()
hourly_height_diff = height - last_plant_height
plant_obj.set_current_height(height)
plant_obj.set_hourly_height_diff(hourly_height_diff)
last_plant_growth = plant_obj.get_current_growth()
hourly_growth_diff = growth - last_plant_growth
plant_obj.set_current_growth(growth)
plant_obj.set_hourly_growth_diff(hourly_growth_diff)
plant_obj.set_stale_cam_flag(STALE_CAM_DATA)
plant_obj.set_cam_shot_time(shot_time)
#get the ph readings of the plant
plant_ph_obj = PlantPh(plant, plant_device)
ph_reading = plant_ph_obj.get_ph_reading()
#set the current ph readings of the plant
if ph_reading == None:
plant_obj.set_ph(plant_obj.get_ph())
plant_obj.set_abnormal_ph_val_flag(True)
else:
plant_obj.set_ph(ph_reading[0])
plant_obj.set_abnormal_ph_val_flag(ph_reading[1])
#get the ec readings of the plant
plant_ec_obj = PlantEc(plant, plant_device)
ec_reading = plant_ec_obj.get_ec_reading()
#set the current ph readings of the plant
if ec_reading == None:
plant_obj.set_ec(plant_obj.get_ec())
plant_obj.set_abnormal_ec_val_flag(True)
else:
plant_obj.set_ec(ec_reading[0])
plant_obj.set_abnormal_ec_val_flag(ec_reading[1])
#get the watering readings of the plant
plant_watering_obj = PlantWatering(plant, plant_device)
watering_reading = plant_watering_obj.get_watering_reading()
#set the current ph readings of the plant
if watering_reading == None:
plant_obj.set_tbw(plant_obj.get_tbw())
plant_obj.set_tfw(plant_obj.get_tfw())
plant_obj.set_abnormal_tbw_flag(True)
plant_obj.set_abnormal_tfw_flag(True)
else:
plant_obj.set_tbw(watering_reading[0])
plant_obj.set_tfw(watering_reading[2])
plant_obj.set_abnormal_tbw_flag(watering_reading[1])
plant_obj.set_abnormal_tfw_flag(watering_reading[3])
def check_device_connection(self, device_name):
"""Checks the current connectivity of the given photon device identified
by its name"""
return self.devices[device_name].connected
    def get_plants(self):
        """Return the collection of plant objects stored in ``self.plants``."""
        return self.plants.values()
    def get_connected_devices(self):
        """Return the list of photon devices currently connected."""
        return self.connected_devices
    def get_disconnected_devices(self):
        """Return the list of photon devices currently disconnected."""
        return self.disconnected_devices
def get_connected_plants(self):
"""Gets the list of connected plants"""
return [self.plants[entries] for entries in self.connected_plants]
def get_disconnected_plants(self):
"""Gets the list of disconnected plants"""
return [self.plants[entries] for entries in self.disconnected_plants]
def is_data_read_stale(self, curr_time, last_heard):
"""Method for calculating if the time at which the plant data is read
is too old. This may be due to previously stored values in the photon
device which got disconnected from the cloud and did not update the
recent data"""
time_diff = curr_time - last_heard
time_diff_mins = float(time_diff.seconds) / 60.0
STALE_DATA = False
if time_diff_mins >= DEVICE_STALE_DATA_MINS:
STALE_DATA = True
return STALE_DATA
class PlantGrowth:
    """Class representing the growth data captured by the camera"""
    def __init__(self, plant_id):
        # plant id for which the growth data needs to be captured
        self.plant_id = plant_id
        # URL for the camera server
        self.GROWTH_URL = GROWTH_URL
        # proxy flag: when True, requests go through the configured proxies
        self.proxy = False
    def set_proxy(self):
        """Sets the proxy flag"""
        self.proxy = True
    def get_growth_data(self):
        """Fetch the current growth data for this plant from the camera server.

        Returns a ``(growth, stale_flag, shot_time, height)`` tuple, or
        ``None`` when the server response cannot be retrieved or parsed.

        Bug fix: the original assigned ``time_diff = time_diff(...)``, which
        made ``time_diff`` a function-local name and raised
        UnboundLocalError on the call itself; the bare ``except`` swallowed
        the error, so the success path could never be reached.  The local is
        now named differently, and the redundant duplicate-return branch has
        been collapsed.
        """
        payload = {"plant": self.plant_id}
        base_url = self.GROWTH_URL
        if self.proxy:
            response = requests.get(base_url, params=payload, proxies=proxies)
        else:
            response = requests.get(base_url, params=payload)
        try:
            json_resp = json.loads(response.text)
            growth = json_resp[0]["camera_output"]
            height = json_resp[0]["height"]
            shot_time = parser.parse(json_resp[0]["shot_time"]).replace(tzinfo=None)
            # time_diff() is the module-level helper; per its use here its
            # first element is the elapsed time in minutes.
            elapsed_mins = time_diff(shot_time, datetime.now())[0]
            stale_cam_data = elapsed_mins >= CAM_STALE_DATA_MINS
            return (growth, stale_cam_data, shot_time, height)
        except Exception:
            # Malformed, empty or unreachable camera-server response.
            return None
class PlantPh:
    """Class representing the pH readings for a plant"""
    def __init__(self, plant_id, device):
        # Id of the plant for which the pH reading is being recorded
        self.plant_id = plant_id
        # Device from which the reading is recorded
        self.device = device
        # Flag indicating whether the pH reading is abnormal
        self.abnormal_ph_flag = False
    def get_ph_reading(self):
        """Read the pH value of the solution for this plant.

        Returns a ``(ph_value, abnormal_flag)`` tuple, or ``None`` when the
        device variable cannot be read.  The second character of the plant
        id selects the device variable (plant "P0" -> "P0_phVal"); ids other
        than *0/*1 are not handled and return ``None``, matching the
        original fall-through behaviour.

        Refactor: the two near-identical per-plant branches are collapsed
        into one getattr-based path; the bare ``except`` is narrowed to
        ``except Exception`` so KeyboardInterrupt/SystemExit pass through.
        """
        plant_num = self.plant_id[1]
        if plant_num not in ("0", "1"):
            return None
        try:
            ph_val = float(getattr(self.device, "P%s_phVal" % plant_num))
            if ph_val > max_ph or ph_val < min_ph:
                self.abnormal_ph_flag = True
            return (ph_val, self.abnormal_ph_flag)
        except (TypeError, AttributeError):
            # Device variable not registered with the cloud.
            print("Not registered variable")
            return None
        except IOError:
            print("Photon not connected")
            return None
        except Exception:
            # Any other failure is assumed to come from the Spark Cloud API.
            print("Error from Spark Cloud")
            return None
class PlantWatering:
    """Class representing the watering frequency readings for a plant"""
    def __init__(self, plant_id, device):
        # Id of the plant for which the watering frequency reading is being recorded
        self.plant_id = plant_id
        # Device from which the reading is recorded
        self.device = device
        # Flag indicating whether the time for watering reading is abnormal
        self.abnormal_tfw_flag = False
        # Flag indicating whether the time between watering reading is abnormal
        self.abnormal_tbw_flag = False
    def get_watering_reading(self):
        """Read the watering-frequency values for this plant.

        Returns ``(tbw, abnormal_tbw_flag, tfw, abnormal_tfw_flag)`` or
        ``None`` when the device variables cannot be read.  The second
        character of the plant id selects the device variables (plant "P0"
        -> "P0_TBW"/"P0_TFW"); ids other than *0/*1 return ``None``,
        matching the original fall-through behaviour.

        Refactor: the two duplicated per-plant branches are collapsed into
        one getattr-based path; the bare ``except`` is narrowed to
        ``except Exception``.
        """
        plant_num = self.plant_id[1]
        if plant_num not in ("0", "1"):
            return None
        try:
            tbw = int(getattr(self.device, "P%s_TBW" % plant_num))
            tfw = int(getattr(self.device, "P%s_TFW" % plant_num))
            if tbw > max_tbw or tbw < min_tbw:
                self.abnormal_tbw_flag = True
            if tfw > max_tfw or tfw < min_tfw:
                self.abnormal_tfw_flag = True
            return (tbw, self.abnormal_tbw_flag, tfw, self.abnormal_tfw_flag)
        except (TypeError, AttributeError):
            # Device variable not registered with the cloud.
            print("Not registered variable")
            return None
        except IOError:
            print("Photon not connected")
            return None
        except Exception:
            # Any other failure is assumed to come from the Spark Cloud API.
            print("Error from Spark Cloud")
            return None
class PlantEc:
    """Class representing the Electrical Conductivity readings for a plant"""
    def __init__(self, plant_id, device):
        # Id of the plant for which the Electrical Conductivity reading is being recorded
        self.plant_id = plant_id
        # Device from which the reading is recorded
        self.device = device
        # Flag indicating whether the EC reading is abnormal
        self.abnormal_ec_flag = False
    def get_ec_reading(self):
        """Read the Electrical Conductivity value for this plant.

        Returns a ``(ec_value, abnormal_flag)`` tuple, or ``None`` when the
        device variable cannot be read.  The second character of the plant
        id selects the device variable (plant "P0" -> "P0_nutrientC"); ids
        other than *0/*1 return ``None``, matching the original
        fall-through behaviour.

        Refactor: the two duplicated per-plant branches are collapsed into
        one getattr-based path; the bare ``except`` is narrowed to
        ``except Exception``.
        """
        plant_num = self.plant_id[1]
        if plant_num not in ("0", "1"):
            return None
        try:
            ec_val = float(getattr(self.device, "P%s_nutrientC" % plant_num))
            if ec_val > max_ec or ec_val < min_ec:
                self.abnormal_ec_flag = True
            return (ec_val, self.abnormal_ec_flag)
        except (TypeError, AttributeError):
            # Device variable not registered with the cloud.
            print("Not registered variable")
            return None
        except IOError:
            print("Photon not connected")
            return None
        except Exception:
            # Any other failure is assumed to come from the Spark Cloud API.
            print("Error from Spark Cloud")
            return None
class Report:
    """Class representing the daily reports generated"""
    def __init__(self, plant_monitor_obj, data_collection, emails):
        # Instance of the plant monitor object containing the recent plant data
        self.plant_monitor_obj = plant_monitor_obj
        # mongodb collection storing the daily plant data
        self.data_collection = data_collection
        # list of email ids to which the automated reports need to be sent;
        # the list is obtained from the configuration files
        self.emails = emails
        # file for storing the report for the day
        self.report_file = open("report.txt", "w")
        # csv file containing the daily readings
        self.csv_file = open("daily_plant_readings.csv", "w")
    def generate_report(self):
        """Generate the daily report into ``report.txt``.

        NOTE(review): sys.stdout is redirected to the report file and never
        restored, so later prints from this process also land in
        report.txt — confirm this is intended before changing it.
        """
        sys.stdout = self.report_file
        #self.plant_monitor_obj.status_check()
        self.plant_monitor_obj.create_summary_report(self.data_collection)
    def generate_csv(self):
        """Write the last 24 hours of readings to the csv file.

        Bug fix: the original indexed ``plant_data_list[0]`` unconditionally
        and raised IndexError whenever no readings existed in the window;
        the method now returns quietly, leaving the csv empty.
        """
        current_time = datetime.now()
        past24hr = current_time - timedelta(hours=24)
        last_observations = self.data_collection.find({"read_time":
                                                       {"$gte": past24hr,
                                                        "$lt": current_time}})
        plant_data_list = list(last_observations)
        if not plant_data_list:
            # Nothing recorded in the last 24 hours.
            return
        # Column headers come from the first document's keys.
        keys = plant_data_list[0].keys()
        with self.csv_file as output_file:
            dict_writer = csv.DictWriter(output_file, keys)
            dict_writer.writeheader()
            dict_writer.writerows(plant_data_list)
    def send_emails(self):
        """Email the report and the csv of daily readings to the configured
        recipients (typo "generarted" in the body fixed)."""
        yagmail.register(admin_email, admin_passwd)
        yag = yagmail.SMTP(admin_email)
        to = self.emails
        subject = '24 hour report for hydroponic environment'
        body = 'Please find the past 24 hour report generated from the automated \
        hydroponic environment along with the data readings recorded in a csv file. \
        Report File -> report.txt \
        Data Readings -> daily_plant_readings.csv \
        Please note this is an automatically \
        generated email. For more information about the project and its \
        source code please check the GitHub repository: https://github.com/infyponics/infyponics'
        report_doc = 'report.txt'
        csv_readings = 'daily_plant_readings.csv'
        yag.send(to = to, subject = subject, contents = [body, report_doc,
                                                         csv_readings])
|
import json
import urlparse
import datetime
import base64
import uuid
import urllib
from django.http import QueryDict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Activity
from ..views import statements
from oauth2_provider.provider import constants
from oauth2_provider.provider.utils import now as date_now
from oauth2_provider.provider.oauth2.forms import ClientForm
from oauth2_provider.provider.oauth2.models import Client, Grant, AccessToken, RefreshToken
from oauth2_provider.provider.oauth2.backends import BasicClientBackend, RequestParamsClientBackend, AccessTokenBackend
# Default OAuth2 scope string used by the token tests: the first two scope
# names declared by the provider, space-separated.
DEFAULT_SCOPE = "%s %s" % (constants.SCOPES[0][1], constants.SCOPES[1][1])
class OAuth2Tests(TestCase):
    """Base class for the OAuth2 endpoint tests: login helper, URL helpers
    and the shared capture -> authorize -> grant flow."""
    @classmethod
    def setUpClass(cls):
        # Bug fix: chain to the parent setUpClass — unittest/Django perform
        # class-level setup there, and skipping it can break fixture and
        # class-level state handling.
        super(OAuth2Tests, cls).setUpClass()
        print("\n%s-%s" % (__name__, cls.__name__))
    def login(self):
        """Force OAuth on and log the default test user in."""
        if not settings.OAUTH_ENABLED:
            settings.OAUTH_ENABLED = True
        self.client.login(username='test-user-1', password='test')
    def auth_url(self):
        return reverse('oauth2:capture')
    def auth_url2(self):
        return reverse('oauth2:authorize')
    def redirect_url(self):
        return reverse('oauth2:redirect')
    def access_token_url(self):
        return reverse('oauth2:access_token')
    def get_client(self, cid=2):
        """Return the fixture Client with primary key *cid*."""
        return Client.objects.get(id=cid)
    def get_grant(self):
        return Grant.objects.all()[0]
    def get_user(self):
        return User.objects.get(username='test-user-1')
    def get_password(self):
        return 'test'
    def _login_and_authorize(self, url_func=None, scope=None, cid=2):
        """Drive the capture -> authorize -> grant flow for client *cid*.

        *url_func* may supply a custom capture URL; *scope* is posted with
        the authorization when given, otherwise the server default applies.
        """
        if url_func is None:
            url_func = lambda: self.auth_url() + '?client_id=%s&response_type=code&state=abc' % self.get_client(cid).client_id
        response = self.client.get(url_func())
        response = self.client.get(self.auth_url2())
        # LRS CHANGE - DON'T HAVE TO SUPPLY SCOPE HERE - SHOULD GET DEFAULTED
        if scope:
            response = self.client.post(self.auth_url2(), {'authorize': True, 'scope': scope})
        else:
            response = self.client.post(self.auth_url2(), {'authorize': True})
        self.assertEqual(302, response.status_code, response.content)
        self.assertTrue(self.redirect_url() in response['Location'])
class AuthorizationTest(OAuth2Tests):
    """Tests for the OAuth2 authorization endpoint: login enforcement,
    validation of client_id / response_type / redirect_uri / scope, and the
    grant / deny outcomes."""
    fixtures = ['test_oauth2']
    def setUp(self):
        # Pin LOGIN_URL so the anonymous-redirect assertions are stable.
        self._old_login = settings.LOGIN_URL
        settings.LOGIN_URL = '/login/'
    def tearDown(self):
        # Restore the original LOGIN_URL for other test classes.
        settings.LOGIN_URL = self._old_login
    def test_authorization_requires_login(self):
        """Anonymous requests are redirected to LOGIN_URL; logged-in users
        are forwarded to the authorize page."""
        response = self.client.get(self.auth_url())
        # Login redirect
        self.assertEqual(302, response.status_code)
        self.assertEqual('/login/', urlparse.urlparse(response['Location']).path)
        self.login()
        response = self.client.get(self.auth_url())
        self.assertEqual(302, response.status_code)
        self.assertTrue(self.auth_url2() in response['Location'])
    def test_authorization_requires_client_id(self):
        """Omitting client_id yields a 400 with the unauthorized-client
        message."""
        self.login()
        response = self.client.get(self.auth_url())
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue("An unauthorized client tried to access your resources." in response.content)
    def test_authorization_rejects_invalid_client_id(self):
        """An unknown client_id yields a 400 with the unauthorized-client
        message."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=123')
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue("An unauthorized client tried to access your resources." in response.content)
    def test_authorization_requires_response_type(self):
        """Missing response_type yields a 400 with an explanatory message."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=%s' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue(escape(u"No 'response_type' supplied.") in response.content)
    def test_authorization_requires_supported_response_type(self):
        """Unsupported response types are rejected; 'code' and 'token' are
        accepted."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=unsupported' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue(escape(u"'unsupported' is not a supported response type.") in response.content)
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        self.assertEqual(200, response.status_code, response.content)
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=token' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        self.assertEqual(200, response.status_code)
    def test_authorization_requires_a_valid_redirect_uri(self):
        """redirect_uri must match the client's registered value exactly."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code&redirect_uri=%s' % (
            self.get_client().client_id,
            self.get_client().redirect_uri + '-invalid'))
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue(escape(u"The requested redirect didn't match the client settings.") in response.content)
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code&redirect_uri=%s' % (
            self.get_client().client_id,
            self.get_client().redirect_uri))
        response = self.client.get(self.auth_url2())
        self.assertEqual(200, response.status_code)
    def test_authorization_requires_a_valid_scope(self):
        """Unknown scope names are rejected; declared scopes are accepted."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code&scope=invalid+invalid2' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        self.assertEqual(400, response.status_code)
        self.assertTrue(escape(u"'invalid' is not a valid scope.") in response.content)
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code&scope=%s' % (
            self.get_client().client_id,
            constants.SCOPES[0][1]))
        response = self.client.get(self.auth_url2())
        self.assertEqual(200, response.status_code)
    def test_authorization_is_not_granted(self):
        """Denying authorization redirects back with error=access_denied and
        no grant code."""
        self.login()
        response = self.client.get(self.auth_url() + '?client_id=%s&response_type=code' % self.get_client().client_id)
        response = self.client.get(self.auth_url2())
        response = self.client.post(self.auth_url2(), {'authorize': False, 'scope': constants.SCOPES[0][1]})
        self.assertEqual(302, response.status_code, response.content)
        self.assertTrue(self.redirect_url() in response['Location'])
        response = self.client.get(self.redirect_url())
        self.assertEqual(302, response.status_code)
        self.assertTrue('error=access_denied' in response['Location'])
        self.assertFalse('code' in response['Location'])
    def test_authorization_is_granted(self):
        """Granting authorization redirects back with a grant code and no
        error."""
        self.login()
        self._login_and_authorize()
        response = self.client.get(self.redirect_url())
        self.assertEqual(302, response.status_code)
        self.assertFalse('error' in response['Location'])
        self.assertTrue('code' in response['Location'])
    def test_preserving_the_state_variable(self):
        """The state parameter supplied at capture time is echoed back on
        the final redirect."""
        self.login()
        self._login_and_authorize()
        response = self.client.get(self.redirect_url())
        self.assertEqual(302, response.status_code)
        self.assertFalse('error' in response['Location'])
        self.assertTrue('code' in response['Location'])
        self.assertTrue('state=abc' in response['Location'])
    def test_redirect_requires_valid_data(self):
        """Hitting the redirect endpoint without a pending authorization is
        a 400."""
        self.login()
        response = self.client.get(self.redirect_url())
        self.assertEqual(400, response.status_code)
class AccessTokenTest(OAuth2Tests):
fixtures = ['test_oauth2.json']
def get_user_auth(self):
return "Basic %s" % base64.b64encode("%s:%s" % ("test-user-1", "test"))
    def test_access_token_get_expire_delta_value(self):
        """A freshly created token's expiry delta matches the provider's
        configured EXPIRE_DELTA to within one second."""
        user = self.get_user()
        client = self.get_client()
        token = AccessToken.objects.create(user=user, client=client)
        # Compare the token's remaining lifetime against the configured default.
        now = date_now()
        default_expiration_timedelta = constants.EXPIRE_DELTA
        current_expiration_timedelta = datetime.timedelta(seconds=token.get_expire_delta(reference=now))
        self.assertTrue(abs(current_expiration_timedelta - default_expiration_timedelta) <= datetime.timedelta(seconds=1))
def test_fetching_access_token_with_invalid_client(self):
self.login()
self._login_and_authorize()
response = self.client.post(self.access_token_url(), {
'grant_type': 'authorization_code',
'client_id': self.get_client().client_id + '123',
'client_secret': self.get_client().client_secret, })
self.assertEqual(400, response.status_code, response.content)
self.assertEqual('invalid_client', json.loads(response.content)['error'])
def test_fetching_access_token_with_invalid_grant(self):
self.login()
self._login_and_authorize()
response = self.client.post(self.access_token_url(), {
'grant_type': 'authorization_code',
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
'code': '123'})
self.assertEqual(400, response.status_code, response.content)
self.assertEqual('invalid_grant', json.loads(response.content)['error'])
    def _login_authorize_get_token(self, scope=DEFAULT_SCOPE, cid=2):
        """Complete the full OAuth2 flow for client *cid* and return the
        parsed access-token response dict (must contain ``access_token``
        and ``token_type``)."""
        required_props = ['access_token', 'token_type']
        self.login()
        self._login_and_authorize(url_func=None, scope=scope, cid=cid)
        response = self.client.get(self.redirect_url())
        # The grant code comes back in the redirect's query string.
        query = QueryDict(urlparse.urlparse(response['Location']).query)
        code = query['code']
        response = self.client.post(self.access_token_url(), {
            'grant_type': 'authorization_code',
            'client_id': self.get_client(cid).client_id,
            'client_secret': self.get_client(cid).client_secret,
            'code': code})
        self.assertEqual(200, response.status_code, response.content)
        token = json.loads(response.content)
        for prop in required_props:
            self.assertIn(prop, token, "Access token response missing "
                "required property: %s" % prop)
        return token
    def test_get_statements_user_submitted(self):
        """A statement POSTed with Basic auth is visible to a GET made with
        an OAuth bearer token for the same user."""
        token = self._login_authorize_get_token()
        stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
            "display": {"en-US":"created"}}, "object": {"id":"act:activity"},
            "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
        response = self.client.post(reverse(statements), stmt, content_type="application/json",
            Authorization=self.get_user_auth(), X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
        stmt_get = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization="Bearer " + token['access_token'], content_type="application/json")
        self.assertEqual(stmt_get.status_code, 200)
        stmts = json.loads(stmt_get.content)['statements']
        self.assertEqual(len(stmts), 1)
    def test_get_statements_oauth_submitted(self):
        """A statement POSTed with the OAuth bearer token is visible to a
        GET made with the same token."""
        token = self._login_authorize_get_token()
        stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
            "display": {"en-US":"created"}}, "object": {"id":"act:activity"},
            "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
        response = self.client.post(reverse(statements), stmt, content_type="application/json",
            Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
        stmt_get = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization="Bearer " + token['access_token'], content_type="application/json")
        self.assertEqual(stmt_get.status_code, 200)
        stmts = json.loads(stmt_get.content)['statements']
        self.assertEqual(len(stmts), 1)
    def test_get_statements_mix_submitted(self):
        """Statements submitted via OAuth and via Basic auth by the same
        user are all visible under either auth mechanism."""
        token = self._login_authorize_get_token()
        stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
            "display": {"en-US":"created"}}, "object": {"id":"act:activity"},
            "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
        response = self.client.post(reverse(statements), stmt, content_type="application/json",
            Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
        stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created",
            "display": {"en-US":"created"}}, "object": {"id":"act:activity"},
            "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}})
        response = self.client.post(reverse(statements), stmt, content_type="application/json",
            Authorization=self.get_user_auth(), X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(response.status_code, 200)
        stmt_get = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization="Bearer " + token['access_token'], content_type="application/json")
        self.assertEqual(stmt_get.status_code, 200)
        stmts = json.loads(stmt_get.content)['statements']
        self.assertEqual(len(stmts), 2)
        stmt_get = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.get_user_auth(), content_type="application/json")
        self.assertEqual(stmt_get.status_code, 200)
        stmts = json.loads(stmt_get.content)['statements']
        self.assertEqual(len(stmts), 2)
def test_put_statements(self):
token = self._login_authorize_get_token(scope=constants.SCOPES[0][1])
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"act:test_put"}})
param = {"statementId":put_guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 204)
def test_post_statements(self):
token = self._login_authorize_get_token()
stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"}}
stmt_json = json.dumps(stmt)
post = self.client.post('/XAPI/statements/', data=stmt_json, content_type="application/json",
Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 200)
    def test_write_statements_wrong_scope(self):
        """A token holding only the third provider scope may not write
        statements (403)."""
        token = self._login_authorize_get_token(scope=constants.SCOPES[2][1])
        stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"},
            "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
            "object": {"id":"act:test_post"}}
        stmt_json = json.dumps(stmt)
        post = self.client.post('/XAPI/statements/', data=stmt_json, content_type="application/json",
            Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(post.status_code, 403)
    def test_complex_statement_get(self):
        """Filtering a GET by activity returns only the matching statement
        out of the two submitted."""
        token = self._login_authorize_get_token()
        stmt_data = [{"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"},
            "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
            "object": {"id":"act:test_complex_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:jane@example.com"}},
            {"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"},
            "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
            "object": {"id":"act:test_post"}}]
        stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
            Authorization=self.get_user_auth(), X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(stmt_post.status_code, 200)
        param = {"activity":"act:test_complex_get"}
        path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
        resp = self.client.get(path,Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(resp.status_code, 200)
        stmts = json.loads(resp.content)['statements']
        self.assertEqual(len(stmts), 1)
    def test_define(self):
        """Activity 'define' scope behaviour: a token without the define
        scope that posts a differing definition creates a second,
        non-canonical Activity with the same id; a token that is granted
        the extra (define) scope updates the canonical definition."""
        stmt = {
            "actor":{
                "objectType": "Agent",
                "mbox":"mailto:t@t.com",
                "name":"bob"
            },
            "verb":{
                "id": "http://adlnet.gov/expapi/verbs/passed",
                "display": {"en-US":"passed"}
            },
            "object":{
                "id":"act:test_define",
                'definition': {
                    'name': {'en-US':'testname'},
                    'description': {'en-US':'testdesc'},
                    'type': 'type:course'
                }
            }
        }
        # Canonical activity created by the statement owner via basic auth.
        stmt_post = self.client.post(reverse(statements), json.dumps(stmt), content_type="application/json",
            Authorization=self.get_user_auth(), X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(stmt_post.status_code, 200)
        token = self._login_authorize_get_token()
        stmt2 = {
            "actor":{
                "objectType": "Agent",
                "mbox":"mailto:t@t.com",
                "name":"bob"
            },
            "verb":{
                "id": "http://adlnet.gov/expapi/verbs/passed",
                "display": {"en-US":"passed"}
            },
            "object":{
                "id":"act:test_define",
                'definition': {
                    'name': {'en-US':'testname differ'},
                    'description': {'en-US':'testdesc differ'},
                    'type': 'type:course'
                }
            }
        }
        # Doesn't have define permission - should create another activity with that ID that isn't canonical
        stmt_post2 = self.client.post(reverse(statements), json.dumps(stmt2), content_type="application/json",
            Authorization="Bearer " + token['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(stmt_post2.status_code, 200)
        acts = Activity.objects.filter(activity_id="act:test_define")
        self.assertEqual(len(acts), 2)
        stmt_post = self.client.post(reverse(statements), json.dumps(stmt), content_type="application/json",
            Authorization=self.get_user_auth(), X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(stmt_post.status_code, 200)
        token2 = self._login_authorize_get_token(scope="%s %s" % (constants.SCOPES[0][1], constants.SCOPES[4][1]), cid=1)
        stmt3 = {
            "actor":{
                "objectType": "Agent",
                "mbox":"mailto:t@t.com",
                "name":"bob"
            },
            "verb":{
                "id": "http://adlnet.gov/expapi/verbs/passed",
                "display": {"en-US":"passed"}
            },
            "object":{
                "id":"act:test_define",
                'definition': {
                    'name': {'en-US':'testname i define!'},
                    'description': {'en-US':'testdesc i define!'},
                    'type': 'type:course'
                }
            }
        }
        # NOTE(review): token2 was granted an extra scope (SCOPES[4][1],
        # presumably the define scope - confirm against constants). The
        # asserts below show the activity count stays at 2 while the new
        # name/description appear, i.e. a canonical definition was updated
        # in place. The previous comment here ("Doesn't have define
        # permission") looked copy-pasted from the stmt2 case above.
        stmt_post3 = self.client.post(reverse(statements), json.dumps(stmt3), content_type="application/json",
            Authorization="Bearer " + token2['access_token'], X_Experience_API_Version=settings.XAPI_VERSION)
        self.assertEqual(stmt_post3.status_code, 200)
        act_names = Activity.objects.filter(activity_id="act:test_define").values_list('activity_definition_name', flat=True)
        act_descs = Activity.objects.filter(activity_id="act:test_define").values_list('activity_definition_description', flat=True)
        self.assertEqual(len(act_names), 2)
        self.assertEqual(len(act_descs), 2)
        self.assertIn('{"en-US":"testname i define!"}', act_names)
        self.assertIn('{"en-US":"testdesc i define!"}', act_descs)
def test_fetching_access_token_with_valid_grant(self):
self._login_authorize_get_token()
def test_fetching_access_token_with_invalid_grant_type(self):
self.login()
self._login_and_authorize()
response = self.client.get(self.redirect_url())
query = QueryDict(urlparse.urlparse(response['Location']).query)
code = query['code']
response = self.client.post(self.access_token_url(), {
'grant_type': 'invalid_grant_type',
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
'code': code
})
self.assertEqual(400, response.status_code)
self.assertEqual('unsupported_grant_type', json.loads(response.content)['error'],
response.content)
def test_fetching_single_access_token(self):
constants.SINGLE_ACCESS_TOKEN = True
result1 = self._login_authorize_get_token()
result2 = self._login_authorize_get_token()
self.assertEqual(result1['access_token'], result2['access_token'])
constants.SINGLE_ACCESS_TOKEN = False
def test_fetching_single_access_token_after_refresh(self):
constants.SINGLE_ACCESS_TOKEN = True
token = self._login_authorize_get_token()
self.client.post(self.access_token_url(), {
'grant_type': 'refresh_token',
'refresh_token': token['refresh_token'],
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
})
new_token = self._login_authorize_get_token()
self.assertNotEqual(token['access_token'], new_token['access_token'])
constants.SINGLE_ACCESS_TOKEN = False
def test_fetching_access_token_multiple_times(self):
self._login_authorize_get_token()
code = self.get_grant().code
response = self.client.post(self.access_token_url(), {
'grant_type': 'authorization_code',
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
'code': code})
self.assertEqual(400, response.status_code)
self.assertEqual('invalid_grant', json.loads(response.content)['error'])
# LRS CHANGE - ACCORDING TO OAUTH2 SPEC, SHOULDN'T BE ABLE TO ESCALATE
# THE SCOPE SINCE YOU CAN'T PASS IN SCOPE PARAM TO ACCESS TOKEN ENDPOINT
# def test_escalating_the_scope(self):
# self.login()
# self._login_and_authorize()
# code = self.get_grant().code
# response = self.client.post(self.access_token_url(), {
# 'grant_type': 'authorization_code',
# 'client_id': self.get_client().client_id,
# 'client_secret': self.get_client().client_secret,
# 'code': code,
# 'scope': constants.SCOPES[6][1]})
# self.assertEqual(400, response.status_code)
# self.assertEqual('invalid_scope', json.loads(response.content)['error'])
def test_refreshing_an_access_token(self):
token = self._login_authorize_get_token()
response = self.client.post(self.access_token_url(), {
'grant_type': 'refresh_token',
'refresh_token': token['refresh_token'],
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
})
self.assertEqual(200, response.status_code)
response = self.client.post(self.access_token_url(), {
'grant_type': 'refresh_token',
'refresh_token': token['refresh_token'],
'client_id': self.get_client().client_id,
'client_secret': self.get_client().client_secret,
})
self.assertEqual(400, response.status_code)
self.assertEqual('invalid_grant', json.loads(response.content)['error'],
response.content)
def test_password_grant_public(self):
c = self.get_client()
c.client_type = 1 # public
c.save()
response = self.client.post(self.access_token_url(), {
'grant_type': 'password',
'client_id': c.client_id,
# No secret needed
'username': self.get_user().username,
'password': self.get_password(),
})
self.assertEqual(200, response.status_code, response.content)
self.assertNotIn('refresh_token', json.loads(response.content))
expires_in = json.loads(response.content)['expires_in']
expires_in_days = round(expires_in / (60.0 * 60.0 * 24.0))
self.assertEqual(expires_in_days, constants.EXPIRE_DELTA_PUBLIC.days)
def test_password_grant_confidential(self):
c = self.get_client()
c.client_type = 0 # confidential
c.save()
response = self.client.post(self.access_token_url(), {
'grant_type': 'password',
'client_id': c.client_id,
'client_secret': c.client_secret,
'username': self.get_user().username,
'password': self.get_password(),
})
self.assertEqual(200, response.status_code, response.content)
self.assertTrue(json.loads(response.content)['refresh_token'])
def test_password_grant_confidential_no_secret(self):
c = self.get_client()
c.client_type = 0 # confidential
c.save()
response = self.client.post(self.access_token_url(), {
'grant_type': 'password',
'client_id': c.client_id,
'username': self.get_user().username,
'password': self.get_password(),
})
self.assertEqual('invalid_client', json.loads(response.content)['error'])
def test_password_grant_invalid_password_public(self):
c = self.get_client()
c.client_type = 1 # public
c.save()
response = self.client.post(self.access_token_url(), {
'grant_type': 'password',
'client_id': c.client_id,
'username': self.get_user().username,
'password': self.get_password() + 'invalid',
})
self.assertEqual(400, response.status_code, response.content)
self.assertEqual('invalid_client', json.loads(response.content)['error'])
def test_password_grant_invalid_password_confidential(self):
c = self.get_client()
c.client_type = 0 # confidential
c.save()
response = self.client.post(self.access_token_url(), {
'grant_type': 'password',
'client_id': c.client_id,
'client_secret': c.client_secret,
'username': self.get_user().username,
'password': self.get_password() + 'invalid',
})
self.assertEqual(400, response.status_code, response.content)
self.assertEqual('invalid_grant', json.loads(response.content)['error'])
def test_access_token_response_valid_token_type(self):
token = self._login_authorize_get_token()
self.assertEqual(token['token_type'], constants.TOKEN_TYPE, token)
class AuthBackendTest(OAuth2Tests):
    """Exercises the OAuth2 client/token authentication backends directly."""
    fixtures = ['test_oauth2']

    def test_basic_client_backend(self):
        """HTTP Basic credentials resolve to the expected client."""
        request = type('Request', (object,), {'META': {}})()
        credentials = "{0}:{1}".format(self.get_client().client_id,
                                       self.get_client().client_secret)
        # Python 2 str.encode('base64') produces the Basic auth payload.
        request.META['HTTP_AUTHORIZATION'] = "Basic " + credentials.encode('base64')
        backend = BasicClientBackend()
        self.assertEqual(backend.authenticate(request).id, 2,
                         "Didn't return the right client.")

    def test_request_params_client_backend(self):
        """client_id/client_secret request parameters resolve to the client."""
        request = type('Request', (object,), {'REQUEST': {}})()
        request.REQUEST['client_id'] = self.get_client().client_id
        request.REQUEST['client_secret'] = self.get_client().client_secret
        backend = RequestParamsClientBackend()
        self.assertEqual(backend.authenticate(request).id, 2,
                         "Didn't return the right client.'")

    def test_access_token_backend(self):
        """An issued access token authenticates via AccessTokenBackend."""
        user = self.get_user()
        client = self.get_client()
        token = AccessToken.objects.create(user=user, client=client)
        authenticated = AccessTokenBackend().authenticate(
            access_token=token.token, client=client)
        self.assertIsNotNone(authenticated)
class EnforceSecureTest(OAuth2Tests):
    """Checks that ENFORCE_SECURE rejects plain-HTTP OAuth2 requests."""
    fixtures = ['test_oauth2']

    def setUp(self):
        # Save the original value so tearDown restores whatever was
        # configured, mirroring DeleteExpiredTest's save/restore pattern
        # instead of unconditionally forcing the flag back to False.
        self._enforce_secure = constants.ENFORCE_SECURE
        constants.ENFORCE_SECURE = True

    def tearDown(self):
        constants.ENFORCE_SECURE = self._enforce_secure

    def test_authorization_enforces_SSL(self):
        """The authorize endpoint refuses insecure connections."""
        self.login()
        response = self.client.get(self.auth_url())
        self.assertEqual(400, response.status_code)
        self.assertTrue("A secure connection is required." in response.content)

    def test_access_token_enforces_SSL(self):
        """The token endpoint refuses insecure connections."""
        response = self.client.post(self.access_token_url(), {})
        self.assertEqual(400, response.status_code)
        self.assertTrue("A secure connection is required." in response.content)
class ClientFormTest(TestCase):
    """Validates the OAuth2 client registration form."""

    def test_client_form(self):
        """client_type is required; a complete form validates and saves."""
        incomplete = ClientForm({'name': 'TestName',
                                 'url': 'http://127.0.0.1:8000',
                                 'redirect_uri': 'http://localhost:8000/'})
        # Missing client_type -> invalid.
        self.assertFalse(incomplete.is_valid())
        complete = ClientForm({
            'name': 'TestName',
            'url': 'http://127.0.0.1:8000',
            'redirect_uri': 'http://localhost:8000/',
            'client_type': constants.CLIENT_TYPES[0][0]})
        self.assertTrue(complete.is_valid())
        complete.save()
class DeleteExpiredTest(OAuth2Tests):
    """Verifies DELETE_EXPIRED removes grants/tokens once they are consumed."""
    fixtures = ['test_oauth2']

    def setUp(self):
        self._delete_expired = constants.DELETE_EXPIRED
        constants.DELETE_EXPIRED = True

    def tearDown(self):
        constants.DELETE_EXPIRED = self._delete_expired

    def test_clear_expired(self):
        """Grants are deleted on use and tokens are replaced on refresh.

        Uses the canonical assertEqual/assertNotEqual spellings; the
        assertEquals/assertNotEquals forms are deprecated aliases.
        """
        self.login()
        self._login_and_authorize()
        response = self.client.get(self.redirect_url())
        self.assertEqual(302, response.status_code)
        location = response['Location']
        self.assertFalse('error' in location)
        self.assertTrue('code' in location)
        # verify that Grant with code exists
        code = urlparse.parse_qs(location)['code'][0]
        self.assertTrue(Grant.objects.filter(code=code).exists())
        # use the code/grant
        response = self.client.post(self.access_token_url(), {
            'grant_type': 'authorization_code',
            'client_id': self.get_client().client_id,
            'client_secret': self.get_client().client_secret,
            'code': code})
        self.assertEqual(200, response.status_code)
        token = json.loads(response.content)
        self.assertTrue('access_token' in token)
        access_token = token['access_token']
        self.assertTrue('refresh_token' in token)
        refresh_token = token['refresh_token']
        # make sure the grant is gone
        self.assertFalse(Grant.objects.filter(code=code).exists())
        # and verify that the AccessToken and RefreshToken exist
        self.assertTrue(AccessToken.objects.filter(token=access_token)
                        .exists())
        self.assertTrue(RefreshToken.objects.filter(token=refresh_token)
                        .exists())
        # refresh the token
        response = self.client.post(self.access_token_url(), {
            'grant_type': 'refresh_token',
            'refresh_token': token['refresh_token'],
            'client_id': self.get_client().client_id,
            'client_secret': self.get_client().client_secret,
        })
        self.assertEqual(200, response.status_code)
        token = json.loads(response.content)
        self.assertTrue('access_token' in token)
        self.assertNotEqual(access_token, token['access_token'])
        self.assertTrue('refresh_token' in token)
        self.assertNotEqual(refresh_token, token['refresh_token'])
        # make sure the orig AccessToken and RefreshToken are gone
        self.assertFalse(AccessToken.objects.filter(token=access_token)
                         .exists())
        self.assertFalse(RefreshToken.objects.filter(token=refresh_token)
                         .exists())
|
import time
from tempest_lib import exceptions as lib_exc
from tempest import exceptions
from tempest import config
from tempest.services.volume.json.volumes_client import BaseVolumesClientJSON
from tempest.services.volume.json.snapshots_client import BaseSnapshotsClientJSON
CONF = config.CONF
class CleanBFVResource(BaseVolumesClientJSON, BaseSnapshotsClientJSON):
    """
    Client class to clean up the resources created due to boot from volume
    feature. This cleanup will delete any snapshot attached to volume from
    which VM was booted and then volume itself.
    """
    def __init__(self, auth_provider, **kwargs):
        # Initialize both parent clients explicitly; the volumes client
        # additionally needs the configured default volume size.
        BaseVolumesClientJSON.__init__(self, auth_provider,
            default_volume_size=CONF.volume.volume_size, **kwargs)
        BaseSnapshotsClientJSON.__init__(self, auth_provider, **kwargs)

    def _wait_for_snapshot_deletion(self, snapshot_id):
        """Waits for a Snapshot to be deleted (show_snapshot raises NotFound)."""
        start_time = time.time()
        while True:
            try:
                self.show_snapshot(snapshot_id)
            except lib_exc.NotFound:
                # The snapshot no longer exists: deletion is complete.
                return
            else:
                # Still present: poll again after the configured interval.
                time.sleep(self.build_interval)
                dtime = time.time() - start_time
                if dtime > self.build_timeout:
                    message = ('Time Limit Exceeded! (%ds)'
                               'while waiting for snapshot %s to get deleted.'
                               % (self.build_timeout, snapshot_id))
                    raise exceptions.TimeoutException(message)

    def _delete_attached_snapshots(self,volume_id):
        """Delete every snapshot taken from the given volume, waiting for
        each deletion to finish before moving to the next one."""
        bfv_snapshots = self.list_snapshots()
        for snapshot in bfv_snapshots:
            if snapshot['volume_id'] == volume_id:
                self.delete_snapshot(snapshot['id'])
                self._wait_for_snapshot_deletion(snapshot['id'])

    def volume_not_deletable(self, volume):
        """Return True if the volume does not exist, or if it exists but is
        NOT in an 'available' or 'error' state (i.e. it cannot be deleted).

        Note: the previous docstring said the opposite ("in available or
        error state"); the code returns True for statuses outside that list.
        """
        try:
            res = self.show_volume(volume)
            if res['status'] not in ['available', 'error']:
                return True
        except lib_exc.NotFound:
            # Already gone: nothing to delete.
            return True
        return False

    def clean_bfv_resource(self, volumes):
        """First clean snapshots attached to volume if any then delete volume.

        :param volumes: Iterable of volume ids; falsy or non-deletable
            entries are skipped.
        """
        for volume in volumes:
            if not volume or self.volume_not_deletable(volume):
                continue
            self._delete_attached_snapshots(volume)
            self.delete_volume(volume)
def set_block_device_mapping_args(image_ref, kwargs):
    """
    Update the kwargs dictionary with a block device mapping.

    These arguments are required when booting an instance from volume.

    :param image_ref: UUID of the image the boot volume is created from.
    :param kwargs: Dict of server-create arguments; updated in place and
        returned. If it already carries a block device mapping it is
        returned untouched. An optional 'volume_size' entry is consumed.
    :return: The kwargs dict with 'block_device_mapping_v2' added.
    """
    # Respect a mapping the caller has already built (either API version).
    # Membership tests go directly against the dict, not .keys().
    if "block_device_mapping_v2" in kwargs or "block_device_mapping" in kwargs:
        return kwargs
    if 'volume_size' in kwargs:
        vol_size = kwargs.pop('volume_size')
    else:
        # Fall back to the configured default volume size.
        vol_size = CONF.volume.volume_size
    bv_map = [{
        "source_type": "image",
        "destination_type": "volume",
        "delete_on_termination": "1",
        "boot_index": 0,
        "uuid": image_ref,
        "device_name": "vda",
        "volume_size": str(vol_size)}]
    kwargs.update({'block_device_mapping_v2': bv_map})
    return kwargs
def get_cleanBFV_obj(auth_provider):
    """Build a CleanBFVResource client from the volume/identity/debug config."""
    params = {
        # Volume service endpoint selection and polling behaviour.
        'service': CONF.volume.catalog_type,
        'region': CONF.volume.region or CONF.identity.region,
        'endpoint_type': CONF.volume.endpoint_type,
        'build_interval': CONF.volume.build_interval,
        'build_timeout': CONF.volume.build_timeout,
        # Common transport/debug settings shared by all clients.
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests,
    }
    return CleanBFVResource(auth_provider, **params)
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an AGENTID column to the 'group' model; given the app name
    # (zabbix_wechat_db) this presumably identifies a WeChat agent -
    # confirm against the model definition.

    dependencies = [
        ('zabbix_wechat_db', '0004_temp_closed'),
    ]

    operations = [
        migrations.AddField(
            model_name='group',
            name='AGENTID',
            # Empty-string default keeps existing rows valid without a
            # separate data migration.
            field=models.CharField(default='', max_length=64),
        ),
    ]
|
"""
Pelix remote services: XML-RPC implementation
Based on standard package xmlrpclib
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
try:
# Python 3
# pylint: disable=F0401
from xmlrpc.server import SimpleXMLRPCDispatcher
import xmlrpc.client as xmlrpclib
except ImportError:
# Python 2
# pylint: disable=F0401
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
import xmlrpclib
from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \
Invalidate, Property, Provides
from pelix.utilities import to_str
import pelix.http
import pelix.remote
import pelix.remote.transport.commons as commons
# Module version, kept as a tuple plus the dotted-string form.
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

XMLRPC_CONFIGURATION = 'xmlrpc'
""" Remote Service configuration constant """

PROP_XMLRPC_URL = '{0}.url'.format(XMLRPC_CONFIGURATION)
""" XML-RPC servlet URL """

# Module-level logger
_logger = logging.getLogger(__name__)
class _XmlRpcServlet(SimpleXMLRPCDispatcher):
    """
    A XML-RPC servlet that can be registered in the Pelix HTTP service.

    Calls the dispatch method given in the constructor for any method that
    is not registered locally.
    """
    def __init__(self, dispatch_method, encoding=None):
        """
        Sets up the servlet.

        :param dispatch_method: Fallback used for non-local method names
        :param encoding: Optional XML-RPC request encoding
        """
        SimpleXMLRPCDispatcher.__init__(self, allow_none=True,
                                        encoding=encoding)
        # Expose the standard system.* introspection methods
        self.register_introspection_functions()
        # Keep a reference to the fallback dispatcher
        self._dispatch_method = dispatch_method

    def _simple_dispatch(self, name, params):
        """
        Dispatch method: tries locally registered functions first, then
        delegates to the fallback dispatcher.
        """
        try:
            # Internal (e.g. introspection) method
            return self.funcs[name](*params)
        except KeyError:
            # Not a local method
            pass
        # Delegate outside the except block, to avoid messy logs in case
        # the remote call raises
        return self._dispatch_method(name, params)

    def do_POST(self, request, response):
        """
        Handles a HTTP POST request.

        :param request: The HTTP request bean
        :param response: The HTTP response handler
        """
        body = to_str(request.read_data())
        # Let the stdlib dispatcher parse the XML-RPC payload
        xml_result = self._marshaled_dispatch(body, self._simple_dispatch)
        response.send_content(200, xml_result, 'text/xml')
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_XMLRPC_EXPORTER)
@Provides(pelix.remote.SERVICE_EXPORT_PROVIDER)
@Requires('_http', pelix.http.HTTP_SERVICE)
@Property('_path', pelix.http.HTTP_SERVLET_PATH, '/XML-RPC')
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
          (XMLRPC_CONFIGURATION,))
class XmlRpcServiceExporter(commons.AbstractRpcServiceExporter):
    """
    XML-RPC Remote Services exporter: registers an XML-RPC servlet on the
    injected HTTP service and dispatches incoming calls to exported services.
    """
    def __init__(self):
        """
        Sets up the exporter
        """
        # Call parent
        super(XmlRpcServiceExporter, self).__init__()

        # Handled configurations (injected via @Property)
        self._kinds = None

        # HTTP Service (injected via @Requires) and servlet path
        self._http = None
        self._path = None

        # XML-RPC servlet, created on validation
        self._servlet = None

    def get_access(self):
        """
        Retrieves the URL to access this component.

        The host part is left as a '{server}' placeholder, filled in later
        by the importer side.
        """
        port = self._http.get_access()[1]
        return "http{2}://{{server}}:{0}{1}".format(
            port, self._path, "s" if self._http.is_https() else "")

    def make_endpoint_properties(self, svc_ref, name, fw_uid):
        """
        Prepare properties for the ExportEndpoint to be created

        :param svc_ref: Service reference
        :param name: Endpoint name
        :param fw_uid: Framework UID
        :return: A dictionary of extra endpoint properties
        """
        return {PROP_XMLRPC_URL: self.get_access()}

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        # Call parent
        super(XmlRpcServiceExporter, self).validate(context)

        # Create/register the servlet; self.dispatch comes from the parent
        self._servlet = _XmlRpcServlet(self.dispatch)
        self._http.register_servlet(self._path, self._servlet)

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        # Unregister the servlet before the parent cleans up endpoints
        self._http.unregister(None, self._servlet)

        # Call parent
        super(XmlRpcServiceExporter, self).invalidate(context)

        # Clean up members
        self._servlet = None
class _ServiceCallProxy(object):
    """
    Service call proxy: forwards attribute access to a remote XML-RPC
    endpoint, prefixing method names with the endpoint name.
    """
    def __init__(self, name, url):
        """
        Sets up the call proxy

        :param name: End point name
        :param url: End point URL
        """
        self.__name = name
        self.__url = url

    def __getattr__(self, name):
        """
        Prefixes the requested attribute name by the endpoint name
        """
        # A fresh ServerProxy is created per attribute access: this is an
        # ugly trick to handle multithreaded calls, as the underlying proxy
        # re-uses the same connection when possible - sometimes it means
        # sending a request before retrieving a result
        client = xmlrpclib.ServerProxy(self.__url, allow_none=True)
        return getattr(client, "{0}.{1}".format(self.__name, name))
@ComponentFactory(pelix.remote.FACTORY_TRANSPORT_XMLRPC_IMPORTER)
@Provides(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER)
@Property('_kinds', pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED,
          (XMLRPC_CONFIGURATION,))
class XmlRpcServiceImporter(commons.AbstractRpcServiceImporter):
    """
    XML-RPC Remote Services importer: creates local proxies for remote
    XML-RPC endpoints.
    """
    def __init__(self):
        """
        Sets up the exporter
        """
        # Call parent
        super(XmlRpcServiceImporter, self).__init__()

        # Component properties (injected via @Property)
        self._kinds = None

    def make_service_proxy(self, endpoint):
        """
        Creates the proxy for the given ImportEndpoint

        :param endpoint: An ImportEndpoint bean
        :return: A service proxy, or None if no access URL is available
        """
        # Get the access URL
        access_url = endpoint.properties.get(PROP_XMLRPC_URL)
        if not access_url:
            # No URL information
            _logger.warning("No access URL given: %s", endpoint)
            return

        if endpoint.server is not None:
            # Server information given: fill in the '{server}' placeholder
            access_url = access_url.format(server=endpoint.server)
        else:
            # Use the local IP as the source server, just in case
            local_server = "localhost"
            access_url = access_url.format(server=local_server)

        # Return the proxy
        return _ServiceCallProxy(endpoint.name, access_url)

    def clear_service_proxy(self, endpoint):
        """
        Destroys the proxy made for the given ImportEndpoint

        :param endpoint: An ImportEndpoint bean
        """
        # Nothing to do: the proxy holds no persistent connection
        return
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class tmtrafficpolicy_tmglobal_binding(base_resource) :
    """ Binding class showing the tmglobal that can be bound to tmtrafficpolicy.
    """
    def __init__(self) :
        # Backing fields for the properties below.
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._name = ""
        # Resource count; only populated by count()/count_filtered().
        self.___count = 0

    @property
    def boundto(self) :
        """The entity name to which policy is bound.
        """
        try :
            return self._boundto
        except Exception as e:
            raise e

    @boundto.setter
    def boundto(self, boundto) :
        """The entity name to which policy is bound.
        """
        try :
            self._boundto = boundto
        except Exception as e:
            raise e

    @property
    def name(self) :
        """Name of the traffic policy for which to display detailed information.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Name of the traffic policy for which to display detailed information.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def priority(self) :
        """Priority of the policy binding (read-only: no setter is defined)."""
        try :
            return self._priority
        except Exception as e:
            raise e

    @property
    def activepolicy(self) :
        """Active-policy indicator for the binding (read-only: no setter is defined)."""
        try :
            return self._activepolicy
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(tmtrafficpolicy_tmglobal_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 indicates the session must be cleared.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.tmtrafficpolicy_tmglobal_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch tmtrafficpolicy_tmglobal_binding resources.
        """
        try :
            obj = tmtrafficpolicy_tmglobal_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of tmtrafficpolicy_tmglobal_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = tmtrafficpolicy_tmglobal_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        """ Use this API to count tmtrafficpolicy_tmglobal_binding resources configured on NetScaler.
        """
        try :
            obj = tmtrafficpolicy_tmglobal_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of tmtrafficpolicy_tmglobal_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = tmtrafficpolicy_tmglobal_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class tmtrafficpolicy_tmglobal_binding_response(base_response) :
    """Envelope object for tmtrafficpolicy_tmglobal_binding GET responses."""
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate the binding list sized by 'length'. (The original
        # also assigned an empty list first, which was dead code since it
        # was immediately overwritten here.)
        self.tmtrafficpolicy_tmglobal_binding = [tmtrafficpolicy_tmglobal_binding() for _ in range(length)]
|
"""
Task resources for the Barbican API.
"""
import abc
import six
from barbican import api
from barbican.common import utils
from barbican import i18n as u
from barbican.model import models
from barbican.model import repositories as rep
from barbican.plugin import resources as plugin
from barbican.tasks import certificate_resources as cert
LOG = utils.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseTask(object):
    """Base asynchronous task."""

    @abc.abstractmethod
    def get_name(self):
        """Localized task name

        A hook method to return a short localized name for this task.
        The returned name in the form 'u.('Verb Noun')'. For example:
            u._('Create Secret')
        """

    def process(self, *args, **kwargs):
        """A template method for all asynchronous tasks.

        This method should not be overridden by sub-classes. Rather the
        abstract methods below should be overridden.

        :param args: List of arguments passed in from the client.
        :param kwargs: Dict of arguments passed in from the client.
        :return: None
        """
        name = self.get_name()

        # Retrieve the target entity (such as an models.Order instance).
        try:
            entity = self.retrieve_entity(*args, **kwargs)
        except Exception as e:
            # Serious error!
            LOG.exception(u._LE("Could not retrieve information needed to "
                                "process task '%s'."), name)
            raise e

        # Process the target entity.
        try:
            self.handle_processing(entity, *args, **kwargs)
        except Exception as e_orig:
            LOG.exception(u._LE("Could not perform processing for "
                                "task '%s'."), name)

            # Handle failure to process entity.
            try:
                status, message = api.generate_safe_exception_message(name,
                                                                      e_orig)
                self.handle_error(entity, status, message, e_orig,
                                  *args, **kwargs)
            except Exception:
                LOG.exception(u._LE("Problem handling an error for task '%s', "
                                    "raising original "
                                    "exception."), name)
                raise e_orig

            # BUG FIX: processing failed and the failure has been recorded
            # by handle_error(); do not fall through to handle_success(),
            # which would overwrite the error state (e.g. an order marked
            # ERROR would be re-marked ACTIVE and saved).
            return None

        # Handle successful conclusion of processing.
        try:
            self.handle_success(entity, *args, **kwargs)
        except Exception as e:
            LOG.exception(u._LE("Could not process after successfully "
                                "executing task '%s'."), name)
            raise e

    @abc.abstractmethod
    def retrieve_entity(self, *args, **kwargs):
        """A hook method to retrieve an entity for processing.

        :param args: List of arguments passed in from the client.
        :param kwargs: Dict of arguments passed in from the client.
        :return: Entity instance to process in subsequent hook methods.
        """

    @abc.abstractmethod
    def handle_processing(self, entity, *args, **kwargs):
        """A hook method to handle processing on behalf of an entity.

        :param args: List of arguments passed in from the client.
        :param kwargs: Dict of arguments passed in from the client.
        :return: None
        """

    @abc.abstractmethod
    def handle_error(self, entity, status, message, exception,
                     *args, **kwargs):
        """A hook method to deal with errors seen during processing.

        This method could be used to mark entity as being in error, and/or
        to record an error cause.

        :param entity: Entity retrieved from _retrieve_entity() above.
        :param status: Status code for exception.
        :param message: Reason/message for the exception.
        :param exception: Exception raised from handle_processing() above.
        :param args: List of arguments passed in from the client.
        :param kwargs: Dict of arguments passed in from the client.
        :return: None
        """

    @abc.abstractmethod
    def handle_success(self, entity, *args, **kwargs):
        """A hook method to post-process after successful entity processing.

        This method could be used to mark entity as being active, or to
        add information/references to the entity.

        :param entity: Entity retrieved from _retrieve_entity() above.
        :param args: List of arguments passed in from the client.
        :param kwargs: Dict of arguments passed in from the client.
        :return: None
        """
class BeginTypeOrder(BaseTask):
    """Handles beginning processing of a TypeOrder."""

    def get_name(self):
        return u._('Process TypeOrder')

    def __init__(self, project_repo=None, order_repo=None,
                 secret_repo=None, project_secret_repo=None, datum_repo=None,
                 kek_repo=None, container_repo=None,
                 container_secret_repo=None, secret_meta_repo=None,
                 order_plugin_meta_repo=None):
        """Bundle repository references; None values let rep.Repositories
        supply its defaults."""
        LOG.debug('Creating BeginTypeOrder task processor')
        self.repos = rep.Repositories(
            project_repo=project_repo,
            project_secret_repo=project_secret_repo,
            secret_repo=secret_repo,
            datum_repo=datum_repo,
            kek_repo=kek_repo,
            secret_meta_repo=secret_meta_repo,
            order_repo=order_repo,
            order_plugin_meta_repo=order_plugin_meta_repo,
            container_repo=container_repo,
            container_secret_repo=container_secret_repo)

    def retrieve_entity(self, order_id, external_project_id):
        # Look up the Order this task operates on, scoped to the project.
        return self.repos.order_repo.get(
            entity_id=order_id,
            external_project_id=external_project_id)

    def handle_processing(self, order, *args, **kwargs):
        self.handle_order(order)

    def handle_error(self, order, status, message, exception,
                     *args, **kwargs):
        # Record the failure details on the order itself and persist them.
        order.status = models.States.ERROR
        order.error_status_code = status
        order.error_reason = message
        self.repos.order_repo.save(order)

    def handle_success(self, order, *args, **kwargs):
        # Mark the order ACTIVE and persist it. Certificate orders are
        # currently treated the same way pending sub-status support.
        if models.OrderType.CERTIFICATE != order.type:
            order.status = models.States.ACTIVE
        else:
            # TODO(alee-3): enable the code below when sub status is added
            # if cert.ORDER_STATUS_CERT_GENERATED.id == order.sub_status:
            # order.status = models.States.ACTIVE
            order.status = models.States.ACTIVE
        self.repos.order_repo.save(order)

    def handle_order(self, order):
        """Handle secret creation using meta info.

        If type is key
            create secret
        if type is asymmetric
            create secrets
            create containers
        if type is certificate
            TBD

        :param order: Order to process.
        :raises NotImplementedError: for unrecognized order types.
        """
        order_info = order.to_dict_fields()
        order_type = order_info.get('type')
        meta_info = order_info.get('meta')

        # Retrieve the project.
        project = self.repos.project_repo.get(order.project_id)

        if order_type == models.OrderType.KEY:
            # Create Secret; default content type is binary.
            new_secret = plugin.generate_secret(
                meta_info,
                meta_info.get('payload_content_type',
                              'application/octet-stream'),
                project,
                self.repos
            )
            order.secret_id = new_secret.id
            LOG.debug("...done creating keys order's secret.")
        elif order_type == models.OrderType.ASYMMETRIC:
            # Create asymmetric Secret; result is a container of secrets.
            new_container = plugin.generate_asymmetric_secret(
                meta_info,
                meta_info.get('payload_content_type',
                              'application/octet-stream'),
                project, self.repos)
            order.container_id = new_container.id
            LOG.debug("...done creating asymmetric order's secret.")
        elif order_type == models.OrderType.CERTIFICATE:
            # Request a certificate; the container may be created later.
            new_container = cert.issue_certificate_request(
                order, project, self.repos)
            if new_container:
                order.container_id = new_container.id
            LOG.debug("...done requesting a certificate.")
        else:
            raise NotImplementedError(
                u._('Order type "{order_type}" not implemented.').format(
                    order_type=order_type))
class UpdateOrder(BaseTask):
    """Handles updating an order."""

    def get_name(self):
        return u._('Update Order')

    def __init__(self, project_repo=None, order_repo=None,
                 secret_repo=None, project_secret_repo=None, datum_repo=None,
                 kek_repo=None, container_repo=None,
                 container_secret_repo=None, secret_meta_repo=None):
        """Wire up the repository bundle used by this task."""
        LOG.debug('Creating UpdateOrder task processor')
        self.repos = rep.Repositories(
            project_repo=project_repo,
            project_secret_repo=project_secret_repo,
            secret_repo=secret_repo,
            secret_meta_repo=secret_meta_repo,
            datum_repo=datum_repo,
            kek_repo=kek_repo,
            order_repo=order_repo,
            container_repo=container_repo,
            container_secret_repo=container_secret_repo)

    def retrieve_entity(self, order_id, external_project_id, updated_meta):
        """Fetch the order being updated; updated_meta is not needed here."""
        return self.repos.order_repo.get(
            entity_id=order_id,
            external_project_id=external_project_id)

    def handle_processing(self, order, order_id, keystone_id, updated_meta):
        """Delegate to handle_order() with the new metadata."""
        self.handle_order(order, updated_meta)

    def handle_error(self, order, status, message, exception,
                     *args, **kwargs):
        """Record the failure details on the order, log, and persist it."""
        order.status = models.States.ERROR
        order.error_status_code = status
        order.error_reason = message
        LOG.exception(u._LE("An error has occurred updating the order."))
        self.repos.order_repo.save(order)

    def handle_success(self, order, *args, **kwargs):
        """Mark the order active and persist it."""
        # TODO(chellygel): Handle sub-status on a pending order.
        order.status = models.States.ACTIVE
        self.repos.order_repo.save(order)

    def handle_order(self, order, updated_meta):
        """Handle Order Update

        :param order: Order to update.
        """
        kind = order.to_dict_fields().get('type')
        # Guard clause: only certificate orders support updates today.
        if kind != models.OrderType.CERTIFICATE:
            raise NotImplementedError(
                u._('Order type "{order_type}" not implemented.').format(
                    order_type=kind))
        # Update a certificate request
        cert.modify_certificate_request(order, updated_meta, self.repos)
        LOG.debug("...done updating a certificate order.")
        LOG.debug("...done updating order.")
|
# Generate an example server configuration file.
#
# Import compatibly: the module is `configparser` on Python 3 and
# `ConfigParser` on Python 2 (the original import was Python-2-only).
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

config = configparser.RawConfigParser()
config.add_section('ServerConfig')
# All option values are strings: Python 3's configparser raises TypeError on
# non-str values (the original passed the int 2048 for BLOCK_SIZE).
config.set('ServerConfig', 'BLOCK_SIZE', '2048')
config.set('ServerConfig', 'ROOT_DIR', '/tmp/')
config.set('ServerConfig', 'FRIENDS', 'http://example.com/,http://example.ca/')
config.set('ServerConfig', 'HOST_IP', '0.0.0.0')
config.set('ServerConfig', 'HOST_PORT', '8051')
config.set('ServerConfig', 'THREAD_NUM', '10')
# Text mode ('w', not 'wb'): ConfigParser.write() emits str, not bytes.
with open('annelia.conf.example', 'w') as configfile:
    config.write(configfile)
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `form_builder` app.

    Adds four id-only tables (SuccessMessageBucket, FormMeta, SuccessHandlers,
    FormBody). The `models` attribute below is South's frozen ORM snapshot and
    should not be hand-edited.
    """

    def forwards(self, orm):
        """Apply the migration: create the four new tables and emit signals."""
        # Adding model 'SuccessMessageBucket'
        db.create_table('form_builder_successmessagebucket', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('form_builder', ['SuccessMessageBucket'])

        # Adding model 'FormMeta'
        db.create_table('form_builder_formmeta', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('form_builder', ['FormMeta'])

        # Adding model 'SuccessHandlers'
        db.create_table('form_builder_successhandlers', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('form_builder', ['SuccessHandlers'])

        # Adding model 'FormBody'
        db.create_table('form_builder_formbody', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('form_builder', ['FormBody'])

    def backwards(self, orm):
        """Reverse the migration: drop the four tables created in forwards()."""
        # Deleting model 'SuccessMessageBucket'
        db.delete_table('form_builder_successmessagebucket')

        # Deleting model 'FormMeta'
        db.delete_table('form_builder_formmeta')

        # Deleting model 'SuccessHandlers'
        db.delete_table('form_builder_successhandlers')

        # Deleting model 'FormBody'
        db.delete_table('form_builder_formbody')

    # Frozen ORM state used by South to reconstruct model definitions as of
    # this migration. Auto-generated; do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'form_builder.choicefield': {
            'Meta': {'object_name': 'ChoiceField'},
            'choices': ('django.db.models.fields.TextField', [], {}),
            'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'form_builder.emailsuccesshandler': {
            'Meta': {'object_name': 'EmailSuccessHandler'},
            'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'to': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
        },
        'form_builder.emailuserhandler': {
            'Meta': {'object_name': 'EmailUserHandler'},
            'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'to': "orm['widgy.Node']"})
        },
        'form_builder.form': {
            'Meta': {'object_name': 'Form'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "u'Untitled form 8'", 'max_length': '255'})
        },
        'form_builder.formbody': {
            'Meta': {'object_name': 'FormBody'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'form_builder.forminput': {
            'Meta': {'object_name': 'FormInput'},
            'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'form_builder.formmeta': {
            'Meta': {'object_name': 'FormMeta'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'form_builder.formsubmission': {
            'Meta': {'object_name': 'FormSubmission'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 22, 0, 0)'}),
            'form_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
            'form_node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'form_submissions'", 'on_delete': 'models.PROTECT', 'to': "orm['widgy.Node']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'form_builder.formvalue': {
            'Meta': {'object_name': 'FormValue'},
            'field_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
            'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'field_node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'values'", 'to': "orm['form_builder.FormSubmission']"}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'form_builder.multiplechoicefield': {
            'Meta': {'object_name': 'MultipleChoiceField'},
            'choices': ('django.db.models.fields.TextField', [], {}),
            'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'form_builder.savedatahandler': {
            'Meta': {'object_name': 'SaveDataHandler'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'form_builder.submitbutton': {
            'Meta': {'object_name': 'SubmitButton'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'default': "u'submit'", 'max_length': '255'})
        },
        'form_builder.successhandlers': {
            'Meta': {'object_name': 'SuccessHandlers'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'form_builder.successmessagebucket': {
            'Meta': {'object_name': 'SuccessMessageBucket'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'form_builder.textarea': {
            'Meta': {'object_name': 'Textarea'},
            'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'form_builder.uncaptcha': {
            'Meta': {'object_name': 'Uncaptcha'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'widgy.node': {
            'Meta': {'object_name': 'Node'},
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }

    # Apps whose frozen models are fully described above.
    complete_apps = ['form_builder']
|
"""Saving/loading utilities for models created with the KFAC Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from absl import logging
from tensorflow.python.keras.saving import hdf5_format
import tensorflow.compat.v1 as tf
from kfac.python.keras import optimizers
try:
import h5py # pylint: disable=g-import-not-at-top
except ImportError:
h5py = None
def _compile_args_from_training_config(training_config, custom_objects=None):
    """Return model.compile arguments from training config.

    Deserializes the optimizer and maps any serialized loss/metric names
    through `custom_objects` so user-defined callables are restored.
    """
    if custom_objects is None:
        custom_objects = {}

    def _substitute(obj):
        # Swap a serialized name for its user-supplied custom object, if any.
        return custom_objects.get(obj, obj)

    optimizer = tf.keras.optimizers.deserialize(
        training_config['optimizer_config'], custom_objects=custom_objects)

    # Recover loss functions and metrics.
    loss_config = training_config['loss']  # Deserialize loss class.
    if isinstance(loss_config, dict) and 'class_name' in loss_config:
        loss_config = tf.keras.losses.get(loss_config)

    return dict(
        optimizer=optimizer,
        loss=tf.nest.map_structure(_substitute, loss_config),
        metrics=tf.nest.map_structure(_substitute, training_config['metrics']),
        weighted_metrics=tf.nest.map_structure(
            _substitute, training_config.get('weighted_metrics', None)),
        loss_weights=training_config['loss_weights'],
        sample_weight_mode=training_config['sample_weight_mode'])
def load_model(filepath, custom_objects=None, optimizer_name=None):
    """Loads and compiles a Keras model saved as an HDF5 file.

    Same as tf.keras.model.load_model, except it will always compile the model
    and instantiate the Kfac optimizer correctly. If you do not want the model to
    be compiled, or saved without the optimizer, use tf.keras.models.load_model
    instead.

    Example:

    ```python:
    import tensorflow as tf
    import kfac

    model = tf.keras.Model(...)
    loss = tf.keras.losses.MSE()  # could be a serialized loss function
    optimizer = kfac.keras.optimizers.Kfac(0.001, 0.01, model=model, loss=loss)
    model.compile(optimizer, loss)
    model.fit(...)
    model.save('saved_model.hdf5')  # or use tf.keras.models.save_model
    ...
    loaded_model = kfac.keras.saving_utils.load_model('saved_model.hdf5')
    loaded_model.fit(...)
    ```

    Args:
      filepath: One of the following:
          - String, path to the saved model
          - `h5py.File` object from which to load the model
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization. Kfac will
        be added to this dictionary automatically.
      optimizer_name: Optional string that specifies what variable scope you want
        the KFAC variables to be created in. Useful if you have multiple KFAC
        optimizers on one graph.

    Raises:
      ImportError: If h5py was not imported.

    Returns:
      A compiled Keras model with the Kfac optimizer correctly initialized.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')

    if not custom_objects:
        custom_objects = {}
    # Ensure the Kfac optimizer class is resolvable during deserialization.
    custom_objects['Kfac'] = optimizers.Kfac

    # Accept either a path or an already-open h5py.File; only close in the
    # finally block what this function opened itself.
    should_open_file = not isinstance(filepath, h5py.File)
    model_file = h5py.File(filepath, mode='r') if should_open_file else filepath
    # Load architecture/weights only; compilation is done manually below.
    model = tf.keras.models.load_model(
        model_file, custom_objects=custom_objects, compile=False)

    # Code below is current as of 2019-06-20 and may break due to future changes.
    # github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/saving/hdf5_format.py
    try:
        training_config = model_file.attrs.get('training_config')
        if hasattr(training_config, 'decode'):
            # h5py can return bytes for string attributes; normalize to str.
            training_config = training_config.decode('utf-8')
        if training_config is None:
            raise ValueError('No training configuration found in save file, meaning '
                             'the model was not compiled. Please use '
                             'tf.keras.models.load_model instead.')
        training_config = json.loads(training_config)
        model.compile(**_compile_args_from_training_config(training_config,
                                                           custom_objects))
        model.optimizer.register_layers(model)
        if optimizer_name:
            model.optimizer.name = optimizer_name

        if 'optimizer_weights' in model_file:
            # Build train function (to get weight updates).
            # Models that aren't graph networks must wait until they are called
            # with data to _make_train_function() and so can't load optimizer
            # weights.
            model._make_train_function()  # pylint: disable=protected-access
            opt_weight_vals = hdf5_format.load_optimizer_weights_from_hdf5_group(
                model_file)
            try:
                model.optimizer.set_weights(opt_weight_vals)
            except ValueError:
                # Shape/count mismatch: keep the model but warn that optimizer
                # state starts fresh.
                logging.warn('Error in loading the saved optimizer state. As a '
                             'result, your model is starting with a freshly '
                             'initialized optimizer.')
    finally:
        if should_open_file:
            model_file.close()
    return model
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from builtins import object, str, zip
from collections import defaultdict, deque
from contextlib import contextmanager
from os.path import dirname
from future.utils import iteritems
from twitter.common.collections import OrderedSet
from pants.base.exceptions import TargetDefinitionException
from pants.base.parse_context import ParseContext
from pants.base.specs import AscendantAddresses, DescendantAddresses, SingleAddress, Specs
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.app_base import AppBase, Bundle
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_graph import BuildGraph
from pants.build_graph.remote_sources import RemoteSources
from pants.engine.addressable import BuildFileAddresses
from pants.engine.fs import PathGlobs, Snapshot
from pants.engine.legacy.address_mapper import LegacyAddressMapper
from pants.engine.legacy.structs import BundleAdaptor, BundlesField, HydrateableField, SourcesField
from pants.engine.mapper import AddressMapper
from pants.engine.objects import Collection
from pants.engine.parser import HydratedStruct
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Get
from pants.option.global_options import GlobMatchErrorBehavior
from pants.source.filespec import any_matches_filespec
from pants.source.wrapped_globs import EagerFilesetWithSpec, FilesetRelPathWrapper
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
def target_types_from_build_file_aliases(aliases):
    """Given BuildFileAliases, return the concrete target types constructed for each alias.

    Plain target types are taken as-is; each macro factory contributes the
    single concrete target type it constructs.
    """
    mapping = dict(aliases.target_types)
    for alias, macro_factory in aliases.target_macro_factories.items():
        (concrete_type,) = macro_factory.target_types
        mapping[alias] = concrete_type
    return mapping
class _DestWrapper(datatype(['target_types'])):
    """A wrapper for dest field of RemoteSources target.

    This is only used when instantiating RemoteSources target: it carries the
    concrete target type(s) resolved for the 'dest' alias.
    """
class LegacyBuildGraph(BuildGraph):
    """A directed acyclic graph of Targets and dependencies. Not necessarily connected.

    This implementation is backed by a Scheduler that is able to resolve TransitiveHydratedTargets.
    """

    @classmethod
    def create(cls, scheduler, build_file_aliases):
        """Construct a graph given a Scheduler and BuildFileAliases."""
        return cls(scheduler, target_types_from_build_file_aliases(build_file_aliases))

    def __init__(self, scheduler, target_types):
        """Construct a graph given a Scheduler, and set of target type aliases.

        :param scheduler: A Scheduler that is configured to be able to resolve
          TransitiveHydratedTargets.
        :param target_types: A dict mapping aliases to target types.
        """
        self._scheduler = scheduler
        self._target_types = target_types
        super(LegacyBuildGraph, self).__init__()

    def clone_new(self):
        """Returns a new BuildGraph instance of the same type and with the same __init__ params."""
        return LegacyBuildGraph(self._scheduler, self._target_types)

    def _index(self, hydrated_targets):
        """Index from the given roots into the storage provided by the base class.

        This is an additive operation: any existing connections involving these nodes are preserved.

        :returns: The set of all addresses seen (newly indexed or already present).
        """
        all_addresses = set()
        new_targets = list()

        # Index the ProductGraph.
        for hydrated_target in hydrated_targets:
            target_adaptor = hydrated_target.adaptor
            address = target_adaptor.address
            all_addresses.add(address)
            if address not in self._target_by_address:
                new_targets.append(self._index_target(target_adaptor))

        # Once the declared dependencies of all targets are indexed, inject their
        # additional "traversable_(dependency_)?specs".
        deps_to_inject = OrderedSet()
        addresses_to_inject = set()

        def inject(target, dep_spec, is_dependency):
            # Resolve the spec relative to the declaring target; record it only
            # when it is not already a declared dependency.
            address = Address.parse(dep_spec, relative_to=target.address.spec_path)
            if not any(address == t.address for t in target.dependencies):
                addresses_to_inject.add(address)
                if is_dependency:
                    deps_to_inject.add((target.address, address))

        self.apply_injectables(new_targets)

        for target in new_targets:
            for spec in target.compute_dependency_specs(payload=target.payload):
                inject(target, spec, is_dependency=True)
            for spec in target.compute_injectable_specs(payload=target.payload):
                inject(target, spec, is_dependency=False)

        # Inject all addresses, then declare injected dependencies.
        self.inject_addresses_closure(addresses_to_inject)
        for target_address, dep_address in deps_to_inject:
            self.inject_dependency(dependent=target_address, dependency=dep_address)

        return all_addresses

    def _index_target(self, target_adaptor):
        """Instantiate the given TargetAdaptor, index it in the graph, and return a Target."""
        # Instantiate the target.
        address = target_adaptor.address
        target = self._instantiate_target(target_adaptor)
        self._target_by_address[address] = target

        for dependency in target_adaptor.dependencies:
            if dependency in self._target_dependencies_by_address[address]:
                raise self.DuplicateAddressError(
                    'Addresses in dependencies must be unique. '
                    "'{spec}' is referenced more than once by target '{target}'."
                    .format(spec=dependency.spec, target=address.spec)
                )
            # Link its declared dependencies, which will be indexed independently.
            self._target_dependencies_by_address[address].add(dependency)
            self._target_dependees_by_address[dependency].add(address)
        return target

    def _instantiate_target(self, target_adaptor):
        """Given a TargetAdaptor struct previously parsed from a BUILD file, instantiate a Target."""
        target_cls = self._target_types[target_adaptor.type_alias]
        try:
            # Pop dependencies, which were already consumed during construction.
            kwargs = target_adaptor.kwargs()
            kwargs.pop('dependencies')

            # Instantiate. AppBase subclasses and RemoteSources need kwarg
            # conversion before construction; everything else is direct.
            if issubclass(target_cls, AppBase):
                return self._instantiate_app(target_cls, kwargs)
            elif target_cls is RemoteSources:
                return self._instantiate_remote_sources(kwargs)
            return target_cls(build_graph=self, **kwargs)
        except TargetDefinitionException:
            # Already carries target context; let it propagate unchanged.
            raise
        except Exception as e:
            raise TargetDefinitionException(
                target_adaptor.address,
                'Failed to instantiate Target with type {}: {}'.format(target_cls, e))

    def _instantiate_app(self, target_cls, kwargs):
        """For App targets, convert BundleAdaptor to BundleProps."""
        parse_context = ParseContext(kwargs['address'].spec_path, dict())
        bundleprops_factory = Bundle(parse_context)
        kwargs['bundles'] = [
            bundleprops_factory.create_bundle_props(bundle)
            for bundle in kwargs['bundles']
        ]
        return target_cls(build_graph=self, **kwargs)

    def _instantiate_remote_sources(self, kwargs):
        """For RemoteSources target, convert "dest" field to its real target type."""
        kwargs['dest'] = _DestWrapper((self._target_types[kwargs['dest']],))
        return RemoteSources(build_graph=self, **kwargs)

    def inject_address_closure(self, address):
        """Inject a single address and its transitive closure into the graph."""
        self.inject_addresses_closure([address])

    def inject_addresses_closure(self, addresses):
        """Inject the given addresses (and their closures), skipping those already indexed."""
        addresses = set(addresses) - set(self._target_by_address.keys())
        if not addresses:
            return
        dependencies = tuple(SingleAddress(a.spec_path, a.target_name) for a in addresses)
        # Drain the generator for its side effect of indexing the graph.
        for _ in self._inject_specs(Specs(dependencies=tuple(dependencies))):
            pass

    def inject_roots_closure(self, target_roots, fail_fast=None):
        """Inject the closures of the given target roots, yielding their addresses."""
        for address in self._inject_specs(target_roots.specs):
            yield address

    def inject_specs_closure(self, specs, fail_fast=None):
        """Inject the closures of the given specs, yielding the resulting addresses."""
        # Request loading of these specs.
        for address in self._inject_specs(Specs(dependencies=tuple(specs))):
            yield address

    def resolve_address(self, address):
        """Return the Target at the given address, injecting it first if needed."""
        if not self.contains_address(address):
            self.inject_address_closure(address)
        return self.get_target(address)

    @contextmanager
    def _resolve_context(self):
        # Normalize any resolution failure into an AddressLookupError.
        try:
            yield
        except Exception as e:
            raise AddressLookupError(
                'Build graph construction failed: {} {}'.format(type(e).__name__, str(e))
            )

    def _inject_addresses(self, subjects):
        """Injects targets into the graph for each of the given `Address` objects, and then yields them.

        TODO: See #5606 about undoing the split between `_inject_addresses` and `_inject_specs`.
        """
        logger.debug('Injecting addresses to %s: %s', self, subjects)
        with self._resolve_context():
            addresses = tuple(subjects)
            thts, = self._scheduler.product_request(TransitiveHydratedTargets,
                                                    [BuildFileAddresses(addresses)])
            self._index(thts.closure)

        # Yield each subject address exactly once, preserving input order.
        yielded_addresses = set()
        for address in subjects:
            if address not in yielded_addresses:
                yielded_addresses.add(address)
                yield address

    def _inject_specs(self, specs):
        """Injects targets into the graph for the given `Specs` object.

        Yields the resulting addresses.
        """
        if not specs:
            return
        logger.debug('Injecting specs to %s: %s', self, specs)
        with self._resolve_context():
            thts, = self._scheduler.product_request(TransitiveHydratedTargets,
                                                    [specs])
            self._index(thts.closure)

        for hydrated_target in thts.roots:
            yield hydrated_target.address
class _DependentGraph(object):
"""A graph for walking dependent addresses of TargetAdaptor objects.
This avoids/imitates constructing a v1 BuildGraph object, because that codepath results
in many references held in mutable global state (ie, memory leaks).
The long term goal is to deprecate the `changed` goal in favor of sufficiently good cache
hit rates, such that rather than running:
./pants --changed-parent=master test
...you would always be able to run:
./pants test ::
...and have it complete in a similar amount of time by hitting relevant caches.
"""
@classmethod
def from_iterable(cls, target_types, address_mapper, adaptor_iter):
"""Create a new DependentGraph from an iterable of TargetAdaptor subclasses."""
inst = cls(target_types, address_mapper)
all_valid_addresses = set()
for target_adaptor in adaptor_iter:
inst._inject_target(target_adaptor)
all_valid_addresses.add(target_adaptor.address)
inst._validate(all_valid_addresses)
return inst
def __init__(self, target_types, address_mapper):
# TODO: Dependencies and implicit dependencies are mapped independently, because the latter
# cannot be validated until:
# 1) Subsystems are computed in engine: #5869. Currently instantiating a subsystem to find
# its injectable specs would require options parsing.
# 2) Targets-class Subsystem deps can be expanded in-engine (similar to Fields): #4535,
self._dependent_address_map = defaultdict(set)
self._implicit_dependent_address_map = defaultdict(set)
self._target_types = target_types
self._address_mapper = address_mapper
def _validate(self, all_valid_addresses):
"""Validate that all of the dependencies in the graph exist in the given addresses set."""
for dependency, dependents in iteritems(self._dependent_address_map):
if dependency not in all_valid_addresses:
raise AddressLookupError(
'Dependent graph construction failed: {} did not exist. Was depended on by:\n {}'.format(
dependency.spec,
'\n '.join(d.spec for d in dependents)
)
)
def _inject_target(self, target_adaptor):
"""Inject a target, respecting all sources of dependencies."""
target_cls = self._target_types[target_adaptor.type_alias]
declared_deps = target_adaptor.dependencies
implicit_deps = (Address.parse(s,
relative_to=target_adaptor.address.spec_path,
subproject_roots=self._address_mapper.subproject_roots)
for s in target_cls.compute_dependency_specs(kwargs=target_adaptor.kwargs()))
for dep in declared_deps:
self._dependent_address_map[dep].add(target_adaptor.address)
for dep in implicit_deps:
self._implicit_dependent_address_map[dep].add(target_adaptor.address)
def dependents_of_addresses(self, addresses):
"""Given an iterable of addresses, yield all of those addresses dependents."""
seen = OrderedSet(addresses)
for address in addresses:
seen.update(self._dependent_address_map[address])
seen.update(self._implicit_dependent_address_map[address])
return seen
def transitive_dependents_of_addresses(self, addresses):
"""Given an iterable of addresses, yield all of those addresses dependents, transitively."""
closure = set()
result = []
to_visit = deque(addresses)
while to_visit:
address = to_visit.popleft()
if address in closure:
continue
closure.add(address)
result.append(address)
to_visit.extend(self._dependent_address_map[address])
to_visit.extend(self._implicit_dependent_address_map[address])
return result
class HydratedTarget(datatype(['address', 'adaptor', 'dependencies'])):
    """A wrapper for a fully hydrated TargetAdaptor object.

    Transitive graph walks collect ordered sets of TransitiveHydratedTargets which involve a huge amount
    of hashing: we implement eq/hash via direct usage of an Address field to speed that up.
    """

    @property
    def addresses(self):
        # Alias: the 'dependencies' field holds the addresses of this target's deps.
        return self.dependencies

    def __hash__(self):
        # Hash only the address field (cheaper than hashing the full adaptor).
        return hash(self.address)
class TransitiveHydratedTarget(datatype([('root', HydratedTarget), 'dependencies'])):
    """A recursive structure wrapping a HydratedTarget root and TransitiveHydratedTarget deps.

    `dependencies` holds one TransitiveHydratedTarget per direct dependency of `root`.
    """
class TransitiveHydratedTargets(datatype(['roots', 'closure'])):
    """A set of HydratedTarget roots, and their transitive, flattened, de-duped closure.

    `roots` are the directly requested targets; `closure` includes the roots and
    every transitive dependency exactly once.
    """
class HydratedTargets(Collection.of(HydratedTarget)):
    """An intransitive set of HydratedTarget objects.

    Contrast with TransitiveHydratedTargets, which also carries the flattened
    transitive closure.
    """
class OwnersRequest(datatype([
    ('sources', tuple),
    # One of 'none', 'direct' or 'transitive' (see find_owners below).
    ('include_dependees', str),
])):
    """A request for the owners (and optionally, transitive dependees) of a set of file paths.

    TODO: `include_dependees` should become an `enum` of the choices from the
    `--changed-include-dependees` global option.
    """
@rule(BuildFileAddresses, [BuildConfiguration, AddressMapper, OwnersRequest])
def find_owners(build_configuration, address_mapper, owners_request):
    """Engine rule: compute the BuildFileAddresses that own the requested sources.

    Generator-style rule: `yield Get(...)` requests a product from the engine
    and resumes with its value; yielding a BuildFileAddresses produces the
    rule's result (its declared product type).
    """
    sources_set = OrderedSet(owners_request.sources)
    dirs_set = OrderedSet(dirname(source) for source in sources_set)

    # Walk up the buildroot looking for targets that would conceivably claim changed sources.
    candidate_specs = tuple(AscendantAddresses(directory=d) for d in dirs_set)
    candidate_targets = yield Get(HydratedTargets, Specs(candidate_specs))

    # Match the source globs against the expanded candidate targets.
    def owns_any_source(legacy_target):
        """Given a `HydratedTarget` instance, check if it owns the given source file."""
        target_kwargs = legacy_target.adaptor.kwargs()

        # Handle `sources`-declaring targets.
        # NB: Deleted files can only be matched against the 'filespec' (ie, `PathGlobs`) for a target,
        # so we don't actually call `fileset.matches` here.
        # TODO: This matching logic should be implemented using the rust `fs` crate for two reasons:
        #  1) having two implementations isn't great
        #  2) we're expanding sources via HydratedTarget, but it isn't necessary to do that to match
        target_sources = target_kwargs.get('sources', None)
        if target_sources and any_matches_filespec(sources_set, target_sources.filespec):
            return True
        return False

    # A target owns a source if its BUILD file declares it or its sources glob matches it.
    direct_owners = tuple(ht.adaptor.address
                          for ht in candidate_targets
                          if LegacyAddressMapper.any_is_declaring_file(ht.adaptor.address, sources_set) or
                          owns_any_source(ht))

    # If the OwnersRequest does not require dependees, then we're done.
    if owners_request.include_dependees == 'none':
        yield BuildFileAddresses(direct_owners)
    else:
        # Otherwise: find dependees. Hydrate every target in the repo to build
        # a reverse-dependency graph to walk.
        all_addresses = yield Get(BuildFileAddresses, Specs((DescendantAddresses(''),)))
        all_structs = yield [Get(HydratedStruct, Address, a.to_address()) for a in all_addresses]
        all_structs = [s.value for s in all_structs]

        bfa = build_configuration.registered_aliases()
        graph = _DependentGraph.from_iterable(target_types_from_build_file_aliases(bfa),
                                              address_mapper,
                                              all_structs)
        if owners_request.include_dependees == 'direct':
            yield BuildFileAddresses(tuple(graph.dependents_of_addresses(direct_owners)))
        else:
            assert owners_request.include_dependees == 'transitive'
            yield BuildFileAddresses(tuple(graph.transitive_dependents_of_addresses(direct_owners)))
@rule(TransitiveHydratedTargets, [BuildFileAddresses])
def transitive_hydrated_targets(build_file_addresses):
  """Given BuildFileAddresses, kicks off recursion on expansion of TransitiveHydratedTargets.

  The TransitiveHydratedTarget struct represents a structure-shared graph, which we walk
  and flatten here. The engine memoizes the computation of TransitiveHydratedTarget, so
  when multiple TransitiveHydratedTargets objects are being constructed for multiple
  roots, their structure will be shared.
  """
  # One recursive TransitiveHydratedTarget request per root address.
  transitive_hydrated_targets = yield [Get(TransitiveHydratedTarget, Address, a)
                                       for a in build_file_addresses.addresses]
  # Breadth-first walk of the structure-shared graph, de-duplicating on the root
  # HydratedTarget so shared subgraphs are flattened only once.
  closure = OrderedSet()
  to_visit = deque(transitive_hydrated_targets)
  while to_visit:
    tht = to_visit.popleft()
    if tht.root in closure:
      continue
    closure.add(tht.root)
    to_visit.extend(tht.dependencies)
  yield TransitiveHydratedTargets(tuple(tht.root for tht in transitive_hydrated_targets), closure)
@rule(TransitiveHydratedTarget, [HydratedTarget])
def transitive_hydrated_target(root):
  """Recursively request a TransitiveHydratedTarget for each direct dependency of `root`."""
  dependencies = yield [Get(TransitiveHydratedTarget, Address, d) for d in root.dependencies]
  yield TransitiveHydratedTarget(root, dependencies)
@rule(HydratedTargets, [BuildFileAddresses])
def hydrated_targets(build_file_addresses):
  """Requests HydratedTarget instances for BuildFileAddresses."""
  # One HydratedTarget per input address, resolved by the engine.
  targets = yield [Get(HydratedTarget, Address, a) for a in build_file_addresses.addresses]
  yield HydratedTargets(targets)
class HydratedField(datatype(['name', 'value'])):
  """A wrapper for a fully constructed replacement kwarg for a HydratedTarget.

  `name` is the kwarg name on the TargetAdaptor; `value` is its hydrated replacement.
  """
@rule(HydratedTarget, [HydratedStruct])
def hydrate_target(hydrated_struct):
  """Construct a HydratedTarget from a TargetAdaptor and hydrated versions of its adapted fields."""
  target_adaptor = hydrated_struct.value
  # Hydrate the fields of the adaptor and re-construct it.
  hydrated_fields = yield [Get(HydratedField, HydrateableField, fa)
                           for fa in target_adaptor.field_adaptors]
  kwargs = target_adaptor.kwargs()
  # Replace each adapted field's kwarg with its hydrated value.
  for field in hydrated_fields:
    kwargs[field.name] = field.value
  yield HydratedTarget(target_adaptor.address,
                       type(target_adaptor)(**kwargs),
                       tuple(target_adaptor.dependencies))
def _eager_fileset_with_spec(spec_path, filespec, snapshot, include_dirs=False):
  """Adjust include (and any exclude) globs in `filespec` to be relative to `spec_path`,
  and wrap the result together with the given Snapshot in an EagerFilesetWithSpec."""
  adjusted = FilesetRelPathWrapper.to_filespec(filespec['globs'], spec_path)
  if 'exclude' in filespec:
    adjusted['exclude'] = [
      FilesetRelPathWrapper.to_filespec(excluded['globs'], spec_path)
      for excluded in filespec['exclude']
    ]
  return EagerFilesetWithSpec(spec_path,
                              adjusted,
                              snapshot,
                              include_dirs=include_dirs)
@rule(HydratedField, [SourcesField, GlobMatchErrorBehavior])
def hydrate_sources(sources_field, glob_match_error_behavior):
  """Given a SourcesField, request a Snapshot for its path_globs and create an EagerFilesetWithSpec.
  """
  # TODO(#5864): merge the target's selection of --glob-expansion-failure (which doesn't exist yet)
  # with the global default!
  path_globs = sources_field.path_globs.copy(glob_match_error_behavior=glob_match_error_behavior)
  snapshot = yield Get(Snapshot, PathGlobs, path_globs)
  fileset_with_spec = _eager_fileset_with_spec(
    sources_field.address.spec_path,
    sources_field.filespecs,
    snapshot)
  # Let the field run its own validation against the expanded fileset.
  sources_field.validate_fn(fileset_with_spec)
  yield HydratedField(sources_field.arg, fileset_with_spec)
@rule(HydratedField, [BundlesField, GlobMatchErrorBehavior])
def hydrate_bundles(bundles_field, glob_match_error_behavior):
  """Given a BundlesField, request Snapshots for each of its filesets and create BundleAdaptors."""
  path_globs_with_match_errors = [
    pg.copy(glob_match_error_behavior=glob_match_error_behavior)
    for pg in bundles_field.path_globs_list
  ]
  snapshot_list = yield [Get(Snapshot, PathGlobs, pg) for pg in path_globs_with_match_errors]

  spec_path = bundles_field.address.spec_path

  bundles = []
  zipped = zip(bundles_field.bundles,
               bundles_field.filespecs_list,
               snapshot_list)
  for bundle, filespecs, snapshot in zipped:
    # A bundle may declare its own rel_path; otherwise fall back to the target's spec_path.
    rel_spec_path = getattr(bundle, 'rel_path', spec_path)
    kwargs = bundle.kwargs()
    # NB: We `include_dirs=True` because bundle filesets frequently specify directories in order
    # to trigger a (deprecated) default inclusion of their recursive contents. See the related
    # deprecation in `pants.backend.jvm.tasks.bundle_create`.
    kwargs['fileset'] = _eager_fileset_with_spec(rel_spec_path,
                                                 filespecs,
                                                 snapshot,
                                                 include_dirs=True)
    bundles.append(BundleAdaptor(**kwargs))
  yield HydratedField('bundles', bundles)
def create_legacy_graph_tasks():
  """Create tasks to recursively parse the legacy graph."""
  return [
    transitive_hydrated_targets,
    transitive_hydrated_target,
    hydrated_targets,
    hydrate_target,
    find_owners,
    hydrate_sources,
    hydrate_bundles,
    # OwnersRequest enters the rule graph as a root: it is provided by callers.
    RootRule(OwnersRequest),
  ]
|
"""
Support for Anthem Network Receivers and Processors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.anthemav/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, EVENT_HOMEASSISTANT_STOP, STATE_OFF,
STATE_ON, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['anthemav==1.1.8']

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'anthemav'

# Default port used for the network connection to the AVR.
DEFAULT_PORT = 14999

# Feature bitmask advertised to Home Assistant for this media player.
SUPPORT_ANTHEMAV = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Set up our socket to the AVR."""
    import anthemav

    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    device = None

    _LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port)

    def async_anthemav_update_callback(message):
        """Receive notification from transport that new data exists."""
        # NB: closes over `device`, which is only assigned after the
        # connection has been created below.
        _LOGGER.info("Received update callback from AVR: %s", message)
        hass.async_add_job(device.async_update_ha_state())

    avr = yield from anthemav.Connection.create(
        host=host, port=port, loop=hass.loop,
        update_callback=async_anthemav_update_callback)

    device = AnthemAVR(avr, name)

    _LOGGER.debug("dump_devicedata: %s", device.dump_avrdata)
    _LOGGER.debug("dump_conndata: %s", avr.dump_conndata)
    _LOGGER.debug("dump_rawdata: %s", avr.protocol.dump_rawdata)

    # Close the AVR connection when Home Assistant shuts down.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close)
    async_add_entities([device])
class AnthemAVR(MediaPlayerDevice):
    """Entity reading values from Anthem AVR protocol.

    All state is read from (and written to) attributes of
    ``self.avr.protocol``; the transport pushes updates, so no polling.
    """

    def __init__(self, avr, name):
        """Initialize entity with transport."""
        super().__init__()
        self.avr = avr
        self._name = name

    def _lookup(self, propname, dval=None):
        """Read `propname` from the protocol object, with `dval` as fallback."""
        return getattr(self.avr.protocol, propname, dval)

    def _update_avr(self, propname, value):
        """Update a property in the AVR."""
        _LOGGER.info(
            "Sending command to AVR: set %s to %s", propname, str(value))
        setattr(self.avr.protocol, propname, value)

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_ANTHEMAV

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return name of device."""
        if self._name:
            return self._name
        return self._lookup('model')

    @property
    def state(self):
        """Return state of power on/off."""
        pwrstate = self._lookup('power')
        if pwrstate is True:
            return STATE_ON
        elif pwrstate is False:
            return STATE_OFF
        else:
            return STATE_UNKNOWN

    @property
    def is_volume_muted(self):
        """Return boolean reflecting mute state on device."""
        return self._lookup('mute', False)

    @property
    def volume_level(self):
        """Return volume level from 0 to 1."""
        return self._lookup('volume_as_percentage', 0.0)

    @property
    def media_title(self):
        """Return current input name (closest we have to media title)."""
        return self._lookup('input_name', 'No Source')

    @property
    def app_name(self):
        """Return details about current video and audio stream."""
        video = self._lookup('video_input_resolution_text', '')
        audio = self._lookup('audio_input_name', '')
        return video + ' ' + audio

    @property
    def source(self):
        """Return currently selected input."""
        return self._lookup('input_name', "Unknown")

    @property
    def source_list(self):
        """Return all active, configured inputs."""
        return self._lookup('input_list', ["Unknown"])

    @asyncio.coroutine
    def async_select_source(self, source):
        """Change AVR to the designated source (by name)."""
        self._update_avr('input_name', source)

    @asyncio.coroutine
    def async_turn_off(self):
        """Turn AVR power off."""
        self._update_avr('power', False)

    @asyncio.coroutine
    def async_turn_on(self):
        """Turn AVR power on."""
        self._update_avr('power', True)

    @asyncio.coroutine
    def async_set_volume_level(self, volume):
        """Set AVR volume (0 to 1)."""
        self._update_avr('volume_as_percentage', volume)

    @asyncio.coroutine
    def async_mute_volume(self, mute):
        """Engage AVR mute."""
        self._update_avr('mute', mute)

    @property
    def dump_avrdata(self):
        """Return state of avr object for debugging forensics."""
        pairs = ', '.join('%s: %s' % item for item in vars(self).items())
        return 'dump_avrdata: ' + pairs
|
"""Tests for task_set.tasks.language_model."""
from task_set.tasks import family_test_utils
from task_set.tasks import language_model
import tensorflow.compat.v1 as tf
class CharLanguageModelTest(family_test_utils.TaskFamilyTestCase):
  """Parameterized tests for the character-level language model task family."""

  def __init__(self, *args, **kwargs):
    # Bind this family's config sampler and getter into the shared test case.
    super(CharLanguageModelTest, self).__init__(
        language_model.sample_char_rnn_language_model_family_cfg,
        language_model.get_char_language_model_family, *args, **kwargs)
class WordLanguageModelTest(family_test_utils.TaskFamilyTestCase):
  """Parameterized tests for the word-level language model task family."""

  def __init__(self, *args, **kwargs):
    # Bind this family's config sampler and getter into the shared test case.
    super(WordLanguageModelTest,
          self).__init__(language_model.sample_word_language_model_family_cfg,
                         language_model.get_word_language_model_family, *args,
                         **kwargs)
# Allow running this test module directly.
if __name__ == "__main__":
  tf.test.main()
|
import compileall
import glob
import os
import re
import subprocess
import shutil
import sys
import zipfile
def run(cmd, exit=True, cwd=None):
print cmd
if subprocess.Popen(cmd.split(), cwd=cwd).wait() != 0:
if exit:
print 'Failed!'
sys.exit(1)
else:
print 'Ignoring failure.'
def find(directory, pattern=None, exclude=None):
  """Walk `directory` and partition paths into (matches, misses).

  A path matches when `pattern` is None or re.search(pattern, path) hits.
  Basenames listed in `exclude` are skipped entirely; excluded directory
  names are also pruned from the walk.
  """
  print 'Looking for paths in %r matching %r' % (directory, pattern)
  matches = []
  misses = []
  if exclude is None:
    exclude = []
  directory = os.path.abspath(directory)
  for root, dirs, files in os.walk(directory):
    for basename in dirs + files:
      if basename in exclude:
        if basename in dirs:
          # Removing from `dirs` in-place prunes the subtree from os.walk.
          dirs.remove(basename)
        continue
      path = os.path.join(root, basename)
      if pattern is None or re.search(pattern, path):
        matches.append(path)
      else:
        misses.append(path)
  print 'Found %d matches and %d misses' % (len(matches), len(misses))
  return matches, misses
def rm(path):
print 'Deleting %r' % path
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
pass
def strip(path):
  """Strip symbols from a binary with the Android toolchain's strip tool."""
  run('arm-eabi-strip %s' % path)
def zipup(out_path, in_path, top, exclude=None):
zip_file = zipfile.ZipFile(out_path, 'w', compression=zipfile.ZIP_DEFLATED)
for path in find(in_path, exclude=exclude)[0]:
if not os.path.isdir(path):
arcname = path[len(top):].lstrip('/')
print 'Adding %s to %s' % (arcname, out_path)
zip_file.write(path, arcname)
zip_file.close()
# Locate the Android source tree via the arm-eabi-gcc on PATH.
gcc_path = subprocess.Popen(['which', 'arm-eabi-gcc'],
                            stdout=subprocess.PIPE).communicate()[0]
match = re.match(r'(.*)/prebuilt', gcc_path)
if match is None:
  print 'Could not find arm-eabi-gcc on your path.'
  sys.exit(1)
android_src = match.group(1)
os.environ['ANDROID_SRC'] = android_src

pwd = os.getcwd()
os.chdir('src')

# Build CPython for Android; 'make install -k' is best-effort.
assert os.path.exists('Parser/hostpgen'), 'hostpgen not found'
run('make')
run('make install -k', False)
assert os.path.exists('android'), 'build result not found'

print 'Installing xmppy.'
xmpppy_path = os.path.join(pwd, 'xmpppy', 'xmpp')
compileall.compile_dir(xmpppy_path)
shutil.copytree(xmpppy_path, 'android/python/lib/python2.6/xmpp')

print 'Installing BeautifulSoup.'
beautifulsoup_path = os.path.join(pwd, 'BeautifulSoup')
compileall.compile_dir(beautifulsoup_path)
shutil.copy(os.path.join(beautifulsoup_path, 'BeautifulSoup.pyc'),
            'android/python/lib/python2.6/BeautifulSoup.pyc')

print 'Installing gdata.'
gdata_path = os.path.join(pwd, 'gdata')
run('python setup.py build', cwd=gdata_path)
gdata_build_path = os.path.join(gdata_path, 'build')
gdata_result_path = os.path.join(gdata_build_path,
                                 os.listdir(gdata_build_path)[0])
compileall.compile_dir(gdata_result_path)
shutil.copytree(os.path.join(gdata_result_path, 'gdata'),
                'android/python/lib/python2.6/gdata')
shutil.copytree(os.path.join(gdata_result_path, 'atom'),
                'android/python/lib/python2.6/atom')

print 'Removing unecessary files and directories from installation.'
# Keep only compiled modules: drop .py sources, tests, headers and static libs.
map(rm, find('android/python/bin', 'python$')[1])
map(rm, find('android', '\.py$')[0])
map(rm, find('android', 'test')[0])
rm('android/python/share')
rm('android/python/include')
rm('android/python/lib/libpython2.6.a')

# Strip native binaries to shrink the payload.
map(strip, find('android', '\.so$')[0])
strip('android/python/bin/python')

# Standard-library packages that are not needed on the device.
libs_to_remove = [
    'compiler',
    'config',
    'curses',
    'distutils',
    'hotshot',
    'idlelib',
    'lib2to3',
    'lib-old',
    'lib-tk',
    'multiprocessing',
    'site-packages',
]
for lib in libs_to_remove:
  rm('android/python/lib/python2.6/'+lib)

print 'Zipping up standard library.'
# Everything except lib-dynload is zipped and then deleted from disk.
libs = os.path.join(pwd, 'src/android/python/lib/python2.6')
zipup('android/python/lib/python26.zip', libs, libs, exclude=['lib-dynload'])
map(rm, find(libs, exclude=['lib-dynload'])[0])
shutil.copy(os.path.join(pwd, 'ase', 'android.py'),
            'android/python/lib/python2.6')

print 'Zipping up python interpreter for deployment.'
zipup(os.path.join(pwd, 'python.zip'),
      os.path.join(pwd, 'src', 'android', 'python'),
      os.path.join(pwd, 'src', 'android'))
print 'Done.'
|
import os
import zipfile
from tensorlayer import logging
from tensorlayer.files.utils import maybe_download_and_extract
__all__ = ['load_matt_mahoney_text8_dataset']
def load_matt_mahoney_text8_dataset(path='data'):
    """Load Matt Mahoney's dataset.

    Download a text file from Matt Mahoney's website
    if not present, and make sure it's the right size.
    Extract the first file enclosed in a zip file as a list of words.
    This dataset can be used for Word Embedding.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to, defaults is ``data/mm_test8/``.

    Returns
    --------
    list of str
        The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]

    Examples
    --------
    >>> words = tl.files.load_matt_mahoney_text8_dataset()
    >>> print('Data size', len(words))

    """
    path = os.path.join(path, 'mm_test8')
    logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path))

    filename = 'text8.zip'
    url = 'http://mattmahoney.net/dc/'
    maybe_download_and_extract(filename, path, url, expected_bytes=31344016)

    # The archive contains a single file of space-separated tokens; decode
    # each raw bytes token to str before returning.
    with zipfile.ZipFile(os.path.join(path, filename)) as archive:
        raw_tokens = archive.read(archive.namelist()[0]).split()
    return [token.decode() for token in raw_tokens]
|
# Names exported by `from <this package> import *`.
__all__ = [
    'ezRPConfig',
    'ezRPConfigNginx',
    'ezRPGreenlet',
    'ezRPKazoo',
    'ezRPKazookeeper',
    'ezRPNginx',
    'ezRPParser',
    'ezRPRegistration',
    'ezRPService',
]
|
import re
import sqlakeyset # type: ignore
from flask import request
from flask_sqlalchemy import Pagination # type: ignore
from sqlakeyset import get_page
from sqlakeyset.columns import OC # type: ignore
from sqlakeyset.results import Paging # type: ignore
from sqlalchemy import func, desc, or_, asc, exc
from sqlalchemy.orm import class_mapper, Mapper
from app.vulnerability.views.details import VulnerabilityDetails
from app.vulnerability.views.vulnerability import VulnerabilityView
from data.database import DEFAULT_DATABASE as db
from data.models import (
Description,
Nvd,
Vulnerability,
RepositoryFileComments,
User,
RepositoryFileMarkers,
)
from data.models.nvd import default_nvd_view_options
from data.models.vulnerability import VulnerabilityState
from lib.utils import parse_pagination_param
def custom_value_from_thing(thing, desc, ocol):  # pylint: disable=redefined-outer-name
    """
    Replacement function for the sqlakeyset value_from_thing method.

    This function supports auto aliasing as is required when sorting by
    columns that appear in subqueries.

    :param thing: a result row object (entity instance or plain column value).
    :param desc: SQLAlchemy column description dict ('entity', 'expr', 'type').
    :param ocol: the sqlakeyset order column (OC) being paged on.
    :return: the order-column value for this row.
    :raises ValueError: when `ocol` cannot be matched against `desc`.
    """
    entity = desc["entity"]
    expr = desc["expr"]

    try:
        is_a_table = entity == expr
    except exc.ArgumentError:
        # Comparing some SQLAlchemy constructs raises instead of returning False.
        is_a_table = False
    if isinstance(expr, Mapper) and expr.class_ == entity:
        # Is a table mapper. Just treat as a table.
        is_a_table = True

    if is_a_table is True:  # is a table
        mapper = class_mapper(desc["type"])
        base_elem = ocol.element.base_columns
        if not base_elem:
            base_elem = ocol.element
        elif len(base_elem) == 1:
            base_elem = next(iter(base_elem))
        else:
            raise ValueError("Multiple columns")

        # If the OCOL is a hybrid attribute it won't belong to one table.
        if not hasattr(base_elem, "table"):
            raise ValueError
        order_column_table = base_elem.table.name
        if entity.__table__.name == order_column_table:
            prop = mapper.get_property_by_column(base_elem)
            return getattr(thing, prop.key)
        raise ValueError

    # is an attribute
    if hasattr(expr, "info"):
        mapper = expr.parent
        tname = mapper.local_table.description
        if ocol.table_name == tname and ocol.name == expr.name:
            return thing
        raise ValueError

    # Strip parentheses so wrapped expressions still compare equal by name.
    ocol_quoted_full_name = re.sub("[()]", "", ocol.quoted_full_name)
    # is an attribute with label
    if ocol_quoted_full_name == OC(expr).full_name:
        return thing
    raise ValueError
class VulncodeDB:
    """Builds the paginated VCDB and NVD-only entry listings for the main view,
    applying an optional keyword filter taken from the request arguments."""

    def __init__(self):
        self.keyword = None
        self.top_contributors = []
        # TODO: Look into enabling this once public contributions are enabled.
        # self.fetch_top_contributors()

        # Published VCDB entries joined with their NVD data (if any):
        # annotated entries first, then oldest first, newest id as tiebreaker.
        has_annotations_col = Vulnerability.has_annotations
        vcdb_entries = db.session.query(Vulnerability, Nvd, has_annotations_col)
        vcdb_entries = vcdb_entries.filter(
            Vulnerability.state == VulnerabilityState.PUBLISHED
        )
        vcdb_entries = vcdb_entries.outerjoin(Nvd, Vulnerability.cve_id == Nvd.cve_id)
        vcdb_entries = vcdb_entries.options(default_nvd_view_options)
        vcdb_entries = vcdb_entries.from_self()
        vcdb_entries = vcdb_entries.order_by(
            desc(has_annotations_col),
            asc(Vulnerability.date_created),
            desc(Vulnerability.id),
        )
        self.vcdb_entries = vcdb_entries

        # NVD entries that have no corresponding VCDB vulnerability.
        nvd_entries = db.session.query(Nvd)
        nvd_entries = nvd_entries.outerjoin(
            Vulnerability, Nvd.cve_id == Vulnerability.cve_id
        )
        nvd_entries = nvd_entries.options(default_nvd_view_options)
        nvd_entries = nvd_entries.filter(Vulnerability.cve_id.is_(None))
        nvd_entries = nvd_entries.order_by(desc(Nvd.published_date), desc(Nvd.id))
        self.nvd_entries = nvd_entries

        self.keyword = request.args.get("keyword", None, type=str)
        apply_filter = None
        if self.keyword:
            # TODO: Make the filtering work with fulltext search as well.
            if VulnerabilityDetails.is_cve_id(self.keyword):
                apply_filter = or_(False, Nvd.cve_id == self.keyword)
            elif VulnerabilityDetails.is_vcdb_id(self.keyword):
                apply_filter = or_(False, Vulnerability.id == self.keyword)
            else:
                # Strip LIKE wildcards from user input before substring match.
                escaped_keyword = self.keyword.replace("%", "")
                # escaped_keyword = re.sub('[\W]+', ' ', self.keyword)
                # Attention: We can't use FullText search here because of some
                # buggy Mysql 5.7 behavior (using FullText on Join results seems
                # is doing bad things. We might need to apply the filter before
                # joining below.
                # apply_filter = or_(
                #     FullTextSearch(escaped_keyword, Nvd,
                #                    FullTextMode.BOOLEAN),
                #     FullTextSearch(escaped_keyword, Vulnerability,
                #                    FullTextMode.BOOLEAN))
                apply_filter = or_(
                    Nvd.descriptions.any(
                        Description.value.like("%" + escaped_keyword + "%")
                    ),
                    Vulnerability.comment.like("%" + escaped_keyword + "%"),
                )
        # TODO: add product search support.
        # apply_filter = or_(apply_filter, Cpe.product == keyword)
        if apply_filter is not None:
            self.vcdb_entries = self.vcdb_entries.filter(apply_filter)
            self.nvd_entries = self.nvd_entries.filter(apply_filter)

        per_page = 7
        vcdb_bookmarked_page = parse_pagination_param("vcdb_p")
        # Replace a sqlakeyset function to support our use case.
        # TODO: File a PR for this?
        sqlakeyset.paging.value_from_thing = custom_value_from_thing
        self.vcdb_pagination = get_page(
            self.vcdb_entries, per_page, page=vcdb_bookmarked_page
        )
        self.vcdb_pagination = VulnViewTypesetPaginationObjectWrapper(
            self.vcdb_pagination.paging
        )
        num_vuln_entries = db.session.query(func.count(Vulnerability.id)).scalar()
        self.vcdb_pagination.set_total(num_vuln_entries)

        nvd_bookmarked_page = parse_pagination_param("nvd_p")
        self.nvd_pagination = get_page(
            self.nvd_entries, per_page, page=nvd_bookmarked_page
        )
        self.nvd_pagination = VulnViewTypesetPaginationObjectWrapper(
            self.nvd_pagination.paging
        )
        num_nvd_entries = db.session.query(func.count(Nvd.id)).scalar()
        # Estimate: total NVD rows minus VCDB rows approximates NVD-only rows.
        num_unique_nvd_estimate = num_nvd_entries - num_vuln_entries
        self.nvd_pagination.set_total(num_unique_nvd_estimate)

    def fetch_top_contributors(self):
        """Populate self.top_contributors with the ten most active annotators."""
        # TODO: count number of contributions to vulnerabilities instead of
        # single annotations
        num_comments = self.get_annotation_query(RepositoryFileComments)
        num_markers = self.get_annotation_query(RepositoryFileMarkers)
        num_both = num_comments.c.count + num_markers.c.count
        self.top_contributors = (
            db.session.query(
                User,
                func.coalesce(num_comments.c.count, 0).label("num_comments"),
                func.coalesce(num_markers.c.count, 0).label("num_markers"),
            )
            .outerjoin(num_comments, num_comments.c.creator_id == User.id)
            .outerjoin(num_markers, num_markers.c.creator_id == User.id)
            .filter(num_both > 0)
            .order_by(num_both.desc())
            .limit(10)
            .all()
        )

    @staticmethod
    def get_annotation_query(model):
        """Subquery counting active annotations per creator for `model`."""
        return (
            db.session.query(
                model.creator_id.label("creator_id"), func.count(1).label("count")
            )
            .filter_by(active=True)
            .group_by(model.creator_id)
            .subquery()
        )
def wrap_entries(target_entries):
    """
    Wraps all Vulnerability/Nvd entries into the VulnerabilityView class.

    Mutates `target_entries` in place; no-op for an empty/None sequence.
    :return:
    """
    if not target_entries:
        return

    head = target_entries[0]
    if isinstance(head, Nvd):
        wrapped = [VulnerabilityView(None, entry, preview=True)
                   for entry in target_entries]
    elif isinstance(head, Vulnerability):
        wrapped = [VulnerabilityView(entry, None, preview=True)
                   for entry in target_entries]
    else:
        # Row tuples: (vulnerability, nvd[, has_annotations_flag]).
        wrapped = []
        for columns in target_entries:
            vulnerability = columns[0]
            nvd = columns[1]
            if len(columns) > 2 and isinstance(columns[2], bool):
                vulnerability.set_has_annotations(columns[2])
            wrapped.append(VulnerabilityView(vulnerability, nvd, preview=True))

    # Replace the contents of the caller's list in place.
    target_entries[:] = wrapped
class VulnViewTypesetPaginationObjectWrapper(Paging):
    """
    An sqlakeyset Paging object wrapper class which wraps Vuln/Nvd rows
    inside a VulnView.
    """

    def __init__(self, pagination_object):  # pylint: disable=super-init-not-called
        """
        :param pagination_object: a sqlakeyset Paging object (e.g. `get_page(...).paging`).
        """
        # hot patching the class instance, therefore no super() call
        # NB: builds a dynamic subclass of (this wrapper, the wrapped object's
        # class) and adopts the wrapped object's __dict__ wholesale, so all of
        # the wrapped Paging attributes remain reachable on this instance.
        self.__class__ = type(
            pagination_object.__class__.__name__,
            (self.__class__, pagination_object.__class__),
            {},
        )
        self.__dict__ = pagination_object.__dict__
        # Total is unknown at wrap time; callers provide it via set_total().
        self.total = 0
        wrap_entries(self.rows)

    def set_total(self, new_total):
        """Record the (separately computed) total number of entries."""
        self.total = new_total
class VulnViewSqlalchemyPaginationObjectWrapper(Pagination):
    """
    A Flask SQLAlchemy Pagination object wrapper class which wraps Vuln/Nvd rows
    inside a VulnView.
    """

    def __init__(self, pagination_object):  # pylint: disable=super-init-not-called
        """
        :param pagination_object: A Flask SQLalchemy Pagination object.
        """
        # hot patching the class instance, therefore no super() call
        # NB: same dynamic-subclass trick as the Typeset wrapper above; the
        # wrapped Pagination's attributes stay reachable on this instance.
        self.__class__ = type(
            pagination_object.__class__.__name__,
            (self.__class__, pagination_object.__class__),
            {},
        )
        self.__dict__ = pagination_object.__dict__
        wrap_entries(self.items)
|
"""Tests for the CPIO resolver helper implementation."""
import unittest
from dfvfs.resolver import cpio_resolver_helper
from tests.resolver import test_lib
class CPIOResolverHelperTest(test_lib.ResolverHelperTestCase):
  """Tests for the CPIO resolver helper implementation."""

  def testNewFileObject(self):
    """Tests the NewFileObject function."""
    helper = cpio_resolver_helper.CPIOResolverHelper()
    self._TestNewFileObject(helper)

  def testNewFileSystem(self):
    """Tests the NewFileSystem function."""
    helper = cpio_resolver_helper.CPIOResolverHelper()
    self._TestNewFileSystem(helper)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
"""
Mail (SMTP) notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.smtp/
"""
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import email.utils
from email.mime.application import MIMEApplication
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_PORT, CONF_TIMEOUT,
CONF_SENDER, CONF_RECIPIENT)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

ATTR_IMAGES = 'images'  # optional embedded image file attachments

# Platform-specific configuration keys (beyond the shared homeassistant consts).
CONF_STARTTLS = 'starttls'
CONF_DEBUG = 'debug'
CONF_SERVER = 'server'

DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 25
DEFAULT_TIMEOUT = 5
DEFAULT_DEBUG = False
DEFAULT_STARTTLS = False

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_RECIPIENT): vol.All(cv.ensure_list, [vol.Email()]),
    vol.Optional(CONF_SERVER, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    vol.Optional(CONF_SENDER): vol.Email(),
    vol.Optional(CONF_STARTTLS, default=DEFAULT_STARTTLS): cv.boolean,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
})
def get_service(hass, config, discovery_info=None):
    """Get the mail notification service."""
    mail_service = MailNotificationService(
        config.get(CONF_SERVER),
        config.get(CONF_PORT),
        config.get(CONF_TIMEOUT),
        config.get(CONF_SENDER),
        config.get(CONF_STARTTLS),
        config.get(CONF_USERNAME),
        config.get(CONF_PASSWORD),
        config.get(CONF_RECIPIENT),
        config.get(CONF_DEBUG))

    # Only hand the service back if the SMTP server is actually reachable.
    if not mail_service.connection_is_valid():
        return None
    return mail_service
class MailNotificationService(BaseNotificationService):
    """Implement the notification service for E-Mail messages."""

    def __init__(self, server, port, timeout, sender, starttls, username,
                 password, recipients, debug):
        """Initialize the service."""
        self._server = server
        self._port = port
        self._timeout = timeout
        self._sender = sender
        self.starttls = starttls
        self.username = username
        self.password = password
        self.recipients = recipients
        self.debug = debug
        # Number of send attempts made per message before giving up.
        self.tries = 2

    def connect(self):
        """Connect/authenticate to SMTP Server."""
        mail = smtplib.SMTP(self._server, self._port, timeout=self._timeout)
        mail.set_debuglevel(self.debug)
        mail.ehlo_or_helo_if_needed()
        if self.starttls:
            mail.starttls()
            # Re-identify after upgrading the connection to TLS.
            mail.ehlo()
        if self.username and self.password:
            mail.login(self.username, self.password)
        return mail

    def connection_is_valid(self):
        """Check for valid config, verify connectivity."""
        server = None
        try:
            server = self.connect()
        except smtplib.socket.gaierror:
            _LOGGER.exception(
                "SMTP server not found (%s:%s). "
                "Please check the IP address or hostname of your SMTP server",
                self._server, self._port)
            return False
        except (smtplib.SMTPAuthenticationError, ConnectionRefusedError):
            _LOGGER.exception(
                "Login not possible. "
                "Please check your setting and/or your credentials")
            return False
        finally:
            # Always close the probe connection, even on success.
            if server:
                server.quit()
        return True

    def send_message(self, message="", **kwargs):
        """
        Build and send a message to a user.

        Will send plain text normally, or will build a multipart HTML message
        with inline image attachments if images config is defined.
        """
        subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        data = kwargs.get(ATTR_DATA)

        if data:
            msg = _build_multipart_msg(message, images=data.get(ATTR_IMAGES))
        else:
            msg = _build_text_msg(message)

        msg['Subject'] = subject
        msg['To'] = ','.join(self.recipients)
        msg['From'] = self._sender
        msg['X-Mailer'] = 'HomeAssistant'
        msg['Date'] = email.utils.format_datetime(dt_util.now())
        msg['Message-Id'] = email.utils.make_msgid()

        return self._send_email(msg)

    def _send_email(self, msg):
        """Send the message."""
        mail = self.connect()
        for _ in range(self.tries):
            try:
                mail.sendmail(self._sender, self.recipients,
                              msg.as_string())
                break
            except smtplib.SMTPException:
                # The connection may have gone stale; reconnect and retry.
                _LOGGER.warning(
                    "SMTPException sending mail: retrying connection")
                mail.quit()
                mail = self.connect()
        mail.quit()
def _build_text_msg(message):
    """Build a plain-text MIME message from `message`."""
    _LOGGER.debug("Building plain text email")
    return MIMEText(message)
def _build_multipart_msg(message, images):
    """Build Multipart message with in-line images.

    Produces a 'related' multipart carrying an 'alternative' part with both a
    plain-text body and an HTML body that references each attached image by
    its Content-ID. Images with an unrecognized type are attached as plain
    files; unreadable image paths are skipped with a warning (although the
    HTML body still references their Content-ID).
    """
    _LOGGER.debug("Building multipart email with embedded attachment(s)")
    msg = MIMEMultipart('related')
    msg_alt = MIMEMultipart('alternative')
    msg.attach(msg_alt)
    body_txt = MIMEText(message)
    msg_alt.attach(body_txt)
    body_text = ['<p>{}</p><br>'.format(message)]

    for atch_num, atch_name in enumerate(images):
        cid = 'image{}'.format(atch_num)
        body_text.append('<img src="cid:{}"><br>'.format(cid))
        try:
            with open(atch_name, 'rb') as attachment_file:
                file_bytes = attachment_file.read()
            try:
                attachment = MIMEImage(file_bytes)
                msg.attach(attachment)
                attachment.add_header('Content-ID', '<{}>'.format(cid))
            except TypeError:
                # Fixed log message typo: "unkown" -> "unknown".
                _LOGGER.warning("Attachment %s has an unknown MIME type. "
                                "Falling back to file", atch_name)
                attachment = MIMEApplication(file_bytes, Name=atch_name)
                attachment['Content-Disposition'] = ('attachment; '
                                                     'filename="%s"' %
                                                     atch_name)
                msg.attach(attachment)
        except FileNotFoundError:
            _LOGGER.warning("Attachment %s not found. Skipping", atch_name)

    body_html = MIMEText(''.join(body_text), 'html')
    msg_alt.attach(body_html)
    return msg
|
import httplib as http
import json
import responses
import time
import urlparse
from nose.tools import * # noqa
from framework.auth import authenticate
from framework.exceptions import PermissionsError, HTTPError
from framework.sessions import get_session
from website.oauth.models import (
ExternalAccount,
ExternalProvider,
OAUTH1,
OAUTH2,
)
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase
from tests.factories import (
AuthUserFactory,
ExternalAccountFactory,
MockOAuth2Provider,
UserFactory,
)
class MockOAuth1Provider(ExternalProvider):
    """Stub OAuth 1.0a provider with fixed credentials and endpoints for tests."""
    _oauth_version = OAUTH1
    name = "Mock OAuth 1.0a Provider"
    short_name = "mock1a"

    client_id = "mock1a_client_id"
    client_secret = "mock1a_client_secret"

    auth_url_base = "http://mock1a.com/auth"
    request_token_url = "http://mock1a.com/request"
    callback_url = "http://mock1a.com/callback"

    def handle_callback(self, response):
        """Return the minimal account data the tests need from a callback."""
        return {
            'provider_id': 'mock_provider_id'
        }
def _prepare_mock_oauth2_handshake_response(expires_in=3600):
    """Register a canned, successful OAuth2 token exchange with `responses`."""
    token_payload = {
        'access_token': 'mock_access_token',
        'expires_at': time.time() + expires_in,
        'expires_in': expires_in,
        'refresh_token': 'mock_refresh_token',
        'scope': ['all'],
        'token_type': 'bearer',
    }
    responses.add(
        responses.POST,
        'https://mock2.com/callback',
        body=json.dumps(token_payload),
        status=200,
        content_type='application/json',
    )
def _prepare_mock_500_error():
    """Register a canned server-error response for the OAuth2 callback.

    NB: despite the name, the registered status code is 503.
    """
    responses.add(
        responses.POST,
        'https://mock2.com/callback',
        content_type='application/json',
        status=503,
        body='{"error": "not found"}',
    )
class TestExternalAccount(OsfTestCase):
    """Test the ExternalAccount object and associated views.

    Functionality not specific to the OAuth version used by the
    ExternalProvider should go here.
    """

    def setUp(self):
        super(TestExternalAccount, self).setUp()
        self.user = AuthUserFactory()
        self.provider = MockOAuth2Provider()

    def tearDown(self):
        # Remove all accounts and the test user so cases stay independent.
        ExternalAccount._clear_caches()
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalAccount, self).tearDown()

    def test_disconnect(self):
        # Disconnect an external account from a user
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
        )
        self.user.external_accounts.append(external_account)
        self.user.save()

        # If the external account isn't attached, this test has no meaning
        assert_equal(ExternalAccount.find().count(), 1)
        assert_in(
            external_account,
            self.user.external_accounts,
        )

        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )

        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )

        self.user.reload()
        # external_account.reload()

        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts,
        )

        # External account is still in the database
        assert_equal(ExternalAccount.find().count(), 1)

    def test_disconnect_with_multiple_connected(self):
        # Disconnect an account connected to multiple users from one user
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
        )
        self.user.external_accounts.append(external_account)
        self.user.save()

        other_user = UserFactory()
        other_user.external_accounts.append(external_account)
        other_user.save()

        response = self.app.delete(
            api_url_for('oauth_disconnect',
                        external_account_id=external_account._id),
            auth=self.user.auth
        )

        # Request succeeded
        assert_equal(
            response.status_code,
            http.OK,
        )

        self.user.reload()

        # External account has been disassociated with the user
        assert_not_in(
            external_account,
            self.user.external_accounts,
        )

        # External account is still in the database
        assert_equal(ExternalAccount.find().count(), 1)

        other_user.reload()

        # External account is still associated with the other user
        assert_in(
            external_account,
            other_user.external_accounts,
        )
class TestExternalProviderOAuth1(OsfTestCase):
    """Test functionality of the ExternalProvider class, for OAuth 1.0a."""
    def setUp(self):
        super(TestExternalProviderOAuth1, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth1Provider()
    def tearDown(self):
        # Drop fixtures so each test starts from a clean database.
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalProviderOAuth1, self).tearDown()
    @responses.activate
    def test_start_flow(self):
        # Request temporary credentials from provider, provide auth redirect
        responses.add(responses.POST, 'http://mock1a.com/request',
                  body='{"oauth_token_secret": "temp_secret", '
                       '"oauth_token": "temp_token", '
                       '"oauth_callback_confirmed": "true"}',
                  status=200,
                  content_type='application/json')
        with self.app.app.test_request_context('/oauth/connect/mock1a/'):
            # make sure the user is logged in
            authenticate(user=self.user, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            # The URL to which the user would be redirected
            assert_equal(url, "http://mock1a.com/auth?oauth_token=temp_token")
            session = get_session()
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_equal(creds['token'], 'temp_token')
            assert_equal(creds['secret'], 'temp_secret')
    @responses.activate
    def test_callback(self):
        # Exchange temporary credentials for permanent credentials
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        responses.add(
            responses.POST,
            'http://mock1a.com/callback',
            body=(
                'oauth_token=perm_token'
                '&oauth_token_secret=perm_secret'
                '&oauth_callback_confirmed=true'
            ),
        )
        user = UserFactory()
        # Fake a request context for the callback
        ctx = self.app.app.test_request_context(
            path='/oauth/callback/mock1a/',
            query_string='oauth_token=temp_key&oauth_verifier=mock_verifier',
        )
        with ctx:
            # make sure the user is logged in
            authenticate(user=user, response=None)
            # Seed the session with the temporary credentials the provider
            # expects to exchange for permanent ones.
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'token': 'temp_key',
                    'secret': 'temp_secret',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        account = ExternalAccount.find_one()
        assert_equal(account.oauth_key, 'perm_token')
        assert_equal(account.oauth_secret, 'perm_secret')
        assert_equal(account.provider_id, 'mock_provider_id')
    @responses.activate
    def test_callback_wrong_user(self):
        # Reject temporary credentials not assigned to the user
        #
        # This prohibits users from associating their external account with
        # another user's OSF account by using XSS or similar attack vector to
        # complete the OAuth flow using the logged-in user but their own account
        # on the external service.
        #
        # If the OSF were to allow login via OAuth with the provider in question,
        # this would allow attackers to hijack OSF accounts with a simple script
        # injection.
        # mock a successful call to the provider to exchange temp keys for
        # permanent keys
        responses.add(
            responses.POST,
            'http://mock1a.com/callback',
            body='oauth_token=perm_token'
                 '&oauth_token_secret=perm_secret'
                 '&oauth_callback_confirmed=true',
        )
        user = UserFactory()
        # Temporary credentials below belong to `user`, not to the attacker.
        account = ExternalAccountFactory(
            provider="mock1a",
            oauth_key="temp_key",
            oauth_secret="temp_secret",
            temporary=True
        )
        account.save()
        # associate this ExternalAccount instance with the user
        user.external_accounts.append(account)
        user.save()
        malicious_user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock1a/",
            query_string="oauth_token=temp_key&oauth_verifier=mock_verifier"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=malicious_user, response=None)
            with assert_raises(PermissionsError):
                # do the key exchange
                self.provider.auth_callback(user=malicious_user)
class TestExternalProviderOAuth2(OsfTestCase):
    """Test functionality of the ExternalProvider class, for OAuth 2.0."""
    def setUp(self):
        super(TestExternalProviderOAuth2, self).setUp()
        self.user = UserFactory()
        self.provider = MockOAuth2Provider()
    def tearDown(self):
        # Drop fixtures so each test starts from a clean database.
        ExternalAccount._clear_caches()
        ExternalAccount.remove()
        self.user.remove()
        super(TestExternalProviderOAuth2, self).tearDown()
    def test_oauth_version_default(self):
        # OAuth 2.0 is the default version
        assert_is(self.provider._oauth_version, OAUTH2)
    def test_start_flow(self):
        # Generate the appropriate URL and state token
        with self.app.app.test_request_context("/oauth/connect/mock2/") as ctx:
            # make sure the user is logged in
            authenticate(user=self.user, response=None)
            # auth_url is a property method - it calls out to the external
            # service to get a temporary key and secret before returning the
            # auth url
            url = self.provider.auth_url
            session = get_session()
            # Temporary credentials are added to the session
            creds = session.data['oauth_states'][self.provider.short_name]
            assert_in('state', creds)
            # The URL to which the user would be redirected
            parsed = urlparse.urlparse(url)
            params = urlparse.parse_qs(parsed.query)
            # check parameters
            assert_equal(
                params,
                {
                    'state': [creds['state']],
                    'response_type': ['code'],
                    'client_id': [self.provider.client_id],
                    'redirect_uri':[
                        web_url_for('oauth_callback',
                            service_name=self.provider.short_name,
                            _absolute=True)
                    ]
                }
            )
            # check base URL
            assert_equal(
                url.split("?")[0],
                "https://mock2.com/auth",
            )
    @responses.activate
    def test_callback(self):
        # Exchange temporary credentials for permanent credentials
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=user, response=None)
            # Seed the session with the state token the callback must match.
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user)
        account = ExternalAccount.find_one()
        assert_equal(account.oauth_key, 'mock_access_token')
        assert_equal(account.provider_id, 'mock_provider_id')
    @responses.activate
    def test_provider_down(self):
        # Create a 500 error
        _prepare_mock_500_error()
        user = UserFactory()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ):
            # make sure the user is logged in
            authenticate(user=user, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange; the provider's error must surface as an
            # HTTPError carrying the upstream status code
            with assert_raises(HTTPError) as error_raised:
                self.provider.auth_callback(user=user)
            assert_equal(
                error_raised.exception.code,
                503,
            )
    @responses.activate
    def test_multiple_users_associated(self):
        # Create only one ExternalAccount for multiple OSF users
        #
        # For some providers (ex: GitHub), the act of completing the OAuth flow
        # revokes previously generated credentials. In addition, there is often no
        # way to know the user's id on the external service until after the flow
        # has completed.
        #
        # Having only one ExternalAccount instance per account on the external
        # service means that connecting subsequent OSF users to the same external
        # account will not invalidate the credentials used by the OSF for users
        # already associated.
        user_a = UserFactory()
        external_account = ExternalAccountFactory(
            provider='mock2',
            provider_id='mock_provider_id',
        )
        user_a.external_accounts.append(external_account)
        user_a.save()
        user_b = UserFactory()
        # Mock the exchange of the code for an access token
        _prepare_mock_oauth2_handshake_response()
        # Fake a request context for the callback
        with self.app.app.test_request_context(
            path="/oauth/callback/mock2/",
            query_string="code=mock_code&state=mock_state"
        ) as ctx:
            # make sure the user is logged in
            authenticate(user=user_b, response=None)
            session = get_session()
            session.data['oauth_states'] = {
                self.provider.short_name: {
                    'state': 'mock_state',
                },
            }
            session.save()
            # do the key exchange
            self.provider.auth_callback(user=user_b)
        user_a.reload()
        user_b.reload()
        external_account.reload()
        # Both users share the single ExternalAccount instance.
        assert_equal(
            user_a.external_accounts,
            user_b.external_accounts,
        )
        assert_equal(
            ExternalAccount.find().count(),
            1
        )
|
from mhctools import NetMHCpan

# Run NetMHCpan binding predictions for two HLA alleles over two proteins.
predictor = NetMHCpan(alleles=["A*02:01", "hla-a0101"])
protein_sequences = {
    "1L2Y": "NLYIQWLKDGGPSSGRPPPS",
    "1L3Y": "ECDTINCERYNGQVCGGPGRGLCFCGKCRCHPGFEGSACQA"
}
epitope_collection = predictor.predict(protein_sequences)
df = epitope_collection.dataframe()
# Presumably entries are ordered by predicted binding strength -- confirm
# against the mhctools docs before relying on index 0.
strongest_predicted_binder = epitope_collection[0]
# BUG FIX: `print x` is a Python-2-only statement; the parenthesized form
# is valid on both Python 2 and 3 for a single argument.
print(strongest_predicted_binder.source_sequence)
|
from pathlib import Path
from random import choice
from taskcat.exceptions import TaskCatException
def generate_name():
    """Return a random "<descriptor>-<animal>" name from the bundled word lists.

    Raises:
        TaskCatException: if either dictionary file is missing.
    """
    path: Path = (Path(__file__).parent / "./cfg/").resolve()
    animals_file = path / "animals.txt"
    descriptors_file = path / "descriptors.txt"
    if not animals_file.is_file() or not descriptors_file.is_file():
        raise TaskCatException("cannot find dictionary files")
    # BUG FIX: the original used open(...).read() and never closed the file
    # handles; Path.read_text() opens and closes the file for us.
    animals = animals_file.read_text().split("\n")
    descriptors = descriptors_file.read_text().split("\n")
    return choice(descriptors) + "-" + choice(animals)  # nosec: B311
|
class Weather:
    """Lightweight value object pairing a temperature with a conditions label."""

    def __init__(self, weather):
        """Unpack an indexable pair: (temperature, conditions)."""
        temperature, conditions = weather[0], weather[1]
        self.temperature = temperature
        self.conditions = conditions
|
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.random_data
@d1_test.d1_test_case.reproducible_random_decorator("TestIdentifier")
class TestIdentifier(d1_test.d1_test_case.D1TestCase):
    def test_1000(self):
        """generate()"""
        # Build ten identifiers with a random prefix and a sliding length
        # window, then compare the XML forms against the stored sample.
        id_list = []
        for offset in range(10):
            random_prefix = d1_test.instance_generator.random_data.random_lower_ascii()
            identifier = d1_test.instance_generator.identifier.generate(
                random_prefix, offset, offset + 5
            )
            id_list.append(identifier.toxml("utf-8"))
        self.sample.assert_equals(id_list, "inst_gen_identifier")
|
import logging
import posixpath
import sys
import pycurl
import cStringIO
import StringIO
import pdb
try:
import json
except ImportError:
import simplejson as json
from ambari_client.core.http_utils import uri_encoding
__docformat__ = "epytext"
LOG = logging.getLogger(__name__)
class HttpClient(object):
    """
    Basic HTTP client for rest APIs.

    Wraps a single pycurl.Curl handle, so instances are not thread-safe.
    """
    def __init__(self, host_url, user_name , password ):
        """
        @param host_url: The base url to the API.
        @param user_name: User for HTTP basic auth; None disables auth.
        @param password: Password for HTTP basic auth; may be None.
        """
        self._host_url = host_url.rstrip('/')
        self._headers = { }
        self.c = pycurl.Curl()
        if user_name is not None:
            self.c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            userpass = user_name + ':'
            if password is not None:
                userpass += password
            LOG.debug( "pycurl.USERPWD value = "+str(userpass))
            self.c.setopt(pycurl.USERPWD, userpass)
    def set_headers(self, headers):
        """
        Add headers to the request.
        @return: self, so calls can be chained.
        """
        self._headers = headers
        return self
    @property
    def host_url(self):
        return self._host_url
    def _get_headers(self, headers):
        # Per-request headers override the client's default headers.
        res = self._headers.copy()
        if headers:
            res.update(headers)
        return res
    def invoke(self, http_method, path, payload=None, headers=None):
        """
        Submit an HTTP request.
        @param http_method: GET, POST, PUT, DELETE
        @param path: The path of the resource.
        @param payload: The payload to attach to the body of the request.
        @param headers: The headers to set for this request.
        @return: Tuple of (response body, HTTP status code, content type).
        """
        LOG.debug ("invoke : http_method = "+str(http_method))
        # Prepare URL and params
        url = self._normalize(path)
        if http_method in ("GET", "DELETE"):
            if payload is not None:
                # BUG FIX: was self.logger.warn(...), but this class never sets
                # a 'logger' attribute, so any GET/DELETE with a payload raised
                # AttributeError. Use the module-level LOG instead.
                LOG.warning(
                    "GET http_method does not pass any payload. Path '%s'", path)
                payload = None
        buf = cStringIO.StringIO()
        self.c.setopt(pycurl.WRITEFUNCTION, buf.write)
        # TLS certificate verification is disabled here; presumably for
        # self-signed cluster certs -- confirm before using on untrusted nets.
        self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
        LOG.debug ("invoke : url = "+str(url))
        # set http_method
        if http_method == "GET":
            self.c.setopt(pycurl.HTTPGET, 1)
        elif http_method == "HEAD":
            self.c.setopt(pycurl.HTTPGET, 1)
            self.c.setopt(pycurl.NOBODY, 1)
        elif http_method == "POST":
            self.c.setopt(pycurl.POST, 1)
        elif http_method == "PUT":
            self.c.setopt(pycurl.UPLOAD, 1)
        else:
            self.c.setopt(pycurl.CUSTOMREQUEST, http_method)
        if http_method in ('POST','PUT'):
            # Serialize the payload and stream it from an in-memory buffer.
            LOG.debug( "data..........."+str(payload))
            data = json.dumps(payload)
            data= data.decode('unicode-escape')
            LOG.debug( data)
            data = self._to_bytestring(data)
            LOG.debug( data)
            content = StringIO.StringIO(data)
            LOG.debug( content)
            content_length = len(data)
            LOG.debug( "content_length........."+str(content_length))
            if http_method == 'POST':
                self.c.setopt(pycurl.POSTFIELDSIZE, content_length)
            else:
                self.c.setopt(pycurl.INFILESIZE, content_length)
            self.c.setopt(pycurl.READFUNCTION, content.read)
        self.c.setopt(self.c.URL, url)
        headers = self._get_headers(headers)
        self.c.setopt(pycurl.HTTPHEADER,
                      ["%s: %s" % pair for pair in sorted(headers.iteritems())])
        LOG.debug ("invoke : pycurl.EFFECTIVE_URL = "+self.c.getinfo(pycurl.EFFECTIVE_URL))
        try:
            self.c.perform()
        except Exception as ex:
            # BUG FIX: was LOG.debug(sys.stderr, str(ex)) -- passing a stream
            # object as the log message. Log the exception text instead.
            LOG.debug("invoke : request failed: %s", str(ex))
            raise
        contents_type= self.c.getinfo(pycurl.CONTENT_TYPE)
        LOG.debug ("invoke : pycurl.CONTENT_TYPE = "+contents_type)
        code = self.c.getinfo(pycurl.RESPONSE_CODE)
        LOG.debug ("invoke : pycurl.RESPONSE_CODE = "+str(code))
        response = buf.getvalue()
        buf.close()
        LOG.debug ("invoke : COMPLETED ")
        return response , code , contents_type
    def _to_bytestring(self ,s):
        """Encode unicode to UTF-8 bytes; pass byte strings through unchanged."""
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s
    def _normalize(self, path):
        """Join *path* onto the base URL and percent-encode the result."""
        res = self._host_url
        if path:
            res += posixpath.normpath('/' + path.lstrip('/'))
        return uri_encoding(res)
|
"""
Model classes for AppDynamics REST API
.. moduleauthor:: Todd Radel <tradel@appdynamics.com>
"""
from . import JsonObject, JsonList
from .metric_value import MetricValues
class MetricDataSingle(JsonObject):
    """A single metric series: a metric path, rollup frequency, and its values."""
    FIELDS = {
        'frequency': '',
        'path': 'metricPath'
    }
    # Allowed rollup frequencies for the `frequency` property.
    FREQUENCIES = ('ONE_MIN', 'TEN_MIN', 'SIXTY_MIN')
    def __init__(self, path='', frequency='ONE_MIN', values=None):
        """
        :param path: metric path string.
        :param frequency: one of FREQUENCIES.
        :param values: MetricValues collection; a fresh one is created if None.
        """
        # BUG FIX: the default was `values=MetricValues()`, a single shared
        # instance created at class-definition time -- every MetricDataSingle
        # built without explicit values aliased the same object.
        if values is None:
            values = MetricValues()
        self._frequency = None
        self.path, self.frequency, self.values = path, frequency, values
    @classmethod
    def _set_fields_from_json_dict(cls, obj, json_dict):
        JsonObject._set_fields_from_json_dict(obj, json_dict)
        obj.values = MetricValues.from_json(json_dict['metricValues'])
    @property
    def frequency(self):
        return self._frequency
    @frequency.setter
    def frequency(self, new_freq):
        # Validates new_freq against the allowed FREQUENCIES.
        self._list_setter('_frequency', new_freq, MetricDataSingle.FREQUENCIES)
class MetricData(JsonList):
    """A list of MetricDataSingle objects with convenience filters."""
    def __init__(self, initial_list=None):
        super(MetricData, self).__init__(MetricDataSingle, initial_list)
    def __getitem__(self, i):
        """
        :rtype: MetricDataSingle
        """
        return self.data[i]
    def by_partial_name(self, name):
        """Return the metrics whose path contains *name*."""
        matched = [metric for metric in self if name in metric.path]
        return MetricData(matched)
    def by_leaf_name(self, name):
        """Return the metrics whose last '|'-separated path segment is *name*."""
        matched = []
        for metric in self:
            if metric.path.split('|')[-1] == name:
                matched.append(metric)
        return MetricData(matched)
    def by_path(self, path):
        """Return the metrics whose full path equals *path*."""
        matched = [metric for metric in self if metric.path == path]
        return MetricData(matched)
    def first_value(self):
        """Return the first data point's value of the first metric."""
        first_metric = self[0]
        return first_metric.values[0].value
|
import logging
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
from subprocess import PIPE, CalledProcessError, run as subprocess_run # nosec
from uuid import UUID, uuid5
from requests.exceptions import ReadTimeout
import docker
from ._config import Config
from .exceptions import TaskCatException
LOG = logging.getLogger(__name__)
class LambdaBuild:
    """Packages a taskcat project's lambda sources into zip artifacts.

    All work happens at construction time: the project's lambdas are built,
    then (if enabled) each submodule's lambdas are built recursively.
    """
    # Fixed namespace for deriving deterministic docker tags from source paths.
    NULL_UUID = UUID("{00000000-0000-0000-0000-000000000000}")
    def __init__(self, config: Config, project_root: Path):
        self._docker = docker.from_env()
        self._config = config
        self._project_root = Path(project_root).expanduser().resolve()
        self._lambda_source_path = (
            self._project_root / config.config.project.lambda_source_path
        ).resolve()
        self._lambda_zip_path = (
            self._project_root / config.config.project.lambda_zip_path
        ).resolve()
        self._build_lambdas(self._lambda_source_path, self._lambda_zip_path)
        self._build_submodules()
    def _build_submodules(self):
        # Builds lambdas of ./submodules/* when enabled in the project config.
        if not self._config.config.project.build_submodules:
            return
        # Source/zip paths relative to the project root are re-applied to
        # each submodule root.
        rel_source = self._lambda_source_path.relative_to(self._project_root)
        rel_zip = self._lambda_zip_path.relative_to(self._project_root)
        self._recurse(self._project_root, rel_source, rel_zip)
    def _recurse(self, base_path, rel_source, rel_zip):
        # Depth-first build of lambdas for every nested submodule.
        submodules_path = Path(base_path) / "submodules"
        if not submodules_path.is_dir():
            return
        for submodule in submodules_path.iterdir():
            source_path = submodule / rel_source
            if not source_path.is_dir():
                continue
            output_path = submodule / rel_zip
            self._build_lambdas(source_path, output_path)
            self._recurse(submodule, rel_source, rel_zip)
    def _build_lambdas(self, parent_path: Path, output_path):
        # Picks a build strategy per lambda dir: Dockerfile -> docker build,
        # requirements.txt -> pip vendoring, otherwise plain zip.
        if not parent_path.is_dir():
            return
        for path in parent_path.iterdir():
            if (path / "Dockerfile").is_file():
                # Deterministic tag per source path (uuid5 over NULL_UUID).
                tag = f"taskcat-build-{uuid5(self.NULL_UUID, str(path)).hex}"
                LOG.info(
                    f"Packaging lambda source from {path} using docker image {tag}"
                )
                self._docker_build(path, tag)
                self._docker_extract(tag, output_path / path.stem)
            elif (path / "requirements.txt").is_file():
                LOG.info(f"Packaging python lambda source from {path} using pip")
                self._pip_build(path, output_path / path.stem)
            else:
                LOG.info(
                    f"Packaging lambda source from {path} without building "
                    f"dependencies"
                )
                self._zip_dir(path, output_path / path.stem)
    @staticmethod
    def _make_pip_command(base_path):
        # pip command that vendors requirements directly into base_path.
        return [
            "pip",
            "install",
            "--no-cache-dir",
            "--no-color",
            "--disable-pip-version-check",
            "--upgrade",
            "--requirement",
            str(base_path / "requirements.txt"),
            "--target",
            str(base_path),
        ]
    @classmethod
    def _pip_build(cls, base_path, output_path):
        # Copies the source to a temp dir, installs deps into it, zips it.
        tmp_path = Path(tempfile.mkdtemp())
        try:
            build_path = tmp_path / "build"
            shutil.copytree(base_path, build_path)
            command = cls._make_pip_command(build_path)
            LOG.debug("command is '%s'", command)
            LOG.info("Starting pip build.")
            try:
                completed_proc = subprocess_run(  # nosec
                    command, cwd=build_path, check=True, stdout=PIPE, stderr=PIPE
                )
            except (FileNotFoundError, CalledProcessError) as e:
                raise TaskCatException("pip build failed") from e
            LOG.debug("--- pip stdout:\n%s", completed_proc.stdout)
            LOG.debug("--- pip stderr:\n%s", completed_proc.stderr)
            cls._zip_dir(build_path, output_path)
            shutil.rmtree(tmp_path, ignore_errors=True)
        except Exception as e:  # pylint: disable=broad-except
            # Always clean up the temp dir before re-raising.
            shutil.rmtree(tmp_path, ignore_errors=True)
            raise e
    @staticmethod
    def _zip_dir(build_path, output_path):
        # Zips build_path into <output_path>/lambda.zip, replacing any old zip.
        output_path.mkdir(parents=True, exist_ok=True)
        zip_path = output_path / "lambda.zip"
        if zip_path.is_file():
            zip_path.unlink()
        shutil.make_archive(output_path / "lambda", "zip", build_path)
    @staticmethod
    def _clean_build_log(line):
        # Flattens a docker build log record into a plain stripped string.
        if "stream" in line:
            line = line["stream"]
        elif "aux" in line:
            line = line["aux"]
        return str(line).strip()
    def _docker_build(self, path, tag):
        # Builds `path` as image `tag` and logs the build output.
        _, logs = self._docker.images.build(path=str(path), tag=tag)
        build_logs = []
        for line in logs:
            line = self._clean_build_log(line)
            if line:
                build_logs.append(line)
        LOG.debug("docker build logs: \n{}".format("\n".join(build_logs)))
    def _docker_extract(self, tag, package_path):
        # Runs image `tag`, then copies its /output/ tree into package_path.
        container = self._docker.containers.run(image=tag, detach=True)
        exit_code = container.wait()["StatusCode"]
        logs = container.logs()
        LOG.debug("docker run logs: \n{}".format(logs.decode("utf-8").strip()))
        if exit_code != 0:
            raise TaskCatException("docker build failed")
        arc, _ = container.get_archive("/output/")
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            for chunk in arc:
                tmpfile.write(chunk)
        with tarfile.open(tmpfile.name) as tar:
            # NOTE(review): each member is extracted twice -- extract() writes
            # into the current working directory, then extractall() writes into
            # package_path; and removedirs() below expects package_path/"output"
            # to exist even though the "output/" prefix was stripped from member
            # names here. Confirm which extraction is actually intended.
            for member in tar.getmembers():
                if member.name.startswith("output/"):
                    member.name = member.name[len("output/") :]
                tar.extract(member)
            tar.extractall(path=str(package_path))
        try:
            container.remove()
        except ReadTimeout:
            LOG.warning(f"Could not remove container {container.id}")
        os.unlink(tmpfile.name)
        os.removedirs(str(package_path / "output"))
|
""" Base class of polymorphic hardware structures """
from datetime import datetime
from inspect import isclass
import re
from sqlalchemy import (Column, Integer, Sequence, ForeignKey, UniqueConstraint,
String, DateTime)
from sqlalchemy.orm import relation, backref, lazyload, validates, deferred
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.attributes import set_committed_value
from aquilon.exceptions_ import AquilonError, ArgumentError, NotFoundException
from aquilon.aqdb.model import Base, Location, Model, DnsRecord
from aquilon.aqdb.column_types import AqStr
_TN = "hardware_entity"
class HardwareEntity(Base):
    """Polymorphic base for hardware objects; subclasses set hardware_type."""
    __tablename__ = _TN
    _instance_label = 'printable_name'
    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
    label = Column(AqStr(63), nullable=False, unique=True)
    hardware_type = Column(AqStr(64), nullable=False)
    location_id = Column(ForeignKey(Location.id), nullable=False, index=True)
    model_id = Column(ForeignKey(Model.id), nullable=False, index=True)
    serial_no = Column(String(64), nullable=True)
    primary_name_id = Column(ForeignKey(DnsRecord.id,
                                        name='%s_pri_name_fk' % _TN),
                             nullable=True)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    # Most of the update_* commands need to load the comments due to
    # snapshot_hw(), so it is not worth deferring it
    comments = Column(String(255), nullable=True)
    location = relation(Location, innerjoin=True)
    model = relation(Model, innerjoin=True)
    # When working with machines the primary name always crops up, so load it
    # eagerly
    # This is a one-to-one relation, so we need uselist=False on the backref
    primary_name = relation(DnsRecord, lazy=False,
                            backref=backref('hardware_entity', uselist=False,
                                            passive_deletes=True))
    __table_args__ = (UniqueConstraint(primary_name_id,
                                       name='%s_pri_name_uk' % _TN),
                      {'info': {'unique_fields': ['label']}},)
    __mapper_args__ = {'polymorphic_on': hardware_type}
    # Lowercase alphanumeric, must start with a letter, at most 63 chars.
    _label_check = re.compile("^[a-z][a-z0-9]{,62}$")
    @classmethod
    def check_label(cls, label):
        """Raise ArgumentError if *label* is not a valid hardware label."""
        if not cls._label_check.match(label):
            raise ArgumentError("Illegal hardware label format '%s'. Only "
                                "alphanumeric characters are allowed, and "
                                "the first character must be a letter." % label)
    @validates('label')
    def validate_label(self, key, value):  # pylint: disable=W0613
        """SQLAlchemy hook: validate the label whenever it is assigned."""
        self.check_label(value)
        return value
    def __init__(self, label=None, **kwargs):
        """Create the entity; *label* is normalized and must be non-empty."""
        label = AqStr.normalize(label)
        if not label:
            raise AquilonError("HardwareEntity needs a label.")
        super(HardwareEntity, self).__init__(label=label, **kwargs)
    @property
    def fqdn(self):
        """ Returns the FQDN, if there is a primary name """
        if self.primary_name:
            return str(self.primary_name.fqdn)
        else:
            return None
    @property
    def primary_ip(self):
        """ Returns the primary IP, if there is one """
        if self.primary_name and hasattr(self.primary_name, "ip"):
            return self.primary_name.ip
        else:
            return None
    @property
    def printable_name(self):
        """ Returns the most meaningful name """
        if self.primary_name:
            return str(self.primary_name.fqdn)
        else:
            return self.label
    @classmethod
    def get_unique(cls, sess, name, hardware_type=None, compel=False,
                   preclude=False, query_options=None):
        """ Returns a unique HardwareEntity given session and fqdn """
        # If the hardware_type param isn't explicitly set and we have a
        # polymorphic identity, assume we're querying only for items of our
        # hardware_type.
        if hardware_type:
            if isclass(hardware_type):
                clslabel = hardware_type._get_class_label()
                hardware_type = hardware_type.__mapper_args__['polymorphic_identity']
            else:
                pcls = cls.__mapper__.polymorphic_map[hardware_type].class_
                clslabel = pcls._get_class_label()
        else:
            if 'polymorphic_identity' in cls.__mapper_args__:
                hardware_type = cls.__mapper_args__['polymorphic_identity']
            clslabel = cls._get_class_label()
        # The automagic DNS lookup does not really make sense with preclude=True
        if preclude:
            name = AqStr.normalize(name)
            cls.check_label(name)
        q = sess.query(cls)
        # A dotted name is looked up as an FQDN, otherwise as a plain label.
        if "." in name:
            dns_rec = DnsRecord.get_unique(sess, fqdn=name, compel=True)
            # We know the primary name, do not load it again
            q = q.options(lazyload('primary_name'))
            q = q.filter_by(primary_name=dns_rec)
        else:
            dns_rec = None
            q = q.filter_by(label=name)
        if query_options:
            q = q.options(*query_options)
        try:
            hwe = q.one()
        except NoResultFound:
            # Check if the name is in use by a different hardware type
            q = sess.query(HardwareEntity)
            if dns_rec:
                # We know the primary name, do not load it again
                q = q.options(lazyload('primary_name'))
                q = q.filter_by(primary_name=dns_rec)
            else:
                q = q.filter_by(label=name)
            try:
                hwe = q.one()
                if dns_rec:
                    # We know the primary name, do not load it again
                    set_committed_value(hwe, 'primary_name', dns_rec)
                raise ArgumentError("{0} exists, but is not a {1}."
                                    .format(hwe, clslabel.lower()))
            except NoResultFound:
                hwe = None
            if compel:
                raise NotFoundException("%s %s not found." % (clslabel, name))
        if hwe:
            if preclude:
                raise ArgumentError('{0} already exists.'.format(hwe))
            if dns_rec:
                # We know the primary name, do not load it again
                set_committed_value(hwe, 'primary_name', dns_rec)
                set_committed_value(dns_rec, 'hardware_entity', hwe)
        return hwe
    def all_addresses(self):
        """ Iterator returning all addresses of the hardware. """
        for iface in self.interfaces:
            for addr in iface.assignments:
                yield addr
class DeviceLinkMixin(object):
    """Mixin adding a validated bus_address column to hardware classes."""
    _bus_address_checks = {
        # PCI: <domain>:<bus>:<device>.<function>
        'pci': re.compile(r'^[0-9a-f]{4}:[0-9a-f]{2}:[01][0-9a-f]\.[0-7]$')
    }
    bus_address = Column(AqStr(32), nullable=True)
    @validates('bus_address')
    def validate_bus_address(self, key, value):  # pylint: disable=W0613
        """Validate '<scheme>:<address>' bus URIs; None passes through."""
        if value is None:
            return value
        if ":" not in value:
            raise ValueError("Malformed bus URI specification.")
        # Split off the scheme and check the remainder against the
        # scheme-specific pattern.
        scheme, _, address = value.partition(":")
        checker = self._bus_address_checks.get(scheme)
        if checker is None:
            raise ValueError("Unknown hardware bus type.")
        if not checker.match(address):
            raise ValueError("Invalid bus address.")
        return value
|
from lightning import readers as reader
from lightning.resources import values

# List the images resource directory, first recursively, then flattened.
for list_directory in (reader.listsubdir, reader.listsubdirflat):
    print(list_directory('resources/images'))
|
"""Test Home Assistant date util methods."""
from datetime import datetime, timedelta
import pytest
import homeassistant.util.dt as dt_util
DEFAULT_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
TEST_TIME_ZONE = "America/Los_Angeles"
def teardown():
    """Stop everything that was started."""
    # Restore the default time zone so tests do not leak global state.
    dt_util.set_default_time_zone(DEFAULT_TIME_ZONE)
def test_get_time_zone_retrieves_valid_time_zone():
    """Test that a known time zone name resolves to a tzinfo object."""
    zone = dt_util.get_time_zone(TEST_TIME_ZONE)
    assert zone is not None
    assert zone.zone == TEST_TIME_ZONE
def test_get_time_zone_returns_none_for_garbage_time_zone():
    """Test getting a non existing time zone."""
    assert dt_util.get_time_zone("Non existing time zone") is None
def test_set_default_time_zone():
    """Test setting the default time zone."""
    zone = dt_util.get_time_zone(TEST_TIME_ZONE)
    dt_util.set_default_time_zone(zone)
    # Compare zone names instead of tzinfo objects because of DST variants.
    assert dt_util.now().tzinfo.zone == zone.zone
def test_utcnow():
    """Test the UTC now method."""
    drift = dt_util.utcnow().replace(tzinfo=None) - datetime.utcnow()
    assert abs(drift) < timedelta(seconds=1)
def test_now():
    """Test the now method."""
    dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
    drift = dt_util.as_utc(dt_util.now()).replace(tzinfo=None) - datetime.utcnow()
    assert abs(drift) < timedelta(seconds=1)
def test_as_utc_with_naive_object():
    """Test as_utc with a naive datetime object."""
    naive_utcnow = datetime.utcnow()
    assert dt_util.as_utc(naive_utcnow).replace(tzinfo=None) == naive_utcnow
def test_as_utc_with_utc_object():
    """Test UTC time with UTC object."""
    aware_now = dt_util.utcnow()
    assert dt_util.as_utc(aware_now) == aware_now
def test_as_utc_with_local_object():
    """Test the UTC time with local object."""
    dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
    localnow = dt_util.now()
    utcnow = dt_util.as_utc(localnow)
    # Same instant, rendered in a different time zone.
    assert utcnow == localnow
    assert utcnow.tzinfo != localnow.tzinfo
def test_as_local_with_naive_object():
    """Test local time with a naive object."""
    localnow = dt_util.now()
    assert abs(localnow - dt_util.as_local(datetime.utcnow())) < timedelta(seconds=1)
def test_as_local_with_local_object():
    """Test as_local with an already-local object."""
    now = dt_util.now()
    # BUG FIX: the original asserted `now == now`, which is trivially true and
    # never exercised as_local(); actually pass the value through the function.
    assert dt_util.as_local(now) == now
def test_as_local_with_utc_object():
    """Test local time with UTC object."""
    dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
    source = dt_util.utcnow()
    localized = dt_util.as_local(source)
    # Same instant expressed in the default (local) time zone.
    assert localized == source
    assert localized.tzinfo != source.tzinfo
def test_utc_from_timestamp():
    """Test utc_from_timestamp method."""
    expected = datetime(1986, 7, 9, tzinfo=dt_util.UTC)
    assert dt_util.utc_from_timestamp(521251200) == expected
def test_as_timestamp():
    """Test as_timestamp method."""
    ts = 1462401234
    utc_dt = dt_util.utc_from_timestamp(ts)
    assert dt_util.as_timestamp(utc_dt) == ts
    # An ISO-formatted string round-trips to the same timestamp.
    assert dt_util.as_timestamp(utc_dt.isoformat()) == ts
    # confirm the ability to handle a string passed in
    later = dt_util.as_timestamp("2016-01-01 12:12:12")
    earlier = dt_util.as_timestamp("2016-01-01 12:12:11")
    assert later - earlier == 1
def test_parse_datetime_converts_correctly():
    """Test parse_datetime converts strings."""
    parsed = dt_util.parse_datetime("1986-07-09T12:00:00Z")
    assert parsed == datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
    # An aware datetime survives an isoformat round trip.
    utcnow = dt_util.utcnow()
    assert dt_util.parse_datetime(utcnow.isoformat()) == utcnow
def test_parse_datetime_returns_none_for_incorrect_format():
    """Test parse_datetime returns None if incorrect format."""
    result = dt_util.parse_datetime("not a datetime string")
    assert result is None
def test_get_age():
    """Test get_age."""
    # (age delta, expected human-readable string) pairs.
    cases = [
        (timedelta(seconds=0), "0 seconds"),
        (timedelta(seconds=1), "1 second"),
        (timedelta(seconds=30), "30 seconds"),
        (timedelta(minutes=5), "5 minutes"),
        (timedelta(minutes=1), "1 minute"),
        (timedelta(minutes=300), "5 hours"),
        (timedelta(minutes=320), "5 hours"),
        (timedelta(minutes=1.6 * 60 * 24), "2 days"),
        (timedelta(minutes=2 * 60 * 24), "2 days"),
        (timedelta(minutes=32 * 60 * 24), "1 month"),
        (timedelta(minutes=365 * 60 * 24), "1 year"),
    ]
    for delta, expected in cases:
        assert dt_util.get_age(dt_util.now() - delta) == expected
def test_parse_time_expression():
    """parse_time_expression expands cron-like expressions to value lists."""
    whole_minute_range = list(range(60))
    # '*' and None both expand to the entire range.
    assert dt_util.parse_time_expression("*", 0, 59) == whole_minute_range
    assert dt_util.parse_time_expression(None, 0, 59) == whole_minute_range
    # '/5' selects every fifth value.
    assert dt_util.parse_time_expression("/5", 0, 59) == list(range(0, 60, 5))
    # List inputs come back sorted.
    assert dt_util.parse_time_expression([2, 1, 3], 0, 59) == [1, 2, 3]
    assert dt_util.parse_time_expression("*", 0, 23) == list(range(24))
    # A lone integer becomes a single-element list.
    assert dt_util.parse_time_expression(42, 0, 59) == [42]
    # Out-of-range values are rejected.
    with pytest.raises(ValueError):
        dt_util.parse_time_expression(61, 0, 60)
def test_find_next_time_expression_time_basic():
    """Basic behavior of find_next_time_expression_time."""

    def next_time(moment, hour, minute, second):
        """Expand the expressions and look up the next matching time."""
        return dt_util.find_next_time_expression_time(
            moment,
            dt_util.parse_time_expression(second, 0, 59),
            dt_util.parse_time_expression(minute, 0, 59),
            dt_util.parse_time_expression(hour, 0, 23),
        )

    # Next half-hour boundary after 10:20.
    assert next_time(datetime(2018, 10, 7, 10, 20, 0), "*", "/30", 0) == datetime(
        2018, 10, 7, 10, 30, 0
    )
    # An exact match is returned as-is.
    assert next_time(datetime(2018, 10, 7, 10, 30, 0), "*", "/30", 0) == datetime(
        2018, 10, 7, 10, 30, 0
    )
    # Combined hour/minute/second constraints.
    assert next_time(
        datetime(2018, 10, 7, 10, 30, 0), "/3", "/30", [30, 45]
    ) == datetime(2018, 10, 7, 12, 0, 30)
    # A fixed earlier hour rolls over to the next day.
    assert next_time(datetime(2018, 10, 7, 10, 30, 0), 5, 0, 0) == datetime(
        2018, 10, 8, 5, 0, 0
    )
    # Sub-second residue must not push past the matching second.
    assert next_time(
        datetime(2018, 10, 7, 10, 30, 0, 999999), "*", "/30", 0
    ) == datetime(2018, 10, 7, 10, 30, 0)
def test_find_next_time_expression_time_dst():
    """Test daylight saving time for find_next_time_expression_time."""
    tz = dt_util.get_time_zone("Europe/Vienna")
    # NOTE(review): sets the process-wide default timezone and never restores
    # it — later tests in this module see Vienna time. Confirm that is intended.
    dt_util.set_default_time_zone(tz)
    def find(dt, hour, minute, second):
        """Call test_find_next_time_expression_time."""
        seconds = dt_util.parse_time_expression(second, 0, 59)
        minutes = dt_util.parse_time_expression(minute, 0, 59)
        hours = dt_util.parse_time_expression(hour, 0, 23)
        return dt_util.find_next_time_expression_time(dt, seconds, minutes, hours)
    # Entering DST, clocks are rolled forward: 02:30 does not exist on
    # 2018-03-25, so the next match is the following day.
    assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
        tz.localize(datetime(2018, 3, 25, 1, 50, 0)), 2, 30, 0
    )
    assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
        tz.localize(datetime(2018, 3, 25, 3, 50, 0)), 2, 30, 0
    )
    assert tz.localize(datetime(2018, 3, 26, 1, 50, 0)) == find(
        tz.localize(datetime(2018, 3, 26, 1, 50, 0)), 2, 30, 0
    ) if False else True  # placeholder removed
    # Leaving DST, clocks are rolled back: 02:xx occurs twice, so the
    # pytz `is_dst` flag disambiguates which occurrence is meant.
    assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
        tz.localize(datetime(2018, 3, 26, 1, 50, 0)), 2, 30, 0
    )
    assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=False) == find(
        tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=False), 2, 30, 0
    )
    assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=False) == find(
        tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=True), 2, 30, 0
    )
    assert tz.localize(datetime(2018, 10, 28, 4, 30, 0), is_dst=False) == find(
        tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=True), 4, 30, 0
    )
    # Within the first (DST) occurrence of the repeated hour.
    assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=True) == find(
        tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=True), 2, 30, 0
    )
    # Past both occurrences of 02:30 -> next day.
    assert tz.localize(datetime(2018, 10, 29, 2, 30, 0)) == find(
        tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=False), 2, 30, 0
    )
|
import unittest
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.state import State
from datetime import datetime
class BaseExecutorTest(unittest.TestCase):
    """Tests for airflow's BaseExecutor event buffer."""
    def test_get_event_buffer(self):
        """Filtered reads return matching events; reads consume the buffer."""
        executor = BaseExecutor()
        date = datetime.utcnow()
        # Task-instance keys: (dag_id, task_id, execution_date).
        key1 = ("my_dag1", "my_task1", date)
        key2 = ("my_dag2", "my_task1", date)
        key3 = ("my_dag2", "my_task2", date)
        state = State.SUCCESS
        executor.event_buffer[key1] = state
        executor.event_buffer[key2] = state
        executor.event_buffer[key3] = state
        # Only one event belongs to my_dag1.
        self.assertEqual(len(executor.get_event_buffer(("my_dag1",))), 1)
        # The remaining two come back on an unfiltered read; the asserts
        # imply get_event_buffer removes what it returns — the buffer is
        # empty afterwards.
        self.assertEqual(len(executor.get_event_buffer()), 2)
        self.assertEqual(len(executor.event_buffer), 0)
|
import telegram
import logging
import pg8000
from gemynd import Config
from gemynd import Database
from gemynd.core import Message
from gemynd.api.telegramapi import Users, Chats
logger = logging.getLogger(__name__)
class Telegram:
    """Telegram transport for the bot.

    Wraps the telegram Bot API together with the local database tables
    (telegram.updates, telegram.message_buffer, core.messages) so that
    fetched-but-unprocessed updates survive a crash between fetch and
    processing.
    """
    def __init__(self, config):
        """Connect to the Telegram API and the backing database.

        Reads config['verbose'] and config['api']['telegram']['token'].
        Raises whatever the API/database setup raises, after logging it.
        """
        try:
            if (config['verbose'] == 'on'):
                logger.setLevel(logging.DEBUG)
            self.bot = telegram.Bot(token = config['api']['telegram']['token'])
            self.users = Users(config)
            self.chats = Chats(config)
            self.db = Database(config)
            self.db.connect()
            self.source = self.getSourceID()
        # FIX: 'except Exception, ex' is Python-2-only syntax; 'as' works on
        # Python 2.6+ and 3.x alike.
        except Exception as ex:
            logger.error('Exception occured initializing the bot')
            logger.error('%s' % str(ex))
            # FIX: bare 'raise' re-raises with the original traceback intact;
            # 'raise ex' discards it on Python 2.
            raise
    def getInfo(self):
        """Ping the Telegram API (getMe); result is discarded."""
        self.bot.getMe()
    def getSourceID(self):
        """Return the core.sources id of the 'telegram' source.

        :raises Exception: if the 'telegram' row is missing.
        """
        retset = self.db.fetch(
            """select id
                 from core.sources
                where source = 'telegram'
            """)
        if len(retset) == 0:
            raise Exception("Source ID for 'telegram' is not found")
        return retset[0][0]
    def getLastUpdate(self):
        """Return the highest processed Telegram update_id (0 if none)."""
        retset = self.db.fetch(
            """select update_id
                 from telegram.updates
                where id = (
                           select max(id)
                             from telegram.updates
                           )""")
        last_update_id = 0
        if len(retset) > 0:
            last_update_id = retset[0][0]
        logger.debug("Last update fetched from database is: %d" % last_update_id)
        return last_update_id
    def setLastUpdate(self, last_update_id):
        """Persist `last_update_id` as the newest processed update."""
        self.db.call("insert into telegram.updates (update_id) values (%d)" % last_update_id)
        logger.debug("New last update written to database is: %d" % last_update_id)
    def fetchDbBufferMessages(self, last_update_id):
        """Load buffered messages newer than `last_update_id`.

        :returns: (list of Message, newest buffered update_id). When the
            buffer has rows, the returned id is max(update_id) of the whole
            buffer; otherwise `last_update_id` is passed back unchanged.
        """
        retset = self.db.fetch(
            """select id,
                      chat_id,
                      message
                 from telegram.message_buffer
                where update_id > %d
            """ % last_update_id)
        res = []
        if len(retset) > 0:
            for msg in retset:
                res.append(
                    Message(id = msg[0],
                            chat_id = msg[1],
                            text = msg[2])
                )
            retset = self.db.fetch("select max(update_id) from telegram.message_buffer")
            last_update_id = retset[0][0]
        return res, last_update_id
    def putDbBufferMessage(self, update_id, msg):
        """Stage an incoming message in telegram.message_buffer."""
        # NOTE(review): SQL built by %-interpolation; quotes are doubled
        # manually but parameterized queries would be safer here.
        self.db.call(
            """insert into telegram.message_buffer (update_id, id, chat_id, message)
               values (%d, %d, %d, '%s')""" %
            (update_id,
             msg.id,
             msg.chat_id,
             msg.text.replace("'", "''"))
        )
        return
    def putDbMessage(self, msg, direction):
        """Archive a message in core.messages; direction is 'I' or 'O'."""
        # NOTE(review): same manual escaping as putDbBufferMessage; prefer
        # parameterized queries.
        self.db.call(
            """insert into core.messages (source_id,
                                          source_message_id,
                                          direction,
                                          chat_id,
                                          message)
               values (%d, %d, '%s', %d, '%s')""" %
            (self.source,
             msg.id,
             direction,
             msg.chat_id,
             msg.text.replace("'", "''"))
        )
        return
    def getNewMessages(self):
        """Collect unprocessed messages from the buffer and the API.

        :returns: (deduplicated Message list, newest seen update_id).
        """
        last_update_id = self.getLastUpdate()
        dbmessages, last_update_id = self.fetchDbBufferMessages(last_update_id)
        new_update_id = last_update_id
        apimessages = []
        updates = self.bot.getUpdates(offset = last_update_id + 1)
        for item in updates:
            msg = item.message
            new_update_id = max(new_update_id, item.update_id)
            # Resolve/register the sender and chat in local tables.
            user_id = self.users.getUser(msg.from_user.id, msg.from_user.name)
            chat_id = self.chats.getChat(msg.chat_id, user_id)
            apimessages.append(
                Message(id = item.update_id,
                        chat_id = chat_id,
                        text = msg.text)
            )
            # Buffer first, then archive, so a crash cannot lose the message.
            self.putDbBufferMessage(item.update_id, apimessages[-1])
            self.putDbMessage(apimessages[-1], 'I')
        # NOTE(review): set() dedup relies on Message hashing/equality and
        # does not preserve order — confirm callers do not depend on order.
        result = list(set(dbmessages + apimessages))
        return result, new_update_id
    def commitNewMessages(self, new_update_id):
        """Mark everything up to `new_update_id` processed and clear buffer."""
        self.setLastUpdate(new_update_id)
        self.db.call("delete from telegram.message_buffer where update_id <= %d" % new_update_id)
    def sendMessage(self, msg):
        """Send `msg` via the API and archive it as outgoing ('O')."""
        telegram_chat_id = self.chats.getCoreChat(msg.chat_id)
        self.bot.sendMessage(chat_id = telegram_chat_id, text = msg.text)
        self.putDbMessage(msg, 'O')
    def close(self):
        """Close the database connection."""
        self.db.close()
|
__author__ = 'Jon Nappi'
|
'''
Splunk user access control related utilities.
'''
import re
import json
from splunklib import binding
from solnlib.utils import retry
import solnlib.splunk_rest_client as rest_client
__all__ = ['ObjectACLException',
'ObjectACL',
'ObjectACLManagerException',
'ObjectACLManager',
'AppCapabilityManagerException',
'AppCapabilityManager',
'UserAccessException',
'check_user_access',
'InvalidSessionKeyException',
'get_current_username',
'UserNotExistException',
'get_user_capabilities',
'user_is_capable',
'get_user_roles']
class ObjectACLException(Exception):
    '''Raised when object ACL perms are malformed (see ObjectACL._check_perms).'''
    pass
class ObjectACL(object):
    '''Object ACL record.
    :param obj_collection: Collection where object currently stored.
    :type obj_collection: ``string``
    :param obj_id: ID of this object.
    :type obj_id: ``string``
    :param obj_type: Type of this object.
    :type obj_type: ``string``
    :param obj_app: App of this object.
    :type obj_app: ``string``
    :param obj_owner: Owner of this object.
    :type obj_owner: ``string``
    :param obj_perms: Object perms, like: {
        'read': ['*'],
        'write': ['admin'],
        'delete': ['admin']}.
    :type obj_perms: ``dict``
    :param obj_shared_by_inclusion: Flag of object is shared by inclusion.
    :type obj_shared_by_inclusion: ``bool``
    Usage::
       >>> from solnlib import user_access
       >>> obj_acl = user_access.ObjectACL(
       >>>    'test_collection',
       >>>    '9defa6f510d711e6be16a45e60e34295',
       >>>    'test_object',
       >>>    'Splunk_TA_test',
       >>>    'admin',
       >>>    {'read': ['*'], 'write': ['admin'], 'delete': ['admin']},
       >>>    False)
    '''
    # Record field names as stored in KVStore.
    OBJ_COLLECTION_KEY = 'obj_collection'
    OBJ_ID_KEY = 'obj_id'
    OBJ_TYPE_KEY = 'obj_type'
    OBJ_APP_KEY = 'obj_app'
    OBJ_OWNER_KEY = 'obj_owner'
    OBJ_PERMS_KEY = 'obj_perms'
    OBJ_PERMS_READ_KEY = 'read'
    OBJ_PERMS_WRITE_KEY = 'write'
    OBJ_PERMS_DELETE_KEY = 'delete'
    OBJ_PERMS_ALLOW_ALL = '*'
    OBJ_SHARED_BY_INCLUSION_KEY = 'obj_shared_by_inclusion'
    def __init__(self, obj_collection, obj_id, obj_type,
                 obj_app, obj_owner, obj_perms, obj_shared_by_inclusion):
        self.obj_collection = obj_collection
        self.obj_id = obj_id
        self.obj_type = obj_type
        self.obj_app = obj_app
        self.obj_owner = obj_owner
        # Validate before storing so a bad perms dict never gets in.
        self._check_perms(obj_perms)
        self._obj_perms = obj_perms
        self.obj_shared_by_inclusion = obj_shared_by_inclusion
    @classmethod
    def _check_perms(cls, obj_perms):
        '''Reject perms that are not a dict with read/write/delete keys.'''
        if not isinstance(obj_perms, dict):
            raise ObjectACLException(
                'Invalid object acl perms type: %s, should be a dict.' %
                type(obj_perms))
        required = (cls.OBJ_PERMS_READ_KEY,
                    cls.OBJ_PERMS_WRITE_KEY,
                    cls.OBJ_PERMS_DELETE_KEY)
        if any(perm not in obj_perms for perm in required):
            raise ObjectACLException(
                'Invalid object acl perms: %s, '
                'should include read, write and delete perms.' % obj_perms)
    @property
    def obj_perms(self):
        return self._obj_perms
    @obj_perms.setter
    def obj_perms(self, obj_perms):
        # Re-validate on every assignment.
        self._check_perms(obj_perms)
        self._obj_perms = obj_perms
    @property
    def record(self):
        '''Get object acl record.
        :returns: Object acl record, like: {
            '_key': 'test_collection-1234',
            'obj_collection': 'test_collection',
            'obj_id': '1234',
            'obj_type': 'test_object',
            'obj_app': 'Splunk_TA_test',
            'obj_owner': 'admin',
            'obj_perms': {'read': ['*'], 'write': ['admin'], 'delete': ['admin']},
            'obj_shared_by_inclusion': True}
        :rtype: ``dict``
        '''
        return {
            '_key': self.generate_key(self.obj_collection, self.obj_id),
            self.OBJ_COLLECTION_KEY: self.obj_collection,
            self.OBJ_ID_KEY: self.obj_id,
            self.OBJ_TYPE_KEY: self.obj_type,
            self.OBJ_APP_KEY: self.obj_app,
            self.OBJ_OWNER_KEY: self.obj_owner,
            self.OBJ_PERMS_KEY: self._obj_perms,
            self.OBJ_SHARED_BY_INCLUSION_KEY: self.obj_shared_by_inclusion,
        }
    @staticmethod
    def generate_key(obj_collection, obj_id):
        '''Generate object acl record key.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :returns: Object acl record key.
        :rtype: ``string``
        '''
        return '%s_%s' % (obj_collection, obj_id)
    @staticmethod
    def parse(obj_acl_record):
        '''Parse object acl record and construct a new `ObjectACL` object from it.
        :param obj_acl_record: Object acl record.
        :type obj_acl_record: ``dict``
        :returns: New `ObjectACL` object.
        :rtype: `ObjectACL`
        '''
        field_order = (ObjectACL.OBJ_COLLECTION_KEY,
                       ObjectACL.OBJ_ID_KEY,
                       ObjectACL.OBJ_TYPE_KEY,
                       ObjectACL.OBJ_APP_KEY,
                       ObjectACL.OBJ_OWNER_KEY,
                       ObjectACL.OBJ_PERMS_KEY,
                       ObjectACL.OBJ_SHARED_BY_INCLUSION_KEY)
        return ObjectACL(*(obj_acl_record[field] for field in field_order))
    def merge(self, obj_acl):
        '''Merge current object perms with perms of `obj_acl`.
        :param obj_acl: Object acl to merge.
        :type obj_acl: ``ObjectACL``
        '''
        for perm_key in self._obj_perms:
            combined = set(self._obj_perms[perm_key]) | \
                set(obj_acl._obj_perms[perm_key])
            # '*' subsumes every explicit grant, so collapse to just it.
            if self.OBJ_PERMS_ALLOW_ALL in combined:
                self._obj_perms[perm_key] = [self.OBJ_PERMS_ALLOW_ALL]
            else:
                self._obj_perms[perm_key] = list(combined)
    def __str__(self):
        return json.dumps(self.record)
@retry(exceptions=[binding.HTTPError])
def _get_collection_data(collection_name, session_key, app, owner,
                         scheme, host, port, **context):
    '''Return the KVStore data handle for `collection_name`, creating the
    collection on first use.

    :raises KeyError: if the collection cannot be found after creation.
    '''
    client = rest_client.SplunkRestClient(session_key,
                                          app,
                                          owner=owner,
                                          scheme=scheme,
                                          host=host,
                                          port=port,
                                          **context)
    kvstore = client.kvstore
    # KVStore collection names may only contain word characters.
    collection_name = re.sub(r'[^\w]+', '_', collection_name)
    try:
        kvstore.get(name=collection_name)
    except binding.HTTPError as e:
        if e.status != 404:
            raise
        # Missing collection: create it, then look it up below.
        kvstore.create(collection_name)
    matches = [collection for collection in kvstore.list(search=collection_name)
               if collection.name == collection_name]
    if not matches:
        raise KeyError('Get collection data: %s failed.' % collection_name)
    return matches[0].data
class ObjectACLManagerException(Exception):
    '''Raised when ObjectACLManager cannot obtain its backing collection.'''
    pass
class ObjectACLNotExistException(Exception):
    '''Raised when a requested object ACL record is absent from KVStore.'''
    pass
class ObjectACLManager(object):
    '''Object ACL manager.
    :param collection_name: Collection name to store object ACL info.
    :type collection_name: ``string``
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param app: App name of namespace.
    :type app: ``string``
    :param owner: (optional) Owner of namespace, default is `nobody`.
    :type owner: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :raises ObjectACLManagerException: If init ObjectACLManager failed.
    Usage::
       >>> from solnlib import user_access
       >>> oaclm = user_access.ObjectACLManager(session_key,
                                                'Splunk_TA_test')
    '''
    def __init__(self, collection_name, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, **context):
        # Collections are namespaced per app: '<app>_<collection_name>'.
        collection_name = '{app}_{collection_name}'.format(
            app=app, collection_name=collection_name)
        try:
            self._collection_data = _get_collection_data(
                collection_name, session_key, app, owner,
                scheme, host, port, **context)
        except KeyError:
            raise ObjectACLManagerException(
                'Get object acl collection: %s fail.' % collection_name)
    @retry(exceptions=[binding.HTTPError])
    def update_acl(self, obj_collection, obj_id, obj_type, obj_app, obj_owner,
                   obj_perms, obj_shared_by_inclusion=True, replace_existing=True):
        '''Update acl info of object.
        Construct a new object acl info first, if `replace_existing` is True
        then replace existing acl info else merge new object acl info with the
        old one and replace the old acl info with merged acl info.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :param obj_type: Type of this object.
        :type obj_type: ``string``
        :param obj_app: App of this object.
        :type obj_app: ``string``
        :param obj_owner: Owner of this object.
        :type obj_owner: ``string``
        :param obj_perms: Object perms, like: {
            'read': ['*'],
            'write': ['admin'],
            'delete': ['admin']}.
        :type obj_perms: ``dict``
        :param obj_shared_by_inclusion: (optional) Flag of object is shared by
            inclusion, default is True.
        :type obj_shared_by_inclusion: ``bool``
        :param replace_existing: (optional) Replace existing acl info flag, True
            indicates replace old acl info with new one else merge with old acl
            info, default is True.
        :type replace_existing: ``bool``
        '''
        obj_acl = ObjectACL(
            obj_collection, obj_id, obj_type,
            obj_app, obj_owner, obj_perms, obj_shared_by_inclusion)
        if not replace_existing:
            try:
                old_obj_acl = self.get_acl(obj_collection, obj_id)
            except ObjectACLNotExistException:
                old_obj_acl = None
            if old_obj_acl:
                obj_acl.merge(old_obj_acl)
        # batch_save upserts by '_key', so this both inserts and replaces.
        self._collection_data.batch_save(obj_acl.record)
    @retry(exceptions=[binding.HTTPError])
    def update_acls(self, obj_collection, obj_ids, obj_type, obj_app, obj_owner,
                    obj_perms, obj_shared_by_inclusion=True, replace_existing=True):
        '''Batch update object acl info to all provided `obj_ids`.
        :param obj_collection: Collection where objects currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs list of objects.
        :type obj_ids: ``list``
        :param obj_type: Type of this object.
        :type obj_type: ``string``
        :param obj_app: App of this object.
        :type obj_app: ``string``
        :param obj_owner: Owner of this object.
        :type obj_owner: ``string``
        :param obj_perms: Object perms, like: {
            'read': ['*'],
            'write': ['admin'],
            'delete': ['admin']}.
        :type obj_perms: ``dict``
        :param obj_shared_by_inclusion: (optional) Flag of object is shared by
            inclusion, default is True.
        :type obj_shared_by_inclusion: ``bool``
        :param replace_existing: (optional) Replace existing acl info flag, True
            indicates replace old acl info with new one else merge with old acl
            info, default is True.
        :type replace_existing: ``bool``
        '''
        obj_acl_records = []
        for obj_id in obj_ids:
            obj_acl = ObjectACL(
                obj_collection, obj_id, obj_type,
                obj_app, obj_owner, obj_perms, obj_shared_by_inclusion)
            if not replace_existing:
                try:
                    old_obj_acl = self.get_acl(obj_collection, obj_id)
                except ObjectACLNotExistException:
                    old_obj_acl = None
                if old_obj_acl:
                    obj_acl.merge(old_obj_acl)
            obj_acl_records.append(obj_acl.record)
        # Single round-trip save of all records.
        self._collection_data.batch_save(*obj_acl_records)
    @retry(exceptions=[binding.HTTPError])
    def get_acl(self, obj_collection, obj_id):
        '''Get acl info.
        Query object acl info with parameter of the combination of
        `obj_collection` and `obj_id` from `self.collection_name` and
        return it.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :returns: Object acl info if success else None.
        :rtype: ``ObjectACL``
        :raises ObjectACLNotExistException: If object ACL info does not exist.
        '''
        key = ObjectACL.generate_key(obj_collection, obj_id)
        try:
            obj_acl = self._collection_data.query_by_id(key)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            # 404 from KVStore means the record simply is not there.
            raise ObjectACLNotExistException(
                'Object ACL info of %s_%s does not exist.' %
                (obj_collection, obj_id))
        return ObjectACL.parse(obj_acl)
    @retry(exceptions=[binding.HTTPError])
    def get_acls(self, obj_collection, obj_ids):
        '''Batch get acl info.
        Query objects acl info with parameter of the combination of
        `obj_collection` and `obj_ids` from KVStore and return them.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        :returns: List of `ObjectACL` instances.
        :rtype: ``list``
        '''
        # One '$or' query over all keys instead of N individual lookups.
        query = json.dumps(
            {'$or': [{'_key': ObjectACL.generate_key(obj_collection, obj_id)}
                     for obj_id in obj_ids]})
        obj_acls = self._collection_data.query(query=query)
        return [ObjectACL.parse(obj_acl) for obj_acl in obj_acls]
    @retry(exceptions=[binding.HTTPError])
    def delete_acl(self, obj_collection, obj_id):
        '''Delete acl info.
        Query object acl info with parameter of the combination of
        `obj_collection` and `obj_id` from KVStore and delete it.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_id: ID of this object.
        :type obj_id: ``string``
        :raises ObjectACLNotExistException: If object ACL info does not exist.
        '''
        key = ObjectACL.generate_key(obj_collection, obj_id)
        try:
            self._collection_data.delete_by_id(key)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            raise ObjectACLNotExistException(
                'Object ACL info of %s_%s does not exist.' %
                (obj_collection, obj_id))
    @retry(exceptions=[binding.HTTPError])
    def delete_acls(self, obj_collection, obj_ids):
        '''Batch delete acl info.
        Query objects acl info with parameter of the combination of
        `obj_collection` and `obj_ids` from KVStore and delete them.
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        '''
        query = json.dumps(
            {'$or': [{'_key': ObjectACL.generate_key(obj_collection, obj_id)}
                     for obj_id in obj_ids]})
        self._collection_data.delete(query=query)
    @retry(exceptions=[binding.HTTPError])
    def get_accessible_object_ids(self, user, operation, obj_collection, obj_ids):
        '''Get accessible IDs of objects from `obj_acls`.
        :param user: User name of current `operation`.
        :type user: ``string``
        :param operation: User operation, possible option: (read/write/delete).
        :type operation: ``string``
        :param obj_collection: Collection where object currently stored.
        :type obj_collection: ``string``
        :param obj_ids: IDs of objects.
        :type obj_ids: ``list``
        :returns: List of IDs of accessible objects.
        :rtype: ``list``
        '''
        obj_acls = self.get_acls(obj_collection, obj_ids)
        accessible_obj_ids = []
        for obj_acl in obj_acls:
            perms = obj_acl.obj_perms[operation]
            # Accessible if the perm list grants everyone ('*') or this user.
            if ObjectACL.OBJ_PERMS_ALLOW_ALL in perms or user in perms:
                accessible_obj_ids.append(obj_acl.obj_id)
        return accessible_obj_ids
class AppCapabilityManagerException(Exception):
    '''Raised when AppCapabilityManager cannot obtain its backing collection.'''
    pass
class AppCapabilityNotExistException(Exception):
    '''Raised when an app's capabilities have not been registered.'''
    pass
class AppCapabilityManager(object):
    '''App capability manager.
    :param collection_name: Collection name to store capabilities.
    :type collection_name: ``string``
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param app: App name of namespace.
    :type app: ``string``
    :param owner: (optional) Owner of namespace, default is `nobody`.
    :type owner: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :raises AppCapabilityManagerException: If init AppCapabilityManager failed.
    Usage::
       >>> from solnlib import user_access
       >>> acm = user_access.AppCapabilityManager('test_collection',
                                                  session_key,
                                                  'Splunk_TA_test')
       >>> acm.register_capabilities(...)
       >>> acm.unregister_capabilities(...)
    '''
    def __init__(self, collection_name, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, **context):
        self._app = app
        # Collections are namespaced per app: '<app>_<collection_name>'.
        collection_name = '{app}_{collection_name}'.format(
            app=app, collection_name=collection_name)
        try:
            self._collection_data = _get_collection_data(
                collection_name, session_key, app, owner,
                scheme, host, port, **context)
        except KeyError:
            raise AppCapabilityManagerException(
                'Get app capabilities collection: %s failed.' %
                collection_name)
    @retry(exceptions=[binding.HTTPError])
    def register_capabilities(self, capabilities):
        '''Register app capabilities.
        :param capabilities: App capabilities, example: {
            'object_type1': {
                'read': 'read_app_object_type1',
                'write': 'write_app_object_type1',
                'delete': 'delete_app_object_type1'},
            'object_type2': {
                'read': 'read_app_object_type2',
                'write': 'write_app_object_type2',
                'delete': 'delete_app_object_type2'},
            ...}
        :type capabilities: ``dict``
        '''
        # Keyed by app name, so each app has exactly one capabilities record;
        # batch_save upserts, so re-registering replaces the previous record.
        record = {'_key': self._app, 'capabilities': capabilities}
        self._collection_data.batch_save(record)
    @retry(exceptions=[binding.HTTPError])
    def unregister_capabilities(self):
        '''Unregister app capabilities.
        :raises AppCapabilityNotExistException: If app capabilities are
            not registered.
        '''
        try:
            self._collection_data.delete_by_id(self._app)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            raise AppCapabilityNotExistException(
                'App capabilities for %s have not been registered.' % self._app)
    @retry(exceptions=[binding.HTTPError])
    def capabilities_are_registered(self):
        '''Check if app capabilities are registered.
        :returns: True if app capabilities are registered else
            False.
        :rtype: ``bool``
        '''
        try:
            self._collection_data.query_by_id(self._app)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            # 404 means no record for this app.
            return False
        return True
    @retry(exceptions=[binding.HTTPError])
    def get_capabilities(self):
        '''Get app capabilities.
        :returns: App capabilities.
        :rtype: ``dict``
        :raises AppCapabilityNotExistException: If app capabilities are
            not registered.
        '''
        try:
            record = self._collection_data.query_by_id(self._app)
        except binding.HTTPError as e:
            if e.status != 404:
                raise
            raise AppCapabilityNotExistException(
                'App capabilities for %s have not been registered.' % self._app)
        return record['capabilities']
class UserAccessException(Exception):
    '''Raised by check_user_access when the user lacks the capability.'''
    pass
def check_user_access(session_key, capabilities, obj_type, operation,
                      scheme=None, host=None, port=None, **context):
    '''User access checker.
    Resolves the current user from `session_key`, looks up the capability
    registered for (`obj_type`, `operation`) in `capabilities`, and raises
    unless the user holds that capability.
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param capabilities: App capabilities, example: {
        'object_type1': {
            'read': 'read_app_object_type1',
            'write': 'write_app_object_type1',
            'delete': 'delete_app_object_type1'},
        'object_type2': {
            'read': 'read_app_object_type2',
            'write': 'write_app_object_type2',
            'delete': 'delete_app_object_type2'},
        ...}
    :type capabilities: ``dict``
    :param obj_type: Object type.
    :type obj_type: ``string``
    :param operation: User operation, possible option: (read/write/delete).
    :type operation: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :raises UserAccessException: If user access permission is denied.
    Usage::
       >>> from solnlib.user_access import check_user_access
       >>> def fun():
       >>>     check_user_access(
       >>>         session_key, capabilities, 'test_object', 'read')
       >>>     ...
    '''
    username = get_current_username(
        session_key, scheme=scheme, host=host, port=port, **context)
    required_capability = capabilities[obj_type][operation]
    is_allowed = user_is_capable(
        session_key, username, required_capability,
        scheme=scheme, host=host, port=port, **context)
    if not is_allowed:
        raise UserAccessException(
            'Permission denied, %s does not have the capability: %s.' %
            (username, required_capability))
class InvalidSessionKeyException(Exception):
    '''Raised by get_current_username when the session key is rejected (401).'''
    pass
@retry(exceptions=[binding.HTTPError])
def get_current_username(session_key,
                         scheme=None, host=None, port=None, **context):
    '''Get current user name from `session_key`.
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: Current user name.
    :rtype: ``string``
    :raises InvalidSessionKeyException: If `session_key` is invalid.
    Usage::
       >>> from solnlib import user_access
       >>> user_name = user_access.get_current_username(session_key)
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    try:
        result = client.get('/services/authentication/current-context',
                            output_mode='json')
        response = result.body.read()
    except binding.HTTPError as e:
        # 401 means the token itself is bad; anything else is unexpected.
        if e.status != 401:
            raise
        raise InvalidSessionKeyException('Invalid session key.')
    return json.loads(response)['entry'][0]['content']['username']
class UserNotExistException(Exception):
    '''Raised when a username is not known to Splunk (404 on user lookup).'''
    pass
@retry(exceptions=[binding.HTTPError])
def get_user_capabilities(session_key, username,
                          scheme=None, host=None, port=None, **context):
    '''Get user capabilities.
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: User name of capabilities to get.
    :type username: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: User capabilities.
    :rtype: ``list``
    :raises UserNotExistException: If `username` does not exist.
    Usage::
       >>> from solnlib import user_access
       >>> user_capabilities = user_access.get_user_capabilities(
       >>>     session_key, 'test_user')
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    url = '/services/authentication/users/{username}'.format(username=username)
    try:
        response = client.get(url, output_mode='json').body.read()
    except binding.HTTPError as e:
        # 404 means the user does not exist; re-raise anything else.
        if e.status != 404:
            raise
        raise UserNotExistException('User: %s does not exist.' % username)
    user_entry = json.loads(response)['entry'][0]
    return user_entry['content']['capabilities']
def user_is_capable(session_key, username, capability,
                    scheme=None, host=None, port=None, **context):
    '''Check if user is capable for given `capability`.
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: User name to check.
    :type username: ``string``
    :param capability: The capability we wish to check for.
    :type capability: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: True if user is capable else False.
    :rtype: ``bool``
    :raises UserNotExistException: If `username` does not exist.
    Usage::
       >>> from solnlib import user_access
       >>> is_capable = user_access.user_is_capable(
       >>>     session_key, 'test_user', 'object_read_capability')
    '''
    # Simple membership test over the user's full capability list.
    return capability in get_user_capabilities(
        session_key, username, scheme=scheme, host=host, port=port, **context)
@retry(exceptions=[binding.HTTPError])
def get_user_roles(session_key, username,
                   scheme=None, host=None, port=None, **context):
    '''Get user roles.
    :param session_key: Splunk access token.
    :type session_key: ``string``
    :param username: User name of roles to get.
    :type username: ``string``
    :param scheme: (optional) The access scheme, default is None.
    :type scheme: ``string``
    :param host: (optional) The host name, default is None.
    :type host: ``string``
    :param port: (optional) The port number, default is None.
    :type port: ``integer``
    :param context: Other configurations for Splunk rest client.
    :type context: ``dict``
    :returns: User roles.
    :rtype: ``list``
    :raises UserNotExistException: If `username` does not exist.
    Usage::
       >>> from solnlib import user_access
       >>> user_roles = user_access.get_user_roles(session_key, 'test_user')
    '''
    client = rest_client.SplunkRestClient(
        session_key,
        '-',
        scheme=scheme,
        host=host,
        port=port,
        **context)
    url = '/services/authentication/users/{username}'.format(username=username)
    try:
        response = client.get(url, output_mode='json').body.read()
    except binding.HTTPError as e:
        # 404 means the user does not exist; re-raise anything else.
        if e.status != 404:
            raise
        raise UserNotExistException('User: %s does not exist.' % username)
    user_entry = json.loads(response)['entry'][0]
    return user_entry['content']['roles']
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the woodpecker test path: action list plus checking metadata."""
    actions = [
        [TestAction.create_vm, 'vm1', ],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume, 'volume2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume3'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
        [TestAction.reboot_vm, 'vm1'],
        [TestAction.create_volume, 'volume4', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume4'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
        [TestAction.stop_vm, 'vm1'],
        [TestAction.ps_migrate_vm, 'vm1'],
        [TestAction.start_vm, 'vm1'],
        [TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot10'],
        [TestAction.clone_vm, 'vm1', 'vm2'],
        [TestAction.delete_volume, 'volume2'],
        [TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
        [TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
    ]
    return dict(initial_formation="template5",
                checking_point=8,
                path_list=actions)
'''
The final status:
Running:['vm1', 'vm2']
Stopped:[]
Enabled:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume4-snapshot5', 'vm1-snapshot10', 'volume1-snapshot10', 'volume2-snapshot10', 'volume3-snapshot10', 'volume4-snapshot10', 'vm1-image1']
attached:['volume1', 'volume3', 'volume4']
Detached:[]
Deleted:['volume2', 'vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume4-snapshot5']---vm1volume1_volume2_volume3_volume4
vm_snap3:['vm1-snapshot10', 'volume1-snapshot10', 'volume2-snapshot10', 'volume3-snapshot10', 'volume4-snapshot10']---vm1volume1_volume2_volume3_volume4
'''
|
import mock
from openstack.tests.unit import base
class TestProxyBase(base.TestCase):
    """Shared helpers for verifying that Proxy methods delegate correctly.

    Each ``verify_*`` helper patches a Resource/Proxy method, invokes the
    proxy method under test, and asserts both the returned value and the
    arguments the mocked method received.
    """
    def setUp(self):
        super(TestProxyBase, self).setUp()
        # Stand-in for an auth session; _verify asserts it is forwarded.
        self.session = mock.MagicMock()
    def _verify(self, mock_method, test_method,
                method_args=None, method_kwargs=None,
                expected_args=None, expected_kwargs=None,
                expected_result=None):
        """Patch ``mock_method``, call ``test_method``, and check the call.

        The mocked method is expected to be invoked with ``self.session``
        as its first argument, followed by ``expected_args`` and
        ``expected_kwargs``.  ``test_method`` must return
        ``expected_result`` (the mock's configured return value).
        """
        with mock.patch(mock_method) as mocked:
            mocked.return_value = expected_result
            if any([method_args, method_kwargs,
                    expected_args, expected_kwargs]):
                method_args = method_args or ()
                method_kwargs = method_kwargs or {}
                expected_args = expected_args or ()
                expected_kwargs = expected_kwargs or {}
                self.assertEqual(expected_result, test_method(*method_args,
                                                              **method_kwargs))
                mocked.assert_called_with(self.session,
                                          *expected_args, **expected_kwargs)
            else:
                self.assertEqual(expected_result, test_method())
                mocked.assert_called_with(self.session)
    # NOTE(briancurtin): This is a duplicate version of _verify that is
    # temporarily here while we shift APIs. The difference is that
    # calls from the Proxy classes aren't going to be going directly into
    # the Resource layer anymore, so they don't pass in the session which
    # was tested in assert_called_with.
    # This is being done in lieu of adding logic and complicating
    # the _verify method. It will be removed once there is one API to
    # be verifying.
    def _verify2(self, mock_method, test_method,
                 method_args=None, method_kwargs=None,
                 expected_args=None, expected_kwargs=None,
                 expected_result=None):
        """Like :meth:`_verify`, but the mocked call omits the session."""
        with mock.patch(mock_method) as mocked:
            mocked.return_value = expected_result
            if any([method_args, method_kwargs,
                    expected_args, expected_kwargs]):
                method_args = method_args or ()
                method_kwargs = method_kwargs or {}
                expected_args = expected_args or ()
                expected_kwargs = expected_kwargs or {}
                self.assertEqual(expected_result, test_method(*method_args,
                                                              **method_kwargs))
                mocked.assert_called_with(*expected_args, **expected_kwargs)
            else:
                # NOTE(review): unlike the branch above, the no-args path
                # still asserts the session is passed — presumably kept for
                # legacy no-argument calls; confirm before relying on it.
                self.assertEqual(expected_result, test_method())
                mocked.assert_called_with(self.session)
    def verify_create(self, test_method, resource_type,
                      mock_method="openstack.proxy.BaseProxy._create",
                      expected_result="result", **kwargs):
        """Verify a proxy create_* method delegates to BaseProxy._create."""
        the_kwargs = {"x": 1, "y": 2, "z": 3}
        method_kwargs = kwargs.pop("method_kwargs", the_kwargs)
        expected_args = [resource_type]
        expected_kwargs = kwargs.pop("expected_kwargs", the_kwargs)
        self._verify2(mock_method, test_method,
                      expected_result=expected_result,
                      method_kwargs=method_kwargs,
                      expected_args=expected_args,
                      expected_kwargs=expected_kwargs,
                      **kwargs)
    def verify_delete(self, test_method, resource_type, ignore,
                      input_path_args=None, expected_path_args=None,
                      mock_method="openstack.proxy.BaseProxy._delete"):
        """Verify a proxy delete_* method delegates to BaseProxy._delete."""
        method_kwargs = {"ignore_missing": ignore}
        if isinstance(input_path_args, dict):
            for key in input_path_args:
                method_kwargs[key] = input_path_args[key]
        expected_kwargs = {"ignore_missing": ignore}
        if expected_path_args:
            expected_kwargs["path_args"] = expected_path_args
        self._verify2(mock_method, test_method,
                      method_args=["resource_or_id"],
                      method_kwargs=method_kwargs,
                      expected_args=[resource_type, "resource_or_id"],
                      expected_kwargs=expected_kwargs)
    def verify_get(self, test_method, resource_type, value=None,
                   mock_method="openstack.proxy.BaseProxy._get", **kwargs):
        """Verify a proxy get_* method delegates to BaseProxy._get."""
        the_value = value if value is not None else ["value"]
        expected_kwargs = {"path_args": kwargs} if kwargs else {}
        self._verify2(mock_method, test_method,
                      method_args=the_value,
                      method_kwargs=kwargs,
                      expected_args=[resource_type] + the_value,
                      expected_kwargs=expected_kwargs)
    def verify_head(self, test_method, resource_type,
                    mock_method="openstack.proxy.BaseProxy._head",
                    value=None, **kwargs):
        """Verify a proxy head_* method delegates to BaseProxy._head."""
        the_value = [value] if value is not None else []
        expected_kwargs = {"path_args": kwargs} if kwargs else {}
        self._verify2(mock_method, test_method,
                      method_args=the_value,
                      method_kwargs=kwargs,
                      expected_args=[resource_type] + the_value,
                      expected_kwargs=expected_kwargs)
    def verify_find(self, mock_method, test_method, **kwargs):
        """Verify find delegation with ignore_missing True and False."""
        self._verify(mock_method, test_method, method_args=["name_or_id"],
                     expected_args=["name_or_id"],
                     expected_kwargs={'ignore_missing': True},
                     expected_result="result", **kwargs)
        self._verify(mock_method, test_method,
                     method_args=["name_or_id", False],
                     expected_args=["name_or_id"],
                     expected_kwargs={'ignore_missing': False},
                     expected_result="result", **kwargs)
    def verify_find2(self, mock_method, test_method, path_args, **kwargs):
        """Verify find delegation when extra path arguments are required."""
        method_args = ["name_or_id"]
        for key in path_args:
            method_args.append(path_args[key])
        self._verify(mock_method, test_method,
                     method_args=method_args,
                     expected_args=["name_or_id"],
                     expected_kwargs={"path_args": path_args},
                     expected_result="result",
                     **kwargs)
    def verify_list(self, test_method, resource_type, paginated=False,
                    mock_method="openstack.proxy.BaseProxy._list",
                    **kwargs):
        """Verify a proxy list method delegates to BaseProxy._list."""
        expected_kwargs = kwargs.pop("expected_kwargs", {})
        expected_kwargs.update({"paginated": paginated})
        expected_kwargs['limit'] = 2
        method_kwargs = kwargs.pop("method_kwargs", {})
        method_kwargs['limit'] = 2
        self._verify2(mock_method, test_method,
                      method_kwargs=method_kwargs,
                      expected_args=[resource_type],
                      expected_kwargs=expected_kwargs,
                      expected_result=["result"],
                      **kwargs)
    def verify_update(self, test_method, resource_type,
                      mock_method="openstack.proxy.BaseProxy._update",
                      expected_result="result", **kwargs):
        """Verify a proxy update_* method delegates to BaseProxy._update."""
        the_kwargs = {"x": 1, "y": 2, "z": 3}
        method_args = ["resource_or_id"]
        method_kwargs = the_kwargs
        expected_args = [resource_type, "resource_or_id"]
        expected_kwargs = the_kwargs
        self._verify2(mock_method, test_method,
                      expected_result=expected_result,
                      method_args=method_args,
                      method_kwargs=method_kwargs,
                      expected_args=expected_args,
                      expected_kwargs=expected_kwargs,
                      **kwargs)
    def verify_wait_for_status(
            self, test_method,
            mock_method="openstack.resource.wait_for_status", **kwargs):
        """Verify a proxy wait_for_* method delegates to wait_for_status."""
        self._verify(mock_method, test_method, **kwargs)
|
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_grad # pylint: disable=unused-import
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def dense_to_csr_sparse_matrix(dense):
  """Convert a dense tensor into a CSR sparse matrix of its nonzero entries."""
  tensor = ops.convert_to_tensor(dense)
  nonzero_locations = array_ops.where(math_ops.abs(tensor) > 0)
  return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(
      tensor, nonzero_locations)
def _add_test(test, op_name, testcase_name, fn):  # pylint: disable=redefined-outer-name
  """Attach `fn` to `test` as a method named test_<op_name>_<testcase_name>.

  A `None` fn is silently skipped; registering the same name twice raises
  RuntimeError so misconfigured parameterizations fail loudly.
  """
  if fn is None:
    return
  attr_name = "test_%s_%s" % (op_name, testcase_name)
  if hasattr(test, attr_name):
    raise RuntimeError("Test %s defined more than once" % attr_name)
  setattr(test, attr_name, fn)
class CSRSparseMatrixGradTest(test.TestCase):
  """Gradient tests for CSR sparse matrix conversion and addition ops."""
  @classmethod
  def setUpClass(cls):
    super(CSRSparseMatrixGradTest, cls).setUpClass()
    # Cached once for the whole class; several tests below are GPU-only.
    cls._gpu_available = test_util.is_gpu_available()
  # TODO(penporn): Make these tests runnable on eager mode.
  # (tf.gradients and gradient_checker only run in graph mode.)
  @test_util.run_deprecated_v1
  def testLargeBatchConversionGrad(self):
    """Gradients of dense->CSR->dense flow only through nonzero entries."""
    if not self._gpu_available:
      return
    sparsify = lambda m: m * (m > 0)
    for dense_shape in ([53, 65, 127], [127, 65]):
      mats_val = sparsify(np.random.randn(*dense_shape))
      with self.test_session() as sess:
        mats = math_ops.cast(mats_val, dtype=dtypes.float32)
        sparse_mats = dense_to_csr_sparse_matrix(mats)
        dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
            sparse_mats, dtypes.float32)
        grad_vals = np.random.randn(*dense_shape).astype(np.float32)
        grad_out = gradients_impl.gradients([dense_mats], [mats],
                                            [grad_vals])[0]
        self.assertEqual(grad_out.dtype, dtypes.float32)
        self.assertEqual(grad_out.shape, dense_shape)
        grad_out_value = sess.run(grad_out)
        tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" %
                        dense_shape)
        # Gradient passes through where the input was nonzero and is
        # exactly zero elsewhere (those entries were dropped during the
        # conversion to CSR).
        nonzero_indices = abs(mats_val) > 0.0
        self.assertAllEqual(grad_out_value[nonzero_indices],
                            grad_vals[nonzero_indices])
        self.assertTrue(
            np.all(grad_out_value[np.logical_not(nonzero_indices)] == 0.0))
  @test_util.run_deprecated_v1
  def testLargeBatchSparseConversionGrad(self):
    """Gradient of SparseTensor -> CSR -> SparseTensor is the identity."""
    sparsify = lambda m: m * (m > 0)
    for dense_shape in ([53, 65, 127], [127, 65]):
      mats_val = sparsify(np.random.randn(*dense_shape))
      with self.session(use_gpu=True) as sess:
        indices = array_ops.where_v2(
            math_ops.not_equal(mats_val, array_ops.zeros_like(mats_val)))
        values = math_ops.cast(
            array_ops.gather_nd(mats_val, indices), dtype=dtypes.float32)
        grad_vals = np.random.randn(*sess.run(values).shape).astype(np.float32)
        csr_matrix = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            indices, values, dense_shape)
        new_coo_tensor = (
            sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
                csr_matrix, type=dtypes.float32))
        grad_out = gradients_impl.gradients([new_coo_tensor.values], [values],
                                            [grad_vals])[0]
        self.assertEqual(grad_out.dtype, dtypes.float32)
        grad_out_vals = sess.run(grad_out)
        self.assertAllClose(grad_vals, grad_out_vals)
  @test.disable_with_predicate(
      pred=test.is_built_with_rocm,
      skip_message="sparse-matrix-add op not supported on ROCm")
  @test_util.run_deprecated_v1
  def testLargeBatchSparseMatrixAddGrad(self):
    """Gradients of alpha*A + beta*B are the scaled upstream gradients."""
    if not self._gpu_available:
      return
    sparsify = lambda m: m * (m > 0)
    for dense_shape in ([53, 65, 127], [127, 65]):
      a_mats_val = sparsify(np.random.randn(*dense_shape))
      b_mats_val = sparsify(np.random.randn(*dense_shape))
      alpha = np.float32(0.5)
      beta = np.float32(-1.5)
      grad_vals = np.random.randn(*dense_shape).astype(np.float32)
      # Expected gradients are the scaled upstream gradients, zeroed where
      # the corresponding input matrix had no stored entry.
      expected_a_grad = alpha * grad_vals
      expected_b_grad = beta * grad_vals
      expected_a_grad[abs(a_mats_val) == 0.0] = 0.0
      expected_b_grad[abs(b_mats_val) == 0.0] = 0.0
      with self.test_session() as sess:
        a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32)
        b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32)
        a_sm = dense_to_csr_sparse_matrix(a_mats)
        b_sm = dense_to_csr_sparse_matrix(b_mats)
        c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
            a_sm, b_sm, alpha=alpha, beta=beta)
        c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
            c_sm, dtypes.float32)
        a_grad, b_grad = gradients_impl.gradients([c_dense], [a_mats, b_mats],
                                                  [grad_vals])
        self.assertEqual(a_grad.dtype, dtypes.float32)
        self.assertEqual(b_grad.dtype, dtypes.float32)
        self.assertEqual(a_grad.shape, dense_shape)
        self.assertEqual(b_grad.shape, dense_shape)
        a_grad_value, b_grad_value = sess.run((a_grad, b_grad))
        # Fixed copy-pasted log message (previously logged the name of
        # testLargeBatchConversionGrad from this test).
        tf_logging.info("testLargeBatchSparseMatrixAddGrad: Testing shape %s" %
                        dense_shape)
        self.assertAllEqual(expected_a_grad, a_grad_value)
        self.assertAllEqual(expected_b_grad, b_grad_value)
# Run the TensorFlow test driver when executed as a script.
if __name__ == "__main__":
  test.main()
|
from functools import partial
from solnado import SolrClient
from tornado import ioloop, gen
# Module-level Solr client shared by all of the coroutines below.
c = SolrClient()
@gen.coroutine
def create_core():
    """Create the Solr core named 'foo' and return the client response."""
    call = partial(c.core_create, 'foo')
    response = yield gen.Task(call)
    raise gen.Return(response)
@gen.coroutine
def create_collection():
    """Create the Solr collection named 'foo' and return the response."""
    call = partial(c.create_collection, 'foo')
    response = yield gen.Task(call)
    raise gen.Return(response)
@gen.coroutine
def index_documents(docs):
    """Index *docs* (a list of JSON-serializable dicts) into the 'foo' core.

    commitWithin=0 asks Solr to commit the documents immediately.
    """
    call = partial(c.add_json_documents, 'foo', docs, commitWithin=0)
    response = yield gen.Task(call)
    raise gen.Return(response)
@gen.coroutine
def main_coro():
    """Create the core and collection, then index two sample documents."""
    yield create_core()
    yield create_collection()
    res = yield index_documents([
        {
            'id':'123',
            'Title': 'A tale of two documents',
        },{
            'id': '456',
            'Title': 'It was the best of times',
        }])
    # Python 2 print statement: show the raw HTTP body and status code.
    print res.body, res.code
# Drive the whole workflow to completion on the singleton IOLoop.
ioloop.IOLoop.instance().run_sync(main_coro)
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'IccpModeEnum' : _MetaInfoEnum('IccpModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg',
{
'singleton':'SINGLETON',
}, 'Cisco-IOS-XR-rgmgr-cfg', _yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg']),
'RedundancyGroupManager.Aps.DefaultRedundancyGroup' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps.DefaultRedundancyGroup',
False,
[
_MetaInfoClassMember('backup-interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Backup interface name
''',
'backup_interface_name',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('next-hop-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address of remote peer
''',
'next_hop_address',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'default-redundancy-group',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Aps.Groups.Group.Controllers.Controller' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps.Groups.Group.Controllers.Controller',
False,
[
_MetaInfoClassMember('controller-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Controller Name
''',
'controller_name',
'Cisco-IOS-XR-rgmgr-cfg', True),
_MetaInfoClassMember('backup-interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Backup interface name
''',
'backup_interface_name',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('next-hop-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address of remote peer
''',
'next_hop_address',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'controller',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Aps.Groups.Group.Controllers' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps.Groups.Group.Controllers',
False,
[
_MetaInfoClassMember('controller', REFERENCE_LIST, 'Controller' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps.Groups.Group.Controllers.Controller',
[], [],
''' none
''',
'controller',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'controllers',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Aps.Groups.Group' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps.Groups.Group',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[(1, 32)], [],
''' The redundancy group ID
''',
'group_id',
'Cisco-IOS-XR-rgmgr-cfg', True),
_MetaInfoClassMember('controllers', REFERENCE_CLASS, 'Controllers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps.Groups.Group.Controllers',
[], [],
''' Controller configuration
''',
'controllers',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'group',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Aps.Groups' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps.Groups',
False,
[
_MetaInfoClassMember('group', REFERENCE_LIST, 'Group' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps.Groups.Group',
[], [],
''' Redundancy Group Configuration
''',
'group',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'groups',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Aps' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Aps',
False,
[
_MetaInfoClassMember('default-redundancy-group', REFERENCE_CLASS, 'DefaultRedundancyGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps.DefaultRedundancyGroup',
[], [],
''' Default SONET controller backup configuration
''',
'default_redundancy_group',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('groups', REFERENCE_CLASS, 'Groups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps.Groups',
[], [],
''' Redundancy Group Table
''',
'groups',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'aps',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones.Backbone' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones.Backbone',
False,
[
_MetaInfoClassMember('backbone-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' none
''',
'backbone_name',
'Cisco-IOS-XR-rgmgr-cfg', True),
],
'Cisco-IOS-XR-rgmgr-cfg',
'backbone',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones',
False,
[
_MetaInfoClassMember('backbone', REFERENCE_LIST, 'Backbone' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones.Backbone',
[], [],
''' ICCP backbone interface configuration
''',
'backbone',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'backbones',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members.Member' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members.Member',
False,
[
_MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor_address',
'Cisco-IOS-XR-rgmgr-cfg', True),
],
'Cisco-IOS-XR-rgmgr-cfg',
'member',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members',
False,
[
_MetaInfoClassMember('member', REFERENCE_LIST, 'Member' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members.Member',
[], [],
''' ICCP member configuration
''',
'member',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'members',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.NvSatellite' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.NvSatellite',
False,
[
_MetaInfoClassMember('system-mac', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Optional identifier for this system
''',
'system_mac',
'Cisco-IOS-XR-icpe-infra-cfg', False),
],
'Cisco-IOS-XR-icpe-infra-cfg',
'nv-satellite',
_yang_ns._namespaces['Cisco-IOS-XR-icpe-infra-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups.IccpGroup',
False,
[
_MetaInfoClassMember('group-number', ATTRIBUTE, 'int' , None, None,
[(1, 4294967295)], [],
''' The redundancy icc group number
''',
'group_number',
'Cisco-IOS-XR-rgmgr-cfg', True),
_MetaInfoClassMember('backbones', REFERENCE_CLASS, 'Backbones' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones',
[], [],
''' ICCP backbone configuration
''',
'backbones',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('isolation-recovery-delay', ATTRIBUTE, 'int' , None, None,
[(30, 600)], [],
''' ICCP isolation recovery delay
''',
'isolation_recovery_delay',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('members', REFERENCE_CLASS, 'Members' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members',
[], [],
''' ICCP member configuration
''',
'members',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'IccpModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'IccpModeEnum',
[], [],
''' ICCP mode
''',
'mode',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('nv-satellite', REFERENCE_CLASS, 'NvSatellite' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.NvSatellite',
[], [],
''' nV Satellite configuration
''',
'nv_satellite',
'Cisco-IOS-XR-icpe-infra-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'iccp-group',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp.IccpGroups' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp.IccpGroups',
False,
[
_MetaInfoClassMember('iccp-group', REFERENCE_LIST, 'IccpGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups.IccpGroup',
[], [],
''' Redundancy Group Configuration
''',
'iccp_group',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'iccp-groups',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager.Iccp' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager.Iccp',
False,
[
_MetaInfoClassMember('iccp-groups', REFERENCE_CLASS, 'IccpGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp.IccpGroups',
[], [],
''' Redundancy Group Table Configuration
''',
'iccp_groups',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'iccp',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
'RedundancyGroupManager' : {
'meta_info' : _MetaInfoClass('RedundancyGroupManager',
False,
[
_MetaInfoClassMember('aps', REFERENCE_CLASS, 'Aps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Aps',
[], [],
''' MR-APS groups
''',
'aps',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable redundancy group manager
''',
'enable',
'Cisco-IOS-XR-rgmgr-cfg', False),
_MetaInfoClassMember('iccp', REFERENCE_CLASS, 'Iccp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg', 'RedundancyGroupManager.Iccp',
[], [],
''' ICCP configuration
''',
'iccp',
'Cisco-IOS-XR-rgmgr-cfg', False),
],
'Cisco-IOS-XR-rgmgr-cfg',
'redundancy-group-manager',
_yang_ns._namespaces['Cisco-IOS-XR-rgmgr-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_rgmgr_cfg'
),
},
}
# Wire up parent pointers so each nested class's meta_info can reach its
# container's meta_info (child -> parent, from the leaves up to the root).
_meta_table['RedundancyGroupManager.Aps.Groups.Group.Controllers.Controller']['meta_info'].parent =_meta_table['RedundancyGroupManager.Aps.Groups.Group.Controllers']['meta_info']
_meta_table['RedundancyGroupManager.Aps.Groups.Group.Controllers']['meta_info'].parent =_meta_table['RedundancyGroupManager.Aps.Groups.Group']['meta_info']
_meta_table['RedundancyGroupManager.Aps.Groups.Group']['meta_info'].parent =_meta_table['RedundancyGroupManager.Aps.Groups']['meta_info']
_meta_table['RedundancyGroupManager.Aps.DefaultRedundancyGroup']['meta_info'].parent =_meta_table['RedundancyGroupManager.Aps']['meta_info']
_meta_table['RedundancyGroupManager.Aps.Groups']['meta_info'].parent =_meta_table['RedundancyGroupManager.Aps']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones.Backbone']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members.Member']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Backbones']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.Members']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup.NvSatellite']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups.IccpGroup']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp.IccpGroups']['meta_info']
_meta_table['RedundancyGroupManager.Iccp.IccpGroups']['meta_info'].parent =_meta_table['RedundancyGroupManager.Iccp']['meta_info']
_meta_table['RedundancyGroupManager.Aps']['meta_info'].parent =_meta_table['RedundancyGroupManager']['meta_info']
_meta_table['RedundancyGroupManager.Iccp']['meta_info'].parent =_meta_table['RedundancyGroupManager']['meta_info']
|
import collections
import networkx as nx
from taskflow import exceptions as exc
from taskflow import flow
from taskflow.utils import graph_utils
class Flow(flow.Flow):
    """Graph flow pattern
    Contained *flows/tasks* will be executed according to their dependencies
    which will be resolved by using the *flows/tasks* provides and requires
    mappings or by following manually created dependency links.
    Note: Cyclic dependencies are not allowed.
    """
    def __init__(self, name):
        super(Flow, self).__init__(name)
        # The graph is kept frozen (immutable); all mutation happens on a
        # copy which is validated and then swapped in via _swap().
        self._graph = nx.freeze(nx.DiGraph())
    def _validate(self, graph=None):
        """Raise DependencyFailure if *graph* (default: own graph) is cyclic."""
        if graph is None:
            graph = self._graph
        # Ensure that there is a valid topological ordering.
        if not nx.is_directed_acyclic_graph(graph):
            raise exc.DependencyFailure("No path through the items in the"
                                        " graph produces an ordering that"
                                        " will allow for correct dependency"
                                        " resolution")
    def link(self, u, v):
        """Link existing node u as a runtime dependency of existing node v."""
        if not self._graph.has_node(u):
            raise ValueError('Item %s not found to link from' % (u))
        if not self._graph.has_node(v):
            raise ValueError('Item %s not found to link to' % (v))
        self._swap(self._link(u, v, manual=True))
        return self
    def _link(self, u, v, graph=None, reason=None, manual=False):
        """Add (or annotate) edge u -> v and return the mutated graph.

        With no *graph* given, the edge is applied to a fresh copy of the
        underlying graph (which stays untouched); the caller is expected to
        validate and swap in the returned copy.
        """
        mutable_graph = True
        if graph is None:
            graph = self._graph
            mutable_graph = False
        # NOTE(harlowja): Add an edge to a temporary copy and only if that
        # copy is valid then do we swap with the underlying graph.
        attrs = graph_utils.get_edge_attrs(graph, u, v)
        if not attrs:
            attrs = {}
        if manual:
            attrs['manual'] = True
        if reason is not None:
            if 'reasons' not in attrs:
                attrs['reasons'] = set()
            attrs['reasons'].add(reason)
        if not mutable_graph:
            graph = nx.DiGraph(graph)
        graph.add_edge(u, v, **attrs)
        return graph
    def _swap(self, replacement_graph):
        """Validates the replacement graph and then swaps the underlying graph
        with a frozen version of the replacement graph (this maintains the
        invariant that the underlying graph is immutable).
        """
        self._validate(replacement_graph)
        self._graph = nx.freeze(replacement_graph)
    def add(self, *items):
        """Adds a given task/tasks/flow/flows to this flow."""
        # Ignore items already present in the graph.
        items = [i for i in items if not self._graph.has_node(i)]
        if not items:
            return self
        # Map of symbol name -> list of nodes that require it.
        requirements = collections.defaultdict(list)
        # Map of symbol name -> the (single) node that provides it.
        provided = {}
        def update_requirements(node):
            for value in node.requires:
                requirements[value].append(node)
        for node in self:
            update_requirements(node)
            for value in node.provides:
                provided[value] = node
        # NOTE(harlowja): Add items and edges to a temporary copy of the
        # underlying graph and only if that is successful added to do we then
        # swap with the underlying graph.
        tmp_graph = nx.DiGraph(self._graph)
        for item in items:
            tmp_graph.add_node(item)
            update_requirements(item)
            for value in item.provides:
                if value in provided:
                    raise exc.DependencyFailure(
                        "%(item)s provides %(value)s but is already being"
                        " provided by %(flow)s and duplicate producers"
                        " are disallowed"
                        % dict(item=item.name,
                               flow=provided[value].name,
                               value=value))
                provided[value] = item
            for value in item.requires:
                if value in provided:
                    self._link(provided[value], item,
                               graph=tmp_graph, reason=value)
            for value in item.provides:
                if value in requirements:
                    for node in requirements[value]:
                        self._link(item, node,
                                   graph=tmp_graph, reason=value)
        self._swap(tmp_graph)
        return self
    def __len__(self):
        """Number of tasks/flows contained in this flow."""
        return self._graph.number_of_nodes()
    def __iter__(self):
        """Iterate over contained tasks/flows (graph order, not topological)."""
        for n in self._graph.nodes_iter():
            yield n
    @property
    def provides(self):
        """Union of all symbols provided by the contained items."""
        provides = set()
        for subflow in self:
            provides.update(subflow.provides)
        return provides
    @property
    def requires(self):
        """Symbols required by contained items and not provided internally."""
        requires = set()
        for subflow in self:
            requires.update(subflow.requires)
        return requires - self.provides
    @property
    def graph(self):
        """The underlying (frozen) dependency graph."""
        return self._graph
|
"""Tests for ldif.util.tf_util."""
import numpy as np
from parameterized import parameterized
import tensorflow as tf
from ldif.util import tf_util
# Maximum allowed L1 difference between expected and returned tensors.
DISTANCE_EPS = 1e-6
class TfUtilTest(tf.test.TestCase):
  """Tests for tf_util.remove_element over rows and columns of a 2x2 input."""
  @parameterized.expand([('RemoveSecondRow', 1, 0, [[1.0, 2.0]]),
                         ('RemoveFirstRow', 0, 0, [[3.0, 4.0]]),
                         ('RemoveFirstCol', 0, 1, [[2.0], [4.0]]),
                         ('RemoveSecondCol', 1, 1, [[1.0], [3.0]])])
  def testRemoveElement(self, name, elt, axis, expected):
    """Removing index `elt` along `axis` yields `expected` (within eps)."""
    initial = tf.constant([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32)
    removed = tf_util.remove_element(initial, tf.constant([elt],
                                                          dtype=tf.int32), axis)
    expected = np.array(expected, dtype=np.float32)
    with self.test_session() as sess:
      returned = sess.run(removed)
      # Compare via total absolute difference rather than exact equality.
      distance = float(np.sum(np.abs(expected - returned)))
      self.assertLess(
          distance, DISTANCE_EPS, 'Expected \n%s\n but got \n%s' %
          (np.array_str(expected), np.array_str(returned)))
# Run the TensorFlow test driver when executed as a script.
if __name__ == '__main__':
  tf.test.main()
|
import logging

# Public version string for this package.
__version__ = '0.4.0'

# Shared module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
from collections import Counter
from functools import partial
from itertools import chain, product
import numpy as np
import torch
import torch.nn as nn
from scipy.sparse import issparse
from torch.utils.data import DataLoader
from metal.classifier import Classifier
from metal.label_model.graph_utils import get_clique_tree
from metal.label_model.lm_defaults import lm_default_config
from metal.utils import MetalDataset, recursive_merge_dicts
class LabelModel(Classifier):
"""A LabelModel...TBD
Args:
k: (int) the cardinality of the classifier
"""
# This class variable is explained in the Classifier class
implements_l2 = True
    def __init__(self, k=2, **kwargs):
        """Initialize the label model.

        Args:
            k: (int) cardinality of the classifier (number of classes)
            **kwargs: config overrides merged into ``lm_default_config``
        """
        config = recursive_merge_dicts(lm_default_config, kwargs)
        super().__init__(k, config)
def _check_L(self, L):
"""Run some basic checks on L."""
# TODO: Take this out?
if issparse(L):
L = L.todense()
# Check for correct values, e.g. warning if in {-1,0,1}
if np.any(L < 0):
raise ValueError("L must have values in {0,1,...,k}.")
def _create_L_ind(self, L):
    """Convert a label matrix with labels in 0...k to a one-hot format.

    Args:
        L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}

    Returns:
        An [n, m*k] dense np.ndarray with values in {0,1}. Abstain (0)
        labels get no column.
    """
    # TODO: Update LabelModel to keep L variants as sparse matrices
    # throughout and remove this densification.
    if issparse(L):
        L = L.todense()
    one_hot = np.zeros((self.n, self.m * self.k))
    for label in range(1, self.k + 1):
        # Columns label-1, label-1+k, label-1+2k, ... hold the indicators
        # for value `label` across all m sources (strided slice).
        one_hot[:, (label - 1) :: self.k] = np.where(L == label, 1, 0)
    return one_hot
def _get_augmented_label_matrix(self, L, higher_order=False):
    """Returns an augmented version of L where each column is an indicator
    for whether a certain source or clique of sources voted in a certain
    pattern.

    Args:
        L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
        higher_order: (bool) if True, also append one indicator column per
            joint value assignment of each non-unary maximal clique /
            separator set of the clique tree.
    """
    # Create a helper data structure which maps cliques (as tuples of member
    # sources) --> {start_index, end_index, maximal_cliques}, where
    # the last value is a set of indices in this data structure
    self.c_data = {}
    for i in range(self.m):
        self.c_data[i] = {
            # Columns [i*k, (i+1)*k) of L_ind belong to source i
            "start_index": i * self.k,
            "end_index": (i + 1) * self.k,
            # Clique-tree nodes (maximal cliques) containing source i
            "max_cliques": set(
                [
                    j
                    for j in self.c_tree.nodes()
                    if i in self.c_tree.node[j]["members"]
                ]
            ),
        }
    L_ind = self._create_L_ind(L)
    # Get the higher-order clique statistics based on the clique tree
    # First, iterate over the maximal cliques (nodes of c_tree) and
    # separator sets (edges of c_tree)
    if higher_order:
        L_aug = np.copy(L_ind)
        for item in chain(self.c_tree.nodes(), self.c_tree.edges()):
            if isinstance(item, int):
                C = self.c_tree.node[item]
                C_type = "node"
            elif isinstance(item, tuple):
                C = self.c_tree[item[0]][item[1]]
                C_type = "edge"
            else:
                raise ValueError(item)
            members = list(C["members"])
            nc = len(members)
            # If a unary maximal clique, just store its existing index
            if nc == 1:
                C["start_index"] = members[0] * self.k
                C["end_index"] = (members[0] + 1) * self.k
            # Else add one column for each possible value
            else:
                # One indicator column per joint assignment of the clique:
                # the product of the members' one-hot columns.
                L_C = np.ones((self.n, self.k ** nc))
                for i, vals in enumerate(product(range(self.k), repeat=nc)):
                    for j, v in enumerate(vals):
                        L_C[:, i] *= L_ind[:, members[j] * self.k + v]
                # Add to L_aug and store the indices
                if L_aug is not None:
                    C["start_index"] = L_aug.shape[1]
                    C["end_index"] = L_aug.shape[1] + L_C.shape[1]
                    L_aug = np.hstack([L_aug, L_C])
                else:
                    C["start_index"] = 0
                    C["end_index"] = L_C.shape[1]
                    L_aug = L_C
                # Add to self.c_data as well
                id = tuple(members) if len(members) > 1 else members[0]
                self.c_data[id] = {
                    "start_index": C["start_index"],
                    "end_index": C["end_index"],
                    "max_cliques": set([item]) if C_type == "node" else set(item),
                }
        return L_aug
    else:
        return L_ind
def _build_mask(self):
    """Build mask applied to O^{-1}, O for the matrix approx constraint."""
    self.mask = torch.ones(self.d, self.d).byte()
    # Walk all ordered pairs of cliques; any pair that shares a maximal
    # clique gets both of its (symmetric) blocks zeroed out.
    for ci, cj in product(self.c_data.values(), repeat=2):
        if ci["max_cliques"] & cj["max_cliques"]:
            si, ei = ci["start_index"], ci["end_index"]
            sj, ej = cj["start_index"], cj["end_index"]
            self.mask[si:ei, sj:ej] = 0
            self.mask[sj:ej, si:ei] = 0
def _generate_O(self, L):
    """Form the overlaps matrix O = (1/n) * L_aug^T L_aug: all observed
    combinations of values of pairs of sources.

    Note that we only include the k non-abstain values of each source,
    otherwise the model is not minimal --> leads to singular matrix.
    """
    L_aug = self._get_augmented_label_matrix(L)
    self.d = L_aug.shape[1]
    overlaps = L_aug.T @ L_aug / self.n
    self.O = torch.from_numpy(overlaps).float()

def _generate_O_inv(self, L):
    """Form the *inverse* overlaps matrix."""
    self._generate_O(L)
    inverse = np.linalg.inv(self.O.numpy())
    self.O_inv = torch.from_numpy(inverse).float()
def _init_params(self):
    """Initialize the learned params

    - \mu is the primary learned parameter, where each row corresponds to
    the probability of a clique C emitting a specific combination of labels,
    conditioned on different values of Y (for each column); that is:

        self.mu[i*self.k + j, y] = P(\lambda_i = j | Y = y)

    and similarly for higher-order cliques.
    - Z is the inverse form version of \mu.
    """
    train_config = self.config["train_config"]
    # Initialize mu so as to break basic reflective symmetry
    # Note that we are given either a single or per-LF initial precision
    # value, prec_i = P(Y=y|\lf=y), and use:
    # mu_init = P(\lf=y|Y=y) = P(\lf=y) * prec_i / P(Y=y)
    # Handle single or per-LF values
    if isinstance(train_config["prec_init"], (int, float)):
        prec_init = train_config["prec_init"] * torch.ones(self.m)
    else:
        prec_init = torch.from_numpy(train_config["prec_init"])
        if prec_init.shape[0] != self.m:
            raise ValueError(f"prec_init must have shape {self.m}.")
    # Get the per-value labeling propensities
    # Note that self.O must have been computed already!
    lps = torch.diag(self.O).numpy()
    # TODO: Update for higher-order cliques!
    self.mu_init = torch.zeros(self.d, self.k)
    for i in range(self.m):
        for y in range(self.k):
            idx = i * self.k + y
            # Clamp to [0,1] since these are probabilities
            mu_init = torch.clamp(lps[idx] * prec_init[i] / self.p[y], 0, 1)
            self.mu_init[idx, y] += mu_init
    # Initialize randomly based on self.mu_init
    # NOTE(review): nn.Parameter(...).float() returns a plain Tensor, not a
    # Parameter — confirm self.mu / self.Z are still registered with the
    # optimizer as intended.
    self.mu = nn.Parameter(self.mu_init.clone() * np.random.random()).float()
    if self.inv_form:
        self.Z = nn.Parameter(torch.randn(self.d, self.k)).float()
    # Build the mask over O^{-1}
    # TODO: Put this elsewhere?
    self._build_mask()
def get_conditional_probs(self, source=None):
    """Returns the full conditional probabilities table as a numpy array,
    where row i*(k+1) + ly is the conditional probabilities of source i
    emmiting label ly (including abstains 0), conditioned on different
    values of Y, i.e.:

        c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)

    Note that this simply involves inferring the 0th row by law of total
    probability and adding in to mu.

    If `source` is not None, returns only the corresponding block.
    """
    c_probs = np.zeros((self.m * (self.k + 1), self.k))
    mu = self.mu.detach().clone().numpy()
    for i in range(self.m):
        # si = self.c_data[(i,)]['start_index']
        # ei = self.c_data[(i,)]['end_index']
        # mu_i = mu[si:ei, :]
        # Source i's k non-abstain rows of mu
        mu_i = mu[i * self.k : (i + 1) * self.k, :]
        c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i
        # The 0th row (corresponding to abstains) is the difference between
        # the sums of the other rows and one, by law of total prob
        c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0)
    # Clip away exact 0/1 so downstream logs stay finite
    c_probs = np.clip(c_probs, 0.01, 0.99)
    if source is not None:
        return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)]
    else:
        return c_probs
def predict_proba(self, L):
    """Returns the [n,k] matrix of label probabilities P(Y | \lambda)

    Args:
        L: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}
    """
    self._set_constants(L)
    L_aug = self._get_augmented_label_matrix(L)
    # Clip away exact 0/1 so np.log(mu) below stays finite
    mu = np.clip(self.mu.detach().clone().numpy(), 0.01, 0.99)
    # Create a "junction tree mask" over the columns of L_aug / mu
    if len(self.deps) > 0:
        jtm = np.zeros(L_aug.shape[1])
        # All maximal cliques are +1
        for i in self.c_tree.nodes():
            node = self.c_tree.node[i]
            jtm[node["start_index"] : node["end_index"]] = 1
        # All separator sets are -1
        # NOTE(review): the comment above says -1, but the code assigns 1;
        # confirm which sign is intended for separator-set columns.
        for i, j in self.c_tree.edges():
            edge = self.c_tree[i][j]
            jtm[edge["start_index"] : edge["end_index"]] = 1
    else:
        jtm = np.ones(L_aug.shape[1])
    # Note: We omit abstains, effectively assuming uniform distribution here
    X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))
    # Row-normalize so each row sums to one
    Z = np.tile(X.sum(axis=1).reshape(-1, 1), self.k)
    return X / Z
def get_Q(self):
    """Get the model's estimate of Q = \mu P \mu^T

    We can then separately extract \mu subject to additional constraints,
    e.g. \mu P 1 = diag(O).
    """
    Z = self.Z.detach().clone().numpy()
    O = self.O.numpy()
    # Woodbury-style inner term: (I_k + Z^T O Z)^{-1}
    inner = np.linalg.inv(np.eye(self.k) + Z.T @ O @ Z)
    return O @ Z @ inner @ Z.T @ O
# These loss functions get all their data directly from the LabelModel
# (for better or worse). The unused *args make these compatible with the
# Classifer._train() method which expect loss functions to accept an input.
def loss_l2(self, l2=0):
    """L2 loss centered around mu_init, scaled optionally per-source.

    In other words, diagonal Tikhonov regularization,
        ||D(\mu-\mu_{init})||_2^2
    where D is diagonal.

    Args:
        - l2: A float or np.array representing the per-source regularization
            strengths to use
    """
    if isinstance(l2, (int, float)):
        scale = l2 * torch.eye(self.d)
    else:
        scale = torch.diag(torch.from_numpy(l2))
    delta = self.mu - self.mu_init
    # Note that mu is a matrix and this is the *Frobenius norm*
    return torch.norm(scale @ delta) ** 2

def loss_inv_Z(self, *args):
    """Inverse-form loss on Z over the masked entries of O^{-1} + ZZ^T."""
    residual = (self.O_inv + self.Z @ self.Z.t())[self.mask]
    return torch.norm(residual) ** 2

def loss_inv_mu(self, *args, l2=0):
    """Recover mu from Q: match mu P mu^T to Q plus the diag(O) constraint."""
    mu_P = self.mu @ self.P
    loss_q = torch.norm(self.Q - mu_P @ self.mu.t()) ** 2
    loss_diag = torch.norm(torch.sum(mu_P, 1) - torch.diag(self.O)) ** 2
    return loss_q + loss_diag + self.loss_l2(l2=l2)

def loss_mu(self, *args, l2=0):
    """Conditionally-independent loss: masked block match to O + diag(O)."""
    mu_P = self.mu @ self.P
    loss_block = torch.norm((self.O - mu_P @ self.mu.t())[self.mask]) ** 2
    loss_diag = torch.norm(torch.sum(mu_P, 1) - torch.diag(self.O)) ** 2
    return loss_block + loss_diag + self.loss_l2(l2=l2)
def _set_class_balance(self, class_balance, Y_dev):
    """Set a prior for the class balance.

    In order of preference:
    1) Use user-provided class_balance
    2) Estimate balance from Y_dev
    3) Assume uniform class distribution
    """
    if class_balance is not None:
        self.p = np.array(class_balance)
    elif Y_dev is not None:
        counts = Counter(Y_dev)
        # Order the counts by label so p[y-1] corresponds to label y
        ordered = np.array([counts[label] for label in sorted(counts)])
        self.p = ordered / ordered.sum()
    else:
        self.p = np.full(self.k, 1 / self.k)
    self.P = torch.diag(torch.from_numpy(self.p)).float()
def _set_constants(self, L):
    # n = number of data points, m = number of labeling sources
    self.n, self.m = L.shape
    # Fixed to 1 here (presumably the task count — confirm against the
    # Classifier base class).
    self.t = 1

def _set_dependencies(self, deps):
    nodes = range(self.m)
    self.deps = deps
    # Clique tree over the m sources with the given dependency edges
    self.c_tree = get_clique_tree(nodes, deps)
def train_model(
    self,
    L_train,
    Y_dev=None,
    deps=[],
    class_balance=None,
    log_writer=None,
    **kwargs,
):
    """Train the model (i.e. estimate mu) in one of two ways, depending on
    whether source dependencies are provided or not:

    Args:
        L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
            corresponding to labels from supervision sources on the
            training set
        Y_dev: Target labels for the dev set, for estimating class_balance
        deps: (list of tuples) known dependencies between supervision
            sources. If not provided, sources are assumed to be independent.
            TODO: add automatic dependency-learning code
        class_balance: (np.array) each class's percentage of the population

    (1) No dependencies (conditionally independent sources): Estimate mu
    subject to constraints:
        (1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
        is the block of entries corresponding to sources i,j
        (1b) np.sum( mu P, 1 ) = diag(O)

    (2) Source dependencies:
        - First, estimate Z subject to the inverse form
        constraint:
            (2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
        - Then, compute Q = mu P mu.T
        - Finally, estimate mu subject to mu P mu.T = Q and (1b)
    """
    # NOTE: mutable default deps=[] is only read here, never mutated.
    self.config = recursive_merge_dicts(self.config, kwargs, misses="ignore")
    train_config = self.config["train_config"]
    # TODO: Implement logging for label model?
    if log_writer is not None:
        raise NotImplementedError("Logging for LabelModel.")
    # Note that the LabelModel class implements its own (centered) L2 reg.
    l2 = train_config.get("l2", 0)
    self._set_class_balance(class_balance, Y_dev)
    self._set_constants(L_train)
    self._set_dependencies(deps)
    self._check_L(L_train)
    # Whether to take the simple conditionally independent approach, or the
    # "inverse form" approach for handling dependencies
    # This flag allows us to eg test the latter even with no deps present
    self.inv_form = len(self.deps) > 0
    # Creating this faux dataset is necessary for now because the LabelModel
    # loss functions do not accept inputs, but Classifer._train_model()
    # expects training data to feed to the loss functions.
    dataset = MetalDataset([0], [0])
    train_loader = DataLoader(dataset)
    if self.inv_form:
        # Compute O, O^{-1}, and initialize params
        if self.config["verbose"]:
            print("Computing O^{-1}...")
        self._generate_O_inv(L_train)
        self._init_params()
        # Estimate Z, compute Q = \mu P \mu^T
        if self.config["verbose"]:
            print("Estimating Z...")
        self._train_model(train_loader, self.loss_inv_Z)
        self.Q = torch.from_numpy(self.get_Q()).float()
        # Estimate \mu
        if self.config["verbose"]:
            print("Estimating \mu...")
        self._train_model(train_loader, partial(self.loss_inv_mu, l2=l2))
    else:
        # Compute O and initialize params
        if self.config["verbose"]:
            print("Computing O...")
        self._generate_O(L_train)
        self._init_params()
        # Estimate \mu
        if self.config["verbose"]:
            print("Estimating \mu...")
        self._train_model(train_loader, partial(self.loss_mu, l2=l2))
|
import logging
import ntpath
import os
import shutil
import tarfile
import zipfile
from lib.utils import common, log_util, util, constants
from .collectinfo_reader import CollectinfoReader
from .collectinfo_log import CollectinfoLog
DATE_SEG = 0
YEAR = 0
MONTH = 1
DATE = 2
TIME_SEG = 1
HH = 0
MM = 1
SS = 2
COLLECTINFO_DIR = constants.ADMIN_HOME + "collectinfo/"
COLLECTINFO_INTERNAL_DIR = "collectinfo_analyser_extracted_files"
class CollectinfoLogHandler(object):
    """Extracts and indexes Aerospike collectinfo snapshots found under a
    given path, exposing per-timestamp accessors for config, statistics,
    histograms, metadata, and ACL data."""

    # Kept for backward compatibility with any code reading these off the
    # class; every instance shadows them with its own dicts in __init__.
    all_cinfo_logs = {}
    selected_cinfo_logs = {}

    def __init__(self, cinfo_path):
        # Bug fix: previously these dicts existed only at class level, so
        # they were shared across instances and close() on one handler
        # cleared every handler's state. Give each instance its own dicts.
        self.all_cinfo_logs = {}
        self.selected_cinfo_logs = {}
        self.cinfo_path = cinfo_path
        # Per-process extraction directory for compressed collectinfo files
        self.collectinfo_dir = COLLECTINFO_DIR + str(os.getpid())
        self._validate_and_extract_compressed_files(
            cinfo_path, dest_dir=self.collectinfo_dir
        )
        self.cinfo_timestamp = None
        self.logger = logging.getLogger("asadm")
        self.reader = CollectinfoReader()
        try:
            self._add_cinfo_log_files(cinfo_path)
        except Exception as e:
            # Clean up extracted files before propagating the failure
            self.close()
            raise e
def __str__(self):
status_str = ""
if not self.all_cinfo_logs:
return status_str
i = 1
for timestamp in sorted(self.all_cinfo_logs.keys()):
nodes = list(self.all_cinfo_logs[timestamp].get_node_names().keys())
if len(nodes) == 0:
continue
status_str += "\n " + str(i) + ": "
status_str += ntpath.basename(self.all_cinfo_logs[timestamp].cinfo_file)
status_str += " ("
status_str += str(timestamp)
status_str += ")"
status_str += "\n\tFound %s nodes" % (len(nodes))
status_str += "\n\tOnline: %s" % (", ".join(nodes))
status_str += "\n"
i = i + 1
return status_str
def close(self):
if self.all_cinfo_logs:
for timestamp in self.all_cinfo_logs:
try:
self.all_cinfo_logs[timestamp].destroy()
except Exception:
pass
self.all_cinfo_logs.clear()
self.selected_cinfo_logs.clear()
if os.path.exists(self.collectinfo_dir):
shutil.rmtree(self.collectinfo_dir)
def get_cinfo_log_at(self, timestamp=""):
if not timestamp or timestamp not in self.all_cinfo_logs:
return None
return self.all_cinfo_logs[timestamp]
def get_unique_data_usage(self):
    # License/unique-data usage parsed out of the collectinfo; set in
    # _add_cinfo_log_files from CollectinfoLog.license_data_usage.
    return self.license_data_usage
def get_principal(self, timestamp):
service_data = self.info_statistics(stanza="service")
principal = None
if timestamp not in service_data:
return principal
for node_ip in service_data[timestamp]:
temp_principal = service_data[timestamp][node_ip]["cluster_principal"]
if principal and temp_principal != principal:
self.logger.warning("Found multiple cluster principals.")
return principal
elif not principal:
principal = temp_principal
return principal
def get_node_id_to_ip_mapping(self, timestamp):
meta_data = self.info_meta_data()
node_to_ip = {}
if timestamp not in meta_data:
return {}
for node_ip in meta_data[timestamp]:
node_id = meta_data[timestamp][node_ip]["node_id"]
node_to_ip[node_id] = node_ip
return node_to_ip
def get_ip_to_node_id_mapping(self, timestamp):
meta_data = self.info_meta_data()
ip_to_node = {}
if timestamp not in meta_data:
return {}
for node_ip in meta_data[timestamp]:
node_id = meta_data[timestamp][node_ip]["node_id"]
ip_to_node[node_ip] = node_id
return ip_to_node
def info_getconfig(self, stanza="", flip=False):
    """Per-snapshot dynamic config for `stanza` (optionally key-flipped)."""
    return self._fetch_from_cinfo_log(type="config", stanza=stanza, flip=flip)

def info_get_originalconfig(self, stanza="", flip=False):
    """Per-snapshot original config for `stanza` (optionally key-flipped)."""
    return self._fetch_from_cinfo_log(
        type="original_config", stanza=stanza, flip=flip
    )

def info_statistics(self, stanza="", flip=False):
    """Per-snapshot statistics for `stanza` (optionally key-flipped)."""
    return self._fetch_from_cinfo_log(type="statistics", stanza=stanza, flip=flip)

def info_histogram(self, stanza="", byte_distribution=False, flip=False):
    """Per-snapshot parsed histograms for `stanza`.

    Returns {timestamp: {node: {namespace: parsed_histogram}}}; entries
    that fail to parse are silently skipped.
    """
    # "objsz" byte distributions are stored under the "object-size" stanza
    # — presumably a server-version rename; confirm against the reader.
    if byte_distribution and stanza == "objsz":
        stanza = "object-size"
    hist_dict = self._fetch_from_cinfo_log(
        type="histogram", stanza=stanza, flip=flip
    )
    res_dict = {}
    # Server build per node, used to pick the histogram format version
    version = self.info_meta_data(stanza="asd_build")
    for timestamp, hist_snapshot in hist_dict.items():
        res_dict[timestamp] = {}
        if not hist_snapshot:
            continue
        for node, node_snapshot in hist_snapshot.items():
            res_dict[timestamp][node] = {}
            if not node_snapshot:
                continue
            for namespace, namespace_snapshot in node_snapshot.items():
                if not namespace_snapshot:
                    continue
                try:
                    as_version = version[timestamp][node]
                    d = common.parse_raw_histogram(
                        stanza,
                        namespace_snapshot,
                        logarithmic=byte_distribution,
                        new_histogram_version=common.is_new_histogram_version(
                            as_version
                        ),
                    )
                    if d and not isinstance(d, Exception):
                        res_dict[timestamp][node][namespace] = d
                except Exception:
                    # Best effort: skip histograms that cannot be parsed
                    pass
    return res_dict

def info_latency(self):
    """Per-snapshot latency data."""
    return self._fetch_from_cinfo_log(type="latency")

def info_meta_data(self, stanza=""):
    """Per-snapshot metadata (e.g. node_id, asd_build) for `stanza`."""
    return self._fetch_from_cinfo_log(type="meta_data", stanza=stanza)

def info_pmap(self):
    """Per-snapshot partition map data."""
    return self._fetch_from_cinfo_log(type="pmap")

def info_namespaces(self):
    """Per-snapshot list of namespaces (from the config data)."""
    return self._fetch_from_cinfo_log(type="config", stanza="namespace_list")
def admin_acl(self, stanza):
data = self._fetch_from_cinfo_log(type="acl", stanza=stanza)
"""
Asadm 2.1 stored user data as {user: [role1, role2, . . .]} which had to be
changed to {user: {roles: [role1, role2], connections: int, . . .}} in
Asadm 2.2. This snippet can be removed when 2.1 is considered old enough :)
"""
if stanza == "users":
for nodes_data in data.values():
for users_data in nodes_data.values():
for user, user_data in users_data.items():
if isinstance(user_data, list):
users_data[user] = {"roles": user_data}
return data
def get_sys_data(self, stanza=""):
    """Per-snapshot restructured system data for `stanza`.

    Returns {} when stanza is empty; snapshots that fail are skipped.
    """
    res_dict = {}
    if not stanza:
        return res_dict
    for timestamp in sorted(self.selected_cinfo_logs.keys()):
        try:
            out = self.selected_cinfo_logs[timestamp].get_sys_data(stanza=stanza)
            res_dict[timestamp] = util.restructure_sys_data(out, stanza)
        except Exception:
            # Best effort: skip snapshots without usable system data
            continue
    return res_dict
def _get_valid_files(self, cinfo_path=""):
    """Return files under cinfo_path that look usable: collectinfo logs,
    .json dumps, system logs, or .conf files. Returns [] on any error."""
    try:
        if not cinfo_path:
            cinfo_path = self.cinfo_path
        log_files = log_util.get_all_files(cinfo_path)
        valid_files = []
        # Each detector below is tried independently; a failing detector
        # must not prevent the remaining checks from running.
        for log_file in log_files:
            try:
                if self.reader.is_cinfo_log_file(log_file):
                    valid_files.append(log_file)
                    continue
            except Exception:
                pass
            try:
                # ToDo: It should be some proper check for asadm
                # collectinfo json file.
                if os.path.splitext(log_file)[1] == ".json":
                    valid_files.append(log_file)
                    continue
            except Exception:
                pass
            try:
                if self.reader.is_system_log_file(log_file):
                    valid_files.append(log_file)
                    continue
            except Exception:
                pass
            try:
                # ToDo: It should be some proper check for asadm
                # conf file.
                if os.path.splitext(log_file)[1] == ".conf":
                    valid_files.append(log_file)
            except Exception:
                pass
        return valid_files
    except Exception:
        return []
def _get_all_file_paths(self, cinfo_path):
    """Collect candidate file paths from cinfo_path plus anything this
    handler extracted into its collectinfo_dir."""
    files = []
    if os.path.isfile(cinfo_path):
        if not self._is_compressed_file(cinfo_path):
            files.append(cinfo_path)
        else:
            # Archive was already extracted in __init__; use its contents
            files += log_util.get_all_files(self.collectinfo_dir)
    elif os.path.isdir(cinfo_path):
        files += log_util.get_all_files(cinfo_path)
        if os.path.exists(self.collectinfo_dir):
            # ToDo: Before adding file from collectinfo_dir, we need to check file already exists in input file list or not,
            # ToDo: collectinfo_parser fails if same file exists twice in input file list. This is possible if input has zip file and
            # ToDo: user unzipped it but did not remove zipped file, in that case collectinfo-analyser creates new unzipped file,
            # ToDo: which results in two copies of same file (one unzipped by user and one unzipped by collectinfo-analyser).
            files += self._get_valid_files(self.collectinfo_dir)
    return files
def _add_cinfo_log_files(self, cinfo_path=""):
    """Parse all valid files under cinfo_path into snapshots.

    Raises:
        Exception: on a missing/invalid path, no usable files, or when
            parsing produced zero snapshots.
    """
    if not cinfo_path:
        raise Exception("Collectinfo path not specified.")
    if not os.path.exists(cinfo_path):
        raise Exception("Wrong Collectinfo path.")
    files = self._get_all_file_paths(cinfo_path)
    if not files:
        raise Exception("No valid Aerospike collectinfo log available.")
    cinfo_log = CollectinfoLog(cinfo_path, files, self.reader)
    self.selected_cinfo_logs = cinfo_log.snapshots
    self.all_cinfo_logs = cinfo_log.snapshots
    self.license_data_usage = cinfo_log.license_data_usage
    snapshots_added = len(self.all_cinfo_logs)
    if not snapshots_added:
        # NOTE(review): raised when *zero* snapshots were parsed; the
        # message presumably means multiple timestamps could not be merged
        # without a JSON dump — confirm the wording is intentional.
        raise Exception("Multiple snapshots available without JSON dump.")
def _fetch_from_cinfo_log(self, type="", stanza="", flip=False):
res_dict = {}
if not type:
return res_dict
for timestamp in sorted(self.selected_cinfo_logs.keys()):
try:
out = self.selected_cinfo_logs[timestamp].get_data(
type=type, stanza=stanza
)
if flip:
out = util.flip_keys(out)
res_dict[timestamp] = out
except Exception:
continue
return res_dict
def _is_compressed_file(self, file):
if not file or not os.path.exists(file):
return False
if zipfile.is_zipfile(file) or tarfile.is_tarfile(file):
return True
return False
def _extract_to(self, file, dest_dir):
if not file or not os.path.exists(file):
return False
try:
if tarfile.is_tarfile(file):
compressed_file = tarfile.open(file)
elif zipfile.is_zipfile(file):
compressed_file = zipfile.ZipFile(file, "r")
else:
return False
except Exception:
return False
file_extracted = False
try:
compressed_file.extractall(path=dest_dir)
file_extracted = True
except Exception:
file_extracted = False
finally:
compressed_file.close()
return file_extracted
def _validate_and_extract_compressed_files(self, cinfo_path, dest_dir=None):
    """Extract any archives found at cinfo_path into dest_dir, then recurse
    once into dest_dir (using a nested internal dir) to unpack archives
    that were themselves inside an archive."""
    if not cinfo_path or not os.path.exists(cinfo_path):
        return
    if not dest_dir:
        dest_dir = self.collectinfo_dir
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    if os.path.isfile(cinfo_path):
        if not self._is_compressed_file(cinfo_path):
            return
        if self._extract_to(cinfo_path, dest_dir):
            # Archives nested inside the archive go one level deeper
            self._validate_and_extract_compressed_files(
                dest_dir, dest_dir=os.path.join(dest_dir, COLLECTINFO_INTERNAL_DIR)
            )
        return
    files = log_util.get_all_files(cinfo_path)
    if not files:
        return
    file_extracted = False
    for file in files:
        if not self._is_compressed_file(file):
            continue
        if self._extract_to(file, dest_dir):
            file_extracted = True
    if file_extracted:
        # Recurse to unpack archives contained in the extracted output
        self._validate_and_extract_compressed_files(
            dest_dir, dest_dir=os.path.join(dest_dir, COLLECTINFO_INTERNAL_DIR)
        )
|
import os
import readline # https://github.com/ContinuumIO/anaconda-issues/issues/152
from unittest.mock import MagicMock
from unittest.mock import Mock
import numpy
from pandas import DataFrame
from pandas import Series
import pytest
pytest.importorskip("rpy2")
from pandas.rpy.common import convert_robj
@pytest.fixture
def ObjectMixin(monkeypatch):
    """Patch palladium.R.ObjectMixin.r with a dict-backed MagicMock so tests
    can register fake R functions without a live R bridge."""
    from palladium.R import ObjectMixin
    r_dict = {}
    r = MagicMock()
    # Back item access with a real dict so assignments round-trip
    r.__getitem__.side_effect = r_dict.__getitem__
    r.__setitem__.side_effect = r_dict.__setitem__
    # Pre-register the R functions the tests below call
    r['myfunc'] = Mock()
    r['predict'] = Mock()
    monkeypatch.setattr(ObjectMixin, 'r', r)
    return ObjectMixin
class TestDatasetLoader:
    """Tests for palladium.R.DatasetLoader against the mocked R bridge."""

    @pytest.fixture
    def DatasetLoader(self, ObjectMixin):
        from palladium.R import DatasetLoader
        return DatasetLoader

    def test_it(self, DatasetLoader):
        """Loader sources the script, then calls funcname with the kwargs."""
        X, y = object(), object()
        DatasetLoader.r['myfunc'].return_value = X, y
        dloader = DatasetLoader('myscript', 'myfunc', some='kwarg')
        assert dloader() == (X, y)
        dloader.r.source.assert_called_with('myscript')
        dloader.r['myfunc'].assert_called_with(some='kwarg')
class TestAbstractModel:
    """Tests for palladium.R.AbstractModel.fit with numpy and pandas data."""

    @pytest.fixture
    def data(self):
        # Two samples, three features, integer targets
        X = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        y = numpy.array([1, 2])
        return X, y

    @pytest.fixture
    def dataframe(self, data):
        # Same data as pandas containers
        X, y = data
        return DataFrame(X, columns=['one', 'two', 'three']), Series(y)

    @pytest.fixture
    def Model(self, ObjectMixin, monkeypatch):
        from palladium.R import AbstractModel
        # Clear abstractmethods so the ABC can be instantiated directly
        monkeypatch.setattr(AbstractModel, '__abstractmethods__', set())
        return AbstractModel

    def test_fit_with_numpy_data(self, Model, data):
        X, y = data
        model = Model(scriptname='myscript', funcname='myfunc', some='kwarg')
        model.fit(X, y)
        # Positional args are the converted X/y; kwargs pass through
        funcargs = model.r['myfunc'].call_args
        assert (numpy.asarray(funcargs[0][0]) == X).all()
        assert (numpy.asarray(funcargs[0][1]) == y).all()
        assert funcargs[1]['some'] == 'kwarg'

    def test_fit_with_pandas_data(self, Model, dataframe):
        X, y = dataframe
        model = Model(scriptname='myscript', funcname='myfunc', some='kwarg')
        model.fit(X, y)
        funcargs = model.r['myfunc'].call_args
        assert (convert_robj(funcargs[0][0]) == X).all().all()
        assert (convert_robj(funcargs[0][1]) == y).all()
        assert funcargs[1]['some'] == 'kwarg'
class TestClassificationModel(TestAbstractModel):
    """Reuses the fit tests from TestAbstractModel and adds predict tests."""

    @pytest.fixture
    def Model(self, ObjectMixin, monkeypatch):
        from palladium.R import ClassificationModel
        return ClassificationModel

    def test_predict_with_numpy_data(self, Model, data):
        X, y = data
        model = Model(scriptname='myscript', funcname='myfunc', some='kwarg')
        # Fake per-class probabilities returned by the R predict function
        model.r['predict'].return_value = numpy.array(
            [[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]])
        model.fit(X, y)
        result = model.predict(X)
        # R predict is called with (fitted model, X) and type='prob'
        predictargs = model.r['predict'].call_args
        assert predictargs[0][0] is model.rmodel_
        assert (numpy.asarray(predictargs[0][1]) == X).all()
        assert predictargs[1]['type'] == 'prob'
        # predict() returns the argmax class per row
        assert (result ==
                numpy.argmax(model.r['predict'].return_value, axis=1)).all()
        # predict_proba() returns the raw probabilities
        result = model.predict_proba(X)
        assert (result == model.r['predict'].return_value).all()

    def test_predict_with_pandas_data(self, Model, dataframe):
        X, y = dataframe
        model = Model(scriptname='myscript', funcname='myfunc', some='kwarg')
        model.r['predict'].return_value = numpy.array(
            [[0.1, 0.2, 0.7], [0.8, 0.1, 0.1]])
        model.fit(X, y)
        result = model.predict(X)
        predictargs = model.r['predict'].call_args
        assert predictargs[0][0] is model.rmodel_
        assert (convert_robj(predictargs[0][1]) == X).all().all()
        assert predictargs[1]['type'] == 'prob'
        assert (result ==
                numpy.argmax(model.r['predict'].return_value, axis=1)).all()
        result = model.predict_proba(X)
        assert (result == model.r['predict'].return_value).all()
class TestClassification:
    """End-to-end tests against a real R script (test_R.R) via rpy2."""

    @pytest.fixture
    def dataset(self):
        from palladium.R import DatasetLoader
        return DatasetLoader(
            scriptname=os.path.join(os.path.dirname(__file__), 'test_R.R'),
            funcname='dataset',
        )

    @pytest.fixture
    def model(self):
        from palladium.R import ClassificationModel
        return ClassificationModel(
            scriptname=os.path.join(os.path.dirname(__file__), 'test_R.R'),
            funcname='train.randomForest',
            encode_labels=True,
        )

    def test_fit_and_predict(self, dataset, model):
        X, y = dataset()
        model.fit(X, y)
        probas = model.predict_proba(X)
        # Iris-sized dataset: 150 samples, 3 classes; rows sum to one
        assert probas.shape == (150, 3)
        assert (probas.sum(axis=1) == 1).all()
        assert (model.predict(X) == numpy.asarray(y)).all()

    def test_fit_and_score(self, dataset, model):
        X, y = dataset()
        model.fit(X, y)
        # Random forest memorizes the training data perfectly
        assert model.score(X, y) == 1.0


class TestClassificationWithNumpyDataset(TestClassification):
    """Same end-to-end tests, but with a plain numpy dataset."""

    @pytest.fixture
    def dataset(self):
        X = numpy.array([
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ] * 50)
        y = numpy.array([1, 2, 3] * 50)
        # Match the DatasetLoader interface: a callable returning (X, y)
        return lambda: (X, y)


class TestClassificationWithPandasDataset(TestClassification):
    """Same end-to-end tests, but with a pandas DataFrame/Series dataset."""

    @pytest.fixture
    def dataset(self):
        X = DataFrame([
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [7.0, 8.0, 9.0],
        ] * 50)
        y = Series([1, 2, 3] * 50)
        return lambda: (X, y)
|
class Solution:
    def subsetsWithDup(self, nums):
        """Return all unique subsets of nums (which may contain duplicates).

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Sorting groups duplicates together so they can be skipped below
        nums.sort()
        collected = []
        self.dfs(collected, nums, 0, [])
        return collected

    def dfs(self, acc, nums, begin, current):
        """Append `current` and recurse on each distinct extension of it."""
        acc.append(current)
        for idx in range(begin, len(nums)):
            # Skip a duplicate value at the same recursion depth: using it
            # again here would regenerate an identical subset.
            if idx > begin and nums[idx] == nums[idx - 1]:
                continue
            self.dfs(acc, nums, idx + 1, current + [nums[idx]])
|
"""Config flow for DLNA DMR."""
from __future__ import annotations
from collections.abc import Callable
import logging
from pprint import pformat
from typing import Any, Mapping, Optional
from urllib.parse import urlparse
from async_upnp_client.client import UpnpError
from async_upnp_client.profiles.dlna import DmrDevice
from async_upnp_client.profiles.profile import find_device_of_type
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_HOST,
CONF_NAME,
CONF_TYPE,
CONF_URL,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import IntegrationError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import (
CONF_CALLBACK_URL_OVERRIDE,
CONF_LISTEN_PORT,
CONF_POLL_AVAILABILITY,
DEFAULT_NAME,
DOMAIN,
)
from .data import get_domain_data
LOGGER = logging.getLogger(__name__)
FlowInput = Optional[Mapping[str, Any]]
class ConnectError(IntegrationError):
    """Error occurred when trying to connect to a device."""
class DlnaDmrFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a DLNA DMR config flow.

    The Unique Device Name (UDN) of the DMR device is used as the unique_id for
    config entries and for entities. This UDN may differ from the root UDN if
    the DMR is an embedded device.
    """

    # Config entry schema version
    VERSION = 1
def __init__(self) -> None:
    """Initialize flow."""
    # Unconfigured devices keyed by friendly name or hostname
    self._discoveries: dict[str, Mapping[str, Any]] = {}
    # Device description URL
    self._location: str | None = None
    # Unique Device Name, used as the config entry's unique_id
    self._udn: str | None = None
    self._device_type: str | None = None
    self._name: str | None = None
    # Options accumulated across steps for the resulting config entry
    self._options: dict[str, Any] = {}
@staticmethod
@callback
def async_get_options_flow(
    config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
    """Define the config flow to handle options."""
    return DlnaDmrOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input: FlowInput = None) -> FlowResult:
    """Handle a flow initialized by the user.

    Let user choose from a list of found and unconfigured devices or to
    enter an URL manually.
    """
    LOGGER.debug("async_step_user: user_input: %s", user_input)
    if user_input is not None:
        host = user_input.get(CONF_HOST)
        if not host:
            # No device chosen, user might want to directly enter an URL
            return await self.async_step_manual()
        # User has chosen a device, ask for confirmation
        discovery = self._discoveries[host]
        await self._async_set_info_from_discovery(discovery)
        return self._create_entry()

    discoveries = await self._async_get_discoveries()
    if not discoveries:
        # Nothing found, maybe the user knows an URL to try
        return await self.async_step_manual()

    # Offer a choice keyed by friendly name, falling back to the hostname
    # parsed from the SSDP location
    self._discoveries = {
        discovery.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
        or urlparse(discovery[ssdp.ATTR_SSDP_LOCATION]).hostname: discovery
        for discovery in discoveries
    }
    data_schema = vol.Schema(
        {vol.Optional(CONF_HOST): vol.In(self._discoveries.keys())}
    )
    return self.async_show_form(step_id="user", data_schema=data_schema)
async def async_step_manual(self, user_input: FlowInput = None) -> FlowResult:
    """Manual URL entry by the user."""
    LOGGER.debug("async_step_manual: user_input: %s", user_input)

    # Device setup manually, assume we don't get SSDP broadcast notifications
    self._options[CONF_POLL_AVAILABILITY] = True

    errors = {}
    if user_input is not None:
        self._location = user_input[CONF_URL]
        try:
            await self._async_connect()
        except ConnectError as err:
            # Surface the failure reason in the form's base error slot
            errors["base"] = err.args[0]
        else:
            return self._create_entry()

    data_schema = vol.Schema({CONF_URL: str})
    return self.async_show_form(
        step_id="manual", data_schema=data_schema, errors=errors
    )
    async def async_step_import(self, import_data: FlowInput = None) -> FlowResult:
        """Import a new DLNA DMR device from a config entry.

        This flow is triggered by `async_setup_platform`. If the device has not
        been migrated, and can be connected to, automatically import it. If it
        cannot be connected to, prompt the user to turn it on. If it has already
        been migrated, do nothing.
        """
        LOGGER.debug("async_step_import: import_data: %s", import_data)
        if not import_data or CONF_URL not in import_data:
            LOGGER.debug("Entry not imported: incomplete_config")
            return self.async_abort(reason="incomplete_config")
        self._location = import_data[CONF_URL]
        # Already migrated: an entry with this URL exists, abort silently.
        self._async_abort_entries_match({CONF_URL: self._location})
        # Use the location as this config flow's unique ID until UDN is known
        await self.async_set_unique_id(self._location)
        # Set options from the import_data, except listen_ip which is no longer used
        self._options[CONF_LISTEN_PORT] = import_data.get(CONF_LISTEN_PORT)
        self._options[CONF_CALLBACK_URL_OVERRIDE] = import_data.get(
            CONF_CALLBACK_URL_OVERRIDE
        )
        # Override device name if it's set in the YAML
        self._name = import_data.get(CONF_NAME)
        discoveries = await self._async_get_discoveries()
        # Find the device in the list of unconfigured devices
        for discovery in discoveries:
            if discovery[ssdp.ATTR_SSDP_LOCATION] == self._location:
                # Device found via SSDP, it shouldn't need polling
                self._options[CONF_POLL_AVAILABILITY] = False
                # Discovery info has everything required to create config entry
                await self._async_set_info_from_discovery(discovery)
                LOGGER.debug(
                    "Entry %s found via SSDP, with UDN %s",
                    self._location,
                    self._udn,
                )
                return self._create_entry()
        # This device will need to be polled
        self._options[CONF_POLL_AVAILABILITY] = True
        # Device was not found via SSDP, connect directly for configuration
        try:
            await self._async_connect()
        except ConnectError as err:
            # This will require user action
            LOGGER.debug("Entry %s not imported yet: %s", self._location, err.args[0])
            return await self.async_step_import_turn_on()
        LOGGER.debug("Entry %s ready for import", self._location)
        return self._create_entry()
    async def async_step_import_turn_on(
        self, user_input: FlowInput = None
    ) -> FlowResult:
        """Request the user to turn on the device so that import can finish."""
        LOGGER.debug("async_step_import_turn_on: %s", user_input)
        # Show something recognisable in the UI while the flow is pending.
        self.context["title_placeholders"] = {"name": self._name or self._location}
        errors = {}
        if user_input is not None:
            # User says the device is on: retry the connection.
            try:
                await self._async_connect()
            except ConnectError as err:
                errors["base"] = err.args[0]
            else:
                return self._create_entry()
        self._set_confirm_only()
        return self.async_show_form(step_id="import_turn_on", errors=errors)
    async def async_step_ssdp(self, discovery_info: DiscoveryInfoType) -> FlowResult:
        """Handle a flow initialized by SSDP discovery."""
        LOGGER.debug("async_step_ssdp: discovery_info %s", pformat(discovery_info))
        # Sets location/UDN/type/name and aborts if already configured.
        await self._async_set_info_from_discovery(discovery_info)
        if _is_ignored_device(discovery_info):
            return self.async_abort(reason="alternative_integration")
        # Abort if the device doesn't support all services required for a DmrDevice.
        # Use the discovery_info instead of DmrDevice.is_profile_device to avoid
        # contacting the device again.
        discovery_service_list = discovery_info.get(ssdp.ATTR_UPNP_SERVICE_LIST)
        if not discovery_service_list:
            return self.async_abort(reason="not_dmr")
        discovery_service_ids = {
            service.get("serviceId")
            for service in discovery_service_list.get("service") or []
        }
        if not DmrDevice.SERVICE_IDS.issubset(discovery_service_ids):
            return self.async_abort(reason="not_dmr")
        # Abort if a migration flow for the device's location is in progress
        for progress in self._async_in_progress(include_uninitialized=True):
            if progress["context"].get("unique_id") == self._location:
                LOGGER.debug(
                    "Aborting SSDP setup because migration for %s is in progress",
                    self._location,
                )
                return self.async_abort(reason="already_in_progress")
        self.context["title_placeholders"] = {"name": self._name}
        return await self.async_step_confirm()
    async def async_step_unignore(self, user_input: Mapping[str, Any]) -> FlowResult:
        """Rediscover previously ignored devices by their unique_id."""
        LOGGER.debug("async_step_unignore: user_input: %s", user_input)
        self._udn = user_input["unique_id"]
        assert self._udn
        await self.async_set_unique_id(self._udn)
        # Find a discovery matching the unignored unique_id for a DMR device
        for dev_type in DmrDevice.DEVICE_TYPES:
            discovery = await ssdp.async_get_discovery_info_by_udn_st(
                self.hass, self._udn, dev_type
            )
            if discovery:
                break
        else:
            # No device type matched: nothing in the SSDP cache for this UDN.
            return self.async_abort(reason="discovery_error")
        # Don't abort on "already configured": the entry was just unignored.
        await self._async_set_info_from_discovery(discovery, abort_if_configured=False)
        self.context["title_placeholders"] = {"name": self._name}
        return await self.async_step_confirm()
async def async_step_confirm(self, user_input: FlowInput = None) -> FlowResult:
"""Allow the user to confirm adding the device."""
LOGGER.debug("async_step_confirm: %s", user_input)
if user_input is not None:
return self._create_entry()
self._set_confirm_only()
return self.async_show_form(step_id="confirm")
    async def _async_connect(self) -> None:
        """Connect to a device to confirm it works and gather extra information.

        Updates this flow's unique ID to the device UDN if not already done.

        Raises ConnectError ("cannot_connect" or "not_dmr") if something goes
        wrong.
        """
        LOGGER.debug("_async_connect: location: %s", self._location)
        assert self._location, "self._location has not been set before connect"
        domain_data = get_domain_data(self.hass)
        try:
            device = await domain_data.upnp_factory.async_create_device(self._location)
        except UpnpError as err:
            raise ConnectError("cannot_connect") from err
        if not DmrDevice.is_profile_device(device):
            raise ConnectError("not_dmr")
        # The root device may embed the DMR; narrow to the actual DMR device.
        device = find_device_of_type(device, DmrDevice.DEVICE_TYPES)
        if not self._udn:
            self._udn = device.udn
            await self.async_set_unique_id(self._udn)
        # Abort if already configured, but update the last-known location
        self._abort_if_unique_id_configured(
            updates={CONF_URL: self._location}, reload_on_update=False
        )
        # Only fill in type/name when not already known (e.g. from discovery).
        if not self._device_type:
            self._device_type = device.device_type
        if not self._name:
            self._name = device.name
def _create_entry(self) -> FlowResult:
"""Create a config entry, assuming all required information is now known."""
LOGGER.debug(
"_async_create_entry: location: %s, UDN: %s", self._location, self._udn
)
assert self._location
assert self._udn
assert self._device_type
title = self._name or urlparse(self._location).hostname or DEFAULT_NAME
data = {
CONF_URL: self._location,
CONF_DEVICE_ID: self._udn,
CONF_TYPE: self._device_type,
}
return self.async_create_entry(title=title, data=data, options=self._options)
    async def _async_set_info_from_discovery(
        self, discovery_info: Mapping[str, Any], abort_if_configured: bool = True
    ) -> None:
        """Set information required for a config entry from the SSDP discovery.

        Fills in location, UDN (also used as the flow's unique ID), device
        type and name. With abort_if_configured=True the flow aborts when an
        entry with this UDN already exists.
        """
        LOGGER.debug(
            "_async_set_info_from_discovery: location: %s, UDN: %s",
            discovery_info[ssdp.ATTR_SSDP_LOCATION],
            discovery_info[ssdp.ATTR_SSDP_UDN],
        )
        # Keep a location set earlier (e.g. by an import flow).
        if not self._location:
            self._location = discovery_info[ssdp.ATTR_SSDP_LOCATION]
            assert isinstance(self._location, str)
        self._udn = discovery_info[ssdp.ATTR_SSDP_UDN]
        await self.async_set_unique_id(self._udn)
        if abort_if_configured:
            # Abort if already configured, but update the last-known location
            self._abort_if_unique_id_configured(
                updates={CONF_URL: self._location}, reload_on_update=False
            )
        self._device_type = (
            discovery_info.get(ssdp.ATTR_SSDP_NT) or discovery_info[ssdp.ATTR_SSDP_ST]
        )
        self._name = (
            discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME)
            or urlparse(self._location).hostname
            or DEFAULT_NAME
        )
async def _async_get_discoveries(self) -> list[Mapping[str, Any]]:
"""Get list of unconfigured DLNA devices discovered by SSDP."""
LOGGER.debug("_get_discoveries")
# Get all compatible devices from ssdp's cache
discoveries: list[Mapping[str, Any]] = []
for udn_st in DmrDevice.DEVICE_TYPES:
st_discoveries = await ssdp.async_get_discovery_info_by_st(
self.hass, udn_st
)
discoveries.extend(st_discoveries)
# Filter out devices already configured
current_unique_ids = {
entry.unique_id
for entry in self._async_current_entries(include_ignore=False)
}
discoveries = [
disc
for disc in discoveries
if disc[ssdp.ATTR_SSDP_UDN] not in current_unique_ids
]
return discoveries
class DlnaDmrOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a DLNA DMR options flow.

    Configures the single instance and updates the existing config entry.
    """

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize with the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Manage the options."""
        errors: dict[str, str] = {}
        # Don't modify existing (read-only) options -- copy and update instead
        options = dict(self.config_entry.options)
        if user_input is not None:
            LOGGER.debug("user_input: %s", user_input)
            # Normalise blank/zero form values to "unset".
            listen_port = user_input.get(CONF_LISTEN_PORT) or None
            callback_url_override = user_input.get(CONF_CALLBACK_URL_OVERRIDE) or None
            try:
                # Cannot use cv.url validation in the schema itself so apply
                # extra validation here
                if callback_url_override:
                    cv.url(callback_url_override)
            except vol.Invalid:
                errors["base"] = "invalid_url"
            options[CONF_LISTEN_PORT] = listen_port
            options[CONF_CALLBACK_URL_OVERRIDE] = callback_url_override
            options[CONF_POLL_AVAILABILITY] = user_input[CONF_POLL_AVAILABILITY]
            # Save if there's no errors, else fall through and show the form again
            if not errors:
                return self.async_create_entry(title="", data=options)
        fields = {}

        def _add_with_suggestion(key: str, validator: Callable) -> None:
            """Add a field with a suggested, not default, value."""
            if (suggested_value := options.get(key)) is None:
                fields[vol.Optional(key)] = validator
            else:
                fields[
                    vol.Optional(key, description={"suggested_value": suggested_value})
                ] = validator

        # listen_port can be blank or 0 for "bind any free port"
        _add_with_suggestion(CONF_LISTEN_PORT, cv.port)
        _add_with_suggestion(CONF_CALLBACK_URL_OVERRIDE, str)
        fields[
            vol.Required(
                CONF_POLL_AVAILABILITY,
                default=options.get(CONF_POLL_AVAILABILITY, False),
            )
        ] = bool
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(fields),
            errors=errors,
        )
def _is_ignored_device(discovery_info: Mapping[str, Any]) -> bool:
    """Return True if this device should be ignored for discovery.

    These devices are supported better by other integrations, so don't bug
    the user about them. The user can still add them if desired via the user
    config flow, which lists all discovered but unconfigured devices.
    """
    # Did the discovery trigger more than just this flow? If another
    # integration also matched, defer to it.
    matching_domains = discovery_info.get(ssdp.ATTR_HA_MATCHING_DOMAINS, set())
    if len(matching_domains) > 1:
        LOGGER.debug(
            "Ignoring device supported by multiple integrations: %s",
            matching_domains,
        )
        return True
    # Ignore anything whose root device is not a DMR.
    return discovery_info.get(ssdp.ATTR_UPNP_DEVICE_TYPE) not in DmrDevice.DEVICE_TYPES
|
"""
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
"""
from copy import deepcopy
import functools
import importlib
import operator
import os
import sys
def get_from_dict(data_dict, map_list):
    """Return the value at the nested key path ``map_list`` inside ``data_dict``.

    An empty ``map_list`` returns ``data_dict`` itself.
    """
    node = data_dict
    for key in map_list:
        node = node[key]
    return node
def apply_to_dict(data_dict, map_list, func, args):
    """Call the method named ``func`` with ``args`` on the object found at the
    nested key path ``map_list`` inside ``data_dict``.
    """
    parent = data_dict
    for key in map_list[:-1]:
        parent = parent[key]
    target = parent[map_list[-1]]
    getattr(target, func)(*args)
# Convenience alias: bounded LRU memoisation decorator for plain functions
# (per-instance method caching is handled by memoize_instancemethod below).
memoize = functools.lru_cache(maxsize=2048)
class memoize_instancemethod(object):
    """Decorator caching a method's return value on each instance separately
    (as opposed to the per-function cache of ``functools.lru_cache``).

    Source: http://code.activestate.com/recipes/577452/

    The cached value is stored in a dict on the instance whose method was
    invoked, keyed by the method and its arguments; all arguments must
    therefore be hashable. Accessing the method through the class (rather
    than an instance) returns the undecorated function, so such calls are
    not cached.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        # Class-level access: behave like the plain function (no caching).
        if obj is None:
            return self.func
        # Instance access: bind the instance as the first call argument.
        return functools.partial(self, obj)

    def __call__(self, *args, **kw):
        instance = args[0]
        try:
            cache = instance.__cache
        except AttributeError:
            # Lazily create the per-instance cache on first use.
            cache = instance.__cache = {}
        key = (self.func, args[1:], frozenset(kw.items()))
        if key not in cache:
            cache[key] = self.func(*args, **kw)
        return cache[key]
def relative_path(base_path_file, path):
    """Interpret ``path`` relative to the directory of ``base_path_file``.

    Absolute paths are returned unchanged, as is any path whose base is not a
    plain string (``base_path_file`` might be an AttrDict).
    """
    if os.path.isabs(path) or not isinstance(base_path_file, str):
        return path
    return os.path.join(os.path.dirname(base_path_file), path)
def load_function(source):
    """Return a function from a module, given a source string of the form
    'module.submodule.subsubmodule.function_name'.

    Raises ValueError if ``source`` contains no dot.
    """
    # rsplit raises ValueError on a dot-less string, which callers rely on.
    module_string, function_string = source.rsplit(".", 1)
    # Reuse an already-imported module; otherwise import it now.
    try:
        module = sys.modules[module_string]
    except KeyError:
        module = importlib.import_module(module_string)
    return getattr(module, function_string)
def plugin_load(name, builtin_module):
    """Resolve ``name`` to a function, trying it first as a fully qualified
    third-party path and falling back to ``builtin_module`` when ``name``
    contains no dots.
    """
    try:  # First try importing as a third-party module
        return load_function(name)
    except ValueError:
        # ValueError raised if we got a string without '.',
        # which implies a builtin function,
        # so we attempt to load from the given module
        return load_function("{}.{}".format(builtin_module, name))
|
import click
import sys
from base64 import urlsafe_b64encode
from zeus.config import db
from zeus.models import Repository, RepositoryProvider, Hook
from .base import cli
# Parent click group for all hook-management sub-commands; intentionally has
# no docstring so the CLI help text stays unchanged.
@cli.group("hooks")
def hooks():
    pass
@hooks.command()
@click.argument("repository", required=True)
@click.argument("provider", required=True)
def add(repository, provider):
    # Create a hook for ``repository`` (given as provider/owner/name) bound to
    # the given webhook ``provider``, then print its credentials.
    repo_bits = repository.split("/", 2)
    assert (
        len(repo_bits) == 3
    ), "repository not in valid format: {provider}/{owner}/{name}"
    # unrestricted_unsafe() bypasses tenant scoping; acceptable here because
    # this is an operator-only CLI command.
    repo = (
        Repository.query.unrestricted_unsafe()
        .filter(
            Repository.provider == RepositoryProvider(repo_bits[0]),
            Repository.owner_name == repo_bits[1],
            Repository.name == repo_bits[2],
        )
        .first()
    )
    assert repo
    hook = Hook(repository_id=repo.id, provider=provider)
    db.session.add(hook)
    db.session.commit()
    click.echo("Hook created:")
    click.echo("-> id = {}".format(str(hook.id)))
    # The token is raw bytes; display it urlsafe-base64 encoded.
    click.echo(
        "-> token = {}".format(urlsafe_b64encode(hook.token).decode("utf-8"))
    )
    click.echo("-> provider = {}".format(hook.provider))
    click.echo(
        "-> base_path = /hooks/{}/{}".format(str(hook.id), hook.get_signature())
    )
@hooks.command()
@click.option("--provider")
# NOTE: the function name shadows the builtin ``list``, but click derives the
# CLI command name from it, so renaming would change the CLI interface.
def list(provider):
    # List all registered hooks, optionally filtered by provider.
    query = Hook.query.unrestricted_unsafe()
    if provider:
        query = query.filter(Hook.provider == provider)
    click.echo("Registered Hooks:")
    for hook in query:
        click.echo(" -> {} ({})".format(hook.id, hook.provider))
@hooks.command()
@click.argument("hook_id", required=True)
def get(hook_id):
    # Show the details and credentials of a single hook.
    hook = Hook.query.unrestricted_unsafe().get(hook_id)
    # Fail fast with a clear assertion when the id is unknown (consistent with
    # the `add`/`remove` commands) instead of an opaque AttributeError below.
    assert hook
    click.echo("-> id = {}".format(str(hook.id)))
    click.echo("-> repository = {}".format(hook.repository.get_full_name()))
    # The token is raw bytes; display it urlsafe-base64 encoded.
    click.echo(
        "-> token = {}".format(urlsafe_b64encode(hook.token).decode("utf-8"))
    )
    click.echo("-> provider = {}".format(hook.provider))
    click.echo(
        "-> base_path = /hooks/{}/{}".format(str(hook.id), hook.get_signature())
    )
@hooks.command()
@click.argument("hook_id", required=True)
@click.option(
    "--yes",
    prompt="Are you sure you wish to remove this hook?",
    is_flag=True,
    required=True,
)
def remove(hook_id, yes):
    # Delete a hook after an interactive confirmation prompt.
    if not yes:
        sys.exit(1)
    hook = Hook.query.unrestricted_unsafe().get(hook_id)
    assert hook
    db.session.delete(hook)
    db.session.commit()
    click.echo("Hook deleted!")
|
from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '2.2.0'
class MusicBoxExtension(ext.Extension):
    """Mopidy extension serving the Headspring web client over Mopidy's
    built-in HTTP server."""

    dist_name = 'Mopidy-Headspring-Web'
    ext_name = 'headspring_web'
    version = __version__

    def get_default_config(self):
        # Default settings ship in ext.conf next to this module.
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)

    def get_config_schema(self):
        schema = super(MusicBoxExtension, self).get_config_schema()
        # NOTE(review): schema key 'musicbox' does not match ext_name
        # 'headspring_web' -- confirm which config section name is intended.
        schema['musicbox'] = config.Boolean()
        return schema

    def setup(self, registry):
        # Register a tornado app factory at Mopidy's http:app extension point.
        registry.add(
            'http:app', {'name': self.ext_name, 'factory': self.factory})

    def factory(self, config, core):
        # Tornado handlers: redirect / to index.html, render index.html via
        # IndexHandler, serve everything else from the static directory.
        from tornado.web import RedirectHandler
        from .web import IndexHandler, StaticHandler
        path = os.path.join(os.path.dirname(__file__), 'static')
        return [
            (r'/', RedirectHandler, {'url': 'index.html'}),
            (r'/(index.html)', IndexHandler, {'config': config, 'path': path}),
            (r'/(.*)', StaticHandler, {'path': path})
        ]
|
def __bootstrap__():
    # Auto-generated loader shim: locate the compiled _SHA224.so inside the
    # installed package and load it in place of this pure-Python stub, then
    # remove the shim names so the module namespace is the extension's.
    global __bootstrap__, __loader__, __file__
    # NOTE(review): ``imp`` is deprecated and removed in Python 3.12; this
    # shim presumably targets older interpreters -- confirm before upgrading.
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, '_SHA224.so')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class route_reflector(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/route-reflector. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Route reflector parameters for the BGPgroup
    """

    # The mangled __config/__state slots back the generated ``config`` and
    # ``state`` properties defined at the bottom of the class.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "route-reflector"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so pyangbind can track
        # changes and YANG metadata.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every element of this container; changed elements are copied over.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path; delegates to the parent when mounted in a tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "peer-groups",
                "peer-group",
                "route-reflector",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/config (container)

        YANG Description: Configuraton parameters relating to route reflection
        for the BGPgroup
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuraton parameters relating to route reflection
        for the BGPgroup
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Restore the element to an empty, unchanged container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/state (container)

        YANG Description: State information relating to route reflection for the
        BGPgroup
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information relating to route reflection for the
        BGPgroup
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to an empty, unchanged container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors; __builtin__.property is used because ``property`` may
    # be shadowed in generated modules.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class route_reflector(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/route-reflector. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Route reflector parameters for the BGPgroup
    """

    # The mangled __config/__state slots back the generated ``config`` and
    # ``state`` properties defined at the bottom of the class.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "route-reflector"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so pyangbind can track
        # changes and YANG metadata.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every element of this container; changed elements are copied over.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path; delegates to the parent when mounted in a tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "peer-groups",
                "peer-group",
                "route-reflector",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/config (container)

        YANG Description: Configuraton parameters relating to route reflection
        for the BGPgroup
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuraton parameters relating to route reflection
        for the BGPgroup
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Restore the element to an empty, unchanged container.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/state (container)

        YANG Description: State information relating to route reflection for the
        BGPgroup
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/route_reflector/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information relating to route reflection for the
        BGPgroup
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to an empty, unchanged container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors; __builtin__.property is used because ``property`` may
    # be shadowed in generated modules.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
from index import Index
import itertools
import operator
import numpy as np
import scipy.sparse as sp
from transform import Transform
from collections import OrderedDict
import networkx as nx
from os.path import commonprefix
from time import time
from threading import Lock, Thread
import json
from rwlock import ReadWriteLock as RWLock
from helpers import is_scalar, len_iter
class DataFrame(object):
""" The DataFrame class organizes data in a block sparse array format using
a hierarchical structure.
The DataFrame maintains two sets of indices for both the row and the column
of the DataFrame. Both of these can be indexed via their hierarchical
structure.
"""
# Blue blocks are the parent of the propogation
STATUS_BLUE="blue"
# Green blocks are completely done propogating
STATUS_GREEN="green"
# Red blocks are stopped and awiting to propogate
STATUS_RED="red"
def __init__(self):
    """ Create an empty DataFrame with no rows, columns, or partitions. """
    # Hierarchical row/column indices and the block-sparse partition store.
    self._row_index = Index()
    self._col_index = Index()
    self._partitions = {}
    self._row_counts = []
    self._col_counts = []
    # Matrix cache (guarded by a read/write lock) plus sub-DataFrame cache.
    self._cache = {}
    self._cache_lock = RWLock()
    self._df_cache = {}
    self._df_cache_lock = Lock()
    # Query bookkeeping: a fresh frame is its own top-level frame with an
    # empty query path; hash() identifies the frame by that path.
    self._top_df = self
    self._row_query = ()
    self._col_query = ()
    self.hash = lambda: (self._row_query, self._col_query)
    # Computational dependency graph, running transform threads, and plots.
    self._graph = nx.DiGraph()
    self._threads = {}
    self._plots = []
@classmethod
def _from_csv(cls, filename, header=True, index_col=None):
    """ Load from csv file, setting column names from header and row names
    from entries in index_col.

    Not yet implemented.
    """
    pass
@classmethod
def _from_pandas(cls, pandas_df):
    """ Load from a pandas dataframe, keeping row and column names (but
    converting to strings when they are not strings).

    Not yet implemented.
    """
    pass
@classmethod
def from_matrix(cls, matrix, row_labels=None, col_labels=None):
    """ Initialize from matrix (2D numpy array or 2D numpy matrix, or
    any type of scipy sparse matrix).

    The DataFrame keeps whatever format the matrix is currently in.
    If no row or column labels are specified, then the DataFrame defaults
    to numerical labels.

    Args:
        matrix: The matrix from which the DataFrame is initialized.
        row_labels: An optional list of labels for the rows.
        col_labels: An optional list of labels for the columns.

    Returns:
        A DataFrame containing the input matrix with row and column labels
        mapping to its rows and columns.

    Raises:
        ValueError: if the matrix is not two-dimensional, or if provided
            label lists do not match the matrix dimensions.
    """
    # The old check (`not matrix.shape > 0`) compared a tuple with an int
    # and never fired; validate the dimensionality explicitly instead.
    if len(matrix.shape) != 2:
        raise ValueError("matrix must be two-dimensional, got shape "
                         + str(matrix.shape))
    if row_labels is None:
        row_labels = (str(i) for i in range(matrix.shape[0]))
    if col_labels is None:
        col_labels = (str(i) for i in range(matrix.shape[1]))
    if isinstance(row_labels, list) and len(row_labels) != matrix.shape[0]:
        raise ValueError("Provided row labels do not match dimensions of "
                         "the given matrix.")
    if isinstance(col_labels, list) and len(col_labels) != matrix.shape[1]:
        raise ValueError("Provided column labels do not match "
                         "dimensions of the given matrix.")
    df = DataFrame()
    row_id = df._add_rows(row_labels)
    col_id = df._add_cols(col_labels)
    df._partitions[row_id, col_id] = matrix
    return df
def pwd(self):
""" Return the working directory of both the row and column index.
After a series of hierarchical indexing, pwd will return the current
working directory given these previous indices.
Returns:
A two-tuple containing the row-directory and column directory of the
current DataFrame.
"""
row = DataFrame._concat_strings(self._row_query)
col = DataFrame._concat_strings(self._col_query)
return row,col
@staticmethod
def _concat_strings(seq):
""" Given a sequence of hashable queries, return the resulting current
working directory. """
s = ""
for q in seq:
if isinstance(q,str) and q.endswith('/'):
s += q
return s
@property
def shape(self):
    """ The dimensions of the DataFrame.

    Returns:
        A two-tuple (number of rows, number of columns).
    """
    self._refresh_index()
    n_rows = len(self._row_index)
    n_cols = len(self._col_index)
    return (n_rows, n_cols)
def empty(self):
    """ Return whether the DataFrame has zero width or height.

    Returns:
        True if either value of ``shape`` is 0, False otherwise.

    Note:
        This ignores the values or presence of the actual underlying
        matrices.
    """
    self._refresh_index()
    n_rows, n_cols = self.shape
    # The old version returned True or fell through to an implicit None;
    # return a proper boolean in both cases (truthiness is unchanged for
    # existing callers).
    return n_rows == 0 or n_cols == 0
def set_matrix(self, M, row_labels=None, col_labels=None):
    """ Set the DataFrame's contents to be the matrix M.

    This function uses numerical labels for each dimension when no labels
    are given and the frame is empty; an existing frame reuses its labels.

    Args:
        M: The matrix that will be the target of the DataFrame.
        row_labels: (optional) list of labels for the rows of the matrix.
        col_labels: (optional) list of labels for the columns.
    """
    if not self.empty():
        self._refresh_index()
        # Identity comparison with None (the old `== None` invoked
        # __eq__, which is both unidiomatic and potentially wrong).
        if row_labels is None:
            row_labels = self._row_index.keys()
        if col_labels is None:
            col_labels = self._col_index.keys()
    self.__setitem__((slice(None, None, None), slice(None, None, None)),
                     M, rows=row_labels, cols=col_labels)
def set_dataframe(self, M_df):
    """ Set the DataFrame's contents to match the given DataFrame M_df,
    using the row and column labels present in M_df.

    Args:
        M_df: The DataFrame whose contents will be copied.
    """
    self._refresh_index()
    if not self._row_query:
        # No pending query: set over the full extent of this frame.
        self.__setitem__((slice(None, None, None),
                          slice(None, None, None)), M_df)
    else:
        # Replay all but the final query, then set at the final one.
        df, r, c = self._last_query((self._row_query, self._col_query))
        df[r, c] = M_df
@property
def r_matrix(self):
    # Read-only view of the underlying matrix; overlapping read-only
    # entries may coexist in the cache (see get_matrix).
    return self.get_matrix(readonly=True)
@property
def rw_matrix(self):
    # Read-write view: conflicting cache entries are purged on fetch so
    # writes to the returned matrix persist (see get_matrix).
    return self.get_matrix(readonly=False)
@rw_matrix.setter
def rw_matrix(self, val):
    """ Assign directly to the DataFrame's matrix. To mirror ordinary
    array usage, assigning a scalar with no indexing is only permitted
    when the DataFrame is exactly 1-by-1. """
    if not is_scalar(val):
        self.set_matrix(val)
        return
    if self.shape != (1, 1):
        raise ValueError("Shape mismatch (trying to set a matrix with"
                         +" size different from 1 by 1 to a scalar. ")
    self.set_matrix(np.array([[val]]))
def get_matrix(self, readonly=False, typ=None):
    """ Return a matrix containing the underlying elements of the DataFrame.

    Args:
        readonly: assumes the data is static, and does not purge
            conflicts. Multiple overlapping read-only matrices can
            simultaneously exist in the cache.
        typ: if a type is specified, the matrix returned is of that type;
            otherwise it inherits the type of the upper-left-most
            partition of the queried region. Currently accepted types are
            numpy.ndarray and scipy.sparse.csr_matrix.

    Returns: A matrix whose contents are identical to that of the DataFrame.

    Raises:
        KeyError: if the DataFrame is empty.
    """
    self._refresh_index()
    i_j = (self._row_query, self._col_query)
    if i_j == ((), ()):
        i_j = (((slice, (None, None, None)),), ((slice, (None, None, None)),))
    self._cache_lock.acquire_read()
    try:
        if i_j in self._cache:
            # A readonly cache entry can become read-write, but not the
            # other way around (otherwise, read-write entries would not
            # have their changes persist after eviction)
            if self._cache_readonly(i_j):
                self._cache_set_readonly(i_j, readonly)
            A = self._cache_fetch(i_j)
            return A
    finally:
        self._cache_lock.release_read()
    # If matrix is empty, raise error
    if self.empty():
        raise KeyError("Can't get matrix of an empty DataFrame.")
    # Otherwise purge the cache of related entries and repull from DF
    if not readonly:
        self._safe_cache_find_and_evict(i_j)
    num_rows = len(self._row_index)
    num_cols = len(self._col_index)
    if not (num_rows > 0 and num_cols > 0):
        raise KeyError
    # BUG FIX: itervalues() yields (partition_id, offset) pairs; the old
    # code compared whole pairs against bare partition ids below, so the
    # single-partition fast path could never match. Take the id only.
    row_id = next(self._row_index.itervalues())[0]
    col_id = next(self._col_index.itervalues())[0]
    if self._is_simple_query():
        A = self._fast_get_matrix()
    else:
        # This following code is slow, and should only be used when
        # forming matrices from overly complicated queries. Nearly all
        # cases should be handled by the above helper, which assumes a
        # benign sequence of queries.
        # If the entire dataframe exists in a single partition, return a
        # subset of that partition.
        if all(v[0] == row_id for v in self._row_index.itervalues()) and \
           all(v[0] == col_id for v in self._col_index.itervalues()):
            if (row_id, col_id) in self._partitions:
                partition = self._partitions[row_id, col_id]
                # BUG FIX: row_vals/col_vals were undefined names here
                # (NameError); build the partition-local offsets from the
                # indices instead.
                row_idx = [[v[1]] for v in self._row_index.itervalues()]
                col_idx = [v[1] for v in self._col_index.itervalues()]
                A = partition[row_idx, col_idx]
            else:
                if typ == sp.csr_matrix:
                    A = sp.csr_matrix((num_rows, num_cols))
                else:
                    A = np.zeros((num_rows, num_cols))
                self._partitions[row_id, col_id] = A
        else:
            # Element-wise assembly across multiple partitions.
            if typ == sp.csr_matrix:
                A = sp.csr_matrix((num_rows, num_cols))
            else:
                A = np.zeros((num_rows, num_cols))
            i = 0
            for row_id, row_idx in self._row_index.itervalues():
                j = 0
                for col_id, col_idx in self._col_index.itervalues():
                    if (row_id, col_id) in self._partitions:
                        A[i, j] = \
                            self._partitions[row_id, col_id][row_idx, col_idx]
                    else:
                        # Missing partition: initialize it to zeros (the
                        # corresponding entries of A stay 0).
                        if typ == sp.csr_matrix:
                            self._partitions[row_id, col_id] = \
                                sp.csr_matrix((self._row_counts[row_id],
                                               self._col_counts[col_id]))
                        else:
                            self._partitions[row_id, col_id] = \
                                np.zeros((self._row_counts[row_id],
                                          self._col_counts[col_id]))
                    j += 1
                i += 1
    if typ is not None:
        # BUG FIX: the conversion branches were swapped in the original
        # (calling .toarray() on dense arrays and csr_matrix() on sparse
        # ones). Convert toward the *requested* type.
        if typ == sp.csr_matrix and not sp.issparse(A):
            A = sp.csr_matrix(A)
        elif typ == np.ndarray and sp.issparse(A):
            A = A.toarray()
    # Finally, store the cached matrix. Need a lock in case other threads
    # are iterating over the cache
    self._safe_cache_add(i_j, A, readonly=readonly)
    return A
def _is_simple_query(self):
""" Return whether the query for this dataframe is simple. Current
queries designated as simple are single directory queries.
Simple queries are contiguous blocks within partitions in their original
order, allowing for the use of slices as opposed to iteration over the
elements.
"""
if (len(self._row_query)==0 and len(self._col_query)==0):
return True
if (len(self._row_query)==1 and
len(self._col_query)==1 and
isinstance(self._row_query[0],str) and
isinstance(self._col_query[0],str)):
return True
row_str_slice = (all(isinstance(q,str) for q in self._row_query[:-1])
and self._is_encoded_slice(self._row_query[-1]))
col_str_slice = (all(isinstance(q,str) for q in self._col_query[:-1])
and self._is_encoded_slice(self._col_query[-1]))
if row_str_slice and col_str_slice:
return True
return False
def _fast_get_matrix(self):
    """ Return the underlying matrix for the dataframe, assuming the query
    is simple (see _is_simple_query) for optimized retrieval.

    Fast path: if the parent query's matrix is cached and both trailing
    queries are encoded slices, slice the cached matrix directly.
    Otherwise the result is stitched together block-by-block with
    hstack/vstack over the contiguous partitions covered by the indices.
    """
    if ((self._row_query[:-1],self._col_query[:-1]) in self._cache
        and self._is_encoded_slice(self._row_query[-1])
        and self._is_encoded_slice(self._col_query[-1])):
        i_j = (self._row_query[:-1],self._col_query[:-1])
        s1 = self._tuple_element_to_query(self._row_query[-1])
        s2 = self._tuple_element_to_query(self._col_query[-1])
        return self._cache_fetch(i_j)[s1,s2]
    i,j = 0,0
    # Index values are (partition_id, offset) pairs; walk both axes up to
    # the partition containing the last key of each.
    row_it = self._row_index.itervalues()
    col_it = self._col_index.itervalues()
    (rp,ri) = next(row_it)
    (cp,ci) = next(col_it)
    (rp_end,ri_end) = self._row_index[next(reversed(self._row_index))]
    (cp_end,ci_end) = self._col_index[next(reversed(self._col_index))]
    row_list = []
    while(rp != rp_end):
        col_list = []
        while(cp != cp_end):
            # Take the remainder of partition (rp, cp) starting at the
            # current offsets.
            p = self._index_partition((rp,cp),
                                      (slice(ri,None,None),
                                       slice(ci,None,None)))
            col_list.append(p)
            # Skip the column iterator past the keys covered by this
            # block; the next key should start a new partition (ci == 0).
            col_it = itertools.islice(col_it,p.shape[1]-1,None)
            (cp,ci) = next(col_it)
            assert(ci==0)
        # Final column block of this block-row, bounded by ci_end.
        p = self._index_partition((rp,cp),
                                  (slice(ri,None,None),
                                   slice(ci,ci_end+1,None)))
        col_list.append(p)
        # Reset the column iterator for the next block-row.
        col_it = self._col_index.itervalues()
        (cp,ci) = next(col_it)
        if sp.issparse(p):
            row_list.append(sp.hstack(col_list))
        else:
            row_list.append(np.hstack(col_list))
        row_it = itertools.islice(row_it,p.shape[0]-1,None)
        (rp,ri) = next(row_it)
    # last row
    col_list = []
    while(cp != cp_end):
        p = self._index_partition((rp,cp),
                                  (slice(ri,ri_end+1,None),
                                   slice(ci,None,None)))
        col_list.append(p)
        col_it = itertools.islice(col_it, p.shape[1]-1,None)
        (cp,ci) = next(col_it)
        assert(ci==0)
    p = self._index_partition((rp,cp),
                              (slice(ri,ri_end+1,None),
                               slice(ci,ci_end+1,None)))
    # Single-block result: return the partition slice directly.
    if len(row_list) == 0 and len(col_list) == 0:
        return p
    col_list.append(p)
    if sp.issparse(p):
        row_list.append(sp.hstack(col_list))
        return sp.vstack(row_list)
    else:
        row_list.append(np.hstack(col_list))
        return np.vstack(row_list)
def _index_partition(self,p_index,m_index):
""" Given a partition index and a matrix index, return the corresponding
submatrix of the indexed partition. If it doesn't exist, initialize
it to zeros. """
if p_index in self._partitions:
return self._partitions[p_index][m_index]
else:
rp,cp = p_index
return np.zeros((self._row_counts[rp],self._col_counts[cp]))[m_index]
def set_structure(self, rows, cols):
    """ Sets the row and column labels of the DataFrame to the given
    lists, extending the structure only when it differs from the current
    one.

    Args:
        rows: A list of strings to set as the rows of this DataFrame.
        cols: A list of strings to set as the columns of this DataFrame.
    """
    self._refresh_index()
    unchanged = (self._row_index.keys() == rows and
                 self._col_index.keys() == cols)
    if not unchanged:
        self._extend(rows, cols)
def copy_structure(self, df):
    """ Extend the DataFrame to match the structure given in df.

    Args:
        df: A DataFrame whose hierarchical structure will be copied to
            the current DataFrame.
    """
    self._refresh_index()
    df._refresh_index()
    rows = df._row_index.keys()
    cols = df._col_index.keys()
    unchanged = (self._row_index.keys() == rows and
                 self._col_index.keys() == cols)
    if not unchanged:
        self._extend(rows, cols)
def structure_to_json(self):
    """ Produce a JSON string with all the hierarchical structure
    information of the dataframe.

    Returns:
        A JSON string with properties rows, cols, row_index, col_index,
        and partitions containing the respective information.
    """
    rows, cols = [], []
    for key in self._row_index:
        self._add_index_to_json_array(rows, key)
    for key in self._col_index:
        self._add_index_to_json_array(cols, key)
    return json.dumps({
        "rows": rows,
        "cols": cols,
        "row_index": {k: v[0] for k, v in self._row_index.iteritems()},
        "col_index": {k: v[0] for k, v in self._col_index.iteritems()},
        # Map each partition key (stringified) to an enumeration id.
        "partitions": {str(v): u
                       for u, v in enumerate(self._partitions.keys())},
    })
def graph_to_json(self):
    """ Produce a JSON string with all the computational graph information
    of the dataframe.

    Returns:
        A JSON string with properties nodes, edges, and implicit
        (implicit edges derived from the dataframe hierarchical
        structure).
    """
    to_s = DataFrame._query_to_string
    nodes = [to_s(n) for n in self._graph.nodes()]
    edges = [(to_s(a), to_s(b)) for (a, b) in self._graph.edges()]
    implicit = []
    for node in self._graph.nodes():
        implicit.extend((node, dep)
                        for dep in self._get_implicit_dependents(node)
                        if node != dep)
    implicit = [(to_s(a), to_s(b)) for (a, b) in implicit]
    return json.dumps({
        "nodes": nodes,
        "edges": edges,
        "implicit": implicit
    })
def graph_to_cytoscape_json(self):
    """ Produce a JSON string with all the computational graph information
    of the dataframe in the format required for cytoscape. The first of a
    pair of mutual implicit edges is emitted with display=True, its
    reverse duplicate with display=False. """
    to_s = DataFrame._query_to_string
    nodes = [{"data": {"id": to_s(n)}} for n in self._graph.nodes()]
    edges = [{"data": {"source": to_s(a),
                       "target": to_s(b),
                       "type": "explicit",
                       "display": True}}
             for (a, b) in self._graph.edges()]
    implicit = []
    for node in self._graph.nodes():
        for dep in self._get_implicit_dependents(node):
            if node == dep:
                continue
            shown = (dep, node, True) not in implicit
            implicit.append((node, dep, shown))
    implicit = [{"data": {"source": to_s(a),
                          "target": to_s(b),
                          "type": "implicit",
                          "display": shown}}
                for (a, b, shown) in implicit]
    return json.dumps({"nodes": nodes, "edges": edges + implicit})
@property
def T(self):
    """ The Transform that generates this DataFrame.

    Returns: the transformation T if the target of T is this DataFrame,
    and None otherwise.
    """
    key = self.hash()
    if key not in self._graph.node:
        return None
    return self._graph.node[key]["transform"]
def status(self):
    """ Return the running status of the DataFrame.

    Blue means this DataFrame is currently the root of a propagation, red
    that it is waiting to propagate, and green that it has finished
    propagating and is otherwise safe to read from. A propagation occurs
    when underlying structure changes and causes transformations to be
    re-run.

    Returns:
        One of STATUS_BLUE, STATUS_GREEN, STATUS_RED, or None when this
        DataFrame is not in the computational graph.
    """
    key = self.hash()
    if key not in self._graph.node:
        return None
    return self._graph.node[key]["status"]
def rows(self):
    """ Return a list of all the rows that index into the DataFrame. """
    return self._row_index.keys()
def cols(self):
    """ Return a list of all the columns that index into the DataFrame. """
    return self._col_index.keys()
@staticmethod
def _add_index_to_json_array(arr,path):
""" Add a path to an array of hierarchical indices """
if '/' in path:
s = path.split('/',1)
query = s[0]+'/'
remaining_path = s[1]
for d in arr:
if d["query"] == query:
DataFrame._add_index_to_json_array(d["files"],remaining_path)
return
l = []
arr.append({
"directory": True,
"query": query,
"files": l
})
DataFrame._add_index_to_json_array(l,remaining_path)
else:
arr.append({"directory": False, "query": path})
def _subset(self, i, j):
    """ Return a sub-DataFrame view for queries (i, j), sharing the
    partitions, counts, caches, and computational graph with this
    DataFrame; only new index subsets and an extended query path are
    created. Full slices on both axes return self unchanged. """
    if (i == slice(None, None, None) and j == slice(None, None, None)):
        return self
    subset = DataFrame()
    subset._row_index = self._row_index.subset(i)
    subset._col_index = self._col_index.subset(j)
    subset._row_counts = self._row_counts
    subset._col_counts = self._col_counts
    subset._partitions = self._partitions
    subset._cache = self._cache
    subset._df_cache = self._df_cache
    subset._cache_lock = self._cache_lock
    subset._df_cache_lock = self._df_cache_lock
    subset._row_query = self._row_query \
        + (DataFrame._query_to_tuple_element(i),)
    subset._col_query = self._col_query \
        + (DataFrame._query_to_tuple_element(j),)
    # BUG FIX: this previously rebound *self*.hash (the parent's) to a
    # lambda reading the nonexistent class attribute DataFrame._row_query,
    # so any later parent.hash() call raised AttributeError. Bind the
    # subset's hash to its own query path instead.
    subset.hash = lambda: (subset._row_query, subset._col_query)
    subset._top_df = self._top_df
    subset._graph = self._graph
    subset._threads = self._threads
    subset._plots = self._plots
    return subset
def _skeleton_copy(self):
    """Return an empty index-level skeleton copy of the Dataframe with
    cleared caches, for the purposes of indexing without caches.
    Should only be called on top_df.
    """
    clone = DataFrame()
    clone._row_index = self._row_index
    clone._col_index = self._col_index
    clone._row_counts = self._row_counts
    clone._col_counts = self._col_counts
    clone._top_df = clone
    clone._row_query = self._row_query
    clone._col_query = self._col_query
    # NOTE(review): the lambda deliberately reads self's queries (matching
    # the original behavior); the values agree with the clone's copies at
    # creation time.
    clone.hash = lambda: (self._row_query, self._col_query)
    return clone
def __getitem__(self, i_j_type):
""" Get a portion of the dataframe, passing row/column indices and an
optional type parameter.
The calling convertions for the getitem class are:
df[row_indexing, col_indexing, type=DataFrame]
If type == DataFrame, the method will subset the rows and column indices
based upon the i,j terms, and return a dataframe with the same
This function currently only tested and coded for dense numpy arrays
"""
if len(i_j_type) == 2:
i,j = i_j_type
typ = DataFrame
elif len(i_j_type) == 3:
i,j,typ = i_j_type
else:
raise ValueError("Indices must be i,j pairs or i,j,type triplets")
if typ == DataFrame:
k_l = (self._row_query + (self._query_to_tuple_element(i),),
self._col_query + (self._query_to_tuple_element(j),))
if k_l in self._df_cache:
return self._df_cache[k_l]
else:
df_subset = self._subset(i,j)
self._df_cache_add(k_l,df_subset)
return df_subset
raise ValueError("Other types not implemented yet.")
@staticmethod
def _is_encoded_slice(s):
return isinstance(s,tuple) and s[0] is slice
@staticmethod
def _is_encoded_list(s):
return isinstance(s,tuple) and s[0] is list
@staticmethod
def _query_to_tuple_element(i):
""" Convert a query to a tuple used to hash into dictionaries.
str -> str
int -> int
slice(a,b,c) -> (a,b,c)
"""
if isinstance(i,str) or isinstance(i,int):
return i
elif isinstance(i,slice):
return (slice,(i.start,i.stop,i.step))
elif isinstance(i,list):
return (list,tuple(i))
raise ValueError
@staticmethod
def _tuple_element_to_query(i):
""" Convert a query to a tuple used to hash into dictionaries.
str -> str
int -> int
(a,b,c) -> slice(a,b,c)
"""
if isinstance(i,str) or isinstance(i,int):
return i
elif DataFrame._is_encoded_slice(i):
return slice(*(i[1]))
elif DataFrame._is_encoded_list(i):
return list(i[1])
raise ValueError("Unparseable tuple element: "+str(i))
@staticmethod
def _tuple_element_to_string(i):
""" Convert a tuple element to a string that visually matches
the query typed by the user """
if isinstance(i,str):
return i
elif isinstance(i,tuple):
return str(i[0])+":"+str(i[1])+":"+str(i[2])
else:
return str(i)
@staticmethod
def _query_to_string(i_j):
""" Convert an i_j query to a string that matches the query typed by the
user """
i,j = i_j
s = ""
for k in range(len(i)):
s += "["
s += DataFrame._tuple_element_to_string(i[k])
s += ","
s += DataFrame._tuple_element_to_string(j[k])
s += "]"
return s
def _reindex(self, i_j, ignore_df_cache=False):
    """ Reindexes into the DataFrame starting from the top level: returns
    the top-level DataFrame untouched when there is no query, otherwise
    replays the query sequence and performs the last query. """
    queries, _ = i_j
    if not queries:
        return self._top_df
    df, r, c = self._last_query(i_j, ignore_df_cache)
    return df[r, c]
def _last_query(self, i_j, ignore_df_cache=False):
    """ Reindexes into the DataFrame starting from the top level given a
    sequence of row/col queries i_j, returning (df, r, c) where r, c are
    the last queries and df[r, c] is the resulting DataFrame after the
    i_j queries. There must be at least one index into the DataFrame for
    this call. """
    row_qs, col_qs = i_j
    assert(len(row_qs) == len(col_qs))
    assert(len(row_qs) > 0)
    if ignore_df_cache:
        # Use a cache-free skeleton so intermediate frames bypass caches.
        df = self._top_df._skeleton_copy()
    else:
        df = self._top_df
    decode = DataFrame._tuple_element_to_query
    for rq, cq in zip(row_qs[:-1], col_qs[:-1]):
        df = df[decode(rq), decode(cq)]
    return (df, decode(row_qs[-1]), decode(col_qs[-1]))
def _get_full_rows_and_cols(self, i_j, ignore_df_cache=False):
    """Retrieves all row and column indices for query i_j. These are full
    rows and columns from the perspective of the top-level DataFrame.
    """
    i, j = i_j
    df_indexed = self._reindex(i_j, ignore_df_cache)
    if df_indexed.empty():
        return ([], [])
    (row_prefix, col_prefix) = df_indexed.pwd()
    rows = [row_prefix + r for r in df_indexed._row_index.keys()]
    cols = [col_prefix + c for c in df_indexed._col_index.keys()]
    assert(len(rows) > 0 and len(cols) > 0)
    return (rows, cols)
def _add_cols(self, col_keys):
    """ Add col_keys to _col_index for a new partition, update col_counts,
    and return the partition id.
    """
    return self._add_keys(col_keys,1)
def _add_rows(self, row_keys):
    """ Add row_keys to _row_index for a new partition, update row_counts,
    and return the partition id.
    """
    return self._add_keys(row_keys,0)
def _add_keys(self, iter_keys, axis = 0):
""" Given an iterator over keys, add these keys to the index
corresponding to the given axis. """
if axis == 0:
counts = self._row_counts
index = self._row_index
else:
counts = self._col_counts
index = self._col_index
key_id = len(counts)
len_keys = 0
for k in iter_keys:
index[k] = (key_id, len_keys)
len_keys += 1
counts.append(len_keys)
return key_id
def _get_or_make_keys(self, query, val, axis=0, prefix=""):
    """ Given a query, an index and a value, return the corresponding
    subset of keys for this value if any exist. If none exist, construct
    a list of keys for the query. Additionally return True if the keys
    are known to already exist and were simply fetched.

    Args:
        query: a row/column query (directory string, file string, etc.).
        val: the value being set (DataFrame, numpy array, or scalar).
        axis: 0 for the row index, 1 for the column index.
        prefix: directory prefix prepended to generated keys.

    Returns:
        (keys, existed) where keys is a list or generator of full key
        names and existed is True only when the keys were fetched from
        the existing index.
    """
    if axis == 0:
        index = self._row_index
    else:
        index = self._col_index
    # If query is a string, append it to the prefix if it is a directory.
    # Otherwise, the query is for exactly one file, so return the key for
    # that file
    full_prefix = prefix
    if isinstance(query,str):
        if query.endswith("/"):
            full_prefix += query
        else:
            return [prefix + query], False
    # If the val is a DataFrame, then we should use exactly the keys present
    # in the DataFrame
    if isinstance(val, DataFrame):
        if axis == 0:
            target_index = val._row_index
        else:
            target_index = val._col_index
        return (full_prefix + k for k in target_index.iterkeys()), False
    # If the val is a single scalar, then return the exact key given by the
    # query if it is a file, otherwise append a 0 to the directory
    # If the keys already exist, then reconstruct the whole keys
    # NOTE(review): _get_keys presumably yields a fresh generator per call;
    # the first call below is consumed only to probe for existence --
    # confirm against Index._get_keys.
    if next(index._get_keys(query),None) != None:
        return (prefix + k for k in index._get_keys(query)), True
    # Otherwise, create new keys
    else:
        if is_scalar(val):
            return [full_prefix + "0"], False
        elif isinstance(val, np.ndarray):
            return (full_prefix + str(k) for k in range(val.shape[axis])),False
        else:
            raise ValueError("Unknown value being set to the DataFrame: " +str(type(val)))
def __setitem__(self, i_j, val, rows = None, cols = None):
    """ Set a portion of the dataframe, passing row/column indices to the
    values stored in val. If the input is not a DataFrame, then try to
    convert the input into a DataFrame.

    If val is a Transform, we evaluate the transform and call __setitem__
    on the result (within the apply function). If val is a scalar, it is
    broadcast over the addressed region. Otherwise, we case on whether
    the keys for the columns and rows already exist or not.

    To maintain consistency between sub-DataFrames, all insertions
    actually occur at the top level DataFrame. All child DataFrames
    refetch their row/column indices from the top level DataFrame when
    necessary via refresh_index().

    Args:
        i_j: (row_query, col_query) pair addressing the target region.
        val: a Transform, DataFrame, numpy array, or scalar.
        rows: optional explicit row labels for the written values.
        cols: optional explicit column labels for the written values.
    """
    i,j = i_j
    top_df = self._top_df
    # This probably needs to be fixed for setting subsetted dataframes
    if i_j == (slice(None,None,None),slice(None,None,None)):
        node = self._row_query, self._col_query
    else:
        node = (self._row_query+(DataFrame._query_to_tuple_element(i),),
                self._col_query+(DataFrame._query_to_tuple_element(j),))
    # If the input is a Transform, evaluate the transform and update the
    # computational graph
    if isinstance(val,Transform):
        # If the query is a file and not a directory, initialize it if it
        # doesn't already exist
        if (isinstance(i,str) and not i.endswith('/') and
                i not in top_df._row_index):
            top_df._add_rows([i])
        if (isinstance(j,str) and not j.endswith('/') and
                j not in top_df._col_index):
            top_df._add_cols([j])
        # Right now, run the init and refresh the transform's variables
        # on every step
        target_df = self._reindex(node)
        self._add_to_graph(node,status=self.STATUS_BLUE,transform=val)
        thread = val.apply(target_df)
        if thread is not None:
            self._threads[node] = thread
        return
    row_prefix,col_prefix = self.pwd()
    # Full-path key iterators; rebuilt on every call because generators
    # are single-use.
    def rows_iter():
        if rows is None:
            return self._get_or_make_keys(i, val, axis=0, prefix=row_prefix)[0]
        else:
            if isinstance(i,str) and i.endswith("/"):
                return (row_prefix + i + k for k in rows)
            else:
                return (row_prefix + k for k in rows)
    def cols_iter():
        if cols is None:
            return self._get_or_make_keys(j, val, axis=1, prefix=col_prefix)[0]
        else:
            if isinstance(j,str) and j.endswith("/"):
                return (col_prefix + j + k for k in cols)
            else:
                return (col_prefix + k for k in cols)
    len_rows = len_iter(rows_iter())
    len_cols = len_iter(cols_iter())
    if is_scalar(val):
        M = val
    elif isinstance(val, np.ndarray):
        M = val
        if M.shape != (len_rows,len_cols):
            raise ValueError("Shape mismatch: " + str(M.shape) + " != "
                             + str((len_rows,len_cols)))
    elif isinstance(val, DataFrame):
        M = val.get_matrix()
        if rows is None:
            rows = val.rows()
        if cols is None:
            cols = val.cols()
    else:
        raise ValueError("Unknown datatype being set to the DataFrame")
    # First check the cache for a fast set.
    self._cache_lock.acquire_read()
    try:
        if node in self._cache:
            if self._cache_readonly(node) == True:
                raise UserWarning("Attempting to set a readonly cache block. " \
                    "The result will not persist. ")
            self._cache_set(node,M)
            return
    finally:
        self._cache_lock.release_read()
    # From here on out, rows and cols are full path names; these go into
    # top_df, not the self dataframe
    # "all in or all out" requirement
    if self.shape[0] == 0:
        all_rows_exist, no_rows_exist = False, True
    elif self._get_or_make_keys(i, val, axis=0, prefix=row_prefix)[1]:
        all_rows_exist, no_rows_exist = True, False
    else:
        all_rows_exist = all(k in top_df._row_index for k in rows_iter())
        no_rows_exist = all(k not in top_df._row_index for k in rows_iter())
    if self.shape[1] == 0:
        all_cols_exist, no_cols_exist = False, True
    elif self._get_or_make_keys(j, val, axis=1, prefix=col_prefix)[1]:
        all_cols_exist, no_cols_exist = True, False
    else:
        all_cols_exist = all(k in top_df._col_index for k in cols_iter())
        no_cols_exist = all(k not in top_df._col_index for k in cols_iter())
    assert(all_rows_exist or no_rows_exist)
    assert(all_cols_exist or no_cols_exist)
    # Since the cache maintains indices, we need to lock until the
    # indices are updated, the underlying matrix is written, and cache
    # eviction is done.
    if no_cols_exist:
        col_id = top_df._add_cols(cols_iter())
        col_ids = [col_id]
    else:
        col_ids = OrderedDict.fromkeys(top_df._col_index[c][0]
                                       for c in cols_iter()).keys()
    if no_rows_exist:
        row_id = top_df._add_rows(rows_iter())
        row_ids = [row_id]
    else:
        row_ids = OrderedDict.fromkeys(top_df._row_index[r][0] \
                                       for r in rows_iter()).keys()
    # require all partitions to already exist or not exist
    # BUG FIX: itertools.product returns a single-use iterator, so the
    # second all() below used to see an exhausted iterable and was
    # vacuously True; materialize the pairs so both checks are real.
    all_pairs = list(itertools.product(set(row_ids),set(col_ids)))
    all_parts_exist = all(pair in self._partitions \
                          for pair in all_pairs)
    no_parts_exist = all(pair not in self._partitions \
                         for pair in all_pairs)
    if not (all_parts_exist or no_parts_exist):
        raise KeyError
    # If the entries do not exist, then start by stopping all
    # dependencies that do exist; all entries should exist at this point
    # since this occurs after adding the rows and columns.
    if no_rows_exist or no_cols_exist:
        for k_l in self._get_implicit_dependents(node):
            if self._graph.node[k_l]["status"] != self.STATUS_BLUE:
                self._propogate_stop(k_l)
    # From here on out we assume its a DataFrame. First we must evict all
    # conflicts, since the matrix is being changed.
    self._safe_cache_find_and_evict(node)
    # Manually update the dataframe
    if all_rows_exist and all_cols_exist and all_parts_exist:
        rows = list(rows_iter())
        cols = list(cols_iter())
        if is_scalar(M):
            top_df._write_scalar_to(M,rows,cols)
        else:
            top_df._write_matrix_to(M,rows,cols)
    else:
        # Create new partition blocks.
        cur_row = 0
        for row_id in row_ids:
            cur_col = 0
            for col_id in col_ids:
                row_count = top_df._row_counts[row_id]
                col_count = top_df._col_counts[col_id]
                # If the matrix constitutes the entire partition, just
                # set without slicing
                if ((len_rows,len_cols) == (row_count, col_count) \
                        and (cur_row,cur_col) == (0,0)):
                    if is_scalar(M):
                        self._partitions[row_id,col_id] = \
                            M*np.ones((len_rows,len_cols))
                    else:
                        self._partitions[row_id,col_id] = M
                else:
                    # Otherwise, select the parts of the matrix that are
                    # applicable and set them for each partition
                    if is_scalar(M):
                        self._partitions[row_id,col_id] = \
                            M*np.ones((top_df._row_counts[row_id],
                                       top_df._col_counts[col_id]))
                    else:
                        self._partitions[row_id,col_id] \
                            = M[cur_row:cur_row+top_df._row_counts[row_id], \
                                cur_col:cur_col+top_df._col_counts[col_id]]
                cur_col += top_df._col_counts[col_id]
            cur_row += top_df._row_counts[row_id]
    # Need to update df cache. It is important this occurs after adding
    # the indices to the row and column indexes, since the df_cache logic
    # uses the indices to determine overlap.
    self._df_cache_flush(node)
    if no_rows_exist or no_cols_exist:
        for k_l in self._get_implicit_dependents(node):
            if self._graph.node[k_l]["status"] != self.STATUS_BLUE:
                self._propogate_start(k_l)
    if (node in self._graph.node and
            self._graph.node[node]["status"] == self.STATUS_BLUE):
        self._graph.node[node]["status"] = self.STATUS_GREEN
def __delitem__(self, i_j):
    """ Delete the entries at i_j from the dataframe, stopping dependent
    transforms first and restarting them afterwards. """
    # TODO: delete just rows or just columns
    i, j = i_j
    node = (self._row_query + (DataFrame._query_to_tuple_element(i),),
            self._col_query + (DataFrame._query_to_tuple_element(j),))
    # Stop dependencies before mutating the structure.
    for dependent in self._get_implicit_dependents(node):
        self._propogate_stop(dependent)
    # Evict all conflicting cache entries.
    self._safe_cache_find_and_evict(node)
    target = self[i_j]
    row_prefix, col_prefix = target.pwd()
    full_rows = [row_prefix + r for r in target._row_index.keys()]
    full_cols = [col_prefix + c for c in target._col_index.keys()]
    # Flush the df cache before deletion: the cache relies on the indices
    # to determine overlap.
    self._df_cache_flush(node)
    # Delete the indices themselves.
    del self._top_df._row_index[full_rows]
    del self._top_df._col_index[full_cols]
    # Finally restart dependencies.
    for dependent in self._get_implicit_dependents(node):
        self._propogate_start(dependent)
def _write_matrix_to(self,M,rows,cols):
""" Directly write a matrix to the specified rows and columns into the
underlying DataFrame, bypassing all other checks and constructs.
If the underlying dataframe is non-initialized, we write it."""
assert (M.shape == (len(rows),len(cols)))
row_val = 0
for (row_id,row_idx) in self._row_index[rows]:
col_val = 0
for (col_id,col_idx) in self._col_index[cols]:
# set it element-wise
if (row_id,col_id) not in self._partitions:
self._partitions[row_id,col_id] = \
np.zeros((self._row_counts[row_id],
self._col_counts[col_id]))
assert((row_id,col_id) in self._partitions)
assert((row_idx,col_idx) < self._partitions[row_id,col_id].shape)
assert((row_val,col_val) < M.shape)
self._partitions[row_id,col_id] \
[row_idx,col_idx] \
= M[row_val,col_val]
col_val += 1
row_val += 1
def _write_scalar_to(self,v,rows,cols):
""" Directly write a matrix to the specified rows and columns into the
underlying DataFrame, bypassing all other checks and constructs.
If the underlying dataframe is non-initialized, we write it."""
assert is_scalar(v)
row_val = 0
for (row_id,row_idx) in self._row_index[rows]:
col_val = 0
for (col_id,col_idx) in self._col_index[cols]:
# set it element-wise
if (row_id,col_id) not in self._partitions:
self._partitions[row_id,col_id] = \
np.zeros((self._row_counts[row_id],
self._col_counts[col_id]))
assert((row_id,col_id) in self._partitions)
assert((row_idx,col_idx) < self._partitions[row_id,col_id].shape)
self._partitions[row_id,col_id][row_idx,col_idx] = v
col_val += 1
row_val += 1
###########################################################################
# Graph related functions #
###########################################################################
# A few basic rules for graph dependencies:
# If A = f(X1,...Xn) then X1 -> A (simple computational dependency)
# If A is a path and B is a subdirectory, then B -> A
# The previous can be rephrased as
# if B is a path and A is a parent directory, then B -> A
# If A is dirty, then all nodes dependent on A are dirty.
# This is equivalent to saying all parent directories and all
# computational dependencies are dirty, and recursively apply.
def stop(self):
""" Stop the continuous thread that generates this DataFrame.
If a thread is running that generates this DataFrame, that thread will
be stopped as soon as it finishes an iteration.
"""
i_j = self.hash()
if i_j in self._threads and \
self._graph.node[i_j]["status"] != self.STATUS_RED:
self._graph.node[i_j]["status"] = self.STATUS_RED
self._threads[i_j].join()
del self._threads[i_j]
else:
print "Thread not found!"
    def stop_all(self):
        """ Stop every running transform thread.

        TODO: not yet implemented; currently a no-op placeholder.
        """
        pass
    def go(self):
        """ Start the continuous thread that generates this DataFrame.

        Only acts when this block is a transform, no thread is already
        registered for it, and its status is red (stopped).
        """
        i_j = self.hash()
        if (self.is_transform() and i_j not in self._threads and
            self._graph.node[i_j]["status"] == self.STATUS_RED):
            # Note: move this to transform.py
            t = Thread(target = self.T._continuous_wrapper, args=(self,))
            self._threads[i_j] = t
            # Mark green before starting the worker thread.
            self._graph.node[i_j]["status"] = self.STATUS_GREEN
            t.start()
def is_transform(self):
if self.hash() in self._graph.node:
return self._graph.node[self.hash()]["transform"] is not None
else:
return False
def status(self):
if self.is_transform():
if self.hash() in self._graph.node:
return self._graph.node[self.hash()]["status"]
else:
raise ReferenceError("Transformation does not " \
"exist in the computational graph. ")
else:
raise UserWarning("Asked for status of non continuous " \
"transform block.")
def is_running(self):
return (self.is_transform()
and self._graph.node[self.hash()]["status"] != self.STATUS_RED)
def _add_to_graph(self,i_j, status,transform=None):
""" Add a node to the graph and add all of its explicit edges.
Explicit edges are dependences from the arguments of the transform to
the resulting DataFrame. """
if transform is None:
# If the input is not a transofrm, simply add it to the graph if it
# doesn't yet exist.
if i_j not in self._graph:
self._graph.add_node(i_j,status=status,transform=None)
elif isinstance(transform, Transform):
# If the input is a transform, store the corresponding transform for
# later rebuilds.
self._graph.add_node(i_j,status=status,transform=transform)
else:
raise ValueError
# If the input is a transform, it has explicit dependencies that need
# to be added to the graph
if transform is not None:
for x in transform.args:
if isinstance(x,DataFrame):
k_l = (x._row_query,x._col_query)
if k_l not in self._graph:
self._add_to_graph(k_l,self.STATUS_GREEN,None)
if not k_l == i_j:
self._graph.add_edge((x._row_query,x._col_query),i_j)
for k in transform.kwargs:
v = transform.kwargs[k]
if isinstance(v,DataFrame):
k_l = (v._row_query,v._col_query)
if k_l not in self._graph:
self._add_to_graph(k_l,self.STATUS_GREEN,None)
if not k_l == i_j:
self._graph.add_edge((v._row_query,v._col_query),i_j)
@staticmethod
def _node_directory_overlap(i_j,k_l):
""" Return whether the directories are subdirectories of each other.
A False return here is definitely False, and we can skip any extra
work. A true result here is not sufficient to conclude that there is no
overlap. """
i,j = i_j
k,l = k_l
i_str = DataFrame._concat_strings(i)
j_str = DataFrame._concat_strings(j)
k_str = DataFrame._concat_strings(k)
l_str = DataFrame._concat_strings(l)
is_subdirectory = lambda a,b: a.startswith(b) or b.startswith(a)
row_is_subdirectory = is_subdirectory(i_str,k_str)
col_is_subdirectory = is_subdirectory(j_str,l_str)
return row_is_subdirectory,col_is_subdirectory
def _get_implicit_dependents(self,i_j):
""" Return all nodes implicitly dependent on i_j """
i,j = i_j
(rows,cols) = self._get_full_rows_and_cols(i_j, ignore_df_cache=True)
dependents = set()
for (i0_j0) in self._graph.nodes_iter():
row_dir,col_dir = DataFrame._node_directory_overlap(i_j,i0_j0)
if row_dir and col_dir:
(rows0,cols0) = self._get_full_rows_and_cols(i0_j0,
ignore_df_cache=True)
if (any(r in rows for r in rows0) \
and any(c in cols for c in cols0)):
dependents.add(i0_j0)
return dependents
def _get_df_implicit_dependents(self,i_j):
""" Return all nodes implicitly dependent on i_j in the dataframe
cache. This differs from get_implicit_dependents: rather than tracking
overlap within the dataframe, this tracks overlap in either set of
indicies.
If j_k shares any rows or columns with i_j, then we add it to the
returned set. """
(rows,cols) = self._get_full_rows_and_cols(i_j, ignore_df_cache=True)
dependents = set()
self._df_cache_lock.acquire()
for (i0_j0) in self._df_cache:
row_dir,col_dir = DataFrame._node_directory_overlap(i_j,i0_j0)
if row_dir or col_dir:
(rows0,cols0) = self._get_full_rows_and_cols(i0_j0,
ignore_df_cache=True)
if (any(r in rows for r in rows0) \
or any(c in cols for c in cols0)):
dependents.add(i0_j0)
self._df_cache_lock.release()
return dependents
def _get_explicit_dependents(self,i_j):
""" Return all nodes explicitly dependent on i_j, as denoted by the
computational graph """
if i_j in self._graph:
return self._graph.successors(i_j)
else:
return []
def _get_all_dependents(self,i_j):
""" Return all nodes dependent on i_j, implicit or explicit """
imp = self._get_implicit_dependents(i_j)
exp = self._get_explicit_dependents(i_j)
return list(imp)+exp
    def _extend(self,row_labels,col_labels,typ=np.ndarray):
        """Insert row/col labels in this dataframe that don't yet exist. This
        function requires there to be new labels to be inserted; otherwise it
        should not be called as a no-op.

        Note: the ``typ`` parameter is currently unused in this body.
        """
        top_df = self._top_df
        row_prefix,col_prefix = self.pwd()
        # Fetch all rows
        full_rows = [row_prefix+v for v in row_labels ]
        full_cols = [col_prefix+v for v in col_labels ]
        # Filter out rows that don't exist in the DataFrame yet.
        new_full_rows = [v for v in full_rows if v not in top_df._row_index]
        new_full_cols = [v for v in full_cols if v not in top_df._col_index]
        assert len(new_full_rows)>0 or len(new_full_cols)>0
        if len(new_full_rows)>0:
            top_df._add_rows(new_full_rows)
        if len(new_full_cols)>0:
            top_df._add_cols(new_full_cols)
        # Get partition ids
        # NOTE(review): row_ids/col_ids are computed but never used below —
        # possibly left over from an earlier version; confirm before removing.
        row_ids = [v[0] for v in top_df._row_index[full_rows]]
        col_ids = [v[0] for v in top_df._col_index[full_cols]]
        if self.hash() in top_df._graph and self.is_transform():
            top_df._refresh(top_df._graph.node[self.hash()]["transform"])
            top_df._df_cache_flush(self.hash())
        # If we update params_df here, then it won't update any parents of
        # this node.  (Original comment was truncated mid-sentence — TODO
        # confirm intended meaning with the author.)
        self._refresh_index()
        self._safe_cache_find_and_evict(self.hash())
    def _df_cache_flush(self,i_j):
        """ Remove all cached dataframe entries that are dependent on i_j.

        Both the df cache entry and the matrix cache entry for i_j (and for
        every implicit dependent) are dropped; matrix-cache evictions go
        through the lock-taking _safe_cache_evict.
        """
        if i_j in self._df_cache:
            # del self._df_cache[i_j]
            self._df_cache_del(i_j)
        # self._cache_lock.acquire()
        if i_j in self._cache:
            self._safe_cache_evict(i_j)
        # self._cache_lock.release()
        implicit_dependents = self._get_df_implicit_dependents(i_j)
        for k_l in implicit_dependents:
            if k_l in self._df_cache:
                # Delete from both caches, since the target has changed
                # This order matters, since the eviction needs to write
                # back to the dataframe according to the old value of the df
                # cache.
                if k_l in self._cache:
                    self._safe_cache_evict(k_l)
                self._df_cache_del(k_l)
    def _unsafe_df_cache_flush(self,i_j):
        """ Remove all cached dataframe entries that are dependent on i_j.

        Same flow as _df_cache_flush but matrix-cache evictions use the
        plain _cache_evict (no cache-lock acquisition) — presumably the
        caller is expected to already hold the cache write lock; confirm
        at the call sites.
        """
        if i_j in self._df_cache:
            self._df_cache_del(i_j)
        if i_j in self._cache:
            self._cache_evict(i_j)
        implicit_dependents = self._get_df_implicit_dependents(i_j)
        for k_l in implicit_dependents:
            if k_l in self._df_cache:
                # Delete from both caches, since the target has changed
                # This order matters, since the eviction needs to write
                # back to the dataframe according to the old value of the df
                # cache.
                if k_l in self._cache:
                    self._cache_evict(k_l)
                self._df_cache_del(k_l)
    def _refresh(self,T):
        """ Reindex into all the arguments for a given transformation. This
        ensures the sizes are up to date. This may be deprecated with
        refresh_index... """
        args = list(T.args)
        for i,df in enumerate(args):
            if isinstance(df,DataFrame):
                # Replace the stale argument with a freshly reindexed frame.
                d = self._reindex((df._row_query,df._col_query))
                args[i] = d
                query = (args[i]._row_query,args[i]._col_query)
                if query in self._cache:
                    self._safe_cache_evict(query)
        T.args = tuple(args)
        # NOTE: iteritems() is Python-2-only; only values of existing keys
        # are reassigned here, so mutating during iteration is safe.
        for k,df in T.kwargs.iteritems():
            if isinstance(df,DataFrame):
                d = self._reindex((df._row_query,df._col_query))
                T.kwargs[k] = d
def _refresh_index(self):
""" Re-index into the dataframe, and update the corresponding row/column
index and counts. This should be called whenever the DataFrame's size
has changed. """
new_df = self._reindex((self._row_query,self._col_query))
self._row_index = new_df._row_index
self._col_index = new_df._col_index
self._row_counts = new_df._row_counts
self._col_counts = new_df._col_counts
    def _propogate_stop(self,i_j):
        """ Stop all transformations dependent on node i_j (recursive).

        (Name keeps the existing "propogate" spelling — renaming would
        break external callers.)
        """
        # If node is already stopped, return
        if (self._graph.node[i_j]["status"] == self.STATUS_RED or
            self._graph.node[i_j]["status"] == self.STATUS_BLUE):
            return
        implicit_dependents = self._get_implicit_dependents(i_j)
        explicit_dependents = self._graph.successors(i_j)
        # Mark red before joining so the worker loop sees the stop request.
        self._graph.node[i_j]["status"] = self.STATUS_RED
        if self._graph.node[i_j]["transform"] is not None \
        and i_j in self._threads:
            self._threads[i_j].join()
        for v in implicit_dependents:
            self._propogate_stop(v)
        for v in explicit_dependents:
            self._propogate_stop(v)
def _propogate_start(self,i_j,ignore=None):
""" Start all transformations that depend on i_j, if possible. """
if (self._graph.node[v]["status"] != self.STATUS_RED \
for v in self._graph.predecessors(i_j)) and \
self._graph.node[i_j]["status"] == self.STATUS_RED and \
i_j != ignore:
implicit_dependents = self._get_implicit_dependents(i_j)
explicit_dependents = self._graph.successors(i_j)
# if all parents are green, we can restart if necessary and set
# self to green
if self._graph.node[i_j]["transform"] is not None:
# If its a transform, refresh and start it up
self._refresh(self._graph.node[i_j]["transform"])
df,r,c = self._last_query(i_j)
df[r,c] = self._graph.node[i_j]["transform"]
# set self to green
self._graph.node[i_j]["status"] = self.STATUS_GREEN
# recurse on all children
# Only check implicit descendents if this current node was actually
# rerun.
if self._graph.node[i_j]["transform"] is not None:
for v in implicit_dependents:
if self._graph.node[v]["status"] == self.STATUS_RED:
self._propogate_start(v,ignore)
for v in explicit_dependents:
if self._graph.node[v]["status"] == self.STATUS_RED:
self._propogate_start(v,ignore)
###########################################################################
# Cache related functions #
###########################################################################
# cache[i_j][0] is the actual cached entry
# cache[i_j][1] is the dataframe object for this entry at the time of query
def _is_cached(self):
""" Test whether the underlying matrix for the DataFrame is cached """
i_j = (self._row_query,self._col_query)
return i_j in self._cache
def _is_df_cached(self):
""" Test whether the DataFrame is cached """
i_j = (self._row_query,self._col_query)
return i_j in self._df_cache
    def _safe_cache_find_and_evict(self,i_j):
        """ Find every cache entry that depends on i_j and evict it.

        The search holds the read lock and the evictions hold the write
        lock; the two phases are not atomic as a whole, so an entry added
        between them is not evicted by this call.
        """
        self._cache_lock.acquire_read()
        try:
            evictions = self._cache_find_evictions(i_j)
        finally:
            self._cache_lock.release_read()
        if len(evictions) > 0:
            self._cache_lock.acquire_write()
            try:
                for j_k in evictions:
                    self._cache_evict(j_k)
            finally:
                self._cache_lock.release_write()
def _cache_find_evictions(self,i_j):
""" Find all cached entries that depend on node i_j """
# Same logic as get_implicit_dependents but searching the cache instead
if len(self._cache)==0:
return set()
i,j = i_j
(rows,cols) = self._get_full_rows_and_cols(i_j, ignore_df_cache=True)
# Return a list of nodes that have a common intersection with i_j
evictions = set()
for (i0_j0) in self._cache:
if DataFrame._node_directory_overlap(i_j,i0_j0):
(rows0,cols0) = self._get_full_rows_and_cols(i0_j0,ignore_df_cache=True)
if (any(r in rows for r in rows0) \
and any(c in cols for c in cols0)):
evictions.add(i0_j0)
return evictions
    def _safe_cache_evict(self,i_j):
        """ Evict node i_j from the cache while holding the write lock. """
        self._cache_lock.acquire_write()
        try:
            self._cache_evict(i_j)
        finally:
            self._cache_lock.release_write()
    def _cache_evict(self,i_j):
        """ Evict the matrix for node i_j from the cache, and write the
        cached data through to do the underlying DataFrame.

        Read-only entries are simply dropped; writable entries are written
        back to the rows/columns recorded at cache-insertion time.  Not
        lock-protected — use _safe_cache_evict for that.
        """
        if (i_j in self._cache):
            if(self._cache_readonly(i_j)):
                # If readonly, then just remove entry from cache
                self._cache_del(i_j)
            else:
                # Otherwise we need to rerwite to the underlying df
                M = self._cache_fetch(i_j)
                old_rows = self._cache_rows(i_j)
                old_cols = self._cache_cols(i_j)
                self._cache_del(i_j)
                # Remove from cache before setting in dataframe
                i,j = i_j
                assert(len(i) == len(j))
                df = self._reindex(i_j)
                df._write_matrix_to(M,old_rows,old_cols)
def _safe_cache_add(self,i_j,A,readonly=False):
""" Add the matrix A for node i_j into the cache """
self._cache_lock.acquire_write()
self._cache_add(i_j,A,readonly=readonly)
self._cache_lock.release_write()
def _cache_add(self,i_j,A,readonly=False):
""" Add the matrix A for node i_j into the cache """
df = self._reindex(i_j)
self._cache[i_j] = (A,
df._row_index.keys(),
df._col_index.keys(),
readonly)
def _cache_set(self,i_j,val):
if is_scalar(val):
A = self._cache[i_j]
A[0][:] = val
else:
self._cache[i_j] = (val,) + self._cache[i_j][1:]
def _cache_del(self,i_j):
""" Remove node i_j from the cache """
if i_j in self._cache:
del self._cache[i_j]
    def _cache_fetch(self,i_j):
        """ Retrieve the cached matrix for node i_j (entry slot 0). """
        return self._cache[i_j][0]
    def _cache_rows(self,i_j):
        """ Retrieve the rows for node i_j corresponding to the cached
        matrix (entry slot 1). """
        return self._cache[i_j][1]
    def _cache_cols(self,i_j):
        """ Retrieve the cols for node i_j corresponding to the cached
        matrix (entry slot 2). """
        return self._cache[i_j][2]
def _cache_readonly(self,i_j):
if i_j in self._cache:
return self._cache[i_j][3]
return False
    def _cache_set_readonly(self, i_j, tf):
        """ Set the readonly flag (entry slot 3) of the cached entry for i_j
        to the boolean tf; raises KeyError when i_j is not cached. """
        self._cache[i_j] = self._cache[i_j][:3] + (tf,)
def _df_cache_del(self,i_j):
""" Delete the entry for i_j in the df cache """
self._df_cache_lock.acquire()
if i_j in self._df_cache:
del self._df_cache[i_j]
self._df_cache_lock.release()
def _df_cache_add(self,i_j,df):
""" Add the entry for i_j in the df cache """
self._df_cache_lock.acquire()
self._df_cache[i_j] = df
self._df_cache_lock.release()
|
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on modern Django, urlpatterns is a plain list of
# url()/path() entries.  Left as-is since this project targets the old API.
from django.conf.urls import patterns, include, url
# NOTE(review): `include` and `admin` are imported but unused in this module.
from django.contrib import admin
from activity_bucket import views
# Route the site root to the activity form view.
urlpatterns = patterns('',
    url(r'^$', views.get_activity_data, name='the-form'),
)
|
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1alpha1_volume_restore import V1alpha1VolumeRestore
class TestV1alpha1VolumeRestore(unittest.TestCase):
    """Unit test stubs for the V1alpha1VolumeRestore model (generated)."""
    def setUp(self):
        """No fixtures required for these stubs."""
        pass
    def tearDown(self):
        """Nothing to clean up."""
        pass
    def testV1alpha1VolumeRestore(self):
        """Smoke-test construction of V1alpha1VolumeRestore.

        FIXME: construct object with mandatory attributes with example values.
        """
        #model = kubevirt.models.v1alpha1_volume_restore.V1alpha1VolumeRestore()
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.networkconnectivity_v1.services.hub_service import pagers
from google.cloud.networkconnectivity_v1.types import common
from google.cloud.networkconnectivity_v1.types import hub
from google.cloud.networkconnectivity_v1.types import hub as gcn_hub
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import HubServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import HubServiceGrpcTransport
from .transports.grpc_asyncio import HubServiceGrpcAsyncIOTransport
class HubServiceClientMeta(type):
    """Metaclass for the HubService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[HubServiceTransport]]
    _transport_registry["grpc"] = HubServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = HubServiceGrpcAsyncIOTransport

    # FIX: the default is None, so the annotation must be Optional[str]
    # (PEP 484 deprecates implicit Optional); Optional is already imported
    # at the top of this module.  Runtime behavior is unchanged.
    def get_transport_class(cls, label: Optional[str] = None,) -> Type[HubServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class HubServiceClient(metaclass=HubServiceClientMeta):
"""Network Connectivity Center is a hub-and-spoke abstraction
for network connectivity management in Google Cloud. It reduces
operational complexity through a simple, centralized
connectivity management model.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "networkconnectivity.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HubServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HubServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> HubServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            HubServiceTransport: The transport used by the client
            instance.
        """
        # self._transport is assigned once in __init__.
        return self._transport
@staticmethod
def hub_path(project: str, hub: str,) -> str:
"""Returns a fully-qualified hub string."""
return "projects/{project}/locations/global/hubs/{hub}".format(
project=project, hub=hub,
)
@staticmethod
def parse_hub_path(path: str) -> Dict[str, str]:
"""Parses a hub path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/global/hubs/(?P<hub>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def instance_path(project: str, zone: str, instance: str,) -> str:
"""Returns a fully-qualified instance string."""
return "projects/{project}/zones/{zone}/instances/{instance}".format(
project=project, zone=zone, instance=instance,
)
@staticmethod
def parse_instance_path(path: str) -> Dict[str, str]:
"""Parses a instance path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/zones/(?P<zone>.+?)/instances/(?P<instance>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def interconnect_attachment_path(
project: str, region: str, resource_id: str,
) -> str:
"""Returns a fully-qualified interconnect_attachment string."""
return "projects/{project}/regions/{region}/interconnectAttachments/{resource_id}".format(
project=project, region=region, resource_id=resource_id,
)
@staticmethod
def parse_interconnect_attachment_path(path: str) -> Dict[str, str]:
"""Parses a interconnect_attachment path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/interconnectAttachments/(?P<resource_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def network_path(project: str, resource_id: str,) -> str:
"""Returns a fully-qualified network string."""
return "projects/{project}/global/networks/{resource_id}".format(
project=project, resource_id=resource_id,
)
@staticmethod
def parse_network_path(path: str) -> Dict[str, str]:
"""Parses a network path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/global/networks/(?P<resource_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def spoke_path(project: str, location: str, spoke: str,) -> str:
"""Returns a fully-qualified spoke string."""
return "projects/{project}/locations/{location}/spokes/{spoke}".format(
project=project, location=location, spoke=spoke,
)
@staticmethod
def parse_spoke_path(path: str) -> Dict[str, str]:
"""Parses a spoke path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/spokes/(?P<spoke>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def vpn_tunnel_path(project: str, region: str, resource_id: str,) -> str:
"""Returns a fully-qualified vpn_tunnel string."""
return "projects/{project}/regions/{region}/vpnTunnels/{resource_id}".format(
project=project, region=region, resource_id=resource_id,
)
@staticmethod
def parse_vpn_tunnel_path(path: str) -> Dict[str, str]:
"""Parses a vpn_tunnel path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/vpnTunnels/(?P<resource_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, HubServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the hub service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, HubServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict for convenience, and
        # fall back to a default ClientOptions so attribute access below is safe.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Resolve the endpoint and mTLS client-cert source from client_options
        # and the GOOGLE_API_USE_* environment variables (must happen after the
        # client_options normalization above).
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, HubServiceTransport):
            # transport is a HubServiceTransport instance.
            # A pre-built transport already carries its own credentials/scopes,
            # so supplying them here as well would be ambiguous — reject it.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Deferred import: only needed when we build the transport ourselves.
            import google.auth._default  # type: ignore
            # hasattr guard: get_api_key_credentials only exists in newer
            # versions of google-auth.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def list_hubs(
self,
request: Union[hub.ListHubsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListHubsPager:
r"""Lists hubs in a given project.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_list_hubs():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.ListHubsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_hubs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.ListHubsRequest, dict]):
The request object. Request for
[HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]
method.
parent (str):
Required. The parent resource's name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.services.hub_service.pagers.ListHubsPager:
Response for
[HubService.ListHubs][google.cloud.networkconnectivity.v1.HubService.ListHubs]
method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.ListHubsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.ListHubsRequest):
request = hub.ListHubsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_hubs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListHubsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_hub(
self,
request: Union[hub.GetHubRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hub.Hub:
r"""Gets details about the specified hub.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_get_hub():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.GetHubRequest(
name="name_value",
)
# Make the request
response = client.get_hub(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.GetHubRequest, dict]):
The request object. Request for
[HubService.GetHub][google.cloud.networkconnectivity.v1.HubService.GetHub]
method.
name (str):
Required. The name of the hub
resource to get.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.types.Hub:
A hub is a collection of spokes. A
single hub can contain spokes from
multiple regions. However, if any of a
hub's spokes use the data transfer
feature, the resources associated with
those spokes must all reside in the same
VPC network. Spokes that do not use data
transfer can be associated with any VPC
network in your project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.GetHubRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.GetHubRequest):
request = hub.GetHubRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_hub]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_hub(
self,
request: Union[gcn_hub.CreateHubRequest, dict] = None,
*,
parent: str = None,
hub: gcn_hub.Hub = None,
hub_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new hub in the specified project.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_create_hub():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.CreateHubRequest(
parent="parent_value",
hub_id="hub_id_value",
)
# Make the request
operation = client.create_hub(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.CreateHubRequest, dict]):
The request object. Request for
[HubService.CreateHub][google.cloud.networkconnectivity.v1.HubService.CreateHub]
method.
parent (str):
Required. The parent resource.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
hub (google.cloud.networkconnectivity_v1.types.Hub):
Required. The initial values for a
new hub.
This corresponds to the ``hub`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
hub_id (str):
Required. A unique identifier for the
hub.
This corresponds to the ``hub_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.networkconnectivity_v1.types.Hub` A hub is a collection of spokes. A single hub can contain spokes from
multiple regions. However, if any of a hub's spokes
use the data transfer feature, the resources
associated with those spokes must all reside in the
same VPC network. Spokes that do not use data
transfer can be associated with any VPC network in
your project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, hub, hub_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcn_hub.CreateHubRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcn_hub.CreateHubRequest):
request = gcn_hub.CreateHubRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if hub is not None:
request.hub = hub
if hub_id is not None:
request.hub_id = hub_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_hub]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcn_hub.Hub,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def update_hub(
self,
request: Union[gcn_hub.UpdateHubRequest, dict] = None,
*,
hub: gcn_hub.Hub = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates the description and/or labels of the
specified hub.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_update_hub():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.UpdateHubRequest(
)
# Make the request
operation = client.update_hub(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.UpdateHubRequest, dict]):
The request object. Request for
[HubService.UpdateHub][google.cloud.networkconnectivity.v1.HubService.UpdateHub]
method.
hub (google.cloud.networkconnectivity_v1.types.Hub):
Required. The state that the hub
should be in after the update.
This corresponds to the ``hub`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. In the case of an update to an existing hub,
field mask is used to specify the fields to be
overwritten. The fields specified in the update_mask are
relative to the resource, not the full request. A field
is overwritten if it is in the mask. If the user does
not provide a mask, then all fields are overwritten.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.networkconnectivity_v1.types.Hub` A hub is a collection of spokes. A single hub can contain spokes from
multiple regions. However, if any of a hub's spokes
use the data transfer feature, the resources
associated with those spokes must all reside in the
same VPC network. Spokes that do not use data
transfer can be associated with any VPC network in
your project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([hub, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcn_hub.UpdateHubRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcn_hub.UpdateHubRequest):
request = gcn_hub.UpdateHubRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if hub is not None:
request.hub = hub
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_hub]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("hub.name", request.hub.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcn_hub.Hub,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def delete_hub(
self,
request: Union[hub.DeleteHubRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes the specified hub.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_delete_hub():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.DeleteHubRequest(
name="name_value",
)
# Make the request
operation = client.delete_hub(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.DeleteHubRequest, dict]):
The request object. The request for
[HubService.DeleteHub][google.cloud.networkconnectivity.v1.HubService.DeleteHub].
name (str):
Required. The name of the hub to
delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.DeleteHubRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.DeleteHubRequest):
request = hub.DeleteHubRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_hub]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
def list_spokes(
self,
request: Union[hub.ListSpokesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSpokesPager:
r"""Lists the spokes in the specified project and
location.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_list_spokes():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.ListSpokesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_spokes(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.ListSpokesRequest, dict]):
The request object. The request for
[HubService.ListSpokes][google.cloud.networkconnectivity.v1.HubService.ListSpokes].
parent (str):
Required. The parent resource.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.services.hub_service.pagers.ListSpokesPager:
The response for
[HubService.ListSpokes][google.cloud.networkconnectivity.v1.HubService.ListSpokes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.ListSpokesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.ListSpokesRequest):
request = hub.ListSpokesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_spokes]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSpokesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_spoke(
self,
request: Union[hub.GetSpokeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hub.Spoke:
r"""Gets details about the specified spoke.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_get_spoke():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.GetSpokeRequest(
name="name_value",
)
# Make the request
response = client.get_spoke(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.GetSpokeRequest, dict]):
The request object. The request for
[HubService.GetSpoke][google.cloud.networkconnectivity.v1.HubService.GetSpoke].
name (str):
Required. The name of the spoke
resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkconnectivity_v1.types.Spoke:
A spoke represents a connection between your Google Cloud network resources
and a non-Google-Cloud network.
When you create a spoke, you associate it with a hub.
You must also identify a value for exactly one of the
following fields:
- linked_vpn_tunnels
- linked_interconnect_attachments
- linked_router_appliance_instances
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.GetSpokeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.GetSpokeRequest):
request = hub.GetSpokeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_spoke]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_spoke(
self,
request: Union[hub.CreateSpokeRequest, dict] = None,
*,
parent: str = None,
spoke: hub.Spoke = None,
spoke_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a spoke in the specified project and
location.
.. code-block:: python
from google.cloud import networkconnectivity_v1
def sample_create_spoke():
# Create a client
client = networkconnectivity_v1.HubServiceClient()
# Initialize request argument(s)
request = networkconnectivity_v1.CreateSpokeRequest(
parent="parent_value",
spoke_id="spoke_id_value",
)
# Make the request
operation = client.create_spoke(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.networkconnectivity_v1.types.CreateSpokeRequest, dict]):
The request object. The request for
[HubService.CreateSpoke][google.cloud.networkconnectivity.v1.HubService.CreateSpoke].
parent (str):
Required. The parent resource.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
spoke (google.cloud.networkconnectivity_v1.types.Spoke):
Required. The initial values for a
new spoke.
This corresponds to the ``spoke`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
spoke_id (str):
Required. Unique id for the spoke to
create.
This corresponds to the ``spoke_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.networkconnectivity_v1.types.Spoke` A spoke represents a connection between your Google Cloud network resources
and a non-Google-Cloud network.
When you create a spoke, you associate it with a hub.
You must also identify a value for exactly one of the
following fields:
- linked_vpn_tunnels
- linked_interconnect_attachments
- linked_router_appliance_instances
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, spoke, spoke_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hub.CreateSpokeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, hub.CreateSpokeRequest):
request = hub.CreateSpokeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if spoke is not None:
request.spoke = spoke
if spoke_id is not None:
request.spoke_id = spoke_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_spoke]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
hub.Spoke,
metadata_type=common.OperationMetadata,
)
# Done; return the response.
return response
    def update_spoke(
        self,
        request: Union[hub.UpdateSpokeRequest, dict] = None,
        *,
        spoke: hub.Spoke = None,
        update_mask: field_mask_pb2.FieldMask = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Updates the parameters of the specified spoke.

        Args:
            request (Union[google.cloud.networkconnectivity_v1.types.UpdateSpokeRequest, dict]):
                The request object. Request for
                [HubService.UpdateSpoke][google.cloud.networkconnectivity.v1.HubService.UpdateSpoke]
                method.
            spoke (google.cloud.networkconnectivity_v1.types.Spoke):
                Required. The state that the spoke should be in after
                the update. Corresponds to the ``spoke`` field on
                ``request``; must not be set together with ``request``.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                Optional. Field mask selecting which fields of the
                existing spoke to overwrite (paths relative to the
                resource). When omitted, all fields are overwritten.
                Corresponds to the ``update_mask`` field on ``request``;
                must not be set together with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                A long-running operation whose result is a
                :class:`google.cloud.networkconnectivity_v1.types.Spoke`.

        Raises:
            ValueError: If ``request`` is provided together with any of
                the individual field arguments.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([spoke, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a hub.UpdateSpokeRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, hub.UpdateSpokeRequest):
            request = hub.UpdateSpokeRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if spoke is not None:
            request.spoke = spoke
        if update_mask is not None:
            request.update_mask = update_mask
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_spoke]
        # Certain fields should be provided within the metadata header;
        # add these here. The routing header tells the backend which
        # resource the request targets.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("spoke.name", request.spoke.name),)
            ),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            hub.Spoke,
            metadata_type=common.OperationMetadata,
        )
        # Done; return the response.
        return response
    def delete_spoke(
        self,
        request: Union[hub.DeleteSpokeRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Deletes the specified spoke.

        Args:
            request (Union[google.cloud.networkconnectivity_v1.types.DeleteSpokeRequest, dict]):
                The request object. The request for
                [HubService.DeleteSpoke][google.cloud.networkconnectivity.v1.HubService.DeleteSpoke].
            name (str):
                Required. The name of the spoke to delete. Corresponds
                to the ``name`` field on ``request``; must not be set
                together with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                A long-running operation whose result is
                :class:`google.protobuf.empty_pb2.Empty`.

        Raises:
            ValueError: If ``request`` is provided together with
                ``name``.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a hub.DeleteSpokeRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, hub.DeleteSpokeRequest):
            request = hub.DeleteSpokeRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete_spoke]
        # Certain fields should be provided within the metadata header;
        # add these here. The routing header tells the backend which
        # resource the request targets.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=common.OperationMetadata,
        )
        # Done; return the response.
        return response
    def __enter__(self):
        """Return the client itself for use in a ``with`` block."""
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Returning None (implicitly) means exceptions are not suppressed.
        self.transport.close()
# Report the installed package version in client telemetry headers.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-networkconnectivity",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout):
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = ("HubServiceClient",)
|
class SymTabEntry():
    """Symbol-table entry: a name plus the line numbers where it occurs
    and a free-form attribute mapping.

    Fixes vs. original: stray semicolons removed, PEP 8 spacing,
    documentation added. Public interface unchanged.
    """

    def __init__(self, name):
        """Create an entry for *name* with no recorded lines/attributes."""
        self.lines = []   # line numbers where the symbol appears
        self.attr = {}    # arbitrary per-symbol attributes
        self.name = name  # the symbol's identifier

    def appendLineNumber(self, nr):
        """Record that the symbol occurs on line *nr*."""
        self.lines.append(nr)

    def setattr(self, name, value):
        """Set attribute *name* to *value*."""
        self.attr[name] = value

    def getattr(self, name):
        """Return attribute *name*; raises KeyError if it was never set."""
        return self.attr[name]
|
import matplotlib.pyplot as plt
import numpy as np
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.PenningTrap_3D import penningtrap
from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order
from pySDC.playgrounds.Boris.penningtrap_HookClass import particles_output
def main():
    """
    Particle cloud in a penning trap, incl. live visualization.

    Sets up a single-level pySDC hierarchy with a Boris sweeper, runs the
    controller from t=0 to Tend, then plots the total energy over time to
    'penningtrap_energy.png'.
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['dt'] = 0.015625
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussLobatto
    sweeper_params['num_nodes'] = 3
    # initialize problem parameters for the Penning trap
    problem_params = dict()
    problem_params['omega_E'] = 4.9
    problem_params['omega_B'] = 25.0
    # NOTE(review): ragged nested list yields a numpy object array;
    # presumably the layout the penningtrap class expects — confirm.
    problem_params['u0'] = np.array([[10, 0, 0], [100, 0, 100], [1], [1]])
    problem_params['nparts'] = 10
    problem_params['sig'] = 0.1
    # NOTE(review): this Tend (16.0) differs from the run length used
    # below (128 * dt = 2.0) — verify which value is intended.
    problem_params['Tend'] = 16.0
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 20
    # initialize controller parameters
    controller_params = dict()
    controller_params['hook_class'] = particles_output  # specialized hook class for more statistics and output
    controller_params['logger_level'] = 30
    # Fill description dictionary for easy hierarchy creation
    description = dict()
    description['problem_class'] = penningtrap
    description['problem_params'] = problem_params
    description['sweeper_class'] = boris_2nd_order
    description['sweeper_params'] = sweeper_params
    description['level_params'] = level_params
    # description['space_transfer_class'] = particles_to_particles # this is only needed for more than 2 levels
    description['step_params'] = step_params
    # instantiate the controller (no controller parameters used here)
    controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
    # set time parameters
    t0 = 0.0
    Tend = 128 * 0.015625
    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_init()
    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # Extract total-energy statistics recorded by the hook, sorted by time.
    extract_stats = filter_stats(stats, type='etot')
    sortedlist_stats = sort_stats(extract_stats, sortby='time')
    energy = [entry[1] for entry in sortedlist_stats]
    # Plot energy over time steps and save to file.
    plt.figure()
    plt.plot(energy, 'bo--')
    plt.xlabel('Time')
    plt.ylabel('Energy')
    plt.savefig('penningtrap_energy.png', transparent=True, bbox_inches='tight')
# Run the Penning-trap demo only when executed as a script.
if __name__ == "__main__":
    main()
|
import log
import os
import socket
import subprocess
import tempfile
import time
import xml.dom.minidom
import xml.sax.saxutils
IDRSA_KEYLENGTH = "4096"
class XSContainerException(Exception):
    """Base exception raised by helpers in this module."""

    def customised(self):
        # Hook for subclasses; intentionally a no-op here.
        pass
def runlocal(cmd, shell=False, canfail=False):
    """Run *cmd* via subprocess and return (returncode, stdout, stderr).

    Raises XSContainerException('Command failed') on non-zero exit unless
    *canfail* is True. Both the command and its full output are logged.
    """
    log.debug('Running: %s' % (cmd))
    process = subprocess.Popen(cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=subprocess.PIPE,
                               shell=shell)
    # Feed an empty string to stdin and wait for completion.
    # NOTE(review): communicate('') passes a str; on Python 3 the default
    # byte pipes would need b'' — this module appears to target Python 2
    # (iteritems/unicode are used elsewhere in the file).
    stdout, stderr = process.communicate('')
    returncode = process.returncode
    log.info('Command %s exited with rc %d: Stdout: %s Stderr: %s' %
             (cmd, returncode, stdout, stderr))
    if returncode != 0 and not canfail:
        raise(XSContainerException('Command failed'))
    return (returncode, stdout, stderr)
def converttoxml(node, parentelement=None, dom=None):
    """Recursively serialise nested dicts/lists/scalars to an XML string.

    The top-level call (no parentelement/dom) creates a fresh Document and
    returns its XML text; recursive calls append children and return None.
    Python 2 only (uses dict.iteritems).
    """
    if not dom or not parentelement:
        dom = xml.dom.minidom.Document()
        converttoxml(node, parentelement=dom, dom=dom)
        return dom.toxml()
    if type(node) == list:
        for item in node:
            # Indicate items in a list with <item></item> tags
            item_node = dom.createElement("item")
            parentelement.appendChild(item_node)
            converttoxml(item, parentelement=item_node, dom=dom)
    elif type(node) == dict:
        for key, value in node.iteritems():
            # Workaround: XML element names may not
            # - start with numbers, may
            # - contain slashes
            # - start with punctuations, or 'xml'.
            # Package these in a special element 'SPECIAL_XS_ENCODED_ELEMENT'
            # and take the name as a key instead
            # @todo: add a faster regular expression for this
            if (key[0].isdigit() or
                    '/' in key or
                    key[0] in ['.', ':', '!', '?'] or
                    key.lower().startswith('xml')):
                element = dom.createElement('SPECIAL_XS_ENCODED_ELEMENT')
                element.setAttribute('name', key)
            else:
                element = dom.createElement(xml.sax.saxutils.escape(key))
            parentelement.appendChild(element)
            converttoxml(value, parentelement=element, dom=dom)
    elif type(node) in [str, bool, float, int] or node is None:
        textnode = dom.createTextNode(xml.sax.saxutils.escape(str(node)))
        parentelement.appendChild(textnode)
    else:
        # ignore
        # NOTE(review): on Python 2, unicode values fall through to this
        # branch and are silently dropped — confirm that is intended.
        pass
def create_idrsa():
    """Generate a fresh RSA key pair via ssh-keygen.

    Returns a tuple (private_key, public_key) of stripped strings; the
    temporary key files are removed again no matter what.
    """
    # Reserve a unique temporary path, then delete the file so that
    # ssh-keygen can create it itself (it refuses to overwrite).
    keypath = tempfile.mkstemp()[1]
    os.remove(keypath)
    pubpath = "%s.pub" % (keypath)
    runlocal(['ssh-keygen', '-f', keypath, '-b', IDRSA_KEYLENGTH, '-N', ''])
    try:
        private_key = read_file(keypath).strip()
        public_key = read_file(pubpath).strip()
    finally:
        # Never leave key material lying around in the temp directory.
        os.remove(keypath)
        os.remove(pubpath)
    return (private_key, public_key)
def read_file(filepath):
    """Return the entire contents of *filepath* as a string.

    Improvement: uses a ``with`` block so the handle is closed even if
    read() raises, unlike the previous open/read/close sequence.
    """
    with open(filepath, 'r') as filehandle:
        return filehandle.read()
def write_file(filepath, content):
    """Write *content* to *filepath*, creating parent directories.

    The file is chmod'ed to 0600 (owner read/write only) afterwards.

    Fixes vs. original:
    - a bare filename (no directory component) made os.path.split return
      an empty dirpath and os.makedirs('') raised OSError; now skipped.
    - uses ``with`` so the handle is closed even when write() raises.
    """
    dirpath, _ = os.path.split(filepath)
    if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)
    with open(filepath, "w+") as filehandle:
        filehandle.write(content)
    # Restrict permissions to the owner.
    os.chmod(filepath, 0o600)
def file_old_or_none_existent(path_of_file):
    """Return True when the file is missing or older than 60 seconds."""
    if not os.path.exists(path_of_file):
        return True
    age_seconds = time.time() - os.path.getmtime(path_of_file)
    return age_seconds > 60
def test_connection(address, port):
try:
asocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow the connection to block for 2 seconds
asocket.settimeout(2)
asocket.connect((address, port))
asocket.close()
return True
except (socket.error, socket.timeout):
return False
def make_iso(label, sourcedirectory, targetiso):
    """Build an ISO9660 image (Rock Ridge + Joliet) from a directory.

    Requires the mkisofs binary; a non-zero exit makes runlocal raise
    XSContainerException.
    """
    cmd = ['mkisofs', '-R', '-J', '-V', label,
           '-o', targetiso, sourcedirectory]
    runlocal(cmd)
def get_data_file_path(filename):
    """Return the absolute path of *filename* in the sibling 'data' dir.

    The data directory is expected at <module_dir>/../data/<filename>.
    """
    module_dir = os.path.split(__file__)[0]
    relative = os.path.join(module_dir, os.pardir, "data", filename)
    return os.path.abspath(relative)
def convert_dict_to_ascii(item):
    """Recursively convert unicode strings in a structure to byte strings.

    Dicts and lists are rebuilt with converted keys/values/items; any
    other type is returned unchanged. Python 2 only (iteritems, unicode).
    NOTE(review): str(item) raises UnicodeEncodeError for non-ASCII text.
    """
    if isinstance(item, dict):
        result = {}
        for key, value in item.iteritems():
            result[convert_dict_to_ascii(key)] = convert_dict_to_ascii(value)
    elif isinstance(item, list):
        result = list()
        for contained in item:
            result.append(convert_dict_to_ascii(contained))
    elif isinstance(item, unicode):
        result = str(item)
    else:
        result = item
    return result
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Replace the mistyped 'TransctionLog' model with 'TransactionLog'.

        South has no direct rename here: the old table is dropped and a
        new one created, so any existing rows in the old table are lost.
        """
        # Deleting model 'TransctionLog'
        db.delete_table(u'paiements_transctionlog')
        # Adding model 'TransactionLog'
        db.create_table(u'paiements_transactionlog', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('transaction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['paiements.Transaction'])),
            ('when', self.gf('django.db.models.fields.DateTimeField')()),
            ('extra_data', self.gf('django.db.models.fields.TextField')()),
            ('log_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal(u'paiements', ['TransactionLog'])
    def backwards(self, orm):
        """Undo forwards(): recreate the mistyped table and drop the new one.

        As in forwards(), this is a drop-and-create, so rows do not
        survive the round trip.
        """
        # Adding model 'TransctionLog'
        db.create_table(u'paiements_transctionlog', (
            ('transaction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['paiements.Transaction'])),
            ('when', self.gf('django.db.models.fields.DateTimeField')()),
            ('log_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('extra_data', self.gf('django.db.models.fields.TextField')()),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal(u'paiements', ['TransctionLog'])
        # Deleting model 'TransactionLog'
        db.delete_table(u'paiements_transactionlog')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configs.config': {
'Meta': {'object_name': 'Config'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admin_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_api': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_ipn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_request': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'test_mode': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'url_back_err': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_back_ok': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_ipn': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'paiements.transaction': {
'Meta': {'object_name': 'Transaction'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configs.Config']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_status': ('django.db.models.fields.CharField', [], {'default': "'cr'", 'max_length': '2'}),
'ipn_needed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_postfinance_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_user_back_from_postfinance_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_userforwarded_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'postfinance_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postfinance_status': ('django.db.models.fields.CharField', [], {'default': "'??'", 'max_length': '2'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'paiements.transactionlog': {
'Meta': {'object_name': 'TransactionLog'},
'extra_data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paiements.Transaction']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['paiements']
|
#!/usr/bin/env python
"""Packaging script for django-cached-manager (distutils-based)."""
from distutils.core import setup

setup(
    name='django-cached-manager',
    version='0.0.1',
    license='BSD',
    description='Django models manager that encapsulates some common caching operations',
    long_description=open('README.rst').read(),
    author='Vlad Starostin',
    author_email='drtyrsa@yandex.ru',
    packages=['cached_manager',
              'cached_manager.tests',
              'cached_manager.tests.utils'],
    classifiers=[
        # Fixed: 'Development Status :: 1 - Beta' is not a valid trove
        # classifier — '1' is Planning; Beta is status 4.
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook this test's output is compared against.
        self.set_filename('chart_layout02.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with user defined layout."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the generated XML matches the stored
        # reference file deterministically.
        chart.axis_ids = [68311296, 69198208]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
        # User-defined legend layout, as fractions of the chart area.
        chart.set_legend({
            'layout': {
                'x': 0.80197353455818021,
                'y': 0.37442403032954213,
                'width': 0.12858202099737534,
                'height': 0.25115157480314959,
            }
        })
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
|
import roan
from django.db import models
class Palestrante(models.Model):
    """Conference speaker ("palestrante")."""
    nome = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100, unique=True)
    headline = models.CharField(max_length=60)
    minicurriculo = models.CharField(max_length=1000)
    twitter = models.CharField(max_length=50, blank=True)
    foto = models.ImageField(upload_to=u"palestrantes")
    # Whether to show this speaker on the public speakers page.
    listagem = models.BooleanField(
        verbose_name=u"Exibir na página de palestrantes?",
        default=False,
    )

    def __repr__(self):
        return u'<Palestrante: "%s">' % self.nome

    def __unicode__(self):
        # Python 2 text representation.
        return self.nome
class Palestra(models.Model):
    """Conference talk ("palestra") with its schedule and speakers."""
    titulo = models.CharField(max_length=150, verbose_name=u"Título")
    slug = models.SlugField(max_length=150, unique=True)
    descricao = models.CharField(max_length=2000, verbose_name=u"Descrição")
    inicio = models.TimeField(verbose_name=u"Horário de início")
    termino = models.TimeField(verbose_name=u"Horário de término")
    palestrantes = models.ManyToManyField(Palestrante,
                                          blank=True,
                                          related_name="palestras")

    def nomes_palestrantes(self, palestrantes=None):
        """Return speaker names joined Portuguese-style: "A, B e C".

        NOTE(review): a comma inside a speaker's name would confuse the
        rfind-based replacement of the final separator.
        """
        palestrantes = palestrantes or self.palestrantes.order_by("nome")
        nomes = [p.nome for p in palestrantes]
        nomes = ", ".join(nomes)
        if "," in nomes:
            # Replace the last ", " with " e " ("and").
            indice = nomes.rfind(",")
            nomes = "%s e %s" % (nomes[:indice], nomes[indice + 2:])
        return nomes

    def __repr__(self):
        return u"<Palestra: %s>" % self.titulo

    def __unicode__(self):
        # Python 2 text representation.
        return self.titulo

    def get_absolute_url(self):
        """Return /programacao/<speaker-slugs>/<talk-slug>/, or "#" when
        the talk has no speakers."""
        palestrantes = self.palestrantes.order_by("nome")
        prefixo = "/".join([p.slug for p in palestrantes])
        if prefixo:
            return "/programacao/%s/%s/" % (prefixo, self.slug)
        return "#"
# Invalidate the cached listing pages whenever a speaker or talk changes.
roan.purge("/palestrantes/").on_save(Palestrante)
roan.purge("/palestrantes/").on_delete(Palestrante)
roan.purge("/programacao/").on_save(Palestra)
roan.purge("/programacao/").on_delete(Palestra)
|
from __future__ import print_function
import os
RPMBUILDER_USER = 'rpmbuilder'
def rpmbuilder_user_exists():
    """Return True when RPMBUILDER_USER has an entry in /etc/passwd.

    Improvements: 'with' closes the file on every path (the original
    leaked the handle if readlines() raised), and the file is iterated
    lazily instead of being read fully into memory.
    """
    with open("/etc/passwd", 'r') as passwd:
        for line in passwd:
            # Field 0 of a passwd entry is the login name.
            if line.split(":")[0] == RPMBUILDER_USER:
                return True
    return False
def create_rpmbuilder_user():
    """Create the rpmbuilder account via useradd (requires root).

    NOTE(review): the useradd exit status is not checked, so a failure
    still prints "[OK]".
    """
    print("Creating rpmbuilder user ...", end="")
    os.system("useradd %s" % RPMBUILDER_USER)
    print(" [OK].")
# Entry point: create the build user only when it is missing.
if __name__ == "__main__":
    if not rpmbuilder_user_exists():
        create_rpmbuilder_user()
    else:
        print("The user rpmbuilder already exists.")
|
def getcodec(*args, **kwargs): # real signature unknown
    """Stub for the C-implemented codec factory; real signature unknown."""
    pass

# Auto-generated stub for a C extension codec module; the real map values
# are opaque binary tables elided by the stub generator.
__map_big5 = None # (!) real value is ''
__map_cp950ext = None # (!) real value is ''
|
# Legacy Django (<3.2) hook pointing at this app's AppConfig subclass.
default_app_config = 'xcblog.apps.xcblogConfig'
|
from __future__ import absolute_import
import unittest
import numpy as np
import os
import mdtraj
from itertools import combinations, product
from pyemma.coordinates.data.featurizer import MDFeaturizer, CustomFeature, _parse_pairwise_input
from six.moves import range
import pkg_resources
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
xtcfile = os.path.join(path, 'bpti_mini.xtc')
pdbfile = os.path.join(path, 'bpti_ca.pdb')
asn_leu_pdb = """
ATOM 559 N ASN A 69 19.168 -0.936 -10.274 1.00 27.50 N
ATOM 560 CA ASN A 69 20.356 -0.049 -10.419 1.00 25.52 C
ATOM 561 C ASN A 69 21.572 -0.418 -9.653 1.00 24.26 C
ATOM 562 O ASN A 69 22.687 -0.336 -10.171 1.00 24.33 O
ATOM 563 CB ASN A 69 19.965 1.410 -10.149 1.00 26.49 C
ATOM 564 CG ASN A 69 18.932 1.881 -11.124 1.00 26.35 C
ATOM 565 OD1 ASN A 69 18.835 1.322 -12.224 1.00 26.77 O
ATOM 566 ND2 ASN A 69 18.131 2.864 -10.745 1.00 24.85 N
ATOM 567 N LEU A 70 21.419 -0.824 -8.404 1.00 23.02 N
ATOM 568 CA LEU A 70 22.592 -1.275 -7.656 1.00 23.37 C
ATOM 569 C LEU A 70 23.391 -2.325 -8.448 1.00 25.78 C
ATOM 570 O LEU A 70 24.647 -2.315 -8.430 1.00 25.47 O
ATOM 571 CB LEU A 70 22.202 -1.897 -6.306 1.00 22.17 C
ATOM 572 CG LEU A 70 23.335 -2.560 -5.519 1.00 22.49 C
ATOM 573 CD1 LEU A 70 24.578 -1.665 -5.335 1.00 22.56 C
ATOM 574 CD2 LEU A 70 22.853 -3.108 -4.147 1.00 24.47 C
""" *2 ### asn-leu-asn-leu
def verbose_assertion_minrmsd(ref_Y, test_Y, test_obj):
    """Assert each column of test_Y matches ref_Y within test_obj.atol.

    On failure the message reports the largest single deviation and the
    frame index where it occurs.
    """
    for col in np.arange(test_Y.shape[1]):
        deviation = ref_Y - test_Y[:, col]
        worst = np.argmax(np.abs(deviation))
        message = ('Largest discrepancy between reference (ref_frame %u)'
                   ' and test: %8.2e, for the pair %f, %f at frame %u' %
                   (test_obj.ref_frame,
                    deviation[worst],
                    ref_Y[worst], test_Y[worst, col], worst))
        assert np.allclose(ref_Y, test_Y[:, col], atol=test_obj.atol), message
class TestFeaturizer(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Write the ASN-LEU PDB to disk and synthesise a noisy 4001-frame
        trajectory from it; both are shared by all tests of this class."""
        import tempfile
        cls.asn_leu_pdbfile = tempfile.mkstemp(suffix=".pdb")[1]
        with open(cls.asn_leu_pdbfile, 'w') as fh:
            fh.write(asn_leu_pdb)
        # NOTE(review): tempfile.mktemp() is race-prone and deprecated;
        # mkstemp() would be safer here.
        cls.asn_leu_traj = tempfile.mktemp(suffix='.xtc')
        # create traj for asn_leu
        n_frames = 4001
        traj = mdtraj.load(cls.asn_leu_pdbfile)
        ref = traj.xyz
        # Each frame is the reference structure plus uniform [0,1) noise.
        new_xyz = np.empty((n_frames, ref.shape[1], 3))
        noise = np.random.random(new_xyz.shape)
        new_xyz[:, :,: ] = noise + ref
        traj.xyz=new_xyz
        traj.time=np.arange(n_frames)
        traj.save(cls.asn_leu_traj)
        super(TestFeaturizer, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        """Remove the temporary PDB written in setUpClass.

        NOTE(review): cls.asn_leu_traj is never deleted and is left
        behind in the temp directory.
        """
        try:
            os.unlink(cls.asn_leu_pdbfile)
        except EnvironmentError:
            # Best effort: the file may already be gone.
            pass
        super(TestFeaturizer, cls).tearDownClass()
def setUp(self):
self.pdbfile = pdbfile
self.traj = mdtraj.load(xtcfile, top=self.pdbfile)
self.feat = MDFeaturizer(self.pdbfile)
self.atol = 1e-5
self.ref_frame = 0
self.atom_indices = np.arange(0, self.traj.n_atoms/2)
    def test_select_backbone(self):
        """select_Backbone() runs without raising.

        NOTE(review): the result is never asserted on — this only checks
        that the call does not raise.
        """
        inds = self.feat.select_Backbone()
    def test_select_all(self):
        """add_all() maps each frame to all n_atoms * 3 coordinates."""
        self.feat.add_all()
        assert (self.feat.dimension() == self.traj.n_atoms * 3)
        # Reference: raw coordinates flattened per frame.
        refmap = np.reshape(self.traj.xyz, (len(self.traj), self.traj.n_atoms * 3))
        assert (np.all(refmap == self.feat.map(self.traj)))
    def test_select(self):
        """add_selection() maps only the chosen atoms' coordinates."""
        sel = np.array([1, 2, 5, 20], dtype=int)
        self.feat.add_selection(sel)
        assert (self.feat.dimension() == sel.shape[0] * 3)
        refmap = np.reshape(self.traj.xyz[:, sel, :], (len(self.traj), sel.shape[0] * 3))
        assert (np.all(refmap == self.feat.map(self.traj)))
    def test_distances(self):
        """Pairwise distances match a direct numpy computation."""
        sel = np.array([1, 2, 5, 20], dtype=int)
        # With excluded_neighbors=2, close-in-index pairs such as (1, 2)
        # are dropped from the pair list.
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_distances(pairs, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(D, self.feat.map(self.traj)))
    def test_inverse_distances(self):
        """Inverse pairwise distances match 1/d from a direct computation."""
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_inverse_distances(pairs, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        Dinv = 1.0/np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(Dinv, self.feat.map(self.traj)))
    def test_ca_distances(self):
        """add_distances_ca() covers all C-alpha pairs of this CA-only traj."""
        sel = self.feat.select_Ca()
        assert(np.all(sel == list(range(self.traj.n_atoms))))  # should be all for this Ca-traj
        pairs = self.feat.pairs(sel, excluded_neighbors=0)
        self.feat.add_distances_ca(periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs.shape[0])
        X = self.traj.xyz[:, pairs[:, 0], :]
        Y = self.traj.xyz[:, pairs[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        assert(np.allclose(D, self.feat.map(self.traj)))
    def test_contacts(self):
        """add_contacts() yields a 0/1 map thresholded at 0.5 distance."""
        sel = np.array([1, 2, 5, 20], dtype=int)
        pairs_expected = np.array([[1, 5], [1, 20], [2, 5], [2, 20], [5, 20]])
        pairs = self.feat.pairs(sel, excluded_neighbors=2)
        assert(pairs.shape == pairs_expected.shape)
        assert(np.all(pairs == pairs_expected))
        self.feat.add_contacts(pairs, threshold=0.5, periodic=False)  # unperiodic distances such that we can compare
        assert(self.feat.dimension() == pairs_expected.shape[0])
        X = self.traj.xyz[:, pairs_expected[:, 0], :]
        Y = self.traj.xyz[:, pairs_expected[:, 1], :]
        D = np.sqrt(np.sum((X - Y) ** 2, axis=2))
        # Reference contact map: 1 where the distance is <= threshold.
        C = np.zeros(D.shape)
        I = np.argwhere(D <= 0.5)
        C[I[:, 0], I[:, 1]] = 1.0
        assert(np.allclose(C, self.feat.map(self.traj)))
    def test_angles(self):
        """Angles (radians) stay in [-pi, pi] and match the description."""
        sel = np.array([[1, 2, 5],
                        [1, 3, 8],
                        [2, 9, 10]], dtype=int)
        self.feat.add_angles(sel)
        assert(self.feat.dimension() == sel.shape[0])
        Y = self.feat.map(self.traj)
        assert(np.alltrue(Y >= -np.pi))
        assert(np.alltrue(Y <= np.pi))
        self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_angles_deg(self):
sel = np.array([[1, 2, 5],
[1, 3, 8],
[2, 9, 10]], dtype=int)
self.feat.add_angles(sel, deg=True)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -180.0))
assert(np.alltrue(Y <= 180.0))
def test_angles_cossin(self):
sel = np.array([[1, 2, 5],
[1, 3, 8],
[2, 9, 10]], dtype=int)
self.feat.add_angles(sel, cossin=True)
assert(self.feat.dimension() == 2 * sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_dihedrals(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_dihedrals_deg(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel, deg=True)
assert(self.feat.dimension() == sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -180.0))
assert(np.alltrue(Y <= 180.0))
self.assertEqual(len(self.feat.describe()), self.feat.dimension())
def test_dihedrials_cossin(self):
sel = np.array([[1, 2, 5, 6],
[1, 3, 8, 9],
[2, 9, 10, 12]], dtype=int)
self.feat.add_dihedrals(sel, cossin=True)
assert(self.feat.dimension() == 2 * sel.shape[0])
Y = self.feat.map(self.traj)
assert(np.alltrue(Y >= -np.pi))
assert(np.alltrue(Y <= np.pi))
desc = self.feat.describe()
self.assertEqual(len(desc), self.feat.dimension())
def test_backbone_dihedrals(self):
    """Backbone phi/psi torsions must lie within [-pi, pi]."""
    self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
    self.feat.add_backbone_torsions()
    traj = mdtraj.load(self.asn_leu_pdbfile)
    Y = self.feat.map(traj)
    # np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
    # supported equivalent (same semantics).
    assert(np.all(Y >= -np.pi))
    assert(np.all(Y <= np.pi))
    desc = self.feat.describe()
    self.assertEqual(len(desc), self.feat.dimension())

def test_backbone_dihedrals_deg(self):
    """Backbone torsions in degrees must lie within [-180, 180]."""
    self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
    self.feat.add_backbone_torsions(deg=True)
    traj = mdtraj.load(self.asn_leu_pdbfile)
    Y = self.feat.map(traj)
    assert(np.all(Y >= -180.0))
    assert(np.all(Y <= 180.0))
    desc = self.feat.describe()
    self.assertEqual(len(desc), self.feat.dimension())

def test_backbone_dihedrals_cossin(self):
    """cossin=True yields interleaved COS/SIN features of the torsions."""
    self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
    self.feat.add_backbone_torsions(cossin=True)
    traj = mdtraj.load(self.asn_leu_traj, top=self.asn_leu_pdbfile)
    Y = self.feat.map(traj)
    self.assertEqual(Y.shape, (len(traj), 3*4))  # (3 phi + 3 psi)*2 [cos, sin]
    assert(np.all(Y >= -np.pi))
    assert(np.all(Y <= np.pi))
    desc = self.feat.describe()
    assert "COS" in desc[0]
    assert "SIN" in desc[1]
    self.assertEqual(len(desc), self.feat.dimension())

def test_backbone_dihedrials_chi(self):
    """Chi1 sidechain torsions must lie within [-pi, pi].

    (Name keeps the historical misspelling so test discovery is unchanged.)
    """
    self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
    self.feat.add_chi1_torsions()
    traj = mdtraj.load(self.asn_leu_pdbfile)
    Y = self.feat.map(traj)
    assert(np.all(Y >= -np.pi))
    assert(np.all(Y <= np.pi))
    desc = self.feat.describe()
    self.assertEqual(len(desc), self.feat.dimension())

def test_backbone_dihedrials_chi_cossin(self):
    """Chi1 torsions with cossin=True produce labelled COS/SIN features."""
    self.feat = MDFeaturizer(topfile=self.asn_leu_pdbfile)
    self.feat.add_chi1_torsions(cossin=True)
    traj = mdtraj.load(self.asn_leu_pdbfile)
    Y = self.feat.map(traj)
    assert(np.all(Y >= -np.pi))
    assert(np.all(Y <= np.pi))
    desc = self.feat.describe()
    assert "COS" in desc[0]
    assert "SIN" in desc[1]
    self.assertEqual(len(desc), self.feat.dimension())
def test_custom_feature(self):
    """Placeholder: CustomFeature mapping is not covered here yet."""
    # TODO: test me
    pass
def test_MinRmsd(self):
    """minRMSD features (Trajectory and file input) must match mdtraj.rmsd."""
    # Test the Trajectory-input variant
    self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame])
    # and the file-input variant
    self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame)
    test_Y = self.feat.map(self.traj).squeeze()
    # now the reference
    ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame])
    verbose_assertion_minrmsd(ref_Y, test_Y, self)
    # Two features were added, so dimension and description are both 2.
    assert self.feat.dimension() == 2
    assert len(self.feat.describe()) == 2

def test_MinRmsd_with_atom_indices(self):
    """Same as test_MinRmsd, restricted to an atom selection."""
    # Test the Trajectory-input variant
    self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame], atom_indices=self.atom_indices)
    # and the file-input variant
    self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame, atom_indices=self.atom_indices)
    test_Y = self.feat.map(self.traj).squeeze()
    # now the reference
    ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame], atom_indices=self.atom_indices)
    verbose_assertion_minrmsd(ref_Y, test_Y, self)
    assert self.feat.dimension() == 2
    assert len(self.feat.describe()) == 2

def test_MinRmsd_with_atom_indices_precentered(self):
    """Same as above with precentered=True passed through to mdtraj."""
    # Test the Trajectory-input variant
    self.feat.add_minrmsd_to_ref(self.traj[self.ref_frame], atom_indices=self.atom_indices, precentered=True)
    # and the file-input variant
    self.feat.add_minrmsd_to_ref(xtcfile, ref_frame=self.ref_frame, atom_indices=self.atom_indices, precentered=True)
    test_Y = self.feat.map(self.traj).squeeze()
    # now the reference
    ref_Y = mdtraj.rmsd(self.traj, self.traj[self.ref_frame], atom_indices=self.atom_indices, precentered=True)
    verbose_assertion_minrmsd(ref_Y, test_Y, self)
    assert self.feat.dimension() == 2
    assert len(self.feat.describe()) == 2
def test_Residue_Mindist_Ca_all(self):
    """Residue mindist (ca scheme) must match mdtraj.compute_contacts."""
    # Removed an unused local (n_ca = self.feat.topology.n_atoms) that was
    # never read.
    self.feat.add_residue_mindist(scheme='ca')
    D = self.feat.map(self.traj)
    Dref = mdtraj.compute_contacts(self.traj, scheme='ca')[0]
    assert np.allclose(D, Dref)
    assert len(self.feat.describe()) == self.feat.dimension()

def test_Residue_Mindist_Ca_all_threshold(self):
    """With a threshold, the feature becomes a binary contact map."""
    threshold = .7
    self.feat.add_residue_mindist(scheme='ca', threshold=threshold)
    D = self.feat.map(self.traj)
    Dref = mdtraj.compute_contacts(self.traj, scheme='ca')[0]
    # Binarize the reference distances with the same threshold.
    Dbinary = np.zeros_like(Dref)
    I = np.argwhere(Dref <= threshold)
    Dbinary[I[:, 0], I[:, 1]] = 1
    assert np.allclose(D, Dbinary)
    assert len(self.feat.describe()) == self.feat.dimension()

def test_Residue_Mindist_Ca_array(self):
    """An explicit residue-pair array restricts the computed distances."""
    contacts = np.array([[20, 10, ], [10, 0]])
    self.feat.add_residue_mindist(scheme='ca', residue_pairs=contacts)
    D = self.feat.map(self.traj)
    Dref = mdtraj.compute_contacts(self.traj, scheme='ca', contacts=contacts)[0]
    assert np.allclose(D, Dref)
    assert len(self.feat.describe()) == self.feat.dimension()
def test_Group_Mindist_One_Group(self):
    """Minimum distance within a single group (duplicates must be ignored)."""
    group0 = [0, 20, 30, 0]
    self.feat.add_group_mindist(group_definitions=[group0])  # Even with duplicates
    D = self.feat.map(self.traj)
    # Reference: minimum over all unique intra-group pair distances.
    dist_list = list(combinations(np.unique(group0), 2))
    Dref = mdtraj.compute_distances(self.traj, dist_list)
    assert np.allclose(D.squeeze(), Dref.min(1))
    assert len(self.feat.describe()) == self.feat.dimension()

def test_Group_Mindist_All_Three_Groups(self):
    """Without group_pairs, all group combinations (01, 02, 12) are used."""
    group0 = [0, 20, 30, 0]
    group1 = [1, 21, 31, 1]
    group2 = [2, 22, 32, 2]
    self.feat.add_group_mindist(group_definitions=[group0, group1, group2])
    D = self.feat.map(self.traj)
    # Now the references, computed separately for each combination of groups
    dist_list_01 = np.array(list(product(np.unique(group0), np.unique(group1))))
    dist_list_02 = np.array(list(product(np.unique(group0), np.unique(group2))))
    dist_list_12 = np.array(list(product(np.unique(group1), np.unique(group2))))
    Dref_01 = mdtraj.compute_distances(self.traj, dist_list_01).min(1)
    Dref_02 = mdtraj.compute_distances(self.traj, dist_list_02).min(1)
    Dref_12 = mdtraj.compute_distances(self.traj, dist_list_12).min(1)
    Dref = np.vstack((Dref_01, Dref_02, Dref_12)).T
    assert np.allclose(D.squeeze(), Dref)
    assert len(self.feat.describe()) == self.feat.dimension()

def test_Group_Mindist_All_Three_Groups_threshold(self):
    """With a threshold, group mindists become a binary contact map."""
    threshold = .7
    group0 = [0, 20, 30, 0]
    group1 = [1, 21, 31, 1]
    group2 = [2, 22, 32, 2]
    self.feat.add_group_mindist(group_definitions=[group0, group1, group2], threshold=threshold)
    D = self.feat.map(self.traj)
    # Now the references, computed separately for each combination of groups
    dist_list_01 = np.array(list(product(np.unique(group0), np.unique(group1))))
    dist_list_02 = np.array(list(product(np.unique(group0), np.unique(group2))))
    dist_list_12 = np.array(list(product(np.unique(group1), np.unique(group2))))
    Dref_01 = mdtraj.compute_distances(self.traj, dist_list_01).min(1)
    Dref_02 = mdtraj.compute_distances(self.traj, dist_list_02).min(1)
    Dref_12 = mdtraj.compute_distances(self.traj, dist_list_12).min(1)
    Dref = np.vstack((Dref_01, Dref_02, Dref_12)).T
    # Binarize the reference with the same threshold.
    Dbinary = np.zeros_like(Dref)
    I = np.argwhere(Dref <= threshold)
    Dbinary[I[:, 0], I[:, 1]] = 1
    assert np.allclose(D, Dbinary)
    assert len(self.feat.describe()) == self.feat.dimension()

def test_Group_Mindist_Some_Three_Groups(self):
    """group_pairs selects which group combinations are computed, in order."""
    group0 = [0, 20, 30, 0]
    group1 = [1, 21, 31, 1]
    group2 = [2, 22, 32, 2]
    # Note [2, 2]: a group paired with itself uses intra-group combinations.
    group_pairs = np.array([[0, 1],
                            [2, 2],
                            [0, 2]])
    self.feat.add_group_mindist(group_definitions=[group0, group1, group2], group_pairs=group_pairs)
    D = self.feat.map(self.traj)
    # Now the references, computed separately for each combination of groups
    dist_list_01 = np.array(list(product(np.unique(group0), np.unique(group1))))
    dist_list_02 = np.array(list(product(np.unique(group0), np.unique(group2))))
    dist_list_22 = np.array(list(combinations(np.unique(group2), 2)))
    Dref_01 = mdtraj.compute_distances(self.traj, dist_list_01).min(1)
    Dref_02 = mdtraj.compute_distances(self.traj, dist_list_02).min(1)
    Dref_22 = mdtraj.compute_distances(self.traj, dist_list_22).min(1)
    # Column order follows group_pairs: (0,1), (2,2), (0,2).
    Dref = np.vstack((Dref_01, Dref_22, Dref_02)).T
    assert np.allclose(D.squeeze(), Dref)
    assert len(self.feat.describe()) == self.feat.dimension()
class TestFeaturizerNoDubs(unittest.TestCase):
    """Checks that adding the same feature twice does not duplicate it."""

    def testAddFeaturesWithDuplicates(self):
        """this tests adds multiple features twice (eg. same indices) and
        checks whether they are rejected or not"""
        featurizer = MDFeaturizer(pdbfile)
        expected_active = 1
        featurizer.add_angles([[0, 1, 2], [0, 3, 4]])
        featurizer.add_angles([[0, 1, 2], [0, 3, 4]])
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_contacts([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_contacts([[0, 1], [0, 3]])
        self.assertEqual(len(featurizer.active_features), expected_active)
        # try to fool it with ca selection
        ca = featurizer.select_Ca()
        ca = featurizer.pairs(ca, excluded_neighbors=0)
        featurizer.add_distances(ca)
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_distances_ca()
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_inverse_distances([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_distances([[0, 1], [0, 3]])
        expected_active += 1
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_distances([[0, 1], [0, 3]])
        self.assertEqual(len(featurizer.active_features), expected_active)

        def my_func(x):
            return x - 1

        def foo(x):
            return x - 1

        expected_active += 1
        my_feature = CustomFeature(my_func)
        my_feature.dimension = 3
        featurizer.add_custom_feature(my_feature)
        self.assertEqual(len(featurizer.active_features), expected_active)
        featurizer.add_custom_feature(my_feature)
        self.assertEqual(len(featurizer.active_features), expected_active)
        # since myfunc and foo are different functions, it should be added
        expected_active += 1
        foo_feat = CustomFeature(foo, dim=3)
        featurizer.add_custom_feature(foo_feat)
        self.assertEqual(len(featurizer.active_features), expected_active)
        expected_active += 1
        ref = mdtraj.load(xtcfile, top=pdbfile)
        featurizer.add_minrmsd_to_ref(ref)
        featurizer.add_minrmsd_to_ref(ref)
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual throughout.
        self.assertEqual(len(featurizer.active_features), expected_active)
        expected_active += 1
        featurizer.add_minrmsd_to_ref(pdbfile)
        featurizer.add_minrmsd_to_ref(pdbfile)
        self.assertEqual(len(featurizer.active_features), expected_active)
        expected_active += 1
        featurizer.add_residue_mindist()
        featurizer.add_residue_mindist()
        self.assertEqual(len(featurizer.active_features), expected_active)
        expected_active += 1
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        self.assertEqual(len(featurizer.active_features), expected_active)

    def test_labels(self):
        """ just checks for exceptions """
        featurizer = MDFeaturizer(pdbfile)
        featurizer.add_angles([[1, 2, 3], [4, 5, 6]])
        featurizer.add_backbone_torsions()
        featurizer.add_contacts([[0, 1], [0, 3]])
        featurizer.add_distances([[0, 1], [0, 3]])
        featurizer.add_inverse_distances([[0, 1], [0, 3]])
        cs = CustomFeature(lambda x: x - 1, dim=3)
        featurizer.add_custom_feature(cs)
        featurizer.add_minrmsd_to_ref(pdbfile)
        featurizer.add_residue_mindist()
        featurizer.add_group_mindist([[0, 1], [0, 2]])
        featurizer.describe()
class TestPairwiseInputParser(unittest.TestCase):
    """Exercises _parse_pairwise_input with explicit pairs and group inputs."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)

    def test_trivial(self):
        """An explicit (n, 2) pair array is passed through unchanged."""
        explicit_pairs = np.array([[0, 1],
                                   [0, 2],
                                   [0, 3]])
        parsed = _parse_pairwise_input(explicit_pairs, None, self.feat._logger)
        assert np.allclose(explicit_pairs, parsed)

    def test_one_unique(self):
        """A single group (list or array) yields all pairwise combinations."""
        for members in ([0, 1, 2], np.array([0, 1, 2])):
            expected = np.asarray(list(combinations(members, 2)))
            parsed = _parse_pairwise_input(members, None, self.feat._logger)
            assert np.allclose(expected, parsed)

    def test_two_uniques(self):
        """Two groups (list or array) yield their full cartesian product."""
        for grp_a, grp_b in (([0, 1, 2], [3, 4, 5]),
                             (np.array([0, 1, 2]), np.array([3, 4, 5]))):
            expected = np.asarray(list(product(grp_a, grp_b)))
            parsed = _parse_pairwise_input(grp_a, grp_b, self.feat._logger)
            assert np.allclose(expected, parsed)

    def test_two_redundants(self):
        """Duplicate members inside either group are ignored."""
        grp_a = np.array([0, 1, 2, 0])
        grp_b = np.array([3, 4, 5, 4])
        expected = np.asarray(list(product(np.unique(grp_a),
                                           np.unique(grp_b))))
        parsed = _parse_pairwise_input(grp_a, grp_b, self.feat._logger)
        assert np.allclose(expected, parsed)

    def test_two_redundants_overlap(self):
        """Members present in both groups are not paired with themselves."""
        grp_a = np.array([0, 1, 2, 0])
        grp_b = np.array([3, 4, 5, 4, 0, 1])
        expected = np.asarray(list(product(np.unique(grp_a),
                                           np.unique(grp_b[:-2])
                                           )))
        parsed = _parse_pairwise_input(grp_a, grp_b, self.feat._logger)
        assert np.allclose(expected, parsed)
class TestStaticMethods(unittest.TestCase):
    """Checks the static pair-generation helper of the featurizer."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)

    def test_pairs(self):
        """pairs() must list all i<j pairs separated by > excluded_neighbors."""
        n_at = 5
        # excluded_neighbors=3 leaves only the single pair (0, 4).
        pairs = self.feat.pairs(np.arange(n_at), excluded_neighbors=3)
        assert np.allclose(pairs, [0, 4])
        pairs = self.feat.pairs(np.arange(n_at), excluded_neighbors=2)
        assert np.allclose(pairs, [[0, 3], [0, 4],
                                   [1, 4]])
        pairs = self.feat.pairs(np.arange(n_at), excluded_neighbors=1)
        assert np.allclose(pairs, [[0, 2], [0, 3], [0, 4],
                                   [1, 3], [1, 4],
                                   [2, 4]])
        # excluded_neighbors=0 gives the full upper triangle.
        pairs = self.feat.pairs(np.arange(n_at), excluded_neighbors=0)
        assert np.allclose(pairs, [[0, 1], [0, 2], [0, 3], [0, 4],
                                   [1, 2], [1, 3], [1, 4],
                                   [2, 3], [2, 4],
                                   [3, 4]])
def some_call_to_mdtraj_some_operations_some_linalg(traj, pairs, means, U):
    """Reference pipeline: pair distances -> mean-free -> projection onto U.

    Returns the projected coordinates as float32.
    """
    dists = mdtraj.compute_distances(traj, pairs)
    mean_free = dists - means
    projected = (U.T.dot(mean_free.T)).T
    return projected.astype('float32')
class TestCustomFeature(unittest.TestCase):
    """Checks add_custom_func against calling the wrapped function directly."""

    def setUp(self):
        self.feat = MDFeaturizer(pdbfile)
        self.traj = mdtraj.load(xtcfile, top=pdbfile)
        self.pairs = [[0, 1], [0, 2], [1, 2]]  # some distances
        self.means = [.5, .75, 1.0]  # bogus means
        self.U = np.array([[0, 1],
                           [1, 0],
                           [1, 1]])  # bogus transformation, projects from 3 distances to 2 components

    def test_some_feature(self):
        """Mapping through the featurizer must equal a direct function call."""
        self.feat.add_custom_func(some_call_to_mdtraj_some_operations_some_linalg, self.U.shape[1],
                                  self.pairs,
                                  self.means,
                                  self.U
                                  )
        Y_custom_feature = self.feat.map(self.traj)
        # Directly call the function
        Y_function = some_call_to_mdtraj_some_operations_some_linalg(self.traj, self.pairs, self.means, self.U)
        assert np.allclose(Y_custom_feature, Y_function)

    def test_describe(self):
        """describe() must not raise for a custom function feature."""
        self.feat.add_custom_func(some_call_to_mdtraj_some_operations_some_linalg, self.U.shape[1],
                                  self.pairs,
                                  self.means,
                                  self.U
                                  )
        self.feat.describe()

    def test_dimensionality(self):
        """The reported dimension must equal the declared output width."""
        self.feat.add_custom_func(some_call_to_mdtraj_some_operations_some_linalg, self.U.shape[1],
                                  self.pairs,
                                  self.means,
                                  self.U
                                  )
        assert self.feat.dimension() == self.U.shape[1]


if __name__ == "__main__":
    unittest.main()
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: drops the obsolete ``core_keyword`` table."""

    # Run only after the blog/pages migrations that removed their
    # references to Keyword have been applied.
    depends_on = [
        ('blog', '0006_auto__del_field_blogpost__keywords__add_field_blogpost_keywords_string'),
        ('pages', '0002_auto__del_field_page__keywords__add_field_page_keywords_string__chg_fi'),
    ]

    def forwards(self, orm):
        # Deleting model 'Keyword'
        db.delete_table('core_keyword')

    def backwards(self, orm):
        # Adding model 'Keyword'
        db.create_table('core_keyword', (
            ('slug', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('core', ['Keyword'])

    # Frozen ORM is intentionally empty: this migration touches no models
    # directly through the ORM.
    models = {
    }

    complete_apps = ['core']
|
from distutils.spawn import find_executable
import subprocess
from os import path
import tempfile
from webassets.exceptions import FilterError
from webassets.filter.jst import JSTemplateFilter
__all__ = ('JinjaToJSFilter',)
class JinjaToJSFilter(JSTemplateFilter):
    """Compile `Jinja <http://jinja.pocoo.org/docs/>`_ templates.

    This filter assumes that the ``jinja2js`` executable is in the path.

    .. note::
        Use this filter if you want to compile Jinja templates.

    .. warning::
        Currently, this filter is not compatible with input filters. Any
        filters that would run during the input-stage will simply be
        ignored. Input filters tend to be other compiler-style filters,
        so this is unlikely to be an issue.
    """
    name = 'jinja2js'
    options = {
        'extra_args': 'JINJA2JS_EXTRA_ARGS',
        'root': 'JINJA2JS_ROOT',
    }

    def process_templates(self, out, hunks, **kw):
        """Compile the hunks' source templates with jinja2js into *out*.

        Raises EnvironmentError when the executable is missing and
        FilterError when the compiler exits with a non-zero status.
        """
        if not find_executable("jinja2js"):
            raise EnvironmentError(
                "The jinja2js executable can't be found."
                "\nPlease pip install pwt.jinja2js")
        templates = [info['source_path'] for _, info in hunks]
        # The temp file reserves a unique name for --outputPathFormat; the
        # context manager (delete=True) guarantees it is removed afterwards.
        with tempfile.NamedTemporaryFile(dir='.', delete=True) as temp:
            args = ['jinja2js', '--outputPathFormat', temp.name]
            args.extend(templates)
            if self.extra_args:
                args.extend(self.extra_args)
            proc = subprocess.Popen(
                args, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                # BUG FIX: the message previously said 'handlebars' -- a
                # copy/paste leftover from another filter.
                raise FilterError(('jinja2js: subprocess had error: stderr=%s, '
                                   'stdout=%s, returncode=%s') % (
                                       stderr, stdout, proc.returncode))
            # Read the compiler output via a context manager so the handle
            # is closed deterministically (the original leaked it).
            with open(temp.name) as compiled:
                out.write(compiled.read())
|
import argparse
import os
from ts import *
def write_ts(file_name, packets, force):
    """Write the TS packets' raw bytes to *file_name*.

    Unless *force* is true, prompts before overwriting an existing file and
    returns without writing when the user does not answer 'y'.
    """
    logging.info("Writing %s", file_name)
    if not force and os.path.exists(file_name):
        prompt = ("Output file {} already exists. Overwrite it? "
                  "[y/N] ".format(file_name))
        if input(prompt).lower() != "y":
            return
    with open(file_name, "wb") as out:
        for pkt in packets:
            out.write(pkt.bytes)
def generate_initialization_segment(
        segment_file_names, segment_template, out_file_name, force):
    """Split shared PSI out of media segments into an initialization segment.

    Reads every segment, verifies that all of them carry one identical PAT
    and one identical PMT with exactly one program (a DASH requirement),
    writes those two packets to *out_file_name*, and rewrites each segment
    (named via *segment_template*) without the PSI packets.
    """
    pat = None          # first PAT seen across all segments
    pat_ts = None       # the TS packet that carried it
    pmt = None          # first PMT seen across all segments
    pmt_ts = None
    segment_ts = {}     # segment file name -> list of non-PSI packets
    pmt_pid = None      # PID of the single program's PMT
    for segment_file_name in segment_file_names:
        logging.info("Reading %s", segment_file_name)
        current_segment_ts = []
        segment_ts[segment_file_name] = current_segment_ts
        for ts in read_ts(segment_file_name):
            if ts.pid == ProgramAssociationTable.PID:
                new_pat = ProgramAssociationTable(ts.payload)
                if pat is None:
                    pat = new_pat
                    pat_ts = ts
                    programs = list(pat.programs.values())
                    if len(programs) != 1:
                        raise Exception(
                            "PAT has {} programs, but DASH only allows 1 "
                            "program.".format(len(pat.programs)))
                    if pmt_pid is not None and programs[0] != pmt_pid:
                        raise Exception("PAT has new PMT PID. This program has "
                                        "not been tested to handled this case.")
                    pmt_pid = programs[0]
                elif new_pat != pat:
                    # Every segment must carry the exact same PAT.
                    raise Exception("Cannot generate initialization segment "
                                    "for segment with multiple PAT's. {} != {"
                                    "}".format(new_pat, pat))
            elif ts.pid == pmt_pid:
                new_pmt = ProgramMapTable(ts.payload)
                if pmt is None:
                    pmt = new_pmt
                    pmt_ts = ts
                elif new_pmt != pmt:
                    # Likewise, the PMT must be constant across segments.
                    raise Exception("Cannot generate initialization segment "
                                    "for segment with multiple PMT's. {} != {"
                                    "}".format(new_pmt, pmt))
            else:
                # Everything that is not PSI stays in the media segment.
                current_segment_ts.append(ts)
    logging.debug("Common PSI is:\nPAT: %s\nPMT: %s", pat, pmt)
    # Initialization segment: just the two shared PSI packets.
    write_ts(out_file_name, [pat_ts, pmt_ts], force)
    # Rewrite each media segment without its PSI packets.
    for segment_file_name in segment_file_names:
        path, file_name = os.path.split(segment_file_name)
        name_part, _ = os.path.splitext(file_name)
        segment_out_file_name = segment_template.format_map(
            {"path": path, "name_part": name_part})
        write_ts(segment_out_file_name, segment_ts[segment_file_name], force)
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure logging, then extract the
    # shared PSI from the given media segments.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "media_segment", nargs="+",
        help="The media segments to create an initialization segment for.")
    parser.add_argument(
        "--segment-template", "-s",
        help="Template for segment index files. {name_part} will be replaced "
        "with the file name of the media segment minus the suffix (.ts). "
        "{path} will be replaced with the full path to the media segment.",
        default="{path}/{name_part}.ts")
    parser.add_argument(
        "--out", "-o", required=True,
        help="The file to write the initialization segment to.")
    parser.add_argument(
        "--force", "-f", action="store_true", default=False,
        help="Overwrite output files without prompting.")
    parser.add_argument(
        "--verbose", "-v", action="store_true", default=False,
        help="Enable verbose output.")
    args = parser.parse_args()
    logging.basicConfig(
        format='%(levelname)s: %(message)s',
        level=logging.DEBUG if args.verbose else logging.INFO)
    generate_initialization_segment(
        args.media_segment, args.segment_template, args.out, args.force)
|
import ctypes as ct
from ctypes.util import find_library
import platform
osName = platform.system()
def define_function(libName, name, returnType, params):
'''Helper function to help in binding functions'''
if osName == "Windows":
function = ct.WINFUNCTYPE(returnType, *params)
lib = ct.WinDLL(libName)
elif osName == "Darwin" or osName == "Linux":
function = ct.FUNCTYPE(returnType, *params)
lib = ct.CDLL(find_library(libName))
address = getattr(lib, name)
new_func = ct.cast(address, function)
return new_func
# Choose how OpenGL extension entry points are resolved on this platform.
if osName == "Windows":
    # wglGetProcAddress resolves GL extension functions on Windows.
    glGetProcAddress = define_function('opengl32', 'wglGetProcAddress',
                                       ct.POINTER(ct.c_int), (ct.c_char_p,))
elif osName in ('Linux', 'Darwin', 'Windows'):
    # NOTE(review): 'Windows' in this tuple is unreachable -- the branch
    # above already handled it. SDL's loader is used on Linux/Darwin.
    from sdl2 import SDL_GL_GetProcAddress
    glGetProcAddress = SDL_GL_GetProcAddress
class _BindGL(object):
    """Loads the platform's OpenGL library and creates function bindings."""

    def __init__(self):
        # Pick the shared library and calling convention for the host OS.
        self.osName = platform.system()
        if self.osName == 'Linux':
            libFound = find_library('GL')
            self.lib = ct.CDLL(libFound)
            self.funcType = ct.CFUNCTYPE
        elif self.osName == 'Windows':
            libFound = find_library('opengl32')
            self.lib = ct.WinDLL(libFound)
            self.funcType = ct.WINFUNCTYPE
        elif self.osName == 'Darwin':  # Mac OS X
            # NOTE(review): find_library normally takes a library *name*;
            # passing a framework path here relies on macOS-specific
            # behaviour -- confirm it still resolves on current systems.
            libraryPath = '/System/Library/Frameworks/OpenGL.framework'
            libFound = find_library(libraryPath)
            self.lib = ct.CDLL(libFound)
            self.funcType = ct.CFUNCTYPE
        # NOTE(review): on any other OS no attributes are set, so gl_func
        # will fail later with AttributeError.

    def gl_func(self, name, returnType, paramTypes):
        ''' Define and load an opengl function '''
        function = self.funcType(returnType, *paramTypes)
        try:
            # Core functions are exported directly by the library.
            address = getattr(self.lib, name)
        except AttributeError:
            # Extension functions must be resolved through the GL loader.
            name = name.encode(encoding='UTF-8')
            address = glGetProcAddress(name)
        return ct.cast(address, function)


# Module-level singleton; gl_func is the public entry point.
_glbind = _BindGL()
gl_func = _glbind.gl_func

__all__ = ['gl_func', 'define_function']
|
import unittest
import os
import femagtools.bch
from io import open
import numpy as np
class BchReaderTest(unittest.TestCase):
def read_bch(self, filename):
    """Read and parse a BATCH file from the test 'data' directory.

    Returns the populated femagtools.bch.Reader instance.
    """
    # BUG FIX: the original tested len(testPath) == 0 on the *joined* path,
    # which always contains 'data' and is never empty, so the fallback was
    # dead code. Test the directory of __file__ instead.
    data_dir = os.path.split(__file__)[0]
    if not data_dir:
        data_dir = os.path.abspath('.')
    testPath = os.path.join(data_dir, 'data')
    r = femagtools.bch.Reader()
    with open('{0}/{1}'.format(testPath, filename),
              encoding='latin1') as f:
        r.read(f)
    return r
def test_read_cogging(self):
    """Cogging-torque BATCH: metadata, FFT, flux, torque and losses."""
    bch = self.read_bch('cogging.BATCH')
    self.assertEqual(bch.version, '7.9.147 November 2012')
    self.assertEqual(bch.nodes, 2315)
    self.assertEqual(bch.elements, 3305)
    self.assertEqual(bch.quality, 100.0)

    self.assertEqual(len(bch.torque_fft), 1)
    self.assertEqual(len(bch.torque_fft[0]), 5)
    self.assertTrue('order' in bch.torque_fft[0])
    self.assertTrue('torque' in bch.torque_fft[0])
    self.assertEqual(len(bch.torque_fft[0]['torque']), 5)
    self.assertEqual(bch.torque_fft[0]['order'], [4, 12, 24, 36, 48])

    # One flux record per winding.
    self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
    self.assertEqual(sorted(bch.flux['1'][0].keys()),
                     sorted(['displ', 'voltage_four',
                             'current_k', 'flux_k',
                             'voltage_ir', 'displunit',
                             'voltage_dpsi']))
    self.assertEqual(len(bch.flux['1'][0]['flux_k']), 61)
    self.assertEqual(bch.flux_fft['1'][0]['order'], [1, 3, 5, 7, 9, 11])

    self.assertEqual(len(bch.torque), 1)
    self.assertEqual(sorted(bch.torque[0].keys()),
                     sorted(['angle', 'force_y', 'force_x', 'torque',
                             'current_1', 'ripple', 't_idpsi']))
    self.assertEqual(len(bch.torque[0]['torque']), 61)

    self.assertAlmostEqual(bch.losses[0]['winding'], 0.0, 1)
    self.assertAlmostEqual(bch.losses[0]['stajo'], 0.458, 2)
    self.assertAlmostEqual(bch.losses[0]['staza'], 0.344, 3)
    self.assertAlmostEqual(bch.losses[0]['magnetJ'], 0.006, 3)
    # self.assertAlmostEqual(bch.losses[0]['rotfe'], 0.000, 3)

    self.assertAlmostEqual(bch.lossPar['fo'][0], 50.0, 1)
    self.assertAlmostEqual(bch.lossPar['fo'][1], 50.0, 1)
    self.assertEqual(bch.get(('machine', 'p')), 2)
    np.testing.assert_almost_equal(bch.inertia, [0.230195e-3, 0.011774e-3])

def test_read_sctest(self):
    """Short-circuit test BATCH: currents and peak values."""
    bch = self.read_bch('sctest.BATCH')
    self.assertEqual(len(bch.torque_fft), 1)
    self.assertEqual(len(bch.scData['ia']), 134)
    self.assertAlmostEqual(bch.scData['ikd'], 0.0, 1)
    self.assertAlmostEqual(bch.scData['iks'], 1263.581, 2)
    self.assertAlmostEqual(bch.scData['tks'], 1469.736, 2)
def test_read_pmsim(self):
    """PM simulation BATCH: torque, flux, dq parameters and loss FFTs."""
    bch = self.read_bch('pmsim.BATCH')
    self.assertEqual(len(bch.torque_fft), 2)
    self.assertTrue('order' in bch.torque_fft[0])
    self.assertTrue('torque' in bch.torque_fft[0])
    self.assertEqual(len(bch.torque_fft[0]['torque']), 7)
    self.assertEqual(bch.torque_fft[1]['order'], [0, 12, 24, 30, 36, 42])

    self.assertEqual(sorted(bch.flux['1'][0].keys()),
                     sorted(['displ', 'voltage_four',
                             'current_k', 'flux_k',
                             'voltage_ir', 'displunit',
                             'voltage_dpsi']))
    self.assertEqual(len(bch.flux['1'][0]['flux_k']), 46)

    self.assertEqual(len(bch.torque), 2)
    self.assertTrue('torque' in bch.torque[1])
    self.assertEqual(len(bch.torque[1]['torque']), 46)

    # dq parameters of the operating point.
    self.assertTrue('ld' in bch.dqPar)
    self.assertAlmostEqual(bch.dqPar['i1'][1], 49.992, 3)
    self.assertAlmostEqual(bch.dqPar['ld'][0], 9.9e-3, 6)
    self.assertAlmostEqual(bch.dqPar['ld'][0], 9.9e-3, 6)
    self.assertAlmostEqual(bch.dqPar['u1'][1], 358.38, 2)
    self.assertAlmostEqual(bch.dqPar['torque'][0], 65.3, 1)

    self.assertAlmostEqual(bch.machine['i1'], 50.0)

    self.assertAlmostEqual(bch.lossPar['fo'][0], 50.0, 1)

    # Harmonic loss decomposition for stator yoke and teeth.
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
                                   [1, 3, 5, 7, 9, 11, 13, 15])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
                                   [100.0, 300.0, 500.0, 700.0, 900.0,
                                    1100.0, 1300.0, 1500.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
                                   [10.33, 9.391, 9.391, 9.391, 3.348,
                                    2.971, 1.476, 0.882])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
                                   [15.804, 142.234, 395.094, 774.383,
                                    455.591, 603.881, 419.063, 333.395])

    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['order_el'],
                                   [1, 3, 5, 7, 9, 11, 13, 15])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['freq'],
                                   [100.0, 300.0, 500.0, 700.0, 900.0, 1100.0, 1300.0, 1500.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
                                   [8.641, 7.774, 7.774, 7.748, 3.679, 2.915, 1.303, 0.626])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['eddy'],
                                   [13.065, 117.587, 326.631, 637.999, 500.663, 592.805, 370.023, 236.594])

def test_read_pmsim_9(self):
    """PM simulation (v9 format): iron losses and mechanical loss orders."""
    bch = self.read_bch('pmsim-9.BATCH')
    self.assertAlmostEqual(bch.machine['plfe'][0], 2540.2, 1)
    self.assertAlmostEqual(bch.machine['plfe'][1], 2020.5, 1)
    self.assertAlmostEqual(bch.dqPar['up'][0], 259.4, 1)
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_mech'],
                                   [6, 18, 30, 42, 54, 90, 114])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
                                   [1.0, 3.0, 5.0, 7.0, 9.0, 15.0, 19.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
                                   [400.0, 1200.0, 2000.0, 2800.0, 3600.0, 6000.0, 7600.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
                                   [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
                                   [1637.884, 225.861, 93.969, 19.904, 6.661, 3.043, 1.752])
    # Lho inductances reported in H; compare in mH.
    assert [round(l*1e3, 4) for l in bch.dqPar['Lho']] == [0.5908, 0.6583]
def test_read_relsim(self):
    """Reluctance machine BATCH: torque, voltage and loss FFT."""
    bch = self.read_bch('relsim.BATCH')
    self.assertEqual(len(bch.torque), 1)
    self.assertTrue('torque' in bch.torque[0])
    self.assertAlmostEqual(np.mean(bch.torque[0]['torque']), 5.656, 2)
    self.assertAlmostEqual(bch.dqPar['u1'][1], 274.5, 1)
    self.assertAlmostEqual(bch.dqPar['torque'][0], 5.775, 1)
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
                                   [50.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
                                   [0.152])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
                                   [0.066])

def test_read_pmsim_external(self):
    """Externally excited PM BATCH: dq parameters and loss FFTs."""
    bch = self.read_bch('pmsim-external.BATCH')
    self.assertTrue('ld' in bch.dqPar)
    self.assertAlmostEqual(bch.dqPar['i1'][1], 49.992, 3)
    self.assertAlmostEqual(bch.dqPar['ld'][0], 0.86688e-3, 6)
    self.assertAlmostEqual(bch.dqPar['ld'][0], 0.86688e-3, 6)
    self.assertAlmostEqual(bch.dqPar['u1'][1], 2409.142, 2)
    self.assertAlmostEqual(bch.dqPar['torque'][0], 1137.92, 1)
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['order_el'],
                                   [1, 3])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['freq'],
                                   [800.0, 2400.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['hyst'],
                                   [2619.555, 49.438])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['stajo']['eddy'],
                                   [15512.529, 1186.523])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['order_el'],
                                   [1, 3, 5])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['freq'],
                                   [800.0, 2400.0, 4000.0])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['hyst'],
                                   [5688.175, 296.19, 0.989])
    np.testing.assert_almost_equal(bch.losses[-1]['fft']['staza']['eddy'],
                                   [43864.352, 7108.561, 39.563])
def test_read_psidq(self):
bch = self.read_bch('psidpsiq.BATCH')
self.assertEqual(len(bch.torque_fft), 10)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 7)
self.assertEqual(bch.torque_fft[0]['order'],
[0, 4, 8, 12, 16, 20, 24])
self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
self.assertEqual(len(bch.flux['1']), 10)
self.assertTrue('flux_k' in bch.flux['1'][0])
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 16)
self.assertEqual(len(bch.torque), 10)
self.assertEqual(len(bch.torque[-1]['torque']), 16)
self.assertEqual(len(bch.psidq), 7)
self.assertEqual(len(bch.psidq_ldq), 6)
self.assertEqual(len(bch.psidq['psid']), 3)
self.assertEqual(len(bch.psidq_ldq['ld']), 3)
self.assertEqual(len(bch.psidq['losses']), 11)
self.assertEqual(len(bch.psidq['losses']['styoke']), 3)
self.assertTrue('id' in bch.airgapInduction)
self.assertEqual(bch.airgapInduction['id'],
[-200.0, -100.0, 0.0])
self.assertEqual(len(bch.airgapInduction['Ba']), 3)
self.assertEqual(len(bch.airgapInduction['Bm'][0]), 3)
def test_read_ldq(self):
bch = self.read_bch('ldq.BATCH')
self.assertEqual(len(bch.torque_fft), 13)
self.assertTrue('order' in bch.torque_fft[0])
self.assertTrue('torque' in bch.torque_fft[0])
self.assertEqual(len(bch.torque_fft[0]['torque']), 8)
self.assertEqual(bch.torque_fft[0]['order'], [12, 36, 48, 56, 60,
72, 76, 84])
self.assertEqual(sorted(bch.flux.keys()), ['1', '2', '3'])
self.assertEqual(len(bch.flux['1']), 13)
self.assertEqual(len(bch.flux['1'][0]), 7)
self.assertTrue('flux_k' in bch.flux['1'][0])
self.assertEqual(len(bch.flux['1'][0]['flux_k']), 46)
self.assertEqual(len(bch.torque), 13)
self.assertEqual(len(bch.torque[-1]['torque']), 46)
self.assertEqual(len(bch.ldq['losses']), 5)
self.assertEqual(len(bch.ldq['losses']['styoke']), 4)
self.assertTrue('i1' in bch.airgapInduction)
self.assertEqual(len(bch.airgapInduction['i1']), 3)
self.assertEqual(len(bch.airgapInduction['an']), 4)
self.assertEqual(len(bch.airgapInduction['an'][0]), 4)
    def test_read_pmsim2(self):
        # Spot-check dq parameters (current magnitude i1 and beta angle)
        # parsed from a PM simulation BATCH file.
        bch = self.read_bch('PM_270_L8_001.BATCH')
        self.assertAlmostEqual(bch.dqPar['i1'][1], 70.0, 1)
        self.assertAlmostEqual(bch.dqPar['beta'][0], -38.0, 1)
def test_read_linearforce(self):
bch = self.read_bch('linearForce.BATCH')
self.assertEqual(len(bch.linearForce), 1)
self.assertEqual(len(bch.linearForce[0]['displ']), 26)
self.assertEqual(bch.linearForce[0]['displ'][5], 15.0)
self.assertEqual(bch.linearForce[0]['force_x'][7], -0.3439)
self.assertEqual(bch.linearForce[0]['force_y'][2], 03107.0)
self.assertEqual(bch.linearForce[0]['magnet_1'][13], 10.0)
self.assertEqual(bch.linearForce_fft[0]['force'][0], 0.3483)
self.assertEqual(bch.linearForce_fft[1]['force'][0], 3157.)
self.assertEqual(len(bch.linearForce_fft), 2)
self.assertEqual(len(bch.flux_fft), 3)
    def test_read_linmot_z(self):
        # Two linear-force result sets expected; check peak z-force of the
        # second one.
        bch = self.read_bch('linmot_z.BATCH')
        self.assertEqual(len(bch.linearForce), 2)
        self.assertEqual(max(bch.linearForce[1]['force_z']), 4074.0)
def test_dq(self):
bch = self.read_bch('dq.BATCH')
bch.get(['torque', 'torque']) == []
bch.get(['linearForce[-1]', 'ripple_x']) == 0.0
assert bch.get(['linearForce', 'ripple_z']) is None
self.assertAlmostEqual(bch.dqPar['psid'][0], 1.93, 5)
self.assertAlmostEqual(bch.dqPar['psiq'][0], 0.77074639149333668, 5)
    def test_read_felosses(self):
        # Verify iron-loss records (stator yoke/teeth) from a BATCH file.
        bch = self.read_bch('rel-felosses.BATCH')
        self.assertEqual(len(bch.losses), 4)
        self.assertEqual(bch.losses[-1]['stajo'], 4425.106)
        self.assertEqual(bch.losses[-1]['staza'], 7504.659)
    def test_read_pmsim_demag(self):
        # Verify demagnetization records of a skewed PM model; segment 3
        # must carry the expected H_max values across its three entries.
        bch = self.read_bch('PMREL-4p-skewed.BATCH')
        self.assertEqual(len(bch.demag), 9)
        self.assertEqual([-370.92, -2241.79, -2236.31],
                         [d['H_max'] for d in bch.demag if d['segment'] == 3])
    def test_read_characteristics(self):
        # Verify the speed/torque characteristics block of a BATCH file.
        bch = self.read_bch('char.BATCH')
        self.assertEqual(len(bch.characteristics), 1)
        self.assertEqual(len(bch.characteristics[0].keys()), 19)
        self.assertEqual(len(bch.characteristics[0]['speed_torque']['n']), 16)
    def test_read_asterisks(self):
        # Files with '*' placeholders must parse: numeric fields become NaN
        # while surrounding data (airgap induction) stays readable.
        bch = self.read_bch('PM-with-asterisks_001.BATCH')
        self.assertTrue(np.isnan(bch.nodes))
        self.assertAlmostEqual(bch.airgapInduction['an'][0][8][0], 0.0690, 1)
        self.assertAlmostEqual(bch.airgapInduction['an'][0][9][0], -0.9915, 1)
    def test_read_dist_leak(self):
        # Verify that distributed winding leakage data is parsed.
        bch = self.read_bch('PM-4p-distleak.BATCH')
        self.assertTrue(bch.leak_dist_wind)
        self.assertEqual(bch.leak_dist_wind['nseg'], 4)
if __name__ == '__main__':
    # Run the unittest suite when this file is executed directly.
    unittest.main()
|
'''
Created on 14 May 2013
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import os
import sys
import getopt
import numpy as np
from time import gmtime, strftime
import logging
import logging.handlers
import h5py
from datetime import datetime
import shutil
import maps_generate_img_dat
import maps_definitions
import maps_elements
from file_io import maps_hdf5
import maps_fit_parameters
import maps_calibration
import make_maps
from file_io.file_util import open_file_with_retry, call_function_with_retry
def setup_logger(log_name, stream_to_console=True):
    """Create a DEBUG-level logger that writes to the file *log_name*.

    Parameters
    ----------
    log_name : str
        Used both as the logger name and as the path of the log file.
    stream_to_console : bool
        When True, also echo WARNING-and-above records to the console.

    Returns
    -------
    (logging.Logger, logging.FileHandler)
        The configured logger and its file handler, so callers can later
        remove/close the handler.

    NOTE(review): calling this twice with the same name attaches duplicate
    handlers to the same logger object; callers appear to call it once.
    """
    logger = logging.getLogger(log_name)
    logger.setLevel(logging.DEBUG)
    # One Formatter instance serves both handlers (the original built two
    # identical ones).
    formatter = logging.Formatter(
        '%(asctime)s | %(levelname)s | PID[%(process)d] | %(funcName)s(): %(message)s')
    fHandler = logging.FileHandler(log_name)
    fHandler.setFormatter(formatter)
    logger.addHandler(fHandler)
    if stream_to_console:
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        ch.setLevel(logging.WARNING)
        logger.addHandler(ch)
    return logger, fHandler
def check_and_create_dir(dir_name, logger):
    """Ensure that *dir_name* exists, creating it when missing.

    Returns True if the directory exists (or was just created) and False
    if creation failed; a warning is logged in the failure case.
    """
    if os.path.exists(dir_name):
        return True
    os.makedirs(dir_name)
    if os.path.exists(dir_name):
        return True
    logger.warning('warning: did not find the %s directory, and could not create a new directory. Will abort this action', dir_name)
    return False
def check_output_dirs(main_dict, logger):
    """Make sure all output directories referenced by *main_dict* exist.

    Returns False as soon as one of the mandatory directories cannot be
    created; the 'lookup'/'rois' folders under master_dir are best-effort
    (their failure does not abort).
    """
    mandatory = ('output_dir', 'output_fits', 'mda_dir', 'pca_dir',
                 'img_dat_dir', 'line_dat_dir', 'xanes_dat_dir',
                 'fly_dat_dir')
    # Same check order as before; stop at the first directory that cannot
    # be created.
    for key in mandatory:
        if not check_and_create_dir(main_dict[key], logger):
            return False
    for sub in ('lookup', 'rois'):
        check_and_create_dir(os.path.join(main_dict['master_dir'], sub), logger)
    return True
def select_beamline(main_dict, make_maps_conf, this_beamline, logger):
    """Configure *make_maps_conf* for the beamline named in *main_dict*.

    Sets the detector selection, the Be filter thickness (fit_t_be, in nm)
    and the list of detector-map names for the given beamline, then marks
    every non-'dummy' map as used.

    Parameters
    ----------
    main_dict : dict
        Settings dictionary; only 'beamline' is read here.
    make_maps_conf : object
        Configuration object mutated in place (use_det, use_beamline,
        fit_t_be, dmaps_names, dmaps).
    this_beamline : str
        Stored as make_maps_conf.use_beamline.
    logger : logging.Logger
    """
    make_maps_conf.use_det[:] = 0
    make_maps_conf.use_beamline = this_beamline
    logger.info('make_maps_conf.version %s', make_maps_conf.version)
    logger.info('main_dict[beamline] %s', main_dict['beamline'])
    if main_dict['beamline'] == '2-ID-E':
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 12000.  # [12 microns]
        make_maps_conf.dmaps_names = ['SRcurrent', 'us_ic', 'ds_ic', 'abs_ic',
            'abs_cfg', 'H_dpc_cfg', 'V_dpc_cfg', 'dia1_dpc_cfg', 'dia2_dpc_cfg',
            'H_dpc_norm', 'V_dpc_norm', 'phase', 'ELT1', 'ERT1', 'ICR1', 'OCR1',
            'deadT', 'x_coord', 'y_coord',
            'dummy', 'dummy', 'dummy', 'dummy']
    if (main_dict['beamline'] =='2-ID-D') or (main_dict['beamline'] == '2-ID-B') or (main_dict['beamline'] == '2-BM'):
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 8000. #[8 microns]
        make_maps_conf.dmaps_names = ['SRcurrent', 'us_ic', 'ds_ic', 'abs_ic',
            'abs_cfg', 'H_dpc_cfg', 'V_dpc_cfg', 'dia1_dpc_cfg', 'dia2_dpc_cfg',
            'H_dpc_norm', 'V_dpc_norm', 'phase', 'ELT1', 'ERT1', 'ICR1', 'OCR1',
            'deadT', 'x_coord', 'y_coord',
            'dummy', 'dummy', 'dummy', 'dummy']
        logger.info('make_maps_conf.dmaps_names %s', make_maps_conf.dmaps_names)
    if main_dict['beamline'] == 'Bio-CAT':
        logger.info('now it is Bio-CAT')
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 24000.  # [24 microns]
        make_maps_conf.dmaps_names = ['SRcurrent', 'us_ic', 'ds_ic', 'abs_ic', 'ELT1', 'ERT1',
            'x_coord', 'y_coord', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy', 'dummy', 'dummy']
    if main_dict['beamline'] == 'GSE-CARS':
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 24000.  # [24 microns]
        make_maps_conf.dmaps_names = ['SRcurrent', 'us_ic', 'ds_ic', 'abs_ic',
            'abs_cfg', 'H_dpc_cfg', 'V_dpc_cfg', 'dia1_dpc_cfg', 'dia2_dpc_cfg',
            'H_dpc_norm', 'V_dpc_norm', 'phase', 'ELT1', 'ERT1', 'ICR1', 'OCR1',
            'deadT', 'x_coord', 'y_coord',
            'dummy', 'dummy', 'dummy', 'dummy']
    if main_dict['beamline'] == 'Bionanoprobe':
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 24000.
        make_maps_conf.dmaps_names = ['SRcurrent', 'us_ic', 'ds_ic', 'abs',
            'H_dpc_cfg', 'V_dpc_cfg', 'dia1_dpc_cfg', 'dia2_dpc_cfg',
            'H_dpc_norm', 'V_dpc_norm', 'phase', 'ELT1', 'ERT1', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy']
        logger.info('main_dict["beamline"] %s', main_dict['beamline'])
    if main_dict['beamline'] == 'DLS-I08':
        make_maps_conf.use_det[0] = 1
        make_maps_conf.fit_t_be = 24000.
        make_maps_conf.dmaps_names = ['dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy',
            'dummy', 'dummy', 'dummy', 'dummy']
    for i in range(len(make_maps_conf.dmaps_names)):
        # BUG FIX: the original used '==' (a no-op comparison) instead of
        # '=' here, so dmaps[i].name was never actually assigned.
        make_maps_conf.dmaps[i].name = make_maps_conf.dmaps_names[i]
        if make_maps_conf.dmaps_names[i] != 'dummy':
            make_maps_conf.dmaps[i].use = 1
def load_spectrum(filename, spectra, logger, append=1):
    """Parse a MAPS text spectrum file into the *spectra* structure.

    The file is a tag:value header (VERSION, ELEMENTS, CHANNELS, CAL_*,
    ENVIRONMENT lines, ...) followed by a DATA: section with one counts
    value per line, channel-major over detector elements.

    Parameters
    ----------
    filename : str
        Path of the spectrum text file.
    spectra : sequence
        Pre-allocated spectrum objects (see maps_definitions); filled in
        place for each detector element with non-zero data.
    logger : logging.Logger
    append : int
        When > 0, new spectra are appended after the last already-used
        slot in *spectra*; when 0, filling starts at slot 0.

    Returns True on success, False when the file cannot be opened or
    contains no data.

    NOTE(review): Python 2 code (see the 0L literal near the end); if no
    DATA: section is found, `data` is never bound and the data.size check
    below raises NameError rather than returning False -- confirm files
    always contain DATA:. Similarly `date` is only bound when a DATE tag
    is present.
    """
    us_amp = np.zeros((3))
    ds_amp = np.zeros((3))
    f = open_file_with_retry(filename, 'rt')
    if f == None:
        logger.error('load_spectrum(): Could not open file: %s', filename)
        return False
    # Peek at the first lines to size the arrays (ELEMENTS / CHANNELS),
    # then rewind and parse the whole file tag by tag.
    line = f.readline() # 1. line is version
    # print line
    line = f.readline() # 2. is # elements
    slist = line.split(':')
    # tag = slist[0]
    value = ''.join(slist[1:])
    n_detector_elements = int(value)
    if n_detector_elements < 1:
        n_detector_elements = 1
    # print 'n_detector_elements', n_detector_elements
    line = f.readline()
    line = f.readline()
    slist = line.split(':')
    # tag = slist[0]
    value = ''.join(slist[1:])
    n_channels = int(value)
    # print 'n_channels', n_channels
    f.seek(0, 0)
    amp = np.zeros((8, 3)) # 8 amplifiers, each with a numerical value(0) and a unit(1), resulting in a factor (3)
    amp[:, 0] = 1. # put in a numerical value default of 1.
    real_time = []
    live_time = []
    current = []
    calibration = { 'offset' : np.zeros((n_detector_elements)),
                    'slope' : np.zeros((n_detector_elements)),
                    'quad' : np.zeros((n_detector_elements)) }
    counts_us_ic = 0.
    counts_ds_ic = 0.
    a_num = ['','','','','','']
    a_unit = ['','','','','','']
    #roi_area = -1.
    #roi_pixels = -1
    found_data = 0
    lines = f.readlines()
    # Header pass: dispatch on the tag before the first ':'.
    for line in lines:
        if ':' in line :
            slist = line.split(':')
            tag = slist[0]
            value = ''.join(slist[1:])
            if tag == 'VERSION':
                version = float(value)
            elif tag == 'DATE':
                date = value
            elif tag == 'ELEMENTS':
                n_detector_elements = int(value)
            elif tag == 'CHANNELS':
                n_channels = int(value)
            elif tag == 'REAL_TIME':
                #real_time = np.zeros((n_detector_elements))
                value = value.split(' ')
                real_time = [float(x) for x in value if x != '']
            elif tag == 'LIVE_TIME':
                #live_time = np.zeros((n_detector_elements))
                value = value.split(' ')
                live_time = [float(x) for x in value if x != '']
            elif tag == 'CAL_OFFSET':
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                calibration['offset'][:] = valuelist
            elif tag == 'CAL_SLOPE':
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                calibration['slope'][:] = valuelist
            elif tag == 'CAL_QUAD':
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                calibration['quad'][:] = valuelist
            elif tag == 'TWO_THETA':
                two_theta = np.zeros((n_detector_elements))
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                two_theta[:] = valuelist
            elif tag == 'UPSTREAM_IONCHAMBER':
                counts_us_ic = np.zeros((n_detector_elements))
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                counts_us_ic[:] = valuelist
            elif tag == 'DOWNSTREAM_IONCHAMBER':
                counts_ds_ic = np.zeros((n_detector_elements))
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                counts_ds_ic[:] = valuelist
            elif tag == 'SRcurrent':
                current = np.zeros((n_detector_elements))
                value = value.split(' ')
                valuelist = [float(x) for x in value if x != '']
                current[:] = valuelist
            elif tag == 'ENVIRONMENT':
                # ENVIRONMENT lines carry EPICS PV assignments of the form
                #   ENVIRONMENT: <pv name>="<value>"
                value = ':'.join(slist[1:])
                pos = value.find('=')
                etag = value[0:pos].strip()
                vallist = value.split('"')
                temp = vallist[1]
                if etag == 'S:SRcurrentAI':
                    current = float(temp)
                elif etag == '2xfm:scaler1_cts1.B':
                    if counts_us_ic == 0 : counts_us_ic = float(temp)
                elif etag == '2xfm:scaler1_cts1.C':
                    if counts_ds_ic == 0 : counts_ds_ic = float(temp)
                elif etag == '2xfm:scaler3_cts1.B':
                    counts_us_ic = float(temp)
                elif etag == '2xfm:scaler3_cts1.C':
                    counts_ds_ic = float(temp)
                elif etag == '2idd:scaler1_cts1.C':
                    counts_us_ic = float(temp)
                elif etag == '2idd:scaler1_cts1.B':
                    counts_ds_ic = float(temp)
                elif etag == '8bmb:3820:scaler1_cts1.B':
                    counts_us_ic = float(temp)
                elif etag == '8bmb:3820:scaler1_cts1.C':
                    counts_ds_ic = float(temp)
                # Amplifier sensitivity PVs: compare on the PV suffix so the
                # beamline prefix (first 5 chars) does not matter.
                elif etag[5:] == 'A1sens_num.VAL':
                    a_num[0] = temp
                elif etag[5:] == 'A2sens_num.VAL':
                    a_num[1] = temp
                elif etag[5:] == 'A3sens_num.VAL':
                    a_num[2] = temp
                elif etag[5:] == 'A4sens_num.VAL':
                    a_num[3] = temp
                elif etag[5:] == 'A1sens_unit.VAL':
                    a_unit[0] = temp
                elif etag[5:] == 'A2sens_unit.VAL':
                    a_unit[1] = temp
                elif etag[5:] == 'A3sens_unit.VAL':
                    a_unit[2] = temp
                elif etag[5:] == 'A4sens_unit.VAL':
                    a_unit[3] = temp
            elif tag == 'DATA':
                found_data = 1
                dataindex = lines.index(line)
                break
    if found_data:
        # Data section: one float per line, channel-major over elements.
        data = np.zeros((n_channels, n_detector_elements))
        for i in range(n_channels):
            for j in range(n_detector_elements):
                dataindex += 1
                line = lines[dataindex]
                counts = float(line)
                data[i, j] = counts
    f.close()
    if data.size == 0:
        logger.error('Not a valid data file: %s', filename)
        return False
    # Convert amplifier (value, unit) pairs into a single scale factor in
    # nA/V units: unit 0 = pA/V, 1 = nA/V, 2 = uA/V, 3 = mA/V.
    for i in range(8):
        amp[i, 2] = amp[i, 0]
        if amp[i, 1] == 0: amp[i, 2] = amp[i, 2] / 1000. # pA/V
        if amp[i, 1] == 1: amp[i, 2] = amp[i, 2] # nA/V
        if amp[i, 1] == 2: amp[i, 2] = amp[i, 2] * 1000. #uA/V
        if amp[i, 1] == 3: amp[i, 2] = amp[i, 2] * 1000. * 1000. #mA/V
    # Guard against division by zero downstream: treat zero IC counts as 1.
    if counts_ds_ic == 0:
        logger.warning('warning downstream IC counts zero')
        counts_ds_ic = 1.
    if counts_us_ic == 0:
        logger.warning('warning upstream IC counts zero')
        counts_us_ic = 1.
    # Find the last already-used spectrum slot so new data is appended
    # after it (append mode) or written from slot 0 otherwise.
    if append > 0:
        temp_used = []
        for item in spectra: temp_used.append(item.used)
        wo = np.where(np.array(temp_used) > 0.)
        wo = wo[0]
        if wo.size != 0:
            wo = np.amax(wo)
        else:
            wo = -1
    else:
        wo = -1
    # Parse the DATE header; two layouts are supported, distinguished by
    # whether the string starts with a weekday abbreviation.
    month = 0
    year = 0
    day = 0
    hour = 0
    minute = 0
    date = date.strip()
    if date != '':
        test = date[0:3]
        # test which of the two formats is used
        if (test == 'Mon') or (test == 'Tue') or (test == 'Wed') or (test == 'Thu') or (test == 'Fri') or (test == 'Sat') or (test == 'Sun'):
            year_pos = 20
            month_pos = 4
            day_pos = 8
            hour_pos = 13
            minute_pos = 16
        else:
            year_pos = 8
            month_pos = 0
            day_pos = 4
            hour_pos = 13
            minute_pos = 16
        test = date[month_pos: month_pos + 3].lower()
        if test == 'jan' : month = 1
        if test == 'feb' : month = 2
        if test == 'mar' : month = 3
        if test == 'apr' : month = 4
        if test == 'may' : month = 5
        if test == 'jun' : month = 6
        if test == 'jul' : month = 7
        if test == 'aug' : month = 8
        if test == 'sep' : month = 9
        if test == 'oct' : month = 10
        if test == 'nov' : month = 11
        if test == 'dec' : month = 12
        try:
            test = date[year_pos:(year_pos + 4)]
            year = int(test)
            test = date[day_pos: day_pos + 2]
            day = int(test)
            test = date[hour_pos:hour_pos + 4]
            hour = int(test)
            test = date[minute_pos: minute_pos + 4]
            minute = int(test)
        except:
            logger.warning(' Could not convert date.')
    # Copy each detector element with non-zero counts into the next free
    # spectrum slot, along with timing, calibration and IC metadata.
    for l in range(n_detector_elements):
        i = int(l)
        j = int(i + wo + 1)
        if np.sum(data[:, i]) > 0.:
            shortname = filename.split('/')
            shortname = shortname[-1]
            shortname = shortname.split('\\')
            shortname = shortname[-1]
            shortname, ext = os.path.splitext(shortname)
            spectra[j].name = shortname.strip()
            spectra[j].used_chan = n_channels
            spectra[j].used = 1
            spectra[j].data[0:spectra[j].used_chan] = data[0:spectra[j].used_chan, i]
            spectra[j].real_time = real_time[l]
            spectra[j].live_time = live_time[l]
            spectra[j].SRcurrent = current[0]
            spectra[j].calib['off'] = calibration['offset']
            spectra[j].calib['lin'] = calibration['slope']
            spectra[j].calib['quad'] = calibration['quad']
            spectra[j].IC[0]['cts'] = counts_us_ic
            # IC[0] = upstream, IC[1] = downstream ion chamber.
            for kk in range(2):
                if kk == 0 : temp = us_amp
                if kk == 1 : temp = ds_amp
                spectra[j].IC[kk]['sens_num'] = float(temp[0])
                spectra[j].IC[kk]['sens_unit'] = float(temp[1])
                spectra[j].IC[kk]['sens_factor'] = float(temp[2])
            # Prefer the sensitivity read from ENVIRONMENT PVs; values may
            # be given either as "<num> <unit>/V" strings or as indices.
            for kk in range(2):
                if 'A/V' in a_unit[kk]:
                    spectra[j].IC[kk]['sens_factor'] = float(a_num[kk])
                    spectra[j].IC[kk]['sens_num'] = float(a_num[kk])
                    if 'pA/' in a_unit[kk] : spectra[j].IC[kk]['sens_unit'] = 0
                    if 'nA/' in a_unit[kk] : spectra[j].IC[kk]['sens_unit'] = 1
                    if 'uA/' in a_unit[kk] : spectra[j].IC[kk]['sens_unit'] = 2
                    if 'mA/' in a_unit[kk] : spectra[j].IC[kk]['sens_unit'] = 3
                else:
                    spectra[j].IC[kk]['sens_unit'] = float(a_unit[kk])
                    # sens_num index -> nominal sensitivity factor table.
                    if (float(a_num[kk]) == 0) : spectra[j].IC[kk]['sens_factor'] = 1
                    if (float(a_num[kk]) == 1) : spectra[j].IC[kk]['sens_factor'] = 2
                    if float(a_num[kk]) == 2 : spectra[j].IC[kk]['sens_factor'] = 5
                    if float(a_num[kk]) == 3 : spectra[j].IC[kk]['sens_factor'] = 10
                    if float(a_num[kk]) == 4 : spectra[j].IC[kk]['sens_factor'] = 20
                    if float(a_num[kk]) == 5 : spectra[j].IC[kk]['sens_factor'] = 50
                    if float(a_num[kk]) == 6 : spectra[j].IC[kk]['sens_factor'] = 100
                    if float(a_num[kk]) == 7 : spectra[j].IC[kk]['sens_factor'] = 200
                    if float(a_num[kk]) == 8 : spectra[j].IC[kk]['sens_factor'] = 500
                    spectra[j].IC[kk]['sens_num'] = spectra[j].IC[kk]['sens_factor']
                spectra[j].IC[kk]['sens_factor'] = float(spectra[j].IC[kk]['sens_factor']) /1000. *np.power(1000.,float(spectra[j].IC[kk]['sens_unit']))
            spectra[j].IC[1]['cts'] = counts_ds_ic
            spectra[j].date['year'] = year
            spectra[j].date['month'] = month
            spectra[j].date['day'] = day
            spectra[j].date['hour'] = hour
            spectra[j].date['minute'] = minute
            #spectra[j].roi['area'] = roi_area[k]
            #spectra[j].roi['pixels'] = roi_pixels[k]
        else:
            # Python 2 long literal -- this module is Python 2 code.
            spectra[j].used_chan = 0L
    return True
def save_spectrum(main_dict, filename, sfilename, logger):
    """Extract the integrated spectrum from an .h5 file and write it as a
    MAPS text spectrum file (the format read back by load_spectrum).

    Parameters
    ----------
    main_dict : dict
        Settings; 'beamline' and 'max_spec_channels' are read.
    filename : str
        Input HDF5 file containing a MAPS group.
    sfilename : str
        Output text file to write.
    logger : logging.Logger

    Returns True on success, False when the HDF5 file cannot be opened or
    lacks the MAPS group; returns None implicitly if the output file
    cannot be opened.

    NOTE(review): Python 2 code (print>> statements). Several review
    flags below: np.float is removed in modern NumPy; the `for kk in
    range(1)` loop never runs the kk == 1 branch (compare range(2) in
    load_spectrum); the 'opeing' typo is in a runtime log message.
    """
    # Get info from .h5 file
    no_specs = 1
    real_time = 0.0
    live_time = 0.0
    srcurrent = 0.0
    uICcts = 0
    dICcts = 0
    # NOTE(review): np.float was removed in NumPy 1.24 -- use float.
    amp = np.zeros((8, 3), dtype=np.float)
    # Map the sens_num index (col 0) to a nominal sensitivity (col 2),
    # then scale by the unit (col 1): pA/V, nA/V, uA/V, mA/V.
    for i in range(8):
        if amp[i, 0] == 0.0: amp[i, 2] = 1.
        if amp[i, 0] == 1.0: amp[i, 2] = 2.
        if amp[i, 0] == 2.0: amp[i, 2] = 5.
        if amp[i, 0] == 3.0: amp[i, 2] = 10.
        if amp[i, 0] == 4.0: amp[i, 2] = 20.
        if amp[i, 0] == 5.0: amp[i, 2] = 50.
        if amp[i, 0] == 6.0: amp[i, 2] = 100.
        if amp[i, 0] == 7.0: amp[i, 2] = 200.
        if amp[i, 0] == 8.0: amp[i, 2] = 500.
        if amp[i, 1] == 0.0: amp[i, 2] = amp[i, 2] / 1000. # pA/V
        if amp[i, 1] == 1.0: amp[i, 2] = amp[i, 2] # nA/V
        if amp[i, 1] == 2.0: amp[i, 2] = amp[i, 2] * 1000. # uA/V
        if amp[i, 1] == 3.0: amp[i, 2] = amp[i, 2] * 1000. * 1000. # mA/V
    # Pick the upstream/downstream amplifier rows for this beamline.
    us_amp = np.zeros(3)
    ds_amp = np.zeros(3)
    if main_dict['beamline'] == '2-ID-D':
        us_amp[:] = amp[1, :]
        ds_amp[:] = amp[3, :]
    if main_dict['beamline'] == '2-ID-E':
        us_amp[:] = amp[0, :]
        ds_amp[:] = amp[1, :]
    if main_dict['beamline'] == 'Bio-CAT':
        us_amp[:] = amp[0, :]
        ds_amp[:] = amp[1, :]
    ic0 = {'cts': 0., 'sens_num': 0., 'sens_unit': 0., 'sens_factor': 0.}
    ic1 = {'cts': 0., 'sens_num': 0., 'sens_unit': 0., 'sens_factor': 0.}
    # NOTE(review): range(1) means kk only takes the value 0, so the
    # kk == 1 branch (ic1 from ds_amp) is dead code -- likely range(2)
    # was intended; confirm before changing.
    for kk in range(1):
        if kk == 0:
            temp = us_amp
            ic0['sens_num'] = float(temp[0])
            ic0['sens_unit'] = float(temp[1])
            ic0['sens_factor'] = float(temp[2])
        if kk == 1:
            temp = ds_amp
            ic1['sens_num'] = float(temp[0])
            ic1['sens_unit'] = float(temp[1])
            ic1['sens_factor'] = float(temp[2])
    # Read time stamp, energy calibration and integrated spectrum from
    # the MAPS group of the HDF5 file.
    ch5 = maps_hdf5.h5(logger)
    fh5 = call_function_with_retry(h5py.File, 5, 0.1, 1.1, (filename, 'r'))
    if fh5 == None:
        # NOTE(review): typo 'opeing' in this runtime log message.
        logger.error('Error opeing file %s', filename)
        return False
    if 'MAPS' not in fh5:
        logger.error('error, hdf5 file does not contain the required MAPS group. I am aborting this action')
        return False
    maps_group_id = fh5['MAPS']
    this_data, valid_read = ch5.read_hdf5_core(maps_group_id, 'scan_time_stamp')
    if valid_read: scan_time_stamp = this_data
    mmcGrp = maps_group_id['make_maps_conf']
    calibration_offset = 0
    this_data, valid_read = ch5.read_hdf5_core(mmcGrp, 'calibration_offset')
    if valid_read: calibration_offset = this_data
    calibration_slope = 0
    this_data, valid_read = ch5.read_hdf5_core(mmcGrp, 'calibration_slope')
    if valid_read: calibration_slope = this_data
    calibration_quad = 0
    this_data, valid_read = ch5.read_hdf5_core(mmcGrp, 'calibration_quad')
    if valid_read: calibration_quad = this_data
    int_spec = []
    this_data, valid_read = ch5.read_hdf5_core(maps_group_id, 'int_spec')
    if valid_read : int_spec = this_data
    fh5.close()
    logger.info('saving - %s', sfilename)
    f = open_file_with_retry(sfilename, 'w')
    if f == None:
        logger.error('-------\nError opening file to write: %s', sfilename)
        return
    #f = open(sfilename, 'w')
    # Write the tag:value header understood by load_spectrum().
    print>>f, 'VERSION: 3.1'
    print>>f, 'ELEMENTS: ' + str(no_specs)
    line = 'DATE: '+ str(scan_time_stamp)
    print>>f, line
    line = 'CHANNELS: '+ str(main_dict['max_spec_channels'])
    print>>f, line
    line = 'REAL_TIME: ' + str(real_time)
    print>>f, line
    line = 'LIVE_TIME: ' + str(live_time)
    print>>f, line
    line = 'SRcurrent: ' + str(srcurrent)
    print>>f, line
    line = 'UPSTREAM_IONCHAMBER: ' + str(uICcts)
    print>>f, line
    line = 'DOWNSTREAM_IONCHAMBER: ' + str(dICcts)
    print>>f, line
    line = 'CAL_OFFSET: ' + str(calibration_offset[0])
    print>>f, line
    line = 'CAL_SLOPE: ' + str(calibration_slope[0])
    print>>f, line
    line = 'CAL_QUAD: ' + str(calibration_quad[0])
    print>>f, line
    # Emit the amplifier sensitivity as ENVIRONMENT PV lines using the
    # PV names of the selected beamline.
    if main_dict['beamline'] == '2-ID-E':
        line = ''
        if ic0['sens_num'] == 0 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="1"'
        if ic0['sens_num'] == 1 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="2"'
        if ic0['sens_num'] == 2 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="5"'
        if ic0['sens_num'] == 3 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="10"'
        if ic0['sens_num'] == 4 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="20"'
        if ic0['sens_num'] == 5 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="50"'
        if ic0['sens_num'] == 6 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="100"'
        if ic0['sens_num'] == 7 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="200"'
        if ic0['sens_num'] == 8 : line = 'ENVIRONMENT: 2xfm:A1sens_num.VAL="500"'
        print>>f, line
        if ic0['sens_unit'] == 0 : line = 'ENVIRONMENT: 2xfm:A1sens_unit.VAL="pA/V"'
        if ic0['sens_unit'] == 1 : line = 'ENVIRONMENT: 2xfm:A1sens_unit.VAL="nA/V"'
        if ic0['sens_unit'] == 2 : line = 'ENVIRONMENT: 2xfm:A1sens_unit.VAL="uA/V"'
        if ic0['sens_unit'] == 3 : line = 'ENVIRONMENT: 2xfm:A1sens_unit.VAL="mA/V"'
        print>>f, line
        if ic1['sens_num'] == 0 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="1"'
        if ic1['sens_num'] == 1 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="2"'
        if ic1['sens_num'] == 2 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="5"'
        if ic1['sens_num'] == 3 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="10"'
        if ic1['sens_num'] == 4 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="20"'
        if ic1['sens_num'] == 5 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="50"'
        if ic1['sens_num'] == 6 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="100"'
        if ic1['sens_num'] == 7 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="200"'
        if ic1['sens_num'] == 8 : line = 'ENVIRONMENT: 2xfm:A2sens_num.VAL="500"'
        print>>f, line
        if ic1['sens_unit'] == 0 : line = 'ENVIRONMENT: 2xfm:A2sens_unit.VAL="pA/V"'
        if ic1['sens_unit'] == 1 : line = 'ENVIRONMENT: 2xfm:A2sens_unit.VAL="nA/V"'
        if ic1['sens_unit'] == 2 : line = 'ENVIRONMENT: 2xfm:A2sens_unit.VAL="uA/V"'
        if ic1['sens_unit'] == 3 : line = 'ENVIRONMENT: 2xfm:A2sens_unit.VAL="mA/V"'
        print>>f, line
    if main_dict['beamline'] == '2-ID-D':
        line = ['']
        if ic0['sens_num'] == 0 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="1"'
        if ic0['sens_num'] == 1 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="2"'
        if ic0['sens_num'] == 2 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="5"'
        if ic0['sens_num'] == 3 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="10"'
        if ic0['sens_num'] == 4 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="20"'
        if ic0['sens_num'] == 5 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="50"'
        if ic0['sens_num'] == 6 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="100"'
        if ic0['sens_num'] == 7 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="200"'
        if ic0['sens_num'] == 8 : line = 'ENVIRONMENT: 2idd:A2sens_num.VAL="500"'
        print>>f, line
        if ic0['sens_unit'] == 0 : line = 'ENVIRONMENT: 2idd:A2sens_unit.VAL="pA/V"'
        if ic0['sens_unit'] == 1 : line = 'ENVIRONMENT: 2idd:A2sens_unit.VAL="nA/V"'
        if ic0['sens_unit'] == 2 : line = 'ENVIRONMENT: 2idd:A2sens_unit.VAL="uA/V"'
        if ic0['sens_unit'] == 3 : line = 'ENVIRONMENT: 2idd:A2sens_unit.VAL="mA/V"'
        print>>f, line
        if ic1['sens_num'] == 0 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="1"'
        if ic1['sens_num'] == 1 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="2"'
        if ic1['sens_num'] == 2 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="5"'
        if ic1['sens_num'] == 3 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="10"'
        if ic1['sens_num'] == 4 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="20"'
        if ic1['sens_num'] == 5 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="50"'
        if ic1['sens_num'] == 6 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="100"'
        if ic1['sens_num'] == 7 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="200"'
        if ic1['sens_num'] == 8 : line = 'ENVIRONMENT: 2idd:A4sens_num.VAL="500"'
        print>>f, line
        if ic1['sens_unit'] == 0 : line = 'ENVIRONMENT: 2idd:A4sens_unit.VAL="pA/V"'
        if ic1['sens_unit'] == 1 : line = 'ENVIRONMENT: 2idd:A4sens_unit.VAL="nA/V"'
        if ic1['sens_unit'] == 2 : line = 'ENVIRONMENT: 2idd:A4sens_unit.VAL="uA/V"'
        if ic1['sens_unit'] == 3 : line = 'ENVIRONMENT: 2idd:A4sens_unit.VAL="mA/V"'
        print>>f, line
    # Spectrum body: one counts value per line.
    print>>f, 'DATA:'
    for ie in range(len(int_spec)):
        print>>f, '%.6f' %(int_spec[ie])
    f.close()
    return True
def _option_a_(main_dict, maps_conf, logger):
    """Section A: run map generation without any per-pixel fitting."""
    logger.info('\n Section A \n')
    #maps_test_xrffly
    maps_conf.use_fit = 0  # ROI-only pass, no spectrum fitting
    # Ensure the output directory tree exists before generating maps.
    check_output_dirs(main_dict, logger)
    make_maps.main(main_dict, logger=logger, force_fit=0, no_fit=True)
def _option_b_(main_dict, maps_conf, maps_def, total_number_detectors, info_elements, logger):
    """Section B: per detector element, extract integrated spectra from the
    (up to 8 largest) .h5 files, fit them, and promote the resulting
    averaged fit-parameter override file to maps_fit_parameters_override.txt.

    Returns the spectra structure from the last detector element processed,
    or None if the output directories could not be created.
    """
    current_directory = main_dict['master_dir']
    for this_detector_element in range(total_number_detectors):
        logger.info('this_detector_element %s total_number_detectors %s', this_detector_element , total_number_detectors)
        # Multi-detector runs use a numeric filename suffix per element.
        if (total_number_detectors > 1):
            suffix = str(this_detector_element)
        else:
            suffix = ''
        # if b then lets load the 4
        # largest img.at files, extract the spectra, and do the fits, then
        # rename the average override file
        imgdat_filenames = []
        main_dict['XRFmaps_dir'] = main_dict['img_dat_dir']
        if len(main_dict['dataset_files_to_proc']) < 1 or main_dict['dataset_files_to_proc'][0] == 'all':
            # No explicit dataset list: scan the img.dat directory for .h5
            # files (excluding raw .mda.h5) and keep the 8 largest.
            files = os.listdir(main_dict['XRFmaps_dir'])
            extension = '.h5' + suffix
            bad_extension = '.mda.h5' + suffix
            for f in files:
                if extension in f.lower() and not bad_extension in f.lower():
                    imgdat_filenames.append(f)
            imgdat_filenames.sort()
            if len(imgdat_filenames) > 8:
                imgdat_filesizes = np.zeros((len(imgdat_filenames)))
                for ii in range(len(imgdat_filenames)):
                    fsize = os.path.getsize(os.path.join(main_dict['img_dat_dir'], imgdat_filenames[ii]))
                    imgdat_filesizes[ii] = fsize
                sorted_index = np.argsort(np.array(imgdat_filesizes))
                # NOTE(review): the comprehension below shadows the outer
                # imgdat_filesizes/imgdat_filenames names; it sorts names by
                # file size ascending, then reverses to descending.
                imgdat_filenames = [imgdat_filenames for (imgdat_filesizes, imgdat_filenames) in sorted(zip(imgdat_filesizes, imgdat_filenames))]
                imgdat_filenames.reverse()
                imgdat_filenames = imgdat_filenames[0:8]
                logger.info('8 largest h5 files: %s', imgdat_filenames)
        else:
            imgdat_filenames = [mdafile.replace('.mda', '.h5') + suffix for mdafile in main_dict['dataset_files_to_proc']]
        main_dict['XRFmaps_names'] = imgdat_filenames
        logger.info('option B processing h5 files: %s', imgdat_filenames)
        main_dict['XRFmaps_id'] = 0
        spectra_filenames = []
        # Get integrated spectra from .h5 files and save them as text files
        for ii in range(len(imgdat_filenames)):
            sfile = os.path.join(main_dict['XRFmaps_dir'], imgdat_filenames[ii])
            this_filename = 'intspec' + imgdat_filenames[ii] + '.txt'
            savefile = os.path.join(main_dict['output_dir'], this_filename)
            if check_output_dirs(main_dict, logger) == False:
                return None
            save_spectrum(main_dict, sfile, savefile, logger)
            spectra_filenames.append(savefile)
        # Load spectra into spectra structure
        spectra = maps_def.define_spectra(main_dict['max_spec_channels'], main_dict['max_spectra'], main_dict['max_ICs'], mode='plot_spec')
        if len(spectra_filenames) == 1:
            load_spectrum(spectra_filenames[0], spectra, logger, append=0)
        if len(spectra_filenames) > 1:
            for iii in range(len(spectra_filenames)):
                load_spectrum(spectra_filenames[iii], spectra, logger)
        calib = maps_calibration.calibration(main_dict, maps_conf, logger)
        # now start the fitting of the integrated spectra we just loaded
        fp = maps_fit_parameters.maps_fit_parameters(logger)
        fitp = fp.define_fitp(main_dict['beamline'], info_elements)
        fitp.g.no_iters = 4
        this_w_uname = "DO_FIT_ALL_W_TAILS"
        #this_w_uname = "DO_MATRIX_FIT"
        dofit_spec = 1
        avg_fitp = fp.define_fitp(main_dict['beamline'], info_elements)
        # if (first_run == 1) and (this_detector_element == 0) :
        #     fitp, avg_fitp, spectra = calib.do_fits(this_w_uname, fitp, dofit_spec, spectra, maxiter = 10, per_pix = 1, generate_img = 1, suffix = suffix, info_elements = info_elements) # do the first fit twice, because the very first spectrum is nevere fitted right (not sure why), need to fix it later
        #     first_run = 0
        #print 'do_fits',type(this_w_uname), type(fitp), type(dofit_spec), type(spectra), type(suffix) , type(info_elements), type(calib), type(calib.do_fits)
        #fitp, avg_fitp, spectra = calib.do_fits(this_w_uname, fitp, dofit_spec, spectra, 1, 1, 500, suffix, info_elements)
        fitp, avg_fitp, spectra = calib.do_fits(this_w_uname, fitp, spectra, maxiter=500, per_pix=1, generate_img=1, suffix=suffix, info_elements=info_elements)
        if fitp != None:
            # Rotate override files: current one is archived with a time
            # stamp, the freshly averaged result becomes the new override.
            avg_res_override_name = os.path.join(current_directory, 'average_resulting_maps_fit_parameters_override.txt')
            #old_override_name = os.path.join(current_directory, 'old_maps_fit_parameters_override.txt')
            old_override_date_name = os.path.join(current_directory, 'old_' + strftime("%Y-%m-%d_%H-%M-%S", gmtime()) + '_maps_fit_parameters_override.txt' + suffix)
            #old_override_suffix_date_name = os.path.join(current_directory, 'old_' + strftime("%Y-%m-%d_%H-%M-%S", gmtime()) + '_maps_fit_parameters_override.txt' + suffix)
            #maps_override_suffix_name = os.path.join(current_directory, 'maps_fit_parameters_override.txt' + suffix)
            maps_override_name = os.path.join(current_directory, 'maps_fit_parameters_override.txt' + suffix)
            logger.info('total_num detectors = %s', total_number_detectors)
            try:
                if os.path.isfile(maps_override_name):
                    logger.info('renaming %s to %s', maps_override_name, old_override_date_name)
                    os.rename(maps_override_name, old_override_date_name)
                if os.path.isfile(avg_res_override_name):
                    logger.info('renaming %s to %s', avg_res_override_name, maps_override_name)
                    os.rename(avg_res_override_name, maps_override_name)
            except:
                logger.error('error renaming average_resulting_maps_fit_parameters_override to maps_fit_parameters_override')
        # Archive the contents of 'output' into 'output_old' (clearing any
        # previous archive first).
        dirlist = os.listdir(current_directory)
        if 'output_old' in dirlist:
            filelist = os.listdir(os.path.join(current_directory, 'output_old'))
            for fl in filelist:
                thisfile = os.path.join(os.path.join(current_directory, 'output_old'), fl)
                try:
                    os.remove(thisfile)
                except:
                    logger.exception("error removing: "+thisfile)
        else:
            os.makedirs(os.path.join(current_directory, 'output_old'))
        #todo: create directory if it does not exist
        #Copy files to output_fits
        src_files = os.listdir(os.path.join(current_directory, 'output'))
        for fn in src_files:
            if fn == "Thumbs.db":
                continue
            full_file_name = os.path.join(os.path.join(current_directory, 'output'), fn)
            if os.path.isfile(full_file_name):
                shutil.copy(full_file_name, os.path.join(current_directory, 'output_old'))
                try:
                    os.remove(full_file_name)
                except:
                    logger.exception("error removing "+full_file_name)
    return spectra
def _option_c_(main_dict, logger):
    """Section C: run map generation with per-pixel fitting forced on, then
    archive everything in 'output' to 'output.fits'.

    Overrides USE_FIT from maps_settings.txt by passing force_fit=1.
    """
    logger.info('\n Section C \n')
    current_directory = main_dict['master_dir']
    check_output_dirs(main_dict, logger)
    #Call make_maps and force fitting. Overrides USE_FIT in maps_setting.txt
    make_maps.main(main_dict, logger=logger, force_fit=1, no_fit=False)
    dirlist = os.listdir(current_directory)
    if 'output.fits' not in dirlist:
        os.makedirs(os.path.join(current_directory, 'output.fits'))
    #Copy files to output_fits
    src_files = os.listdir(os.path.join(current_directory, 'output'))
    for file_name in src_files:
        if file_name == "Thumbs.db":
            continue
        full_file_name = os.path.join(current_directory, 'output', file_name)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, os.path.join(current_directory, 'output.fits'))
            # Guard the removal like _option_b_ does: a locked/readonly
            # file should not abort the whole archive pass.
            try:
                os.remove(full_file_name)
            except:
                logger.exception("error removing " + full_file_name)
def _option_d_(logger):
logger.error('Image extraction not implemented.')
# main_dict['XRFmaps_dir'] = main_dict['img_dat_dir']
# files = os.listdir(main_dict['XRFmaps_dir'])
# imgdat_filenames = []
# extension = '.h5'
# for f in files:
# if extension in f.lower():
# imgdat_filenames.append(f)
#
# no_files = len(imgdat_filenames)
# current_directory = main_dict['master_dir']
# main_dict['XRFmaps_names'] = imgdat_filenames
#
#
# main_dict['XRFmaps_id'] = 0
#
# temp_string = []
# try:
# f = open('maps_fit_parameters_override.txt', 'rt')
# for line in f:
# if ':' in line :
# slist = line.split(':')
# tag = slist[0]
# value = ''.join(slist[1:])
#
#
# if tag == 'ELEMENTS_TO_FIT' :
# temp_string = value.split(',')
# temp_string = [x.strip() for x in temp_string]
#
#
# f.close()
# except:
# print 'Could not read maps_fit_parameters_override.txt'
#
# test_string = ['abs_ic', 'H_dpc_cfg', 'V_dpc_cfg', 'phase']
# for istring in temp_string:
# test_string.append(istring)
# test_string.append('s_a')
#
# maps_tools.extract_all(main_dict, test_string)
def maps_batch(wdir, option_a_roi_plus, option_b_extract_spectra, option_c_per_pixel, option_d_image_extract, option_e_exchange_format, option_g_avg_hdf, logger):
    """Run the selected MAPS processing stages for the dataset in `wdir`.

    Each option_* flag (>0 enables) selects a stage:
      a - ROI/ROI+ fitting, b - integrated-spectrum fitting, c - per-pixel
      fitting, d - image extraction (not implemented), e - exchange-format
      output, g - averaged HDF generation (auto-enabled by stages a and c).

    Raises ValueError if `wdir` does not exist, maps_settings.txt cannot be
    opened, or any enabled stage reported an error.
    """
    verbose = True
    time_started = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    # Remove quotation marks, if any, around wdir.
    # BUGFIX: str.strip() returns a new string; the original discarded it.
    wdir = wdir.strip('"')
    if "'" in wdir:
        wdir = wdir[1:-1]
    wdir = os.path.normpath(wdir)
    if verbose:
        logger.info('working directory =%s', wdir)
    if not os.path.exists(wdir):
        logger.error('Error - Directory %s does not exist. Please specify working directory.', wdir)
        # BUGFIX: the original raised the literal, unformatted '%s' message.
        raise ValueError('Directory %s does not exist' % wdir)
    # Default processing configuration; maps_settings.txt below may override.
    main_dict = {'mapspy_version': '1.2',
                 'maps_date': '01. March, 2013',
                 'beamline': '2-ID-E',
                 'S_font': '',
                 'M_font': '',
                 'L_font': '',
                 'master_dir': wdir,
                 'output_dir': os.path.join(wdir, 'output'),
                 'output_fits': os.path.join(wdir, 'output.fits'),
                 'img_dat_dir': os.path.join(wdir, 'img.dat'),
                 'line_dat_dir': os.path.join(wdir, 'line.dat'),
                 'xanes_dat_dir': os.path.join(wdir, 'xanes.dat'),
                 'fly_dat_dir': os.path.join(wdir, 'fly.dat'),
                 'mda_dir': os.path.join(wdir, 'mda'),
                 'pca_dir': os.path.join(wdir, 'pca.dat'),
                 'XRFmaps_dir': os.path.join(wdir, 'img.dat'),
                 'XRFmaps_names': [''],
                 'XRFmaps_id': 0,
                 'print_annotations': 1,
                 'black_background': 0,
                 # Plain ints (py2 int/long unify arithmetically; 'L' suffix dropped).
                 'max_spec_channels': 2048,
                 'max_spectra': 4096,
                 'max_ICs': 6,
                 'standard_filenames': [],
                 'total_number_detectors': 1,
                 'max_no_processors_files': 1,
                 'max_no_processors_lines': -1,
                 'write_hdf': 0,
                 'use_fit': 0,
                 'quick_dirty': 0,
                 'xrf_bin': 0,
                 'nnls': 0,
                 'detector_to_start_with': 0,
                 'xanes_scan': 0,
                 'dataset_files_to_proc': ['all'],
                 'version': 0}
    # Override defaults from maps_settings.txt ('TAG : value' lines).
    maps_settingsfile = 'maps_settings.txt'
    try:
        sfilepath = os.path.join(main_dict['master_dir'], maps_settingsfile)
        f = open_file_with_retry(sfilepath, 'rt')
        for line in f:
            try:
                if ':' in line:
                    slist = line.split(':')
                    tag = slist[0]
                    value = ''.join(slist[1:])
                    if tag == 'VERSION':
                        main_dict['version'] = float(value)
                    elif tag == 'DETECTOR_ELEMENTS':
                        main_dict['total_number_detectors'] = int(value)
                    elif tag == 'MAX_NUMBER_OF_FILES_TO_PROCESS':
                        main_dict['max_no_processors_files'] = int(value)
                    elif tag == 'MAX_NUMBER_OF_LINES_TO_PROCESS' or tag == 'MAX_NUMBER_OF_PROCESSORS_TO_USE':
                        main_dict['max_no_processors_lines'] = int(value)
                    elif tag == 'WRITE_HDF5':
                        main_dict['write_hdf'] = int(value)
                    elif tag == 'USE_FIT':
                        main_dict['use_fit'] = int(value)
                    elif tag == 'QUICK_DIRTY':
                        main_dict['quick_dirty'] = int(value)
                    elif tag == 'XRF_BIN':
                        main_dict['xrf_bin'] = int(value)
                    elif tag == 'NNLS':
                        main_dict['nnls'] = int(value)
                    elif tag == 'XANES_SCAN':
                        main_dict['xanes_scan'] = int(value)
                    elif tag == 'DETECTOR_TO_START_WITH':
                        main_dict['detector_to_start_with'] = int(value)
                    elif tag == 'BEAMLINE':
                        main_dict['beamline'] = str(value).strip()
                    elif tag == 'STANDARD':
                        main_dict['standard_filenames'].append(str(value).strip())
                    elif tag == 'DatasetFilesToProc':
                        main_dict['dataset_files_to_proc'] = str(value).replace('\\', '/').strip().split(',')
            except:
                # A malformed line must not abort the whole batch.
                logger.warning('Error parsing tag [%s] values [%s]', tag, value)
        f.close()
    except:
        raise ValueError('maps_batch: Could not open maps_settings.txt.')
    error_occured = False
    me = maps_elements.maps_elements(logger)
    info_elements = me.get_element_info()
    maps_def = maps_definitions.maps_definitions(logger)
    maps_conf = maps_def.set_maps_definitions(main_dict['beamline'], info_elements)
    logger.info('main_dict beamline: %s maps_config version: %s', main_dict['beamline'], str(main_dict['version']))
    select_beamline(main_dict, maps_conf, main_dict['beamline'], logger)
    logger.info('total number of detectors: %s', main_dict['total_number_detectors'])
    try:
        # Section a converts mda to h5 and does ROI and ROI+ fits
        if option_a_roi_plus > 0:
            _option_a_(main_dict, maps_conf, logger)
            option_g_avg_hdf = 1
    except:
        error_occured = True
        logger.exception("Error occured for roi plus fitting")
    spectra = None
    try:
        # Section b loads 8 largest h5 files, fits them and saves fit parameters
        if option_b_extract_spectra > 0:
            spectra = _option_b_(main_dict, maps_conf, maps_def, main_dict['total_number_detectors'], info_elements, logger)
    except:
        error_occured = True
        logger.exception("Error occured for integrated spectra")
    try:
        # Section c converts mda to h5 files and does ROI/ROI+/FITS
        if option_c_per_pixel > 0:
            _option_c_(main_dict, logger)
            # enable g option if we are performing c
            option_g_avg_hdf = 1
    except:
        error_occured = True
        logger.exception("Error occured for per pixel fitting")
    # Section d extracts images
    if option_d_image_extract > 0:
        _option_d_(logger)
    try:
        if option_g_avg_hdf > 0:
            avg_start = datetime.now()
            # Generate average images (only meaningful with several detectors).
            if main_dict['total_number_detectors'] > 1:
                logger.info(' we are now going to create the maps_generate_average...')
                n_channels = 2048
                if option_b_extract_spectra > 0:
                    energy_channels = None
                    #energy_channels = spectra[0].calib['off'] + spectra[0].calib['lin'] * np.arange((n_channels), dtype=np.float)
                else:
                    energy_channels = None
                makemaps = maps_generate_img_dat.analyze(logger, info_elements, main_dict, maps_conf)
                makemaps.generate_average_img_dat(main_dict, maps_conf, energy_channels)
            avg_end = datetime.now()
            total_roi_time = avg_end - avg_start
            total_time_str = '\n\n %%%%%%%% avg total time = ' + str(total_roi_time.total_seconds()) + ' %%%%%%% \n\n'
            logger.info(total_time_str)
    except:
        error_occured = True
        logger.exception("Error occured generating avg")
    # Section e adds exchange information
    if option_e_exchange_format > 0:
        logger.info('Adding exchange information')
        ch5 = maps_hdf5.h5(logger)
        ch5.add_exchange(main_dict, maps_conf)
    logger.info('time started: %s', time_started)
    logger.info('time finished: %s', strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    if error_occured:
        logger.error('Raise value error.')
        raise ValueError('Error occured while processing fitting')
if __name__ == '__main__':
dirct = sys.argv[1]
print 'Processing directory ', dirct
global _log_name
_log_name = log_name = 'Job_' + datetime.strftime(datetime.now(), "%y_%m_%d_%H_%M_%S")
logger, fHandler = setup_logger(_log_name + '.log')
a = 0
b = 0
c = 0
d = 0
e = 0
g = 0
options, extraParams = getopt.getopt(sys.argv[2:], 'abcdefg', ['a', 'b', 'c', 'd', 'f', 'g', 'full'])
for opt, arg in options:
if opt in ('-a', '--a'):
a = 1
elif opt in ('-b', '--b'):
b = 1
elif opt in ('-c', '--c'):
c = 1
elif opt in ('-d', '--d'):
d = 1
elif opt in ('-e', '--e'):
e = 1
elif opt in ('-g', '--g'):
g = 1
elif opt in ('--full'):
a = 1
b = 1
c = 1
d = 1
e = 1
maps_batch(wdir=dirct, option_a_roi_plus=a, option_b_extract_spectra=b, option_c_per_pixel=c, option_d_image_extract=d, option_e_exchange_format=e, option_g_avg_hdf=g, logger=logger)
|
import time
import math
import random
import numpy
from diffusion_model import DiffusionModel
# Monte-Carlo tree search (UCT) parameters for the epidemic-control simulation.
n = 30       # Number of nodes
m = 2        # Number of initially infected nodes
ep = 0.1     # Edge probability in ER model
p = 0.5      # Probability that an infected node infects a susceptible node
q = 0.2      # Probability that an infected node appears healthy
tsim = 10    # Time budget (seconds) for simulations at each step
c = 0.8      # Exploration constant (UCB1)
ntrials = 50 # Number of trials
MIN_VALUE = -(n+1)  # sentinel strictly below the minimum achievable value (-n)
def get_key(infected, resistant):
    """Return a hashable (frozenset, frozenset) state key for the search tree."""
    return frozenset(infected), frozenset(resistant)
def search(model, search_tree):
    """Run MCTS simulations for roughly `tsim` clock-seconds and return
    (best_target, number_of_simulations_run).

    Returns (None, 0) when the observed epidemic has already stabilized.
    """
    observed_infected, resistant = model.get_observed_infected(), model.get_resistant()
    if model.has_stabilized(observed_infected, resistant):
        # Observed model has stabilized; take no action for one round
        return None, 0
    # Perform simulations and update the search tree
    nsim = 0
    # NOTE(review): time.clock() is deprecated (removed in Python 3.8) and
    # measures CPU time on Unix but wall time on Windows — confirm intent.
    start_time = time.clock()
    while time.clock() - start_time < tsim:
        for i in range(100):  # batch 100 simulations per clock check
            simulate(model, observed_infected, resistant, search_tree)
        nsim += 100
    root = get_key(observed_infected, resistant)
    max_value = MIN_VALUE
    best_target = None
    # Pick the action whose estimated value is highest.
    # NOTE(review): unexplored candidates keep value None; this relies on the
    # Python 2 ordering rule that None compares less than any int.
    for target in search_tree[root]['candidates']:
        value = search_tree[root]['candidates'][target]['value']
        if value > max_value:
            max_value = value
            best_target = target
    return best_target, nsim
def simulate(model, infected, resistant, search_tree):
    """One UCT simulation from state (infected, resistant); returns the sampled value.

    On first visit the node is expanded and evaluated with a random rollout;
    otherwise a child is chosen by the UCB1 rule and the function recurses,
    updating visit counts and running-average values on the way back up.
    """
    if model.has_stabilized(infected, resistant):
        return 0
    state = get_key(infected, resistant)
    candidates = set(range(n)).difference(resistant)
    if state not in search_tree:
        # First visit: expand this node, then estimate it with a rollout.
        search_tree[state] = {'count': 0, 'candidates': {}}
        for target in candidates:
            search_tree[state]['candidates'][target] = {'count': 0, 'value': None}
        return rollout(model, infected, resistant)
    tree_node = search_tree[state]
    best_target = None
    if tree_node['count'] < len(candidates):  # If there are unexplored branches, pick the first one
        for target in candidates:
            if tree_node['candidates'][target]['count'] == 0:
                best_target = target
                break
    else:  # Otherwise, pick the branch with the highest UCB1 score
        # BUGFIX: scores can be negative (branch values can be as low as -n),
        # so initializing max_score to -1 could leave best_target as None and
        # crash below; start from -inf instead.
        max_score = float('-inf')
        for target in candidates:
            target_branch = tree_node['candidates'][target]
            # NOTE(review): the exploitation term divides by len(candidates)
            # rather than the branch visit count — confirm this is intended.
            score = float(target_branch['value']) / len(candidates) \
                + c * math.sqrt(math.log(tree_node['count'])/target_branch['count'])
            if score > max_score:
                max_score = score
                best_target = target
    best_target_branch = tree_node['candidates'][best_target]
    infected_, resistant_, reward = model.simulate_step(infected, resistant, best_target)
    value_ = reward + simulate(model, infected_, resistant_, search_tree)
    tree_node['count'] += 1
    best_target_branch['count'] += 1
    if best_target_branch['value'] is None:
        best_target_branch['value'] = value_
    else:
        # Incremental running average of the branch value.
        best_target_branch['value'] = best_target_branch['value'] \
            + float(value_ - best_target_branch['value']) / best_target_branch['count']
    return value_
def rollout(model, infected, resistant):
    """Random playout: target uniformly-random non-resistant nodes until the
    epidemic stabilizes; return the total accumulated reward."""
    total_reward = 0
    while not model.has_stabilized(infected, resistant):
        eligible = tuple(set(range(n)) - resistant)
        pick = random.choice(eligible)
        infected, resistant, reward = model.simulate_step(infected, resistant, pick)
        total_reward += reward
    return total_reward
def run_once():
    """Play one full episode against a fresh DiffusionModel.

    Returns (model.get_value(), total_number_of_simulations_run).
    """
    model = DiffusionModel(n, m, ep, p, q)
    tree = {}
    total_sims = 0
    while not model.has_stabilized():
        target, sims = search(model, tree)
        model.step(target)
        total_sims += sims
    return model.get_value(), total_sims
def main():
    """Run `ntrials` independent episodes and report per-trial and mean results."""
    values = []
    for j in range(ntrials):
        # get_value() apparently returns (infected_count, resistant_count, value)
        # — inferred from this unpacking; confirm against DiffusionModel.
        (infected_count, resistant_count, value), nsim_total = run_once()
        print "Simulations: %d; infected: %d; resistant: %d; value: %d" % \
            (nsim_total, infected_count, resistant_count, value)
        values.append(value)
    print "Mean value: %f; stdev: %f" % (numpy.mean(values), numpy.std(values))
# Script entry point: run the full batch of trials when executed directly.
if __name__ == "__main__":
    main()
|
import os
import cgi
import datetime
import time
import copy
from validators import *
from uliweb.i18n import gettext_lazy as _
from uliweb.core.html import Buf, Tag, begin_tag, u_str
from widgets import *
from layout import *
from uliweb.utils.storage import Storage
from uliweb.utils import date
from uliweb.utils.common import request_url, safe_str, get_uuid
# Module-level form rendering defaults.
DEFAULT_FORM_CLASS = 'form'
REQUIRED_CAPTION = '*'          # marker rendered next to required field labels
REQUIRED_CAPTION_AFTER = True   # place the marker after (True) or before the label
DEFAULT_ENCODING = 'utf-8'
DEFAULT_LABEL_DELIMETER = ':'
ERR_CONVERT = _("Can't convert %r to %s.")
# Raised when a field name collides with an attribute already defined on Form.
class ReservedWordError(Exception):pass
# Raised by BaseField.init_rules for an unknown validation rule name.
class RuleNotFound(Exception):pass
__id = 0  # module-private counter backing get_id()
def capitalize(s):
    """Turn an underscore_name into a 'Title Case' label."""
    return ' '.join(word.capitalize() for word in s.split('_'))
def get_id():
    """Return the next value of the module-wide auto-increment id counter."""
    global __id
    __id += 1
    return __id
class D(dict):
    """Dict with attribute-style access; missing attributes read as None."""

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails; a missing key
        # yields None instead of raising.
        return self.get(key)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            self.pop(key)
        except KeyError as k:
            raise AttributeError(k)
def check_reserved_word(f):
    """Raise ReservedWordError if `f` clashes with an attribute of Form."""
    if f in dir(Form):
        raise ReservedWordError(
            "Cannot define property using reserved word '%s'. " % f
        )
class FieldProxy(object):
    """Per-instance view of a field bound to a concrete form.

    Gives templates access to the rendered label, help text, current error
    message, HTML markup and the submitted/default data value of a field.
    """

    def __init__(self, form, field):
        self.form = form
        self.field = field

    @property
    def label(self):
        return self.get_label()

    def get_label(self, _class=None):
        # Checkbox labels carry no trailing delimiter; everything else does.
        use_delimeter = self.field.__class__ is not BooleanField
        return self.field.get_label(_class=_class, delimeter=use_delimeter)

    @property
    def help_string(self):
        return self.field.get_help_string(_class='description')

    @property
    def error(self):
        return self.form.errors.get(self.field.field_name, '')

    @property
    def html(self):
        current = self.form.data.get(self.field.field_name, self.field.default)
        return self.field.html(current, self.form.ok)

    def __str__(self):
        return self.html

    def _get_data(self):
        # NOTE(review): data access is keyed by field.name while error/html use
        # field.field_name; preserved as-is — confirm upstream.
        return self.form.data.get(self.field.name, self.field.default)

    def _set_data(self, value):
        self.form.data[self.field.name] = value

    data = property(_get_data, _set_data)
class BaseField(object):
    # Base class for all form fields. Subclasses customise the widget
    # (default_build), the coercion callable (default_datatype / to_python)
    # and the JSON type tag (type_name).
    default_build = Text        # widget class used to render the field
    field_css_class = ''        # CSS class applied when the caller supplies none
    default_validators = []     # validators shared by all instances of the class
    default_datatype = None     # callable used by to_python() when set
    creation_counter = 0        # class-wide counter preserving declaration order
    type_name = 'str'           # type tag reported by to_json()
    def __init__(self, label='', default=None, required=False, validators=None,
                 name='', html_attrs=None, help_string='', build=None, datatype=None,
                 multiple=False, idtype=None, static=False, placeholder='',
                 hidden=False, rules=None, range=False, **kwargs):
        self.label = label
        self._default = default
        self.validators = validators or []
        self.name = name
        self.required = required
        self.kwargs = kwargs
        self.html_attrs = html_attrs or {}
        self.datatype = datatype or self.default_datatype
        self.idtype = idtype
        self.static = static          # render as read-only <span> instead of a widget
        self.hidden = hidden          # force rendering as a hidden input
        self.rules = rules or {}
        self.range = range            # range fields imply multiple values
        _cls = ''
        if '_class' in self.html_attrs:
            _cls = '_class'
        elif 'class' in self.html_attrs:
            _cls = 'class'
        if not _cls:
            # No class supplied by the caller: fall back to the field default.
            self.html_attrs['class'] = ' '.join([self.field_css_class])
        self.placeholder = placeholder
        self.multiple = multiple or range
        self.build = build or self.default_build
        self.help_string = help_string
        # Record declaration order so forms can sort fields as written.
        BaseField.creation_counter += 1
        self.creation_counter = BaseField.creation_counter
        if 'id' in self.kwargs:
            self._id = self.kwargs.pop('id')
        else:
            self._id = None
    def _get_http_attrs(self):
        # Inject the placeholder into the rendered HTML attributes.
        if self.placeholder:
            self.html_attrs['placeholder'] = self.placeholder
    def _get_default(self):
        return self._default
    default = property(_get_default)
    def clone(self):
        # Deep-copied twin; used when a subclass form inherits parent fields.
        b = object.__new__(self.__class__)
        b.__dict__ = copy.deepcopy(self.__dict__)
        return b
    def to_python(self, data):
        """
        Convert raw submitted data to its python value via `datatype`;
        None passes through unchanged.
        """
        if data is None:
            return data
        if self.datatype:
            return self.datatype(data)
        else:
            return data
    def html(self, data='', py=True):
        """
        Render the field as HTML. `py=True` means `data` is a python value
        that must first be serialized with to_html().
        """
        if py:
            value = self.to_html(data)
        else:
            value = data
        if self.static:
            return str('<span class="value">%s</span>' % safe_str(value))
        else:
            if self.hidden:
                build = Hidden
            else:
                build = self.build
            self._get_http_attrs()
            return str(build(name=self.name, value=value, id=self.id, **self.html_attrs))
    def get_label(self, delimeter=True, label=None, **kwargs):
        # Build the <label> tag, adding the delimiter and the required marker.
        if label is None:
            if self.label is None:
                label = capitalize(self.name)
            else:
                label = self.label
        if not label:
            return ''
        if delimeter and DEFAULT_LABEL_DELIMETER:
            label += DEFAULT_LABEL_DELIMETER
        if self.required and not self.static:
            if REQUIRED_CAPTION_AFTER:
                label += str(Tag('span', REQUIRED_CAPTION, _class='field_required'))
            else:
                label = str(Tag('span', REQUIRED_CAPTION, _class='field_required')) + label
        return str(Tag('label', label, _for=self.id, newline=False, **kwargs))
    def get_help_string(self, **kwargs):
        if self.help_string:
            return safe_str(self.help_string)
        else:
            return ''
    @property
    def id(self):
        # Explicit id wins; otherwise derive from the name or a fresh counter
        # according to `idtype` (falsy idtype disables the id attribute).
        if self._id:
            return self._id
        else:
            if self.idtype == 'name':
                id = 'field_' + self.name
            elif self.idtype:
                id = 'field_' + str(get_id())
            else:
                id = None
            return id
    def parse_data(self, request, all_data):
        # Pull this field's raw value from one or more request-like objects
        # (anything with get/getlist/getall); the first non-None value wins.
        if not isinstance(request, (tuple, list)):
            request = [request]
        for r in request:
            v = None
            if self.multiple:
                if hasattr(r, 'getlist'):
                    func = getattr(r, 'getlist')
                else:
                    func = getattr(r, 'getall')
                v = all_data[self.name] = func(self.name)
            else:
                v = all_data[self.name] = r.get(self.name, None)
            if v is not None:
                break
    def get_data(self, all_data):
        return all_data.get(self.name, None)
    def to_html(self, data):
        # Default HTML serialization: unicode string, '' for None.
        if data is None:
            return ''
        return u_str(data)
    def to_json(self, value=None):
        # Describe the field declaratively (for client-side form builders).
        d = {'name':self.name, 'type':self.type_name, 'label':self.label}
        if hasattr(self, 'choices'):
            choices = self.get_choices(value)
        else:
            choices = []
        if choices:
            d['type'] = 'select'
            d['choices'] = choices
        if self.placeholder:
            d['placeholder'] = self.placeholder
        if self.html_attrs:
            d['attrs'] = self.html_attrs
        if self.range:
            d['range'] = self.range
        if self.help_string:
            d['help_string'] = self.help_string
        if self.required:
            d['required'] = True
        return d
    def validate(self, data, all_data=None):
        """
        Validate and convert `data`. Returns (True, converted_value) on
        success or (False, error_message) on failure; an empty optional
        field short-circuits to (True, default).
        """
        all_data = all_data or {}
        # Normalise uploaded-file objects: some frameworks expose `.stream`.
        if hasattr(data, 'stream'):
            data.file = data.stream
        if hasattr(data, 'file'):
            if data.file:
                v = data.filename
            else:
                raise Exception, 'Unsupport type %s' % type(data)
        else:
            v = data
        msg = TEST_NOT_EMPTY()(v)
        if self.required:
            if msg:
                return False, msg
        else:
            if msg:
                return True, self.default
        try:
            if isinstance(data, list):
                v = []
                for i in data:
                    v.append(self.to_python(i))
                data = v
            else:
                data = self.to_python(data)
        except:
            return False, unicode(ERR_CONVERT) % (data, self.__class__.__name__)
        for v in self.get_validators():
            msg = v(data, all_data)
            if msg:
                return False, msg
        return True, data
    def get_validators(self):
        # Class-level defaults first, then per-instance validators.
        for v in self.default_validators + self.validators:
            yield v
    def __property_config__(self, form_class, field_name):
        # Called by the owning Form class when the field is attached to it.
        self.form_class = form_class
        self.field_name = field_name
        if not self.name:
            self.name = field_name
        self.init_rules(self.rules)
    def init_rules(self, rules):
        # Translate declarative rules into server-side validators and/or
        # client-side front_rules entries. A 'name:front' key restricts the
        # rule to the client; a plain name applies to both sides.
        for k, v in rules.items():
            rule_message = ''
            x = k.split(':')
            if isinstance(v, (tuple, list)):
                rule_value = v[0]
                rule_message = v[1]
            else:
                rule_value = v
            if len(x) == 1:
                front = True
                end = True
                rule_name = k
            else:
                rule_name = x[0]
                if x[1] == 'front':
                    front = True
                    end = False
                else:
                    front = False
                    end = True
            if end:
                if rule_name == 'required':
                    self.required = True
                validator_cls = rules_mapping.get(rule_name)
                if validator_cls:
                    validator = validator_cls(rule_value, message=rule_message, field=self)
                    self.validators.append(validator)
                    if not rule_message:
                        rule_message = validator.get_message()
                else:
                    raise RuleNotFound('Rule is not found.')
            if front:
                r = self.form_class.front_rules['rules'].setdefault(self.name, {})
                r[rule_name] = rule_value
                if rule_message:
                    m = self.form_class.front_rules['messages'].setdefault(self.name, {})
                    m[rule_name] = rule_message
    def __get__(self, model_instance, model_class):
        # Descriptor protocol: class access returns the field itself,
        # instance access returns a bound FieldProxy.
        if model_instance is None:
            return self
        else:
            return FieldProxy(model_instance, self)
    def __set__(self, model_instance, value):
        raise Exception('Virtual property is read-only')
class StringField(BaseField):
    """Field holding a byte-string value (Python 2 `str`)."""
    default_datatype = str

    def __init__(self, label='', default='', required=False, validators=None, name='', html_attrs=None, help_string='', build=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)

    def to_python(self, data):
        """Coerce `data` to an encoded byte string ('' for None)."""
        if data is None:
            return ''
        if isinstance(data, unicode):
            return data.encode(DEFAULT_ENCODING)
        return str(data)
class UnicodeField(BaseField):
    """Field holding a unicode text value."""
    type_name = 'unicode'

    def __init__(self, label='', default='', required=False, validators=None, name='', html_attrs=None, help_string='', build=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)

    def to_python(self, data):
        """Coerce `data` to unicode (u'' for None), decoding bytes as needed."""
        if data is None:
            return u''
        return data if isinstance(data, unicode) else unicode(data, DEFAULT_ENCODING)
class PasswordField(StringField):
    # String field rendered with a password input widget.
    default_build = Password
    type_name = 'password'
class HiddenField(StringField):
    # String field rendered as a hidden input.
    default_build = Hidden
    type_name = 'hidden'
class ListField(StringField):
    # Field whose text value is parsed into a list of items.
    type_name = 'list'
    def __init__(self, label='', default=None, required=False, validators=None, name='', delimeter=', ', html_attrs=None, help_string='', build=None, datatype=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)
        self.delimeter = delimeter
        self._default = default or []
        # Set after BaseField.__init__ so a None datatype stays None instead
        # of falling back to default_datatype (str).
        self.datatype = datatype
    def to_python(self, data):
        # Split the raw text into items: one per line for textareas, else on
        # any run of delimiter characters.
        import re
        if issubclass(self.build, TextArea):
            result = [x for x in data.splitlines()]
        else:
            # NOTE(review): delimiter chars are interpolated unescaped into a
            # regex character class — special chars like ']' would break it.
            result = [x for x in re.split('[%s]+' % self.delimeter, data)]
        if self.datatype:
            if self.datatype is str:
                result = map(safe_str, result)
            else:
                result = map(self.datatype, result)
        return result
    def to_html(self, data):
        # Render one item per line for textareas, else delimiter-joined.
        if issubclass(self.build, TextArea):
            return '\n'.join([u_str(x) for x in data])
        else:
            return self.delimeter.join([u_str(x) for x in data])
class TextField(StringField):
    """Multi-line text field rendered as a <textarea>."""
    default_build = TextArea
    type_name = 'text'
    def __init__(self, label='', default='', required=False, validators=None, name='', html_attrs=None, help_string='', build=None, rows=4, cols=None, convert_html=False, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)
        self.rows = rows
        self.cols = cols
        self.convert_html = convert_html  # unescape '&amp;' entities when rendering
    def html(self, data='', py=True):
        value = data
        # BUGFIX: the replace() call was a no-op ('&' -> '&'); per the original
        # 2011-8-20 note by limodou it should unescape '&amp;' back to '&'.
        if self.convert_html:
            value = value.replace('&amp;', '&')
        return str(self.build(value, id='field_'+self.name, name=self.name, rows=self.rows, cols=self.cols, **self.html_attrs))
    def to_python(self, data):
        """
        Convert submitted data to the configured datatype (str or unicode),
        returning '' for None.
        """
        if data is None:
            return ''
        if isinstance(data, self.datatype):
            return data
        if self.datatype is unicode:
            return unicode(data, DEFAULT_ENCODING)
        else:
            return data.encode(DEFAULT_ENCODING)
class TextLinesField(TextField):
    """Textarea whose value is a list with one converted item per line."""
    type_name = 'lines'
    def __init__(self, label='', default=None, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, datatype=str, rows=4, cols=None, **kwargs):
        TextField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, rows=rows, cols=cols, **kwargs)
        self._default = default or []
        self.datatype = datatype
    def to_python(self, data):
        # One converted item per input line.
        return [self.datatype(x) for x in data.splitlines()]
    def to_html(self, data):
        return '\n'.join([u_str(x) for x in data])
    def html(self, data='', py=True):
        if data is None:
            value = ''
        else:
            value = '\n'.join([u_str(x) for x in data])
        # BUGFIX: the replace() call was a no-op ('&' -> '&'); per the original
        # 2011-8-20 note by limodou it should unescape '&amp;' back to '&'.
        if self.convert_html:
            value = value.replace('&amp;', '&')
        return str(self.build(value, id='field_'+self.name, name=self.name, rows=self.rows, cols=self.cols, **self.html_attrs))
class BooleanField(BaseField):
    """Checkbox field: parses common truthy strings and renders on/off state."""
    default_build = Checkbox
    field_css_class = 'checkbox'
    type_name = 'bool'

    def __init__(self, label='', default=False, name='', html_attrs=None, help_string='', build=None, required=False, **kwargs):
        # A checkbox is never forced required and takes no extra validators.
        BaseField.__init__(self, label=label, default=default, required=False, validators=None, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)

    def to_python(self, data):
        return data.lower() in ('on', 'true', 'yes', 'ok')

    def html(self, data, py=True):
        if data:
            return str(self.build(checked=None, id='field_'+self.name, name=self.name, **self.html_attrs))
        return str(self.build(id='field_'+self.name, name=self.name, **self.html_attrs))

    def to_html(self, data):
        # Checkbox serializes True as 'on', anything else as ''.
        return 'on' if data is True else ''

    def validate(self, data, all_data=None):
        """Missing checkbox data means unchecked (False), not an error."""
        if data is None:
            return True, False
        return super(BooleanField, self).validate(data, all_data)
class IntField(BaseField):
    """Integer field; accepts numeric strings, including float notation."""
    default_build = Number
    type_name = 'int'

    def __init__(self, label='', default=0, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)

    def to_python(self, data):
        # Go through float first so inputs like '3.0' still parse.
        return int(float(data))

    def to_html(self, data):
        return '' if data is None else str(data)
class FloatField(BaseField):
    """Floating-point field."""
    type_name = 'float'

    def __init__(self, label='', default=0.0, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)

    def to_python(self, data):
        return float(data)

    def to_html(self, data):
        return '' if data is None else str(data)
class SelectField(BaseField):
    # Drop-down selection field; `choices` may be a list or a callable.
    default_build = Select
    type_name = 'select'
    def __init__(self, label='', default=None, choices=None, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, empty='', size=10, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)
        self.choices = choices or []
        self.empty = empty  # label of the synthetic empty option ('' = unlabeled; None disables it)
        self.size = size    # visible rows when rendered as a multi-select
        if self.multiple:
            self._default = default or []
        else:
            self._default = default or None
    def get_choices(self, value=None):
        # `value` is accepted for subclass overrides; unused here.
        if callable(self.choices):
            return self.choices()
        else:
            return self.choices
    def html(self, data='', py=True):
        choices = self.get_choices(data)[:]
        # Prepend an empty option for single-selects when the default value
        # is not among the choices.
        if (self.empty is not None) and (not self.multiple):
            group = False
            if choices:
                # 3-tuples appear to mean grouped options (group, value, label)
                # — inferred from the indexing below; confirm against Select.
                if len(choices[0]) > 2:
                    group = True
                    c = [(x[1], x[2]) for x in choices]
                else:
                    c = choices
                if (not self.default in dict(c)):
                    if group:
                        choices.insert(0, (choices[0][0], '', self.empty))
                    else:
                        choices.insert(0, ('', self.empty))
        return str(self.build(choices, data, id=self.id, name=self.name, multiple=self.multiple, size=self.size, **self.html_attrs))
    def to_json(self, value=None):
        # Declarative description, including resolved choices and ajax URL.
        d = {'name': self.name, 'type': self.type_name, 'label': self.label,
             'choices': self.get_choices(value), 'multiple':self.multiple,
             'placeholder': self.placeholder, 'attrs':self.html_attrs}
        if 'data-url' in self.html_attrs:
            d['data-url'] = self.html_attrs['data-url']
        elif 'url' in self.html_attrs:
            d['data-url'] = self.html_attrs['url']
        return d
class RadioSelectField(SelectField):
    # Select field rendered as a group of radio buttons.
    default_build = RadioSelect
    type_name = 'radios'
class CheckboxSelectField(SelectField):
    # Multi-select rendered as a group of checkboxes; multiple defaults to True.
    default_build = CheckboxSelect
    type_name = 'checkboxes'
    def __init__(self, label='', default=None, choices=None, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, multiple=None, **kwargs):
        # Explicit multiple=False is honoured; only None upgrades to True.
        multiple = multiple if multiple is not None else True
        SelectField.__init__(self, label=label, default=default, choices=choices, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, multiple=multiple, **kwargs)
class FileField(BaseField):
    """File-upload field; to_python() wraps the upload in a D record."""
    default_build = File
    type_name = 'file'

    def __init__(self, label='', upload_to=None, upload_to_sub=None, **kwargs):
        BaseField.__init__(self, label=label, **kwargs)
        self.upload_to = upload_to          # save destination rule
        self.upload_to_sub = upload_to_sub  # optional sub-directory rule

    def to_python(self, data):
        """Return a D with the upload's 'filename', 'file' and byte 'size'."""
        info = D({})
        info['filename'] = os.path.basename(data.filename)
        info['file'] = data.file
        # Measure size by seeking to the end, then rewind for later readers.
        data.file.seek(0, os.SEEK_END)
        info['size'] = data.file.tell()
        data.file.seek(0, os.SEEK_SET)
        return info

    def html(self, data, py=True):
        # File inputs never carry a value attribute.
        return str(self.build(name=self.name, id=self.id, **self.html_attrs))
class ImageField(FileField):
    # File field restricted to image uploads, optionally bounded by `size`.
    type_name = 'image'
    def __init__(self, label='', default='', required=False, validators=None, name='', html_attrs=None, help_string='', build=None, size=None, **kwargs):
        FileField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)
        self.size = size
        # Validate that the upload is an image, then check its dimensions.
        self.validators.append(TEST_IMAGE(next=TEST_IMAGE_SIZE(size)))
class _BaseDatetimeField(StringField):
    # Shared behavior for date/time/datetime fields; subclasses pick the
    # uliweb.utils.date conversion function via `time_func`.
    time_func = 'to_date'
    def __init__(self, label='', default=None, required=False, validators=None, name='', html_attrs=None, help_string='', build=None, format=None, **kwargs):
        BaseField.__init__(self, label=label, default=default, required=required, validators=validators, name=name, html_attrs=html_attrs, help_string=help_string, build=build, **kwargs)
        self.format = format  # explicit parse format; None lets date.* decide
    def _get_default(self):
        # The sentinel string 'now' resolves to the current date/time per read.
        if self._default == 'now':
            return getattr(date, self.time_func)(date.now())
        else:
            return self._default
    default = property(_get_default)
    def to_python(self, data):
        try:
            return getattr(date, self.time_func)(data, format=self.format)
        except ValueError:
            # Python 2 raise syntax kept as-is.
            raise Exception, _("The date is not a valid date format.")
    def to_html(self, data):
        if data:
            return date.to_string(data, timezone=False)
        else:
            return ''
class DateField(_BaseDatetimeField):
    # Date-only field (uses the inherited 'to_date' converter).
    field_css_class = 'field_date'
    type_name = 'date'
class TimeField(_BaseDatetimeField):
    # Time-only field.
    field_css_class = 'field_time'
    time_func = 'to_time'
    type_name = 'time'
class DateTimeField(_BaseDatetimeField):
    # Combined date+time field.
    field_css_class = 'field_datetime'
    time_func = 'to_datetime'
    type_name = 'datetime'
class FormMetaclass(type):
    # Collects BaseField declarations into `fields`/`fields_list` in
    # declaration order and inherits (cloned) fields from the first base.
    def __init__(cls, name, bases, dct):
        cls.fields = {}
        cls.fields_list = []
        cls.rules = dct.get('rules', {})
        # Only the first base contributes inherited fields; clones keep the
        # subclass independent of the parent's field instances.
        for base in bases[:1]:
            if hasattr(base, 'fields'):
                # NOTE(review): loop variable shadows the `name` parameter.
                for name, field in base.fields.iteritems():
                    new_field = field.clone()
                    cls.add_field(name, new_field)
        # Sort declared fields by creation order (Python 2 cmp-style sort).
        fields_list = [(k, v) for k, v in dct.items() if isinstance(v, BaseField)]
        fields_list.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))
        for (field_name, obj) in fields_list:
            cls.add_field(field_name, obj)
class FormBuild(object):
    """Holds the rendered pieces of a form and joins them for display."""
    # Sections in output order; falsy sections are skipped.
    _sections = ('pre_html', 'begin', 'body', 'buttons_line', 'end', 'post_html')

    def __str__(self):
        parts = []
        for section in self._sections:
            piece = getattr(self, section)
            if piece:
                parts.append(str(piece))
        return '\n'.join(parts)
# Maps type_name strings to field classes, for building forms from
# declarative descriptions.
fields_mapping = {
    'str':StringField,
    'string':StringField,
    'select':SelectField,
    'text':TextField,
    'unicode':UnicodeField,
    'lines':TextLinesField,
    'password':PasswordField,
    'hidden':HiddenField,
    'int':IntField,
    'list':ListField,
    'radios':RadioSelectField,
    'image':ImageField,
    'float':FloatField,
    'file':FileField,
    'bool':BooleanField,
    'checkboxes':CheckboxSelectField,
    'date':DateField,
    'time':TimeField,
    'datetime':DateTimeField,
}
# Maps declarative rule names (see BaseField.init_rules) to validator
# factories from the validators module.
rules_mapping = {
    'required':TEST_NOT_EMPTY,
    'email':TEST_EMAIL,
    'url':TEST_URL,
    'equalTo':TEST_EQUALTO,
    'in':TEST_IN,
    'image':TEST_IMAGE,
    'minlength':TEST_MINLENGTH,
    'maxlength':TEST_MAXLENGTH,
    'rangelength':TEST_RANGELENGTH,
    'min':TEST_MIN,
    'max':TEST_MAX,
    'range':TEST_RANGE,
    'date':TEST_DATE,
    'datetime':TEST_DATETIME,
    'time':TEST_TIME,
    'number':TEST_NUMBER,
    'digits':TEST_DIGITS,
}
class Form(object):
    """Base form class; declared BaseField attributes are collected by
    FormMetaclass into `fields` / `fields_list`.

    Class-level attributes act as defaults and may be overridden per
    instance through __init__ keyword arguments.
    NOTE: Python 2 metaclass hook (`__metaclass__`).
    """
    __metaclass__ = FormMetaclass

    layout_class = BootstrapLayout
    layout = None
    layout_class_args = {}
    fieldset = False
    form_action = ''
    form_method = 'POST'
    form_buttons = None
    form_title = None
    form_class = None
    # NOTE(review): evaluated once at class-definition time, so every
    # instance shares this default id unless one is passed to __init__
    form_id = 'form_' + get_uuid()[:5]
    rules = {}
    front_rules = {'rules':{}, 'messages':{}}

    def __init__(self, action=None, method=None, buttons=None,
            validators=None, html_attrs=None, data=None, errors=None,
            idtype='name', title='', vars=None, layout=None,
            id=None, _class='', **kwargs):
        self.form_action = action or self.form_action or request_url()
        self.form_method = method or self.form_method
        self.form_title = title or self.form_title
        self.form_class = _class or self.form_class
        self.kwargs = kwargs
        # default to a single Submit button when none is supplied
        buttons = buttons or self.form_buttons or [str(Button(value=_('Submit'), _class="btn btn-primary", name="submit", type="submit"))]
        if buttons:
            if isinstance(buttons, (tuple, list)):
                self._buttons = list(buttons)
            else:
                self._buttons = [buttons]
        self.validators = validators or []
        self.html_attrs = html_attrs or {}
        # allow '_class' as an alias for the reserved word 'class'
        if '_class' in self.html_attrs:
            self.html_attrs['class'] = self.html_attrs.pop('_class')
        self.idtype = idtype
        self.layout = layout or self.layout
        self.vars = vars
        # propagate the id-generation strategy to every field
        for name, obj in self.fields_list:
            obj.idtype = self.idtype
        if self.form_class:
            self.html_attrs['class'] = self.form_class# + ' ' + DEFAULT_FORM_CLASS
        if 'class' not in self.html_attrs:
            self.html_attrs['class'] = ''
        self.form_class = self.html_attrs.get('class')
        self.form_id = id or self.html_attrs.get('id') or self.form_id
        self.bind(data or {}, errors or {})
        self.__init_validators()
        self.ok = True

    @classmethod
    def add_field(cls, field_name, field, attribute=False):
        """Register *field* on the class under *field_name*.

        Merges any class-level rules for the field and optionally also
        exposes the field as a class attribute.
        """
        if isinstance(field, BaseField):
            check_reserved_word(field_name)
            cls.fields[field_name] = field
            rules = cls.rules.get(field_name)
            if rules:
                field.rules.update(rules)
            field.__property_config__(cls, field_name)
            if attribute:
                setattr(cls, field_name, field)
            cls.fields_list.append((field_name, field))

    def __init_validators(self):
        # hook up per-field validate_<name> methods and the optional
        # whole-form form_validate method
        for k, obj in self.fields.items():
            func = getattr(self, 'validate_%s' % obj.field_name, None)
            if func and callable(func):
                obj.validators.insert(0, func)
        func = getattr(self, 'form_validate', None)
        if func and callable(func):
            self.validators.append(func)

    def validate(self, *data):
        """Parse the given data sources and run field + form validators.

        Populates self.data and self.errors; returns self.ok.
        """
        old_data = self.data.copy()

        all_data = {}
        for k, v in self.fields.items():
            #skip static field
            if v.static: continue
            v.parse_data(data, all_data)

        errors = D({})
        new_data = {}

        #gather all fields
        for field_name, field in self.fields.items():
            if field.static: continue
            new_data[field_name] = field.get_data(all_data)

        #validate and gather the result
        # result = D({})
        result = D(new_data.copy())
        for field_name, field in self.fields.items():
            if field.static: continue
            flag, value = field.validate(new_data[field_name], result)
            if not flag:
                if isinstance(value, dict):
                    errors.update(value)
                else:
                    errors[field_name] = value
            else:
                result[field_name] = value

        if not errors and self.validators:
            #validate global
            for v in self.validators:
                r = v(result)
                if r:
                    errors.update(r)

        if errors:
            self.ok = False
            self.errors.update(errors)
            # keep the raw (unvalidated) data so it can be re-rendered
            self.data = new_data
        else:
            self.ok = True
            self.errors = {}
            self.data = result
        #the data of static field will be put into parsed data
        for k, v in self.fields.items():
            if v.static and k in old_data:
                self.data[k] = old_data[k]
        return self.ok

    def __str__(self):
        return self.html()

    def _repr_html_(self):
        # IPython/Jupyter rich display hook
        return self.html()

    @property
    def form_begin(self):
        """Opening <form> tag; switches to multipart encoding when the
        form contains a FileField."""
        args = self.html_attrs.copy()
        args['action'] = self.form_action
        args['method'] = self.form_method
        for field_name, field in self.fields.items():
            if isinstance(field, FileField):
                args['enctype'] = "multipart/form-data"
                break
        return begin_tag('form', **args)

    @property
    def form_end(self):
        return '</form>\n'

    def get_buttons(self):
        return self._buttons

    def bind(self, data=None, errors=None):
        """Attach data and error dicts to the form (for rendering)."""
        if data is not None:
            self.data = data
        if errors is not None:
            self.errors = errors

    def html(self):
        """Render the whole form via the configured layout class."""
        cls = get_form_layout_class(self.layout_class)
        layout = cls(self, self.layout, **self.layout_class_args)
        pre_html = self.pre_html() if hasattr(self, 'pre_html') else ''
        body = layout.html()
        post_html = self.post_html() if hasattr(self, 'post_html') else ''
        return ''.join([str(x) for x in [pre_html,body,post_html]])

    @property
    def build(self):
        """Return a FormBuild holding the individually rendered pieces."""
        cls = get_form_layout_class(self.layout_class)
        layout = cls(self, self.layout, **self.layout_class_args)
        result = FormBuild()
        result.pre_html = self.pre_html() if hasattr(self, 'pre_html') else ''
        result.begin = layout.begin()
        result.body = layout.hiddens() + layout.body()
        result.buttons = layout.buttons()
        result.buttons_line = layout.buttons_line()
        result.end = layout.end()
        result.post_html = self.post_html() if hasattr(self, 'post_html') else ''
        return result

    def get_json(self):
        """Serialize the form definition + data for client-side use."""
        return {
            'fields': self.get_fields(),
            'layout': self.get_layout(),
            'data': self.data,
            'rules': self.front_rules['rules'],
            'messages': self.front_rules['messages'],
        }

    def get_fields(self):
        # JSON description of each field, paired with its current value
        s = []
        for f in self.fields_list:
            f[1].name = f[0]
            d = f[1].to_json(self.data.get(f[0]))
            s.append(d)
        return s

    def get_layout(self):
        # fall back to a single row containing all fields in order
        if self.layout:
            return self.layout
        layout = []
        for x in self.fields_list:
            layout.append(x[0])
        layout = [tuple(layout)]
        return layout
def get_field_cls(type, default=None):
    """Look up the Field class registered under *type*.

    Unknown type names fall back to *default* (or StringField).
    """
    fallback = default or StringField
    return fields_mapping.get(type, fallback)
def make_field(type, **kwargs):
    """Create a Field instance for the given *type* name.

    Remaining keyword arguments are passed to the Field constructor.
    """
    field_cls = get_field_cls(type)
    return field_cls(**kwargs)
def make_form(fields=None, layout=None, layout_class=None, base_class=None,
        get_form_field=None, name=None, rules=None, **kwargs):
    """
    Make a form class according to dict data:

    {'fields':[
        {'name':'name', 'type':'str', 'label':'label',
         'rules':{
            'required':
            'email'
            'required:back|front' #back means server side, front means front side
         }
         ...},
        ...
        ],
     #layout_class should be defined in settings.ini, just like
     #[FORM_LAYOUT_CLASSES]
     #bs3 = '#{appname}.form_help.Bootstrap3Layout'
     #it can also be a Layout class
     #default is BootstrapLayout
     'layout_class':'bs3',
     'layout':{
         'rows':[
            '-- legend title --',
            'field_name',
            ['group_fieldname', 'group_fieldname']
            {'name':'name', 'colspan':3}
         ],
     }
     'base_class':'form class; if not given, Form is used'
    }

    get_form_field is a callback function, used to define a customized
    field class; if it returns a falsy value the field is built from the
    definition itself.
    If a name is given, the resulting class can be cached under it.
    """
    from uliweb.utils.sorteddict import SortedDict

    get_form_field = get_form_field or (lambda name, f:None)

    #make fields
    props = SortedDict({})
    for f in fields or []:
        if isinstance(f, BaseField):
            # an already-built field object may be used directly
            props[f.name] = get_form_field(f.name, f) or f
        else:
            # otherwise build the field from its dict definition
            props[f['name']] = get_form_field(f['name'], f) or make_field(**f)

    #set other props
    if layout:
        props['layout'] = layout
    if layout_class:
        props['layout_class'] = layout_class
    if rules:
        props['rules'] = rules
    layout_class_args = kwargs.pop('layout_class_args', None)
    if layout_class_args:
        props['layout_class_args'] = layout_class_args

    # dynamically create the form class; FormMetaclass collects the fields
    cls = type(name or 'MakeForm_', (base_class or Form,), props)
    return cls
def get_form(formcls):
    """Resolve *formcls* (a class path string or a class object) to a
    Form class via the 'FORMS' configuration section."""
    from uliweb.utils.common import get_configrable_object

    return get_configrable_object(formcls, 'FORMS', Form)
def get_form_layout_class(form_layout_class):
    """Resolve *form_layout_class* (a class path string or a class object)
    to a Layout class via the 'FORM_LAYOUT_CLASSES' configuration section."""
    from uliweb.utils.common import get_configrable_object

    return get_configrable_object(form_layout_class, 'FORM_LAYOUT_CLASSES', Layout)
|
from __future__ import division, print_function, unicode_literals, absolute_import

import hycohanz as hfss

# Interactive hycohanz walkthrough: connect to HFSS, create a project and a
# DrivenModal design, define parametric circle properties, draw the circle,
# assign a PerfectE boundary to it, then quit.
# NOTE(review): raw_input is Python 2 only, despite the __future__ imports.

raw_input('Press "Enter" to connect to HFSS.>')
[oAnsoftApp, oDesktop] = hfss.setup_interface()

raw_input('Press "Enter" to create a new project.>')
oProject = hfss.new_project(oDesktop)

raw_input('Press "Enter" to insert a new DrivenModal design named HFSSDesign1.>')
oDesign = hfss.insert_design(oProject, "HFSSDesign1", "DrivenModal")

raw_input('Press "Enter" to set the active editor to "3D Modeler" (The default and only known correct value).>')
oEditor = hfss.set_active_editor(oDesign)

# the circle is fully parameterized through design properties
raw_input('Press "Enter" to insert some circle properties into the design.>')
hfss.add_property(oDesign, "xcenter", hfss.Expression("1m"))
hfss.add_property(oDesign, "ycenter", hfss.Expression("2m"))
hfss.add_property(oDesign, "zcenter", hfss.Expression("3m"))
hfss.add_property(oDesign, "diam", hfss.Expression("1m"))

raw_input('Press "Enter" to draw a circle using the properties.>')
hfss.create_circle(oEditor, hfss.Expression("xcenter"),
                   hfss.Expression("ycenter"),
                   hfss.Expression("zcenter"),
                   hfss.Expression("diam")/2)

raw_input('Press "Enter" to assign a PerfectE boundary condition on the circle.>')
hfss.assign_perfect_e(oDesign, "PerfectE1", [10])

raw_input('Press "Enter" to quit HFSS.>')
hfss.quit_application(oDesktop)

# release the COM handles in reverse order of creation
del oEditor
del oDesign
del oProject
del oDesktop
del oAnsoftApp
|
import logging
import nltk
from nltk.corpus import wordnet
__author__ = 'ilov3'
logger = logging.getLogger(__name__)
def wordnet_pos_code(tag):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Returns None for tags outside nouns / verbs / adjectives / adverbs.
    """
    tag_groups = (
        (('NN', 'NNS', 'NNP', 'NNPS'), wordnet.NOUN),
        (('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'), wordnet.VERB),
        (('JJ', 'JJR', 'JJS'), wordnet.ADJ),
        (('RB', 'RBR', 'RBS'), wordnet.ADV),
    )
    for tags, pos in tag_groups:
        if tag in tags:
            return pos
    return None
def get_nltk_stop_words():
    """Return NLTK's English stopword list as a set."""
    english_stopwords = nltk.corpus.stopwords.words('english')
    return set(english_stopwords)
def get_most_common_words(text, words_count, remove_stopwords=False):
    """Return the *words_count* most frequent lemmatized tokens in *text*.

    Each result entry is a dict {'word': ..., 'count': ...}.
    NOTE(review): the counted items are (lemma, POS-tag) tuples, so
    'word' holds a tuple, not a bare string -- confirm callers expect this.
    """
    # NLTK's default stopwords
    stopwords = get_nltk_stop_words()
    # Making a list of tagged words
    tagged_words = nltk.word_tokenize(text)
    tagged_words = nltk.pos_tag(tagged_words)
    # Make all words lower case
    # tagged_words = [(tagged_word[0].lower(), tagged_word[1]) for tagged_word in tagged_words]
    # Remove single-character tokens (mostly punctuation)
    tagged_words = [tagged_word for tagged_word in tagged_words if len(tagged_word[0]) > 1]
    # Remove numbers
    tagged_words = [tagged_word for tagged_word in tagged_words if not tagged_word[0].isnumeric()]
    # Remove stopwords
    if remove_stopwords:
        tagged_words = [tagged_word for tagged_word in tagged_words if tagged_word[0] not in stopwords]
    # Lemmatize using the WordNet POS derived from the Penn tag
    lemmatizer = nltk.stem.WordNetLemmatizer()
    words = []
    for tagged_word in tagged_words:
        pos = wordnet_pos_code(tagged_word[1])
        # Ignoring all words, except nouns, verbs, adjectives and adverbs
        if pos is not None:
            words.append((lemmatizer.lemmatize(tagged_word[0], pos=pos), tagged_word[1]))
    # Calculate frequency distribution
    fdist = nltk.FreqDist(words)
    # Return the top words_count entries
    res = []
    for word, frequency in fdist.most_common(words_count):
        word_dict = {}
        word_dict['word'] = word
        word_dict['count'] = frequency
        res.append(word_dict)
    return res
|
from nltk.tree import ParentedTree
class ParseTree(ParentedTree):
    """ParentedTree subclass with helpers for binary parse trees.

    NOTE(review): get_hash indexes children [0] and [1] directly, so it
    assumes every non-leaf node has exactly two children -- confirm the
    input trees are binarized.
    """
    def get_hash(self, T = None):
        # Serialize the (binary) tree into a canonical string key.
        if T is None:
            T = self
        if isinstance(T, ParseTree):
            return T.label() + '(' + self.get_hash(T[0]) + ',' + self.get_hash(T[1]) + ')'
        else:
            # leaf: represented by its length -- presumably a token string
            return str(len(T))

    def __deepcopy__(self, memo = None):
        # delegate to nltk Tree.copy(deep=True)
        return self.copy(True)

    def count_left_of(self, pos):
        """Number of leaves strictly left of the node at tree position *pos*."""
        if not pos:
            return 0
        if pos[-1] == 1:
            # we are a right child: count the left sibling's leaves
            if isinstance(self[pos[:-1]][0], ParseTree):
                add = len(self[pos[:-1]][0].leaves())
            else:
                add = 1
        else:
            add = 0
        return add + self.count_left_of(pos[:-1])

    def count_right_of(self, pos):
        """Number of leaves strictly right of the node at tree position *pos*."""
        if not pos:
            return 0
        if pos[-1] == 0:
            # we are a left child: count the right sibling's leaves
            if isinstance(self[pos[:-1]][1], ParseTree):
                add = len(self[pos[:-1]][1].leaves())
            else:
                add = 1
        else:
            add = 0
        return add + self.count_right_of(pos[:-1])

    def get_first_left(self, pos):
        """Position of the nearest left-sibling subtree above *pos*, or ().

        NOTE(review): `pos[:-1] + [0]` requires *pos* to be a list; nltk
        tree positions are usually tuples -- confirm callers pass lists.
        """
        if not pos:
            return ()
        if pos[-1] == 1:
            return pos[:-1] + [0]
        else:
            return self.get_first_left(pos[:-1])

    def get_first_right(self, pos):
        """Position of the nearest right-sibling subtree above *pos*, or ()."""
        if not pos:
            return ()
        if pos[-1] == 0:
            return pos[:-1] + [1]
        else:
            return self.get_first_right(pos[:-1])
|
import genpy
from rospy.rostime import Time, Duration
from python_qt_binding.QtCore import QTranslator
from abstract_item import AbstractItem
from helper_functions import prepare_number_for_representation, MAXIMUM_OFFLINE_TIME, ROUND_DIGITS
class ConnectionItem(AbstractItem):
    """
    A ConnectionItem represents the connection between a publisher and a
    subscriber and the topic they are publishing / listening on.
    """

    def __init__(self, logger, seuid, first_message, parent=None):
        """
        Initializes the ConnectionItem.

        :param logger: a logger where to log when special events occur
        :type logger: ModelLogger
        :param seuid: the seuid of the item
        :type seuid: str
        :param first_message: first message seen on this connection (unused here)
        :param parent: the parent-item
        :type parent: AbstractItem
        """
        AbstractItem.__init__(self, logger, seuid, parent)
        self.__parent = parent
        self._type = "connection"

        # how each attribute is aggregated: summed, averaged, or maximized
        self.add_keys = ["dropped_msgs", "traffic"]
        self.avg_keys = ["period_mean", "period_stddev", "stamp_age_mean", "stamp_age_stddev", "bandwidth", "frequency"]
        self.max_keys = ["period_max", "stamp_age_max"]

        self._attributes = []
        self._attributes.extend(["dropped_msgs", "traffic",
                                 "period_mean", "period_stddev", "period_max", "stamp_age_mean",
                                 "stamp_age_stddev", "stamp_age_max", "bandwidth", "frequency"])

        for item in self._attributes:
            self._add_data_list(item)

        # each raw attribute gets three rated counterparts
        for item in self._attributes:
            self._rated_attributes.append(item + ".actual_value")
            self._rated_attributes.append(item + ".expected_value")
            self._rated_attributes.append(item + ".state")

        for item in self._rated_attributes:
            self._add_rated_data_list(item)

        self._logger.log("info", Time.now(), seuid, "Created a new ConnectionItem")

        self.show_as_subscriber = False
        self.tree_item1 = None
        self.tree_item2 = None

    def aggregate_data(self, period):
        """
        Aggregate the stored data over the last *period* seconds.

        :param period: The amount in seconds over which the data should be aggregated.
        :return: dict mapping attribute name to its aggregated value
        """
        values = {}
        for key in self._attributes:
            values[key] = 0

        # clamp the window so we never ask for entries before time zero
        entries = self.get_items_younger_than(Time.now() - (Duration(secs=period) if int(Duration(secs=period).to_sec()) <= int(Time.now().to_sec()) else Time(0)))

        length = len(entries["window_stop"]) if entries["window_stop"] else 0

        if length > 0:
            for key in self.add_keys:
                for i in range(0, length):
                    values[key] += entries[key][i]
            for key in self.max_keys:
                # Time/Duration entries have to be converted via to_sec()
                if type(entries[key][-1]) == genpy.rostime.Time or type(entries[key][-1]) == genpy.rostime.Duration:
                    for i in range(0, length):
                        if entries[key][i].to_sec() > values[key]:
                            values[key] = entries[key][i].to_sec()
                else:
                    for i in range(0, length):
                        if entries[key][i] > values[key]:
                            values[key] = entries[key][i]
            for key in self.avg_keys:
                if type(entries[key][0]) is genpy.rostime.Time or type(entries[key][0]) is genpy.rostime.Duration:
                    for i in range(0, length):
                        values[key] += entries[key][i].to_sec()
                else:
                    for i in range(0, length):
                        values[key] += entries[key][i]
                values[key] = values[key] / length

        return values

    def execute_action(self, action):
        """
        Not sensible; a Connection cannot execute actions.

        :param action: action to be executed
        :type action: RemoteAction
        """
        pass

    def get_detailed_data(self):
        """
        Returns the detailed data of the ConnectionItem as an HTML snippet.

        :returns: str
        """
        data_dict = self.get_latest_data()
        if Time.now() - data_dict["window_stop"] > Duration(secs=5):
            return "No recent data"

        content = "<p class=\"detailed_data\">"
        content += self.get_erroneous_entries()
        if "frequency" in self._attributes:
            content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
                       + " " + self.tr("frequency_unit") + " <br>"
        content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) + " " \
                   + self.tr("dropped_msgs_unit") + " <br>"
        content += self.tr("bandwidth") + ": " + prepare_number_for_representation(data_dict["bandwidth"]) + " " \
                   + " " + self.tr("bandwidth_unit") + " <br>"
        content += self.tr("period_mean") + ": " + prepare_number_for_representation(data_dict["period_mean"]) \
                   + " " + self.tr("period_mean_unit") + " <br>"
        content += self.tr("period_stddev") + ": " + prepare_number_for_representation(data_dict["period_stddev"]) \
                   + " " + self.tr("period_stddev_unit") + " <br>"
        content += self.tr("period_max") + ": " + prepare_number_for_representation(data_dict["period_max"]) + " " \
                   + self.tr("period_max_unit") + " <br>"
        content += self.tr("stamp_age_mean") + ": " + prepare_number_for_representation(data_dict["stamp_age_mean"]) \
                   + " " + self.tr("stamp_age_mean_unit") + " <br>"
        content += self.tr("stamp_age_stddev") + ": " + prepare_number_for_representation(data_dict["stamp_age_stddev"]) \
                   + " " + self.tr("stamp_age_stddev_unit") + " <br>"
        content += self.tr("stamp_age_max") + ": " + prepare_number_for_representation(data_dict["stamp_age_max"]) \
                   + " " + self.tr("stamp_age_max_unit") + " <br>"
        content += "</p>"

        return content

    def get_plotable_items(self):
        """
        Returns the attribute names that can be plotted.

        :returns: str[]
        """
        return ["dropped_msgs", "bandwidth", "frequency", "period_mean", "period_stddev", "period_max", "stamp_age_mean",
                "stamp_age_stddev", "stamp_age_max"]

    def get_short_data(self):
        """
        Returns a shortened version of the item data.

        :returns: data of the item
        :rtype: str
        """
        data_dict = self.get_latest_data()
        if data_dict["window_stop"] == Time(0):
            return "No data yet"
        elif (Time.now() - data_dict["window_stop"]) > Duration(MAXIMUM_OFFLINE_TIME):
            # last entry was more than MAXIMUM_OFFLINE_TIME ago, it could be offline!
            return "No data since " + prepare_number_for_representation(Time.now() - data_dict["window_stop"]) \
                   + " seconds"

        content = ""
        # bug fix: compare string content with ==, not identity (`is`), which
        # only worked by accident of string interning
        if data_dict["state"] == "error":
            content += self.get_erroneous_entries_for_log()
        else:
            content += self.tr("frequency") + ": " + prepare_number_for_representation(data_dict["frequency"]) \
                       + " " + self.tr("frequency_unit") + " - "
            content += self.tr("bandwidth") + ": " + prepare_number_for_representation(
                data_dict["bandwidth"]) + " " \
                       + self.tr("bandwidth_unit") + " - "
            content += self.tr("dropped_msgs") + ": " + prepare_number_for_representation(data_dict["dropped_msgs"]) \
                       + " " + self.tr("dropped_msgs_unit")

        return content

    def get_list_items(self):
        """Attributes shown as plain lists (none for connections)."""
        return []

    def get_time_items(self):
        """Attributes holding time-valued data."""
        return ["period_mean", "period_stddev", "period_max", "stamp_age_mean",
                "stamp_age_stddev", "stamp_age_max"]
|
from platform import node
class Node(object):
    """A singly linked list node holding *data* and the *next* reference."""
    def __init__(self, data, next):
        self.data = data
        self.next = next


class SingleList(object):
    """A singly linked list with O(1) append.

    Tracks head, tail and length. An optional iterable of initial values
    is appended in order.
    """
    def __init__(self, values=None):
        self.head = None
        self.tail = None
        self.len = 0
        if values is not None:
            for val in values:
                self.append(val)

    def __str__(self):
        # e.g. "[1->2->None]"; an empty list renders as "[None]"
        parts = ["["]
        current_node = self.head
        while current_node is not None:
            parts.append(str(current_node.data) + "->")
            current_node = current_node.next
        parts.append("None]")
        return "".join(parts)

    def __iter__(self):
        return SingleListIter(self.head)

    def __len__(self):
        return self.len

    def append(self, data):
        """Append *data* at the tail in O(1)."""
        node = Node(data, None)
        if self.head is None:
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node
        self.len += 1

    def remove(self, node_value):
        """Remove every node whose data equals *node_value*.

        Bug fixes vs. the previous implementation:
        - the 'previous' pointer was advanced even after unlinking a node,
          which left the second of two adjacent matching nodes in place;
        - self.len was never decremented;
        - self.tail was never updated when the tail node was removed.
        """
        current_node = self.head
        previous_node = None
        while current_node is not None:
            if current_node.data == node_value:
                if previous_node is not None:
                    previous_node.next = current_node.next
                else:
                    # removing the head
                    self.head = current_node.next
                if current_node is self.tail:
                    self.tail = previous_node
                self.len -= 1
                # do NOT advance previous_node: it still precedes the
                # node that follows the one just removed
            else:
                previous_node = current_node
            current_node = current_node.next
class SingleListIter(object):
    """Iterator over a chain of nodes linked via their .next attribute."""
    def __init__(self, node):
        self.node = node

    def __iter__(self):
        # added so the iterator satisfies the full iterator protocol
        # (e.g. iter(it) and direct use in a for-loop now work)
        return self

    def __next__(self):
        if self.node is None:
            raise StopIteration()
        data = self.node.data
        self.node = self.node.next
        return data
|
# Sphinx configuration for the Simon documentation.
import sys, os

# make the package importable for autodoc
sys.path.insert(0, os.path.abspath('..'))

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'Simon'
copyright = u'2013, Andy Dirnberger'
version = '0.8'      # short X.Y version
release = '0.8.0'    # full version string

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# NOTE: overridden below by the custom 'forkme_nature' theme
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'Simondoc'

latex_elements = {
}
latex_documents = [
    ('index', 'Simon.tex', u'Simon Documentation',
     u'Andy Dirnberger', 'manual'),
]
man_pages = [
    ('index', 'simon', u'Simon Documentation',
     [u'Andy Dirnberger'], 1)
]
texinfo_documents = [
    ('index', 'Simon', u'Simon Documentation',
     u'Andy Dirnberger', 'Simon', 'One line description of project.',
     'Miscellaneous'),
]

# use the custom theme shipped in the local _themes directory
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'forkme_nature'

intersphinx_mapping = {
    'pymongo': ('http://api.mongodb.org/python/current/', None),
}
|
"""
Classe for reading data in CED spike2 files (.smr).
This code is based on:
- sonpy, written by Antonio Gonzalez <Antonio.Gonzalez@cantab.net>
Disponible here ::
http://www.neuro.ki.se/broberger/
and sonpy come from :
- SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
King's College London. See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html
This IO support old (<v6) and new files (>v7) of spike2
Depend on:
Supported : Read
Author: sgarcia
"""
from .baseio import BaseIO
from ..core import *
from .tools import create_many_to_one_relationship
import numpy as np
from numpy import dtype, zeros, fromstring, empty
import quantities as pq
import os, sys
PY3K = (sys.version_info[0] == 3)
class Spike2IO(BaseIO):
    """
    Class for reading data from CED spike2.

    Usage:
        >>> from neo import io
        >>> r = io.Spike2IO( filename = 'File_spike2_1.smr')
        >>> seg = r.read_segment(lazy = False, cascade = True,)
        >>> print(seg.analogsignals)
        >>> print(seg.spiketrains)
        >>> print(seg.eventarrays)
    """

    is_readable = True
    is_writable = False

    supported_objects = [Segment, AnalogSignal, EventArray, SpikeTrain]
    readable_objects = [Segment]
    writeable_objects = []

    has_header = False
    is_streameable = False

    read_params = {Segment: []}
    write_params = None

    name = 'Spike 2 CED'
    extensions = ['smr']

    mode = 'file'

    def __init__(self, filename=None):
        """
        This class reads a smr file.

        Arguments:
            filename : the filename
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self, lazy=False, cascade=True):
        """
        Read the smr file and return a Segment holding the analog signals,
        event arrays and spike trains found in its channels.

        Arguments:
            lazy: when True, only shapes/annotations are populated
            cascade: when False, return the bare Segment without children
        """
        header = self.read_header(filename=self.filename)
        fid = open(self.filename, 'rb')

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            ced_version=str(header.system_id),
        )
        if not cascade:
            return seg

        def addannotations(ob, channelHeader):
            # propagate per-channel metadata onto the neo object
            ob.annotate(title=channelHeader.title)
            ob.annotate(physical_channel_index=channelHeader.phy_chan)
            ob.annotate(comment=channelHeader.comment)

        for i in range(header.channels):
            channelHeader = header.channelHeaders[i]
            if channelHeader.kind != 0:
                pass
            if channelHeader.kind in [1, 9]:
                # Adc / RealWave channels -> AnalogSignal(s)
                anaSigs = self.readOneChannelContinuous(fid, i, header, lazy=lazy)
                for anaSig in anaSigs:
                    addannotations(anaSig, channelHeader)
                    seg.analogsignals.append(anaSig)
            elif channelHeader.kind in [2, 3, 4, 5, 8]:
                # event / marker / text-marker channels -> EventArray
                ea = self.readOneChannelEventOrSpike(fid, i, header, lazy=lazy)
                addannotations(ea, channelHeader)
                seg.eventarrays.append(ea)
            elif channelHeader.kind in [6, 7]:
                # AdcMark / RealMark channels -> SpikeTrain
                sptr = self.readOneChannelEventOrSpike(fid, i, header, lazy=lazy)
                if sptr is not None:
                    addannotations(sptr, channelHeader)
                    seg.spiketrains.append(sptr)

        fid.close()
        create_many_to_one_relationship(seg)
        return seg

    def read_header(self, filename=''):
        """Read the global file header and all per-channel headers."""
        fid = open(filename, 'rb')
        header = HeaderReader(fid, dtype(headerDescription))
        if header.system_id < 6:
            # old file versions use a fixed microsecond time base
            header.dtime_base = 1e-6
            header.datetime_detail = 0
            header.datetime_year = 0
        channelHeaders = []
        for i in range(header.channels):
            # read global channel header
            fid.seek(512 + 140 * i)  # TODO verifier i ou i-1
            channelHeader = HeaderReader(fid, dtype(channelHeaderDesciption1))
            # some channel kinds carry extra header fields
            if channelHeader.kind in [1, 6]:
                dt = [('scale', 'f4'),
                      ('offset', 'f4'),
                      ('unit', 'S6'), ]
                channelHeader += HeaderReader(fid, dtype(dt))
                if header.system_id < 6:
                    channelHeader += HeaderReader(fid, dtype([('divide', 'i4')]))  # i8
                else:
                    channelHeader += HeaderReader(fid, dtype([('interleave', 'i4')]))  # i8
            if channelHeader.kind in [7, 9]:
                dt = [('min', 'f4'),
                      ('max', 'f4'),
                      ('unit', 'S6'), ]
                channelHeader += HeaderReader(fid, dtype(dt))
                if header.system_id < 6:
                    channelHeader += HeaderReader(fid, dtype([('divide', 'i4')]))  # i8
                else:
                    channelHeader += HeaderReader(fid, dtype([('interleave', 'i4')]))  # i8
            if channelHeader.kind in [4]:
                dt = [('init_low', 'u1'),
                      ('next_low', 'u1'), ]
                channelHeader += HeaderReader(fid, dtype(dt))

            channelHeader.type = dict_kind[channelHeader.kind]
            channelHeaders.append(channelHeader)
        header.channelHeaders = channelHeaders
        fid.close()
        return header

    def readOneChannelContinuous(self, fid, channel_num, header, lazy=True):
        """Read one Adc/RealWave channel and return a list of AnalogSignals.

        A new AnalogSignal is started whenever the gap between two data
        blocks exceeds the sampling interval (CED files do not guarantee
        continuity within a channel).
        """
        channelHeader = header.channelHeaders[channel_num]

        # on-disk data type
        if channelHeader.kind == 1:
            dt = np.dtype('i2')
        elif channelHeader.kind == 9:
            dt = np.dtype('f4')

        # sample rate
        if header.system_id in [1, 2, 3, 4, 5]:  # Before version 5
            sample_interval = (channelHeader.divide * header.us_per_time * header.time_per_adc) * 1e-6
        else:
            sample_interval = (channelHeader.l_chan_dvd * header.us_per_time * header.dtime_base)
        sampling_rate = (1. / sample_interval) * pq.Hz

        # read block headers to preallocate memory by jumping block to block
        fid.seek(channelHeader.firstblock)
        blocksize = [0]
        starttimes = []
        for b in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, dtype(blockHeaderDesciption))
            if len(blocksize) > len(starttimes):
                starttimes.append(blockHeader.start_time)
            blocksize[-1] += blockHeader.items

            if blockHeader.succ_block > 0:
                # this is ugly but CED do not guaranty continuity in AnalogSignal
                fid.seek(blockHeader.succ_block)
                nextBlockHeader = HeaderReader(fid, dtype(blockHeaderDesciption))
                sample_interval = (blockHeader.end_time - blockHeader.start_time) / (blockHeader.items - 1)
                interval_with_next = nextBlockHeader.start_time - blockHeader.end_time
                if interval_with_next > sample_interval:
                    # gap detected: start a new AnalogSignal
                    blocksize.append(0)
                fid.seek(blockHeader.succ_block)

        anaSigs = []
        if channelHeader.unit in unit_convert:
            unit = pq.Quantity(1, unit_convert[channelHeader.unit])
        else:
            try:
                unit = pq.Quantity(1, channelHeader.unit)
            except Exception:
                # bug fix: was a bare except; unknown unit strings fall
                # back to a dimensionless quantity
                unit = pq.Quantity(1, '')

        for b, bs in enumerate(blocksize):
            if lazy:
                signal = [] * unit
            else:
                signal = empty(bs, dtype='f4') * unit
            anaSig = AnalogSignal(signal,
                                  sampling_rate=sampling_rate,
                                  t_start=starttimes[b] * header.us_per_time * header.dtime_base * pq.s,
                                  )
            anaSig.annotate(channel_index=channel_num)
            anaSigs.append(anaSig)

        if lazy:
            for s, anaSig in enumerate(anaSigs):
                anaSig.lazy_shape = blocksize[s]
        else:
            # read data by jumping block to block
            fid.seek(channelHeader.firstblock)
            pos = 0
            numblock = 0
            for b in range(channelHeader.blocks):
                blockHeader = HeaderReader(fid, dtype(blockHeaderDesciption))
                # read data
                sig = fromstring(fid.read(blockHeader.items * dt.itemsize), dtype=dt)
                anaSigs[numblock][pos:pos + sig.size] = sig.astype('f4') * unit
                pos += sig.size
                if pos >= blocksize[numblock]:
                    numblock += 1
                    pos = 0
                # jump to next block
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            # convert for int16: apply the channel's ADC scale and offset
            if dt.kind == 'i':
                for anaSig in anaSigs:
                    anaSig *= channelHeader.scale / 6553.6
                    anaSig += channelHeader.offset * unit

        return anaSigs

    def readOneChannelEventOrSpike(self, fid, channel_num, header, lazy=True):
        """Read one event/marker/spike channel.

        Returns an EventArray (kinds 2-5 and 8), a SpikeTrain (kinds 6-7),
        or None for empty/unsupported channels.
        """
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        ## Step 1 : on-disk record layout depends on the channel kind
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            format = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            format = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            format = [('tick', 'i4'), ('marker', 'i4'), ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            # RealMark data
            format = [('tick', 'i4'), ('marker', 'i4'), ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            format = [('tick', 'i4'), ('marker', 'i4'), ('label', 'S%d' % channelHeader.n_extra)]
        dt = dtype(format)

        ## Step 2 : first pass over the blocks to count items
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for b in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)

        if lazy:
            # only shapes and annotations, no data
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = EventArray()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea
            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num)
                sptr.lazy_shape = totalitems
                return sptr

        alltrigs = zeros(totalitems, dtype=dt)
        ## Step 3 : read all records
        fid.seek(channelHeader.firstblock)
        pos = 0
        for b in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, dtype(blockHeaderDesciption))
            # read all events in block
            trigs = fromstring(fid.read(blockHeader.items * dt.itemsize), dtype=dt)
            alltrigs[pos:pos + trigs.size] = trigs
            pos += trigs.size
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)

        ## Step 4 : convert to standard neo classes (EventArray or SpikeTrain)
        alltimes = alltrigs['tick'].astype('f') * header.us_per_time * header.dtime_base * pq.s

        if channelHeader.kind in [2, 3, 4, 5, 8]:
            #events
            ea = EventArray()
            ea.annotate(channel_index=channel_num)
            ea.times = alltimes
            if channelHeader.kind >= 5:
                # Spike2 marker is closer to the neo notion of a label
                ea.labels = alltrigs['marker'].astype('S')
            if channelHeader.kind == 8:
                ea.annotate(extra_labels=alltrigs['label'])
            return ea

        elif channelHeader.kind in [6, 7]:
            # spiketrains with waveforms
            if channelHeader.kind == 6:
                # int16 ADC samples: convert with the channel scale/offset
                waveforms = fromstring(alltrigs['adc'].tostring(), dtype='i2')
                waveforms = waveforms.astype('f4') * channelHeader.scale / 6553.6 + channelHeader.offset
            elif channelHeader.kind == 7:
                waveforms = fromstring(alltrigs['real'].tostring(), dtype='f4')

            if header.system_id >= 6 and channelHeader.interleave > 1:
                waveforms = waveforms.reshape((alltimes.size, -1, channelHeader.interleave))
                waveforms = waveforms.swapaxes(1, 2)
            else:
                waveforms = waveforms.reshape((alltimes.size, 1, -1))

            if header.system_id in [1, 2, 3, 4, 5]:
                sample_interval = (channelHeader.divide * header.us_per_time * header.time_per_adc) * 1e-6
            else:
                sample_interval = (channelHeader.l_chan_dvd * header.us_per_time * header.dtime_base)

            if channelHeader.unit in unit_convert:
                unit = pq.Quantity(1, unit_convert[channelHeader.unit])
            else:
                try:
                    unit = pq.Quantity(1, channelHeader.unit)
                except Exception:
                    # bug fix: was a bare except; fall back to dimensionless
                    unit = pq.Quantity(1, '')

            if len(alltimes) > 0:
                # can get a better value from associated AnalogSignal(s) ?
                t_stop = alltimes.max()
            else:
                t_stop = 0.0

            sptr = SpikeTrain(alltimes,
                              waveforms=waveforms * unit,
                              sampling_rate=(1. / sample_interval) * pq.Hz,
                              t_stop=t_stop
                              )
            sptr.annotate(channel_index=channel_num)
            return sptr
class HeaderReader(object):
    """Reads a binary header from *fid* into a numpy record of *dtype*.

    Field values are exposed as attributes. String ('S') fields are stored
    Pascal-style (first byte holds the length) and are sliced accordingly
    on access. Two HeaderReaders can be concatenated with '+'.
    """
    def __init__(self, fid, dtype):
        if fid is not None:
            array = np.fromstring(fid.read(dtype.itemsize), dtype)[0]
        else:
            # no file: start from a zeroed record
            array = zeros((1), dtype=dtype)[0]
        # use object.__setattr__ so our own __setattr__ (which needs
        # self.dtype) is bypassed while wiring up internal state
        object.__setattr__(self, 'dtype', dtype)
        object.__setattr__(self, 'array', array)

    def __setattr__(self, name, val):
        if name in self.dtype.names:
            self.array[name] = val
        else:
            object.__setattr__(self, name, val)

    def __getattr__(self, name):
        if name in self.dtype.names:
            if self.dtype[name].kind == 'S':
                # Pascal string: first byte holds the usable length
                if PY3K:
                    l = np.fromstring(self.array[name].decode('iso-8859-1')[0], 'u1')
                else:
                    l = np.fromstring(self.array[name][0], 'u1')
                return self.array[name][1:l + 1]
            else:
                return self.array[name]
        else:
            # bug fix: the old code called the nonexistent
            # object.__getattr__, producing a confusing error and no
            # return value; raise a proper AttributeError instead
            raise AttributeError(name)

    def names(self):
        return self.array.dtype.names

    def __repr__(self):
        s = 'HEADER'
        for name in self.dtype.names:
            # bug fix: field values may be numeric arrays/scalars; the old
            # code concatenated them to str directly and crashed
            s += ' %s: %s' % (name, self.__getattr__(name))
        return s

    def __add__(self, header2):
        """Return a new HeaderReader whose dtype and raw bytes are the
        concatenation of self and *header2*."""
        newdtype = []
        for name in self.dtype.names:
            newdtype.append((name, self.dtype[name].str))
        for name in header2.dtype.names:
            newdtype.append((name, header2.dtype[name].str))
        newdtype = dtype(newdtype)
        newHeader = HeaderReader(None, newdtype)
        newHeader.array = fromstring(self.array.tostring() + header2.array.tostring(), newdtype)[0]
        return newHeader
# Binary layout of the Spike2 (.smr) file header, as (field, numpy dtype)
# pairs.  Fields tagged "#i8" are widened to 64 bits in newer file versions.
headerDescription = [
    ( 'system_id', 'i2' ),
    ( 'copyright', 'S10' ),
    ( 'creator', 'S8' ),
    ( 'us_per_time', 'i2' ),
    ( 'time_per_adc', 'i2' ),
    ( 'filestate', 'i2' ),
    ( 'first_data', 'i4' ),#i8
    ( 'channels', 'i2' ),
    ( 'chan_size', 'i2' ),
    ( 'extra_data', 'i2' ),
    ( 'buffersize', 'i2' ),
    ( 'os_format', 'i2' ),
    ( 'max_ftime', 'i4' ),#i8
    ( 'dtime_base', 'f8' ),
    ( 'datetime_detail', 'u1' ),
    ( 'datetime_year', 'i2' ),
    ( 'pad', 'S52' ),
    ( 'comment1', 'S80' ),
    ( 'comment2', 'S80' ),
    ( 'comment3', 'S80' ),
    ( 'comment4', 'S80' ),
    ( 'comment5', 'S80' ),
    ]

# Per-channel header layout (part 1).  NOTE: the name keeps the historical
# "Desciption" spelling; other code may reference it, so do not rename.
channelHeaderDesciption1 = [
    ('del_size','i2'),
    ('next_del_block','i4'),#i8
    ('firstblock','i4'),#i8
    ('lastblock','i4'),#i8
    ('blocks','i2'),
    ('n_extra','i2'),
    ('pre_trig','i2'),
    ('free0','i2'),
    ('py_sz','i2'),
    ('max_data','i2'),
    ('comment','S72'),
    ('max_chan_time','i4'),#i8
    ('l_chan_dvd','i4'),#i8
    ('phy_chan','i2'),
    ('title','S10'),
    ('ideal_rate','f4'),
    ('kind','u1'),
    ('unused1','i1'),
    ]

# Mapping of the channel-header 'kind' code to a human-readable channel type.
dict_kind = {
    0 : 'empty',
    1: 'Adc',
    2: 'EventFall',
    3: 'EventRise',
    4: 'EventBoth',
    5: 'Marker',
    6: 'AdcMark',
    7: 'RealMark',
    8: 'TextMark',
    9: 'RealWave',
    }

# Layout of each data-block header inside a channel's block chain.
blockHeaderDesciption =[
    ('pred_block','i4'),#i8
    ('succ_block','i4'),#i8
    ('start_time','i4'),#i8
    ('end_time','i4'),#i8
    ('channel_num','i2'),
    ('items','i2'),
    ]

# Normalization of unit strings found in files to quantities-compatible names.
unit_convert = {
    'Volts' : 'V' ,
    }
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the TipoMetodologias lookup table and link it from Metodologias."""

    # Must run after the initial metodologias schema exists.
    dependencies = [
        ('metodologias', '0001_initial'),
    ]

    operations = [
        # Lookup table of methodology types; 'modificado'/'creado' are
        # auto-maintained timestamps, presumably from an abstract base model
        # (hence 'abstract': False in options) — TODO confirm against app models.
        migrations.CreateModel(
            name='TipoMetodologias',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('modificado', models.DateTimeField(auto_now=True)),
                ('creado', models.DateTimeField(auto_now_add=True)),
                ('nombre', models.CharField(max_length=150, verbose_name='Metodolog\xeda')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Optional (nullable) FK from each Metodologias row to its type.
        migrations.AddField(
            model_name='metodologias',
            name='tipo_metodologia',
            field=models.ForeignKey(verbose_name='Metodolog\xeda', blank=True, to='metodologias.TipoMetodologias', null=True),
            preserve_default=True,
        ),
    ]
|
from django.contrib.localflavor.br.forms import (BRZipCodeField,
BRCNPJField, BRCPFField, BRPhoneNumberField, BRStateSelect,
BRStateChoiceField)
from django.test import SimpleTestCase
class BRLocalFlavorTests(SimpleTestCase):
    """Tests for the Brazilian (BR) localflavor form fields and widgets.

    Each test feeds a table of valid inputs (mapped to their cleaned value)
    and invalid inputs (mapped to the expected error list) through
    ``assertFieldOutput``.
    """

    def test_BRZipCodeField(self):
        # CEP codes must match the XXXXX-XXX digit pattern exactly.
        error_format = [u'Enter a zip code in the format XXXXX-XXX.']
        valid = {
            '12345-123': '12345-123',
        }
        invalid = {
            '12345_123': error_format,
            '1234-123': error_format,
            'abcde-abc': error_format,
            '12345-': error_format,
            '-123': error_format,
        }
        self.assertFieldOutput(BRZipCodeField, valid, invalid)

    def test_BRCNPJField(self):
        # CNPJ: company registration number; check digits are validated.
        error_format = [u'Invalid CNPJ number.']
        error_numbersonly = [u'This field requires only numbers.']
        valid = {
            '64.132.916/0001-88': '64.132.916/0001-88',
            '64-132-916/0001-88': '64-132-916/0001-88',
            '64132916/0001-88': '64132916/0001-88',
        }
        invalid = {
            '12-345-678/9012-10': error_format,
            '12.345.678/9012-10': error_format,
            '12345678/9012-10': error_format,
            '64.132.916/0001-XX': error_numbersonly,
        }
        self.assertFieldOutput(BRCNPJField, valid, invalid)

    def test_BRCPFField(self):
        # CPF: personal taxpayer number; both length and check digits matter.
        error_format = [u'Invalid CPF number.']
        error_numbersonly = [u'This field requires only numbers.']
        error_atmost_chars = [u'Ensure this value has at most 14 characters (it has 15).']
        error_atleast_chars = [u'Ensure this value has at least 11 characters (it has 10).']
        error_atmost = [u'This field requires at most 11 digits or 14 characters.']
        valid = {
            '663.256.017-26': '663.256.017-26',
            '66325601726': '66325601726',
            '375.788.573-20': '375.788.573-20',
            '84828509895': '84828509895',
        }
        invalid = {
            '489.294.654-54': error_format,
            '295.669.575-98': error_format,
            '539.315.127-22': error_format,
            '375.788.573-XX': error_numbersonly,
            '375.788.573-000': error_atmost_chars,
            '123.456.78': error_atleast_chars,
            '123456789555': error_atmost,
        }
        self.assertFieldOutput(BRCPFField, valid, invalid)

    def test_BRPhoneNumberField(self):
        # All accepted spellings normalize to the XX-XXXX-XXXX form.
        # TODO: this doesn't test for any invalid inputs.
        valid = {
            '41-3562-3464': u'41-3562-3464',
            '4135623464': u'41-3562-3464',
            '41 3562-3464': u'41-3562-3464',
            '41 3562 3464': u'41-3562-3464',
            '(41) 3562 3464': u'41-3562-3464',
            '41.3562.3464': u'41-3562-3464',
            '41.3562-3464': u'41-3562-3464',
            ' (41) 3562.3464': u'41-3562-3464',
        }
        invalid = {}
        self.assertFieldOutput(BRPhoneNumberField, valid, invalid)

    def test_BRStateSelect(self):
        # The widget renders all 27 federative units; 'PR' is preselected.
        f = BRStateSelect()
        out = u'''<select name="states">
<option value="AC">Acre</option>
<option value="AL">Alagoas</option>
<option value="AP">Amap\xe1</option>
<option value="AM">Amazonas</option>
<option value="BA">Bahia</option>
<option value="CE">Cear\xe1</option>
<option value="DF">Distrito Federal</option>
<option value="ES">Esp\xedrito Santo</option>
<option value="GO">Goi\xe1s</option>
<option value="MA">Maranh\xe3o</option>
<option value="MT">Mato Grosso</option>
<option value="MS">Mato Grosso do Sul</option>
<option value="MG">Minas Gerais</option>
<option value="PA">Par\xe1</option>
<option value="PB">Para\xedba</option>
<option value="PR" selected="selected">Paran\xe1</option>
<option value="PE">Pernambuco</option>
<option value="PI">Piau\xed</option>
<option value="RJ">Rio de Janeiro</option>
<option value="RN">Rio Grande do Norte</option>
<option value="RS">Rio Grande do Sul</option>
<option value="RO">Rond\xf4nia</option>
<option value="RR">Roraima</option>
<option value="SC">Santa Catarina</option>
<option value="SP">S\xe3o Paulo</option>
<option value="SE">Sergipe</option>
<option value="TO">Tocantins</option>
</select>'''
        self.assertEqual(f.render('states', 'PR'), out)

    def test_BRStateChoiceField(self):
        # State codes are case-sensitive: lowercase input is rejected.
        error_invalid = [u'Select a valid brazilian state. That state is not one of the available states.']
        valid = {
            'AC': 'AC',
            'AL': 'AL',
            'AP': 'AP',
            'AM': 'AM',
            'BA': 'BA',
            'CE': 'CE',
            'DF': 'DF',
            'ES': 'ES',
            'GO': 'GO',
            'MA': 'MA',
            'MT': 'MT',
            'MS': 'MS',
            'MG': 'MG',
            'PA': 'PA',
            'PB': 'PB',
            'PR': 'PR',
            'PE': 'PE',
            'PI': 'PI',
            'RJ': 'RJ',
            'RN': 'RN',
            'RS': 'RS',
            'RO': 'RO',
            'RR': 'RR',
            'SC': 'SC',
            'SP': 'SP',
            'SE': 'SE',
            'TO': 'TO',
        }
        invalid = {
            'pr': error_invalid,
        }
        self.assertFieldOutput(BRStateChoiceField, valid, invalid)
|
from __future__ import absolute_import, print_function
import base64
import math
import jsonschema
import logging
import os
import six
import traceback
import uuid
from time import time
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.files import uploadhandler
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed
from django.http.multipartparser import MultiPartParser
from django.utils.encoding import force_bytes
from django.views.decorators.cache import never_cache, cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View as BaseView
from functools import wraps
from querystring_parser import parser
from raven.contrib.django.models import client as Raven
from symbolic import ProcessMinidumpError
from sentry import quotas, tsdb
from sentry.coreapi import (
APIError, APIForbidden, APIRateLimited, ClientApiHelper, SecurityApiHelper, LazyData,
MinidumpApiHelper,
)
from sentry.interfaces import schemas
from sentry.interfaces.base import get_interface
from sentry.lang.native.utils import merge_minidump_event
from sentry.models import Project, OrganizationOption, Organization
from sentry.signals import (
event_accepted, event_dropped, event_filtered, event_received)
from sentry.quotas.base import RateLimit
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.data_scrubber import SensitiveDataFilter
from sentry.utils.dates import to_datetime
from sentry.utils.http import (
is_valid_origin,
get_origins,
is_same_domain,
)
from sentry.utils.pubsub import QueuedPublisher, RedisPublisher
from sentry.utils.safe import safe_execute
from sentry.web.helpers import render_to_response
logger = logging.getLogger('sentry')

# 1x1 transparent GIF returned by the GET store endpoint so browsers
# don't warn about a bad image.
PIXEL = base64.b64decode('R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')

# Client protocol versions this server accepts in the auth header.
PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))

# Optional pubsub pipeline for raw request bodies; only constructed when
# REQUESTS_PUBSUB_ENABLED is set in settings.
pubsub = QueuedPublisher(
    RedisPublisher(getattr(settings, 'REQUESTS_PUBSUB_CONNECTION', None))
) if getattr(settings, 'REQUESTS_PUBSUB_ENABLED', False) else None
def api(func):
    """Decorator for views whose result is raw JSON text.

    AJAX callers receive the data as an application/json response; plain
    browser hits are redirected back to a same-domain referrer, falling
    back to the sentry root URL.
    """
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        payload = func(request, *args, **kwargs)
        if not request.is_ajax():
            # Non-AJAX: bounce the browser somewhere sensible instead of
            # showing raw JSON.
            referrer = request.META.get('HTTP_REFERER')
            if referrer is None or not is_same_domain(referrer, request.build_absolute_uri()):
                referrer = reverse('sentry')
            return HttpResponseRedirect(referrer)
        response = HttpResponse(payload)
        response['Content-Type'] = 'application/json'
        return response
    return wrapped
class APIView(BaseView):
    """Base view for the client-facing store API.

    Subclasses swap in a different helper class and/or override
    ``_dispatch`` to change authentication behavior.
    """

    helper_cls = ClientApiHelper

    def _get_project_from_id(self, project_id):
        """Resolve a numeric project id to a Project, or None if no id given.

        Raises APIError for non-numeric or unknown ids.
        """
        if not project_id:
            return
        if not project_id.isdigit():
            raise APIError('Invalid project_id: %r' % project_id)
        try:
            return Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            raise APIError('Invalid project_id: %r' % project_id)

    def _parse_header(self, request, helper, project):
        """Extract and validate the client auth header; return the auth object."""
        auth = helper.auth_from_request(request)

        if auth.version not in PROTOCOL_VERSIONS:
            raise APIError(
                'Client using unsupported server protocol version (%r)' %
                six.text_type(auth.version or '')
            )

        if not auth.client:
            raise APIError("Client did not send 'client' identifier")

        return auth

    @csrf_exempt
    @never_cache
    def dispatch(self, request, project_id=None, *args, **kwargs):
        """Outer dispatch: error translation, metrics, and CORS headers.

        Delegates the real work to ``_dispatch`` and converts APIError /
        unexpected exceptions into JSON / plain-text error responses.
        """
        helper = self.helper_cls(
            agent=request.META.get('HTTP_USER_AGENT'),
            project_id=project_id,
            ip_address=request.META['REMOTE_ADDR'],
        )
        origin = None

        try:
            origin = helper.origin_from_request(request)

            response = self._dispatch(
                request, helper, project_id=project_id, origin=origin, *args, **kwargs
            )
        except APIError as e:
            context = {
                'error': force_bytes(e.msg, errors='replace'),
            }
            if e.name:
                context['error_name'] = e.name

            response = HttpResponse(
                json.dumps(context), content_type='application/json', status=e.http_status
            )
            # Set X-Sentry-Error as in many cases it is easier to inspect the headers
            response['X-Sentry-Error'] = context['error']

            if isinstance(e, APIRateLimited) and e.retry_after is not None:
                response['Retry-After'] = six.text_type(int(math.ceil(e.retry_after)))

        except Exception as e:
            # TODO(dcramer): test failures are not outputting the log message
            # here
            if settings.DEBUG:
                content = traceback.format_exc()
            else:
                content = ''
            logger.exception(e)
            response = HttpResponse(
                content, content_type='text/plain', status=500)

        # TODO(dcramer): it'd be nice if we had an incr_multi method so
        # tsdb could optimize this
        metrics.incr('client-api.all-versions.requests')
        metrics.incr('client-api.all-versions.responses.%s' %
                     (response.status_code, ))
        metrics.incr(
            'client-api.all-versions.responses.%sxx' % (
                six.text_type(response.status_code)[0], )
        )

        # Per-protocol-version metrics, only when the client version is known.
        if helper.context.version:
            metrics.incr('client-api.v%s.requests' %
                         (helper.context.version, ))
            metrics.incr(
                'client-api.v%s.responses.%s' % (
                    helper.context.version, response.status_code)
            )
            metrics.incr(
                'client-api.v%s.responses.%sxx' %
                (helper.context.version, six.text_type(
                    response.status_code)[0])
            )

        if response.status_code != 200 and origin:
            # We allow all origins on errors
            response['Access-Control-Allow-Origin'] = '*'

        if origin:
            response['Access-Control-Allow-Headers'] = \
                'X-Sentry-Auth, X-Requested-With, Origin, Accept, ' \
                'Content-Type, Authentication'
            response['Access-Control-Allow-Methods'] = \
                ', '.join(self._allowed_methods())
            response['Access-Control-Expose-Headers'] = \
                'X-Sentry-Error, Retry-After'

        return response

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        """Inner dispatch: bind project/auth context, then call the HTTP handler."""
        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        if project:
            helper.context.bind_project(project)
            Raven.tags_context(helper.context.get_tags_context())

        if origin is not None:
            # This check is specific for clients who need CORS support
            if not project:
                raise APIError('Client must be upgraded for CORS support')
            if not is_valid_origin(origin, project):
                tsdb.incr(tsdb.models.project_total_received_cors,
                          project.id)
                raise APIForbidden('Invalid origin: %s' % (origin, ))

        # XXX: It seems that the OPTIONS call does not always include custom headers
        if request.method == 'OPTIONS':
            response = self.options(request, project)
        else:
            auth = self._parse_header(request, helper, project)

            key = helper.project_key_from_auth(auth)

            # Legacy API was /api/store/ and the project ID was only available elsewhere
            if not project:
                project = Project.objects.get_from_cache(id=key.project_id)
                helper.context.bind_project(project)
            elif key.project_id != project.id:
                raise APIError('Two different projects were specified')

            helper.context.bind_auth(auth)
            Raven.tags_context(helper.context.get_tags_context())

            # Explicitly bind Organization so we don't implicitly query it later
            # this just allows us to comfortably assure that `project.organization` is safe.
            # This also allows us to pull the object from cache, instead of being
            # implicitly fetched from database.
            project.organization = Organization.objects.get_from_cache(
                id=project.organization_id)

            response = super(APIView, self).dispatch(
                request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
            )

        if origin:
            if origin == 'null':
                # If an Origin is `null`, but we got this far, that means
                # we've gotten past our CORS check for some reason. But the
                # problem is that we can't return "null" as a valid response
                # to `Access-Control-Allow-Origin` and we don't have another
                # value to work with, so just allow '*' since they've gotten
                # this far.
                response['Access-Control-Allow-Origin'] = '*'
            else:
                response['Access-Control-Allow-Origin'] = origin

        return response

    # XXX: backported from Django 1.5
    def _allowed_methods(self):
        """Return the uppercase HTTP methods this view implements."""
        return [m.upper() for m in self.http_method_names if hasattr(self, m)]

    def options(self, request, *args, **kwargs):
        """Answer CORS preflight requests with an empty Allow response."""
        response = HttpResponse()
        response['Allow'] = ', '.join(self._allowed_methods())
        response['Content-Length'] = '0'
        return response
class StoreView(APIView):
    """
    The primary endpoint for storing new events.

    This will validate the client's authentication and data, and if
    successful pass on the payload to the internal database handler.

    Authentication works in three flavors:

    1. Explicit signed requests

       These are implemented using the documented signed request protocol, and
       require an authentication header which is signed using with the project
       member's secret key.

    2. CORS Secured Requests

       Generally used for communications with client-side platforms (such as
       JavaScript in the browser), they require a standard header, excluding
       the signature and timestamp requirements, and must be listed in the
       origins for the given project (or the global origins).

    3. Implicit trusted requests

       Used by the Sentry core, they are only available from same-domain requests
       and do not require any authentication information. They only require that
       the user be authenticated, and a project_id be sent in the GET variables.
    """

    def post(self, request, **kwargs):
        """Store an event from the request body; respond with its id as JSON."""
        try:
            data = request.body
        except Exception as e:
            logger.exception(e)
            # We were unable to read the body.
            # This would happen if a request were submitted
            # as a multipart form for example, where reading
            # body yields an Exception. There's also not a more
            # sane exception to catch here. This will ultimately
            # bubble up as an APIError.
            data = None

        # Mirror the raw body to the optional pubsub pipeline before processing.
        if pubsub is not None and data is not None:
            pubsub.publish('requests', data)

        response_or_event_id = self.process(request, data=data, **kwargs)
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id
        return HttpResponse(
            json.dumps({
                'id': response_or_event_id,
            }), content_type='application/json'
        )

    def get(self, request, **kwargs):
        """Store an event passed via the `sentry_data` query parameter."""
        data = request.GET.get('sentry_data', '')
        response_or_event_id = self.process(request, data=data, **kwargs)

        # Return a simple 1x1 gif for browser so they don't throw a warning
        response = HttpResponse(PIXEL, 'image/gif')
        if not isinstance(response_or_event_id, HttpResponse):
            response['X-Sentry-ID'] = response_or_event_id
        return response

    def process(self, request, project, key, auth, helper, data, **kwargs):
        """Run the full ingestion pipeline for one event.

        Order matters: receive signal -> inbound filters -> rate limits ->
        duplicate-id check -> data scrubbing -> insert.  Returns the event id,
        or raises APIError/APIForbidden/APIRateLimited on rejection.
        """
        metrics.incr('events.total')

        if not data:
            raise APIError('No JSON data was found')

        remote_addr = request.META['REMOTE_ADDR']

        # LazyData defers decoding/validation of the payload until accessed.
        data = LazyData(
            data=data,
            content_encoding=request.META.get('HTTP_CONTENT_ENCODING', ''),
            helper=helper,
            project=project,
            key=key,
            auth=auth,
            client_ip=remote_addr,
        )

        event_received.send_robust(
            ip=remote_addr,
            project=project,
            sender=type(self),
        )
        start_time = time()
        tsdb_start_time = to_datetime(start_time)
        should_filter, filter_reason = helper.should_filter(
            project, data, ip_address=remote_addr)
        if should_filter:
            # Event rejected by an inbound filter: record stats and bail out.
            increment_list = [
                (tsdb.models.project_total_received, project.id),
                (tsdb.models.project_total_blacklisted, project.id),
                (tsdb.models.organization_total_received,
                 project.organization_id),
                (tsdb.models.organization_total_blacklisted,
                 project.organization_id),
                (tsdb.models.key_total_received, key.id),
                (tsdb.models.key_total_blacklisted, key.id),
            ]
            try:
                increment_list.append(
                    (FILTER_STAT_KEYS_TO_VALUES[filter_reason], project.id))
            # should error when filter_reason does not match a key in FILTER_STAT_KEYS_TO_VALUES
            except KeyError:
                pass
            tsdb.incr_multi(
                increment_list,
                timestamp=tsdb_start_time,
            )
            metrics.incr('events.blacklisted', tags={
                'reason': filter_reason})
            event_filtered.send_robust(
                ip=remote_addr,
                project=project,
                sender=type(self),
            )
            raise APIForbidden('Event dropped due to filter: %s' % (filter_reason,))

        # TODO: improve this API (e.g. make RateLimit act on __ne__)
        rate_limit = safe_execute(
            quotas.is_rate_limited, project=project, key=key, _with_transaction=False
        )
        if isinstance(rate_limit, bool):
            rate_limit = RateLimit(is_limited=rate_limit, retry_after=None)

        # XXX(dcramer): when the rate limiter fails we drop events to ensure
        # it cannot cascade
        if rate_limit is None or rate_limit.is_limited:
            if rate_limit is None:
                helper.log.debug(
                    'Dropped event due to error with rate limiter')
            tsdb.incr_multi(
                [
                    (tsdb.models.project_total_received, project.id),
                    (tsdb.models.project_total_rejected, project.id),
                    (tsdb.models.organization_total_received,
                     project.organization_id),
                    (tsdb.models.organization_total_rejected,
                     project.organization_id),
                    (tsdb.models.key_total_received, key.id),
                    (tsdb.models.key_total_rejected, key.id),
                ],
                timestamp=tsdb_start_time,
            )
            metrics.incr(
                'events.dropped',
                tags={
                    'reason': rate_limit.reason_code if rate_limit else 'unknown',
                }
            )
            event_dropped.send_robust(
                ip=remote_addr,
                project=project,
                sender=type(self),
                reason_code=rate_limit.reason_code if rate_limit else None,
            )
            # A None rate_limit means the limiter itself errored; the event is
            # dropped silently (no 429) in that case.
            if rate_limit is not None:
                raise APIRateLimited(rate_limit.retry_after)
        else:
            tsdb.incr_multi(
                [
                    (tsdb.models.project_total_received, project.id),
                    (tsdb.models.organization_total_received,
                     project.organization_id),
                    (tsdb.models.key_total_received, key.id),
                ],
                timestamp=tsdb_start_time,
            )

        org_options = OrganizationOption.objects.get_all_values(
            project.organization_id)

        event_id = data['event_id']

        # TODO(dcramer): ideally we'd only validate this if the event_id was
        # supplied by the user
        cache_key = 'ev:%s:%s' % (project.id, event_id, )

        if cache.get(cache_key) is not None:
            raise APIForbidden(
                'An event with the same ID already exists (%s)' % (event_id, ))

        # Org-level "require" options override the per-project settings.
        scrub_ip_address = (org_options.get('sentry:require_scrub_ip_address', False) or
                            project.get_option('sentry:scrub_ip_address', False))
        scrub_data = (org_options.get('sentry:require_scrub_data', False) or
                      project.get_option('sentry:scrub_data', True))

        if scrub_data:
            # We filter data immediately before it ever gets into the queue
            sensitive_fields_key = 'sentry:sensitive_fields'
            sensitive_fields = (
                org_options.get(sensitive_fields_key, []) +
                project.get_option(sensitive_fields_key, [])
            )

            exclude_fields_key = 'sentry:safe_fields'
            exclude_fields = (
                org_options.get(exclude_fields_key, []) +
                project.get_option(exclude_fields_key, [])
            )

            scrub_defaults = (org_options.get('sentry:require_scrub_defaults', False) or
                              project.get_option('sentry:scrub_defaults', True))

            SensitiveDataFilter(
                fields=sensitive_fields,
                include_defaults=scrub_defaults,
                exclude_fields=exclude_fields,
            ).apply(data)

        if scrub_ip_address:
            # We filter data immediately before it ever gets into the queue
            helper.ensure_does_not_have_ip(data)

        # mutates data (strips a lot of context if not queued)
        helper.insert_data_to_database(data, start_time=start_time)

        # Remember this event id for 5 minutes to reject duplicates.
        cache.set(cache_key, '', 60 * 5)

        helper.log.debug('New event received (%s)', event_id)

        event_accepted.send_robust(
            ip=remote_addr,
            data=data,
            project=project,
            sender=type(self),
        )

        return event_id
class MinidumpView(StoreView):
    """Store endpoint for native crash reports uploaded as minidump files."""

    helper_cls = MinidumpApiHelper
    # Minidumps arrive as multipart form uploads only.
    content_types = ('multipart/form-data', )

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        """Auth via querystring (no Origin/Referer headers on these requests)."""
        # TODO(ja): Refactor shared code with CspReportView. Especially, look at
        # the sentry_key override and test it.

        # A minidump submission as implemented by Breakpad and Crashpad or any
        # other library following the Mozilla Soccorro protocol is a POST request
        # without Origin or Referer headers. Therefore, we cannot validate the
        # origin of the request, but we *can* validate the "prod" key in future.
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])

        content_type = request.META.get('CONTENT_TYPE')
        # In case of multipart/form-data, the Content-Type header also includes
        # a boundary. Therefore, we cannot check for an exact match.
        if content_type is None or not content_type.startswith(self.content_types):
            raise APIError('Invalid Content-Type')

        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        helper.context.bind_project(project)
        Raven.tags_context(helper.context.get_tags_context())

        # This is yanking the auth from the querystring since it's not
        # in the POST body. This means we expect a `sentry_key` and
        # `sentry_version` to be set in querystring
        auth = helper.auth_from_request(request)

        key = helper.project_key_from_auth(auth)
        if key.project_id != project.id:
            raise APIError('Two different projects were specified')

        helper.context.bind_auth(auth)
        Raven.tags_context(helper.context.get_tags_context())

        # NOTE: skips StoreView._dispatch on purpose (super(APIView, ...)),
        # so the CORS/origin handling of the parent is bypassed.
        return super(APIView, self).dispatch(
            request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
        )

    def post(self, request, **kwargs):
        """Parse a Breakpad/Crashpad upload, merge it into an event, store it."""
        # Minidump request payloads do not have the same structure as
        # usual events from other SDKs. Most notably, the event needs
        # to be transfered in the `sentry` form field. All other form
        # fields are assumed "extra" information. The only exception
        # to this is `upload_file_minidump`, which contains the minidump.

        if any(key.startswith('sentry[') for key in request.POST):
            # First, try to parse the nested form syntax `sentry[key][key]`
            # This is required for the Breakpad client library, which only
            # supports string values of up to 64 characters.
            extra = parser.parse(request.POST.urlencode())
            data = extra.pop('sentry', {})
        else:
            # Custom clients can submit longer payloads and should JSON
            # encode event data into the optional `sentry` field.
            extra = request.POST
            json_data = extra.pop('sentry', None)
            data = json.loads(json_data[0]) if json_data else {}

        # Merge additional form fields from the request with `extra`
        # data from the event payload and set defaults for processing.
        extra.update(data.get('extra', {}))
        data['extra'] = extra

        # Assign our own UUID so we can track this minidump. We cannot trust the
        # uploaded filename, and if reading the minidump fails there is no way
        # we can ever retrieve the original UUID from the minidump.
        event_id = data.get('event_id') or uuid.uuid4().hex
        data['event_id'] = event_id

        # At this point, we only extract the bare minimum information
        # needed to continue processing. This requires to process the
        # minidump without symbols and CFI to obtain an initial stack
        # trace (most likely via stack scanning). If all validations
        # pass, the event will be inserted into the database.
        try:
            minidump = request.FILES['upload_file_minidump']
        except KeyError:
            raise APIError('Missing minidump upload')

        # Optionally keep a copy of the raw dump on disk for debugging.
        if settings.SENTRY_MINIDUMP_CACHE:
            if not os.path.exists(settings.SENTRY_MINIDUMP_PATH):
                os.mkdir(settings.SENTRY_MINIDUMP_PATH, 0o744)
            with open('%s/%s.dmp' % (settings.SENTRY_MINIDUMP_PATH, event_id), 'wb') as out:
                for chunk in minidump.chunks():
                    out.write(chunk)

        # Breakpad on linux sometimes stores the entire HTTP request body as
        # dump file instead of just the minidump. The Electron SDK then for
        # example uploads a multipart formdata body inside the minidump file.
        # It needs to be re-parsed, to extract the actual minidump before
        # continuing.
        minidump.seek(0)
        if minidump.read(2) == b'--':
            # The remaining bytes of the first line are the form boundary. We
            # have already read two bytes, the remainder is the form boundary
            # (excluding the initial '--').
            boundary = minidump.readline().rstrip()
            minidump.seek(0)

            # Next, we have to fake a HTTP request by specifying the form
            # boundary and the content length, or otherwise Django will not try
            # to parse our form body. Also, we need to supply new upload
            # handlers since they cannot be reused from the current request.
            meta = {
                'CONTENT_TYPE': b'multipart/form-data; boundary=%s' % boundary,
                'CONTENT_LENGTH': minidump.size,
            }

            handlers = [
                uploadhandler.load_handler(handler, request)
                for handler in settings.FILE_UPLOAD_HANDLERS
            ]

            _, files = MultiPartParser(meta, minidump, handlers).parse()
            try:
                minidump = files['upload_file_minidump']
            except KeyError:
                raise APIError('Missing minidump upload')

        try:
            merge_minidump_event(data, minidump)
        except ProcessMinidumpError as e:
            logger.exception(e)
            raise APIError(e.message.split('\n', 1)[0])

        response_or_event_id = self.process(request, data=data, **kwargs)
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id

        # Return the formatted UUID of the generated event. This is
        # expected by the Electron http uploader on Linux and doesn't
        # break the default Breakpad client library.
        return HttpResponse(
            six.text_type(uuid.UUID(response_or_event_id)),
            content_type='text/plain'
        )
class StoreSchemaView(BaseView):
    """Expose the JSON schema used to validate incoming events."""

    def get(self, request, **kwargs):
        schema_json = json.dumps(schemas.EVENT_SCHEMA)
        return HttpResponse(schema_json, content_type='application/json')
class SecurityReportView(StoreView):
    """Store endpoint for browser security reports (CSP, Expect-CT, HPKP...)."""

    helper_cls = SecurityApiHelper
    # Accepted report Content-Types; Firefox sends plain application/json.
    content_types = (
        'application/csp-report',
        'application/json',
        'application/expect-ct-report',
        'application/expect-ct-report+json',
        'application/expect-staple-report',
    )

    def _dispatch(self, request, helper, project_id=None, origin=None, *args, **kwargs):
        """Auth via querystring; origin is validated later from `document-uri`."""
        # A CSP report is sent as a POST request with no Origin or Referer
        # header. What we're left with is a 'document-uri' key which is
        # inside of the JSON body of the request. This 'document-uri' value
        # should be treated as an origin check since it refers to the page
        # that triggered the report. The Content-Type is supposed to be
        # `application/csp-report`, but FireFox sends it as `application/json`.
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])

        if request.META.get('CONTENT_TYPE') not in self.content_types:
            raise APIError('Invalid Content-Type')

        request.user = AnonymousUser()

        project = self._get_project_from_id(project_id)
        helper.context.bind_project(project)
        Raven.tags_context(helper.context.get_tags_context())

        # This is yanking the auth from the querystring since it's not
        # in the POST body. This means we expect a `sentry_key` and
        # `sentry_version` to be set in querystring
        auth = helper.auth_from_request(request)

        key = helper.project_key_from_auth(auth)
        if key.project_id != project.id:
            raise APIError('Two different projects were specified')

        helper.context.bind_auth(auth)
        Raven.tags_context(helper.context.get_tags_context())

        # NOTE: skips StoreView._dispatch on purpose (super(APIView, ...)),
        # bypassing the parent's header-based origin handling.
        return super(APIView, self).dispatch(
            request=request, project=project, auth=auth, helper=helper, key=key, **kwargs
        )

    def post(self, request, project, helper, **kwargs):
        """Parse the security report body, validate it, and store it as an event."""
        json_body = helper.safely_load_json_string(request.body)
        report_type = self.security_report_type(json_body)
        if report_type is None:
            raise APIError('Unrecognized security report type')

        interface = get_interface(report_type)

        try:
            instance = interface.from_raw(json_body)
        except jsonschema.ValidationError as e:
            raise APIError('Invalid security report: %s' % str(e).splitlines()[0])

        # Do origin check based on the `document-uri` key as explained in `_dispatch`.
        origin = instance.get_origin()
        if not is_valid_origin(origin, project):
            if project:
                tsdb.incr(tsdb.models.project_total_received_cors, project.id)
            raise APIForbidden('Invalid origin')

        data = {
            'interface': interface.path,
            'report': instance,
            'release': request.GET.get('sentry_release'),
            'environment': request.GET.get('sentry_environment'),
        }

        response_or_event_id = self.process(
            request, project=project, helper=helper, data=data, **kwargs
        )
        if isinstance(response_or_event_id, HttpResponse):
            return response_or_event_id
        return HttpResponse(status=201)

    def security_report_type(self, body):
        """Map a parsed report body to its interface name, or None if unknown."""
        report_type_for_key = {
            'csp-report': 'sentry.interfaces.Csp',
            'expect-ct-report': 'expectct',
            'expect-staple-report': 'expectstaple',
            'known-pins': 'hpkp',
        }
        if isinstance(body, dict):
            for k in report_type_for_key:
                if k in body:
                    return report_type_for_key[k]
        return None
@cache_control(max_age=3600, public=True)
def robots_txt(request):
    """Serve a robots.txt that disallows all crawling."""
    body = "User-agent: *\nDisallow: /\n"
    return HttpResponse(body, content_type='text/plain')
@cache_control(max_age=3600, public=True)
def crossdomain_xml_index(request):
    """Serve the site-wide Flash cross-domain policy index."""
    resp = render_to_response('sentry/crossdomain_index.xml')
    resp['Content-Type'] = 'application/xml'
    return resp
@cache_control(max_age=60)
def crossdomain_xml(request, project_id):
    """Serve the per-project crossdomain.xml listing its allowed origins."""
    # Non-numeric or unknown project ids are simply not found.
    if not project_id.isdigit():
        return HttpResponse(status=404)
    try:
        project = Project.objects.get_from_cache(id=project_id)
    except Project.DoesNotExist:
        return HttpResponse(status=404)

    context = {'origin_list': get_origins(project)}
    response = render_to_response('sentry/crossdomain.xml', context)
    response['Content-Type'] = 'application/xml'
    return response
|
import os
import fcntl
import errno
import shutil
import pytest
import os.path
import tempfile
from tectonic import prefork
def test_WorkerMetadata():
    """
    This is a simple test, as WorkerMetadata only holds data
    """
    fields = {
        'pid': 'pid',
        'health_check_read': 100,
        'last_seen': 'now',
    }
    metadata = prefork.WorkerMetadata(**fields)
    # Every constructor argument must round-trip as an attribute.
    for attr, expected in fields.items():
        assert getattr(metadata, attr) == expected
def test_WriteAndFlushFile():
    """
    Make sure we can write to and read from a file.
    """
    # BUG FIX: create the directory *before* entering try/finally.  In the
    # original, mkdtemp() was inside the try, so if it raised, the finally
    # block hit a NameError on `dirname` that masked the real failure.
    dirname = tempfile.mkdtemp()
    try:
        filename = 'filename.txt'
        text1 = 'The quick brown fox\n'
        text2 = 'The lazy dog'
        full_path = os.path.join(dirname, filename)
        # Open a file and write using both changed methods
        f = prefork.WriteAndFlushFile(full_path, 'w')
        f.write(text1)
        f.writelines(text2)
        f.close()
        # Read everything back; `with` guarantees the handle is closed even
        # when an assertion below fails.
        with open(full_path, 'r') as f:
            data = f.readlines()
        assert data[0] == text1
        assert data[1] == text2
    finally:
        # Always remove the scratch directory
        shutil.rmtree(dirname)
def test_set_nonblocking():
    """
    See if we can set a file to non-blocking status
    Create a random file for this.
    """
    scratch = tempfile.TemporaryFile()
    # The O_NONBLOCK bit must be clear before the call...
    flags = fcntl.fcntl(scratch, fcntl.F_GETFL, os.O_NONBLOCK)
    assert not flags & os.O_NONBLOCK
    prefork.set_nonblocking(scratch)
    # ...and set afterwards.
    flags = fcntl.fcntl(scratch, fcntl.F_GETFL, os.O_NONBLOCK)
    assert flags & os.O_NONBLOCK
    # Destroy the file, even though GC will do that anyway.
    scratch.close()
def test_ignore_interupts():
    """
    _ignore_interrupts must re-raise ordinary errors but swallow those
    carrying EINTR/EAGAIN as their first argument.
    (Function name keeps its historical typo so pytest ids stay stable.)
    """
    fatal_errors = (
        AssertionError(),
        AssertionError('Hello, how are you?', 'I am fine'),
    )
    for err in fatal_errors:
        with pytest.raises(AssertionError):
            prefork._ignore_interrupts(err)
    # These must pass through silently.
    for code in (errno.EINTR, errno.EAGAIN):
        prefork._ignore_interrupts(
            AssertionError(code, 'This is a happy error.'))
|
"""
log-reports.py -- report on VIVO log files
Version 1.0 M. Conlon 2012-05-10
-- read log file for 1 (default) or many days and tabulates editor,
subjects, predicates, objects, actions
1.1 MC 2014-06-05
-- Update for reading vivo.all.log.1. Works as expected for single file
1.2 MC 2014-08-11
-- Now reads web files. Command line arguments control number of days to
read and the trim level
1.3 MC 2014-08-13
-- Fixed bug reading wrong date in the log. Date is now correct for
transactions
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "1.3"
from datetime import datetime
from datetime import timedelta
from urllib2 import urlopen
import argparse
def counts(s, log_data, trim=None):
"""
Given a keyword s, and log data, generate a frequency table for s from the
log data
:param s: name of key
:param log_data: log data
:param trim: number of lines to show in frequency tables
"""
trim_text = ""
if trim is not None:
trim_text = ' (trimmed at ' + str(trim) + ')'
print "\nCounts of " + s + trim_text
things = {}
for row in log_data:
try:
thing = row[s]
things[thing] = things.get(thing, 0) + 1
except KeyError:
continue
i = 0
for thing in sorted(things, key=things.get, reverse=True):
i = i + 1
if trim is not None and i > trim:
break
print things[thing], '\t', thing
def get_logs(start_date, end_date):
"""
Given a start and end date, gather and return log_records, a list of the log
records
"""
base_uri = 'http://vivo.ufl.edu/logs/vivo-triple-log-'
tail_uri = '.log'
date_fmt = '%Y-%m-%d'
log_records = []
log_date = start_date
while log_date <= end_date:
date_str = log_date.strftime(date_fmt)
uri = base_uri + date_str + tail_uri
print "Reading", date_str, "from", uri
try:
response = urlopen(uri)
log_file = response.read().split('\n')
log_records = log_records + log_file
except IOError:
pass
log_date = log_date + day
return log_records
parser = argparse.ArgumentParser()
parser.add_argument("days", help="number of days of logs to include",
type=int, default=7)
parser.add_argument("trim", help="number of lines to show in tables",
type=int, default=100)
args = parser.parse_args()
print datetime.now(), "Start"
day = timedelta(days=1)
to_date = datetime.now() - day
from_date = datetime.now() - args.days * day
log_recs = get_logs(from_date, to_date)
n = 0
log = []
for log_row in log_recs:
if len(log_row) < 127:
continue
words = log_row.split(' ')
if len(words) < 10:
continue
io = words[9][:-1]
user = words[8][:-1]
process = words[7][:-1]
date = words[0]
triple_string = ' '.join(words[10:])
triple_string = triple_string.replace('","', "|")
try:
[triple_subject, triple_predicate, triple_object] = \
triple_string.split("|")
triple_object = triple_object.replace('""', '"')
triple_object = triple_object.replace('>"', '>')
triple_object = triple_object.replace('\n', '')
if triple_subject[0] == '"':
triple_subject = triple_subject[1:]
except:
continue
if io != "ADD" and io != "SUB":
continue
n = n + 1
log.append({
"User": user,
"Process": process,
"ADD/SUB": io,
"Date": date,
"Subject": triple_subject,
"Predicate": triple_predicate,
"Object": triple_object
})
print n, "log lines read"
counts("Date", log)
counts("Process", log)
counts("ADD/SUB", log)
counts("User", log)
counts("Subject", log, trim=args.trim)
counts("Predicate", log, trim=args.trim)
counts("Object", log, trim=args.trim)
print datetime.now(), "Finish"
|
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add nullable JSONB ``errors`` and ``warnings`` columns to Dataset."""
    dependencies = [
        ('create_dataset', '0003_dataset_completed'),
    ]
    operations = [
        migrations.AddField(
            model_name='dataset',
            name='errors',
            # blank=True/null=True: optional at both form and DB level.
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='dataset',
            name='warnings',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
        ),
    ]
|
from sklearn.svm import SVC
|
"""Functions to help with reindexing against a reference dataset."""
from __future__ import annotations
from cctbx import sgtbx
from mmtbx.scaling.twin_analyses import twin_laws
import dials.util
from dials.util import Sorry
def determine_reindex_operator_against_reference(test_miller_set, reference_miller_set):
    """Reindex a miller set to match a reference miller set.

    This function takes two miller arrays, a reference and a test array. The
    space group is checked to see if any reindexing may be required to give
    consistent indexing between both datasets. If possible twin operators exist,
    the different indexing options are tested against the reference set, using
    the correlation between datasets as the test.

    Args:
        test_miller_set (cctbx.miller.array): The input miller set to be reindexed.
        reference_miller_set (cctbx.miller.array): The reference miller set.

    Returns:
        cctbx.sgtbx.change_of_basis_op: The change of basis operator which should be
        applied to the test dataset to give consistent indexing with the reference.

    Raises:
        Sorry: If the two datasets are not in the same space group.
    """
    if (
        reference_miller_set.space_group().type().number()
        != test_miller_set.space_group().type().number()
    ):
        raise Sorry(
            """Space groups are not equal. Can only reindex against a
reference dataset if both dataset are in the same spacegroup."""
        )

    twin_ops = twin_laws(miller_array=test_miller_set.eliminate_sys_absent()).operators
    twin_ops = [sgtbx.change_of_basis_op(op.operator.as_xyz()) for op in twin_ops]

    # No twin operators: the indexing is unambiguous, nothing to test.
    if not twin_ops:
        print("No twin operators found, no reindexing required \n")
        return sgtbx.change_of_basis_op("a,b,c")

    print(
        f"Possible twin operators identified for space group "
        f"{test_miller_set.space_group().info()}:"
    )
    for op in twin_ops:
        print(op)

    # CC of each indexing choice against the reference; index 0 is
    # "no reindex", index i+1 corresponds to twin_ops[i].
    correlations = [
        test_miller_set.correlation(
            reference_miller_set, assert_is_similar_symmetry=False
        ).coefficient()
    ]
    for op in twin_ops:
        reindexed = test_miller_set.change_basis(op)
        correlations.append(
            reindexed.correlation(
                reference_miller_set, assert_is_similar_symmetry=False
            ).coefficient()
        )

    # Print out table of results and choose the best correlation.
    header = ["Reindex op", "CC to reference"]
    rows = [["a, b, c (no reindex)", f"{correlations[0]:.5f}"]]
    for i, op in enumerate(twin_ops):
        rows.append([str(op), f"{correlations[i + 1]:.5f}"])
    print(dials.util.tabulate(rows, header))

    best_solution_idx = correlations.index(max(correlations))
    print("\nOutcome of analysis against reference dataset:")
    if best_solution_idx == 0:
        print("No reindexing required \n")
        return sgtbx.change_of_basis_op("a,b,c")
    print(
        "Reindexing required with the twin operator:",
        twin_ops[best_solution_idx - 1].as_hkl(),
        "\n",
    )
    return twin_ops[best_solution_idx - 1]
|
from venus.stock_base import StockEventBase
from dev_global.env import GLOBAL_HEADER
class strategyBase(StockEventBase):
    """Scaffold base class for trading strategies.

    Subclasses override ``_get_data`` (fetch/screen market data) and
    ``_settle`` (close out positions); both are no-ops here.
    """
    def __init__(self):
        # GLOBAL_HEADER is forwarded to StockEventBase — presumably shared
        # configuration (e.g. DB connection info); confirm in dev_global.env.
        super(strategyBase, self).__init__(GLOBAL_HEADER)
    def _get_data(self):
        # Hook: intentionally empty in the base class.
        pass
    def _settle(self):
        # Hook: intentionally empty in the base class.
        pass
class new_cta(strategyBase):
    """Moving-average alignment screen: flags stocks whose MA5 > MA10 > MA20.

    Fixes two NameErrors in the original: ``condition_1`` referenced an
    undefined global ``event`` (now ``self``) and appended to an undefined
    global ``result`` (now returns a bool, collected by ``_get_data``).
    """
    def _get_data(self):
        """Screen every stock and return the list of matching codes."""
        matches = []
        for stock in self.get_all_stock_list():
            if self.condition_1(stock):
                matches.append(stock)
        return matches
    def condition_1(self, stock_code):
        """Return True when the latest MA5 > MA10 > MA20 for stock_code.

        Also prints the code of each match, as the original did.
        """
        # NOTE(review): assumes the mysql accessor is available on self via
        # StockEventBase (original used the undefined name `event`) — confirm.
        df = self.mysql.select_values(stock_code, 'close_price')
        df.columns = ['close']
        df['MA5'] = df['close'].rolling(5).mean()
        df['MA10'] = df['close'].rolling(10).mean()
        df['MA20'] = df['close'].rolling(20).mean()
        if df['MA5'].values[-1] > df['MA10'].values[-1] > df['MA20'].values[-1]:
            print(stock_code)
            return True
        return False
|
import stomp
# Connect to a STOMP broker using the library defaults (localhost:61613).
conn = stomp.Connection()
# NOTE(review): Connection.start() was removed in stomp.py 4.x; this script
# presumably targets an older release — confirm the installed version.
conn.start()
conn.connect()
# Publish one message to a routing exchange, requesting replies on a
# dedicated queue via the STOMP 'reply-to' header.
conn.send(body='Example Message',
          destination='/exchange/stomp-routing/example',
          headers={'reply-to': 'my-reply-queue'})
conn.disconnect()
|
"""Protobuf message utilities.
The Serializer classes are adapters to standardize the reading and writing of
different protobuf message serialization formats to and from a message.
The base MessageHandler class encapsulates the functionality of reading
a file containing serialized data into a protobuf message instance, and
writing serialized data from a message instance out to a file.
"""
from __future__ import print_function
import os
import sys
from google.protobuf import json_format
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# Supported serialization formats, as accepted by get_message_handler.
FORMAT_BINARY = 1  # protobuf wire format (SerializeToString/ParseFromString)
FORMAT_JSON = 2  # protobuf JSON mapping (google.protobuf.json_format)
VALID_FORMATS = (FORMAT_BINARY, FORMAT_JSON)
# Exception hierarchy: everything derives from Error so callers can catch
# the whole family with a single `except Error`.
class Error(Exception):
  """Base error class for the module."""
class InvalidHandlerError(Error):
  """Raised when a message handler has no input/output argument when needed."""
class InvalidInputFileError(Error):
  """Raised when the input file cannot be read."""
class InvalidInputFormatError(Error):
  """Raised when the passed input protobuf can't be parsed."""
class InvalidOutputFileError(Error):
  """Raised when the output file cannot be written."""
class UnknownHandlerError(Error):
  """Raised when a valid type has not been implemented yet.
  This should only ever be raised when under active development.
  See: get_message_handler.
  """
def get_message_handler(path, msg_format):
  """Build the MessageHandler appropriate for the given message format."""
  assert msg_format in VALID_FORMATS
  if msg_format == FORMAT_BINARY:
    serializer = BinarySerializer()
    binary = True
    suffix = 'binary'
  elif msg_format == FORMAT_JSON:
    serializer = JsonSerializer()
    binary = False
    suffix = 'json'
  else:
    # Unexpected. Your new format type needs a case in this function if
    # you got this error.
    raise UnknownHandlerError('Unknown format type.')
  # The three --x-<format> arguments always share the format's suffix.
  return MessageHandler(
      path=path,
      serializer=serializer,
      binary=binary,
      input_arg='--input-' + suffix,
      output_arg='--output-' + suffix,
      config_arg='--config-' + suffix)
class Serializer(object):
  """Null serializer: the do-nothing base of the serializer family.
  Concrete subclasses implement a real protobuf serialization format.
  """
  def deserialize(self, data, message):
    """Load serialized data into the given message.
    The base implementation intentionally does nothing.
    Args:
      data (str): The message data to deserialize.
      message (google.protobuf.Message): The message to load the data into.
    """
    # pylint: disable=unused-argument
  def serialize(self, message):
    """Return the message's serialized data.
    The base implementation always yields an empty payload.
    Args:
      message (google.protobuf.Message): The message to be serialized.
    Returns:
      str: The message's serialized data.
    """
    return ''
class BinarySerializer(Serializer):
  """Serializer speaking the protobuf wire (binary) format."""
  def deserialize(self, data, message):
    """Parse binary wire data into the given message.
    See: Serializer.deserialize
    """
    message.ParseFromString(data)
  def serialize(self, message):
    """Encode the message as binary wire data.
    See: Serializer.serialize
    """
    return message.SerializeToString()
class JsonSerializer(Serializer):
  """Protobuf json serializer class."""
  def deserialize(self, data, message):
    """Deserialize the data into the given message.
    See: Serializer.deserialize
    Raises:
      InvalidInputFormatError: If the data is not parseable json.
    """
    try:
      json_format.Parse(data, message, ignore_unknown_fields=True)
    except json_format.ParseError as e:
      # Chain the cause so the underlying parse failure isn't lost; the
      # module asserts Python 3.6+, so `raise ... from` is available.
      raise InvalidInputFormatError(
          'Unable to parse the input json: %s' % e) from e
  def serialize(self, message):
    """Serialize the message data.
    See: Serializer.serialize
    """
    # MessageToJson can produce an empty string; normalize to a valid
    # (empty) json object.
    return json_format.MessageToJson(
        message, sort_keys=True, use_integers_for_enums=True) or '{}'
class MessageHandler(object):
  """Class to handle message (de)serialization to and from files.
  The class is fairly tightly coupled to the build api, but we currently have
  no other projected use cases for this, so it's handy. In particular, if we
  scrap the "maintain the same input/output/config serialization when reexecing
  inside the chroot" convention, this implementation is much less useful and
  can be fairly trivially generalized.
  The instance's path is the primary path the message handler was built for.
  For the Build API, this means one of the input/output/config arguments. In
  practice, it's largely a convenience/shortcut so we don't have to either
  track which input files are what types (which we know from the argument used
  to pass them in), or create another containing data class for the
  functionality provided by the handler and serializer classes and the build
  api data.
  Examples:
    message_handler = MessageHandler(path, ...)
    message = ...
    # Parse path into message.
    message_handler.read_into(message)
    # Write message to a different file.
    message_handler.write_into(message, path=other_path)
  """
  def __init__(self, path, serializer, binary, input_arg, output_arg,
               config_arg):
    """MessageHandler init.
    Args:
      path (str): The path to the main file associated with this handler.
      serializer (Serializer): The serializer to be used for the messages.
      binary (bool): Whether the serialized content is binary.
      input_arg (str): The --input-x argument used for this type. Used for
        reexecution inside the chroot.
      output_arg (str): The --output-x argument used for this type. Used for
        reexecution inside the chroot.
      config_arg (str): The --config-x argument used for this type. Used for
        reexecution inside the chroot.
    """
    self.path = path
    self.serializer = serializer
    self.read_mode = 'rb' if binary else 'r'
    self.write_mode = 'wb' if binary else 'w'
    self.input_arg = input_arg
    self.output_arg = output_arg
    self.config_arg = config_arg
  def read_into(self, message, path=None):
    """Read a file containing serialized data into a message.
    Args:
      message (google.protobuf.Message): The message to populate.
      path (str|None): A path to read. Uses the instance's path when not given.
    Raises:
      InvalidInputFileError: When a path has not been given, does not exist,
        or cannot be read.
    """
    # Resolve the target once instead of re-evaluating `path or self.path`
    # at every use.
    target = path or self.path
    if not target:
      raise InvalidInputFileError('No input file has been specified.')
    if not os.path.exists(target):
      raise InvalidInputFileError('The input file does not exist.')
    try:
      content = osutils.ReadFile(target, mode=self.read_mode)
    except IOError as e:
      # Chain the cause so the underlying OS error isn't lost.
      raise InvalidInputFileError('Unable to read input file: %s' % e) from e
    self.serializer.deserialize(content, message)
  def write_from(self, message, path=None):
    """Write serialized data from the message to a file.
    Args:
      message (google.protobuf.Message): The message to serialize and persist.
      path (str|None): An optional override of the instance's path.
    Raises:
      InvalidOutputFileError: When no path given, or it cannot be written to.
    """
    target = path or self.path
    if not target:
      raise InvalidOutputFileError('No output file has been specified.')
    try:
      osutils.WriteFile(
          target,
          self.serializer.serialize(message),
          mode=self.write_mode)
    except IOError as e:
      raise InvalidOutputFileError('Cannot write output file: %s' % e) from e
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.