| code (stringlengths 22 to 1.05M) | apis (listlengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
|---|---|---|
# Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-18 16:18
from __future__ import unicode_literals
from django.db import migrations
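# Data migration: mark every wg/rg/ag session at IETF 107 as canceled, except
# for the groups listed in `excludes`; `reverse` removes the SchedulingEvents
# created here so the migration can be unapplied.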
def cancel_sessions(apps, schema_editor):
Session = apps.get_model('meeting', 'Session')
SchedulingEvent = apps.get_model('meeting', 'SchedulingEvent')
SessionStatusName = apps.get_model('name', 'SessionStatusName')
Person = apps.get_model('person', 'Person')
excludes = ['txauth','dispatch','add','raw','masque','wpack','drip','gendispatch','privacypass', 'ript', 'secdispatch', 'webtrans']
canceled = SessionStatusName.objects.get(slug='canceled')
person = Person.objects.get(name='<NAME>')
sessions = Session.objects.filter(meeting__number=107,group__type__in=['wg','rg','ag']).exclude(group__acronym__in=excludes)
for session in sessions:
SchedulingEvent.objects.create(
session = session,
status = canceled,
by = person)
def reverse(apps, schema_editor):
SchedulingEvent = apps.get_model('meeting', 'SchedulingEvent')
Person = apps.get_model('person', 'Person')
person = Person.objects.get(name='<NAME>')
SchedulingEvent.objects.filter(session__meeting__number=107, by=person).delete()
class Migration(migrations.Migration):
dependencies = [
('meeting', '0025_rename_type_session_to_regular'),
]
operations = [
migrations.RunPython(cancel_sessions, reverse),
]
|
[
"django.db.migrations.RunPython"
] |
[((1449, 1495), 'django.db.migrations.RunPython', 'migrations.RunPython', (['cancel_sessions', 'reverse'], {}), '(cancel_sessions, reverse)\n', (1469, 1495), False, 'from django.db import migrations\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""This config file is for running experiments needed for the EMNLP camera ready.
It will generate the following experiments (depending on the value of eval_split and model):
- 100 dev examples
- GPT-3 Constrained Canonical, n = 1000
- GPT-3 Constrained Canonical, n = 100
- GPT-3 Constrained Canonical, n = 25
- GPT-3 Constrained Canonical, n = 200
- GPT-3 Constrained Meaning, n = 200
- GPT-3 Unconstrained Canonical, n = 200
- GPT-3 Unconstrained Meaning, n = 200
- All dev examples
- GPT-3 Constrained Meaning, n = 200
- BART Constrained Canonical
- BART Constrained Meaning
- BART Unconstrained Canonical
- BART Unconstrained Meaning
- GPT-2 Constrained Canonical
- GPT-2 Constrained Meaning
- GPT-2 Unconstrained Canonical
- GPT-2 Unconstrained Meaning
"""
from typing import Any, Callable, Dict
import torch
from typing_extensions import Literal
from semantic_parsing_with_constrained_lm.configs.lib.common import PromptOrder, make_semantic_parser
from semantic_parsing_with_constrained_lm.datum import Datum
from semantic_parsing_with_constrained_lm.domains.qdmr_break import (
BreakDataType,
BreakDatum,
BreakMetrics,
BreakPieces,
BreakSamplingType,
)
from semantic_parsing_with_constrained_lm.fit_max_steps import compute_and_print_fit
from semantic_parsing_with_constrained_lm.lm import TRAINED_MODEL_DIR, AutoregressiveModel, ClientType
from semantic_parsing_with_constrained_lm.lm_bart import Seq2SeqBart
from semantic_parsing_with_constrained_lm.lm_openai_gpt3 import IncrementalOpenAIGPT3
from semantic_parsing_with_constrained_lm.run_exp import EvalSplit, Experiment
from semantic_parsing_with_constrained_lm.search import PartialParse, StartsWithSpacePartialParse
def build_config(
log_dir, # pylint: disable=unused-argument
eval_split: EvalSplit,
model: ClientType,
rank: int,
**kwargs: Any, # pylint: disable=unused-argument
) -> Dict[str, Callable[[], Experiment]]:
BEAM_SIZE = 10
DEV_SUBSET_SIZE = 100
MAX_STEPS_FOR_COMPLETION = 145
use_gpt3 = model == ClientType.GPT3
def create_exp(
problem_type: Literal[
"constrained", "unconstrained-beam", "unconstrained-greedy"
],
output_type: BreakDataType,
train_size: int,
exp_name: str,
):
lm: AutoregressiveModel
if model == ClientType.GPT3:
lm = IncrementalOpenAIGPT3()
elif model == ClientType.BART:
lm = Seq2SeqBart(
# Part after / is set to match lm_finetune.py
f"{TRAINED_MODEL_DIR}/20000/break_{output_type}/",
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
)
else:
raise ValueError(model)
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=train_size,
test_total=DEV_SUBSET_SIZE,
seed=0,
)
train_data = piece.train_data
test_data = piece.test_data
if eval_split == EvalSplit.TrainSubset:
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=100000,
test_total=1,
seed=0,
)
test_data = piece.train_data[-100:]
elif eval_split == EvalSplit.DevFull:
piece = BreakPieces.build(
tokenizer=lm.tokenizer,
data_type=output_type,
train_sampling_type=BreakSamplingType.proportional,
test_sampling_type=BreakSamplingType.random,
train_total=train_size,
test_total=1000000,
seed=0,
skip_if_needed=False,
)
test_data = piece.test_data
elif eval_split == EvalSplit.DevSubset:
# train_data and test_data were already set outside of this if block
pass
else:
raise ValueError(f"{eval_split} not supported currently")
partial_parse_builder: Callable[[BreakDatum], PartialParse]
if problem_type == "constrained":
partial_parse_builder = piece.partial_parse_builder # type: ignore
beam_size = BEAM_SIZE
elif problem_type.startswith("unconstrained"):
# TODO: Only impose this if we are using a GPT-2-style tokenizer
partial_parse = StartsWithSpacePartialParse(lm.tokenizer)
partial_parse_builder = lambda _: partial_parse
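            # StartsWithSpacePartialParse presumably only forces the first generated
            # token to begin with a space (to match GPT-2-style tokenization), so
            # decoding is otherwise effectively unconstrained.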
if problem_type == "unconstrained-beam":
beam_size = BEAM_SIZE
elif problem_type == "unconstrained-greedy":
beam_size = 1
else:
raise ValueError(problem_type)
else:
raise ValueError(f"{problem_type} not allowed")
# Compute max_steps_fn
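        # Fit a linear function from input-token count to output-token count over
        # the training data; max_steps_fn below uses the fitted slope and intercept
        # to budget decoding steps per example, capped at MAX_STEPS_FOR_COMPLETION.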
pairs = []
for d in train_data:
num_input_tokens = len(lm.tokenizer.tokenize(d.natural))
num_output_tokens = len(lm.tokenizer.tokenize(d.canonical)) + 1
pairs.append((num_input_tokens, num_output_tokens))
max_steps_intercept, max_steps_slope = compute_and_print_fit(pairs, 10, 3)
def max_steps_fn(datum: Datum) -> int:
return min(
int(
len(lm.tokenizer.tokenize(datum.natural)) * max_steps_slope
+ max_steps_intercept
),
MAX_STEPS_FOR_COMPLETION,
)
parser = make_semantic_parser(
train_data,
lm,
use_gpt3,
MAX_STEPS_FOR_COMPLETION,
beam_size,
partial_parse_builder,
max_steps_fn,
PromptOrder.BestLast,
)
return Experiment( # type: ignore
model=parser,
metrics={
"break_metrics": BreakMetrics(
log_dir=log_dir / exp_name / str(rank),
data_type=piece.data_type,
num_results=BEAM_SIZE,
),
},
test_data=test_data,
client=lm,
)
def add_exp_to_dict(
exps_dict: Dict[str, Callable[[], Experiment]],
problem_type: Literal[
"constrained", "unconstrained-beam", "unconstrained-greedy"
],
output_type: BreakDataType,
train_size: int,
):
exp_name = (
f"break_{model}_{eval_split}_{problem_type}_{output_type}_train{train_size}"
)
exps_dict[exp_name] = lambda: create_exp(
problem_type, output_type, train_size, exp_name
)
result: Dict[str, Callable[[], Experiment]] = {}
if eval_split == EvalSplit.DevFull:
if use_gpt3:
# - GPT-3 Constrained Meaning, n = 200
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
else:
# - BART Constrained Canonical
# - BART Constrained Meaning
# - BART Unconstrained Canonical
# - BART Unconstrained Meaning
# - GPT-2 Constrained Canonical
# - GPT-2 Constrained Meaning
# - GPT-2 Unconstrained Canonical
# - GPT-2 Unconstrained Meaning
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
elif eval_split == EvalSplit.DevSubset:
if use_gpt3:
# - GPT-3 Constrained Canonical, n = 1000
# - GPT-3 Constrained Canonical, n = 100
# - GPT-3 Constrained Canonical, n = 25
add_exp_to_dict(
result, "constrained", BreakDataType.nested, train_size=1000
)
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=100)
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=25)
# - GPT-3 Constrained Canonical, n = 200
# - GPT-3 Constrained Meaning, n = 200
# - GPT-3 Unconstrained Canonical, n = 200
# - GPT-3 Unconstrained Meaning, n = 200
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
else:
# No subset experiments for BART and GPT-2
pass
elif eval_split == EvalSplit.TrainSubset:
add_exp_to_dict(result, "constrained", BreakDataType.nested, train_size=200)
add_exp_to_dict(result, "constrained", BreakDataType.qdmr, train_size=200)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.nested, train_size=200
)
add_exp_to_dict(
result, "unconstrained-greedy", BreakDataType.qdmr, train_size=200
)
return result
|
[
"semantic_parsing_with_constrained_lm.lm_openai_gpt3.IncrementalOpenAIGPT3",
"semantic_parsing_with_constrained_lm.configs.lib.common.make_semantic_parser",
"semantic_parsing_with_constrained_lm.search.StartsWithSpacePartialParse",
"semantic_parsing_with_constrained_lm.fit_max_steps.compute_and_print_fit",
"torch.cuda.is_available",
"semantic_parsing_with_constrained_lm.domains.qdmr_break.BreakPieces.build"
] |
[((2847, 3082), 'semantic_parsing_with_constrained_lm.domains.qdmr_break.BreakPieces.build', 'BreakPieces.build', ([], {'tokenizer': 'lm.tokenizer', 'data_type': 'output_type', 'train_sampling_type': 'BreakSamplingType.proportional', 'test_sampling_type': 'BreakSamplingType.random', 'train_total': 'train_size', 'test_total': 'DEV_SUBSET_SIZE', 'seed': '(0)'}), '(tokenizer=lm.tokenizer, data_type=output_type,\n train_sampling_type=BreakSamplingType.proportional, test_sampling_type=\n BreakSamplingType.random, train_total=train_size, test_total=\n DEV_SUBSET_SIZE, seed=0)\n', (2864, 3082), False, 'from semantic_parsing_with_constrained_lm.domains.qdmr_break import BreakDataType, BreakDatum, BreakMetrics, BreakPieces, BreakSamplingType\n'), ((5541, 5576), 'semantic_parsing_with_constrained_lm.fit_max_steps.compute_and_print_fit', 'compute_and_print_fit', (['pairs', '(10)', '(3)'], {}), '(pairs, 10, 3)\n', (5562, 5576), False, 'from semantic_parsing_with_constrained_lm.fit_max_steps import compute_and_print_fit\n'), ((5885, 6031), 'semantic_parsing_with_constrained_lm.configs.lib.common.make_semantic_parser', 'make_semantic_parser', (['train_data', 'lm', 'use_gpt3', 'MAX_STEPS_FOR_COMPLETION', 'beam_size', 'partial_parse_builder', 'max_steps_fn', 'PromptOrder.BestLast'], {}), '(train_data, lm, use_gpt3, MAX_STEPS_FOR_COMPLETION,\n beam_size, partial_parse_builder, max_steps_fn, PromptOrder.BestLast)\n', (5905, 6031), False, 'from semantic_parsing_with_constrained_lm.configs.lib.common import PromptOrder, make_semantic_parser\n'), ((2457, 2480), 'semantic_parsing_with_constrained_lm.lm_openai_gpt3.IncrementalOpenAIGPT3', 'IncrementalOpenAIGPT3', ([], {}), '()\n', (2478, 2480), False, 'from semantic_parsing_with_constrained_lm.lm_openai_gpt3 import IncrementalOpenAIGPT3\n'), ((3307, 3519), 'semantic_parsing_with_constrained_lm.domains.qdmr_break.BreakPieces.build', 'BreakPieces.build', ([], {'tokenizer': 'lm.tokenizer', 'data_type': 'output_type', 'train_sampling_type': 'BreakSamplingType.proportional', 'test_sampling_type': 'BreakSamplingType.random', 'train_total': '(100000)', 'test_total': '(1)', 'seed': '(0)'}), '(tokenizer=lm.tokenizer, data_type=output_type,\n train_sampling_type=BreakSamplingType.proportional, test_sampling_type=\n BreakSamplingType.random, train_total=100000, test_total=1, seed=0)\n', (3324, 3519), False, 'from semantic_parsing_with_constrained_lm.domains.qdmr_break import BreakDataType, BreakDatum, BreakMetrics, BreakPieces, BreakSamplingType\n'), ((3752, 4000), 'semantic_parsing_with_constrained_lm.domains.qdmr_break.BreakPieces.build', 'BreakPieces.build', ([], {'tokenizer': 'lm.tokenizer', 'data_type': 'output_type', 'train_sampling_type': 'BreakSamplingType.proportional', 'test_sampling_type': 'BreakSamplingType.random', 'train_total': 'train_size', 'test_total': '(1000000)', 'seed': '(0)', 'skip_if_needed': '(False)'}), '(tokenizer=lm.tokenizer, data_type=output_type,\n train_sampling_type=BreakSamplingType.proportional, test_sampling_type=\n BreakSamplingType.random, train_total=train_size, test_total=1000000,\n seed=0, skip_if_needed=False)\n', (3769, 4000), False, 'from semantic_parsing_with_constrained_lm.domains.qdmr_break import BreakDataType, BreakDatum, BreakMetrics, BreakPieces, BreakSamplingType\n'), ((4786, 4827), 'semantic_parsing_with_constrained_lm.search.StartsWithSpacePartialParse', 'StartsWithSpacePartialParse', (['lm.tokenizer'], {}), '(lm.tokenizer)\n', (4813, 4827), False, 'from semantic_parsing_with_constrained_lm.search import PartialParse, 
StartsWithSpacePartialParse\n'), ((2727, 2752), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2750, 2752), False, 'import torch\n')]
|
# ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
"""
This module contains the declarations for the modeling components
typically used for buses (including loads and shunts)
"""
import pyomo.environ as pe
import egret.model_library.decl as decl
from pyomo.core.util import quicksum
from pyomo.core.expr.numeric_expr import LinearExpression
from egret.model_library.defn import FlowType, CoordinateType, ApproximationType
from math import tan, radians
def declare_var_vr(model, index_set, **kwargs):
"""
Create variable for the real component of the voltage at a bus
"""
decl.declare_var('vr', model=model, index_set=index_set, **kwargs)
def declare_var_vj(model, index_set, **kwargs):
"""
Create variable for the imaginary component of the voltage at a bus
"""
decl.declare_var('vj', model=model, index_set=index_set, **kwargs)
def declare_var_vm(model, index_set, **kwargs):
"""
    Create variable for the voltage magnitude at a bus
"""
decl.declare_var('vm', model=model, index_set=index_set, **kwargs)
def declare_var_va(model, index_set, **kwargs):
"""
Create variable for the phase angle of the voltage at a bus
"""
decl.declare_var('va', model=model, index_set=index_set, **kwargs)
def declare_expr_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
"""
Create an expression for the voltage magnitude squared at a bus
"""
m = model
expr_set = decl.declare_set('_expr_vmsq', model, index_set)
m.vmsq = pe.Expression(expr_set)
if coordinate_type == CoordinateType.RECTANGULAR:
for bus in expr_set:
m.vmsq[bus] = m.vr[bus] ** 2 + m.vj[bus] ** 2
elif coordinate_type == CoordinateType.POLAR:
for bus in expr_set:
m.vmsq[bus] = m.vm[bus] ** 2
def declare_var_vmsq(model, index_set, **kwargs):
"""
Create auxiliary variable for the voltage magnitude squared at a bus
"""
decl.declare_var('vmsq', model=model, index_set=index_set, **kwargs)
def declare_eq_vmsq(model, index_set, coordinate_type=CoordinateType.POLAR):
"""
Create a constraint relating vmsq to the voltages
"""
m = model
con_set = decl.declare_set('_con_eq_vmsq', model, index_set)
m.eq_vmsq = pe.Constraint(con_set)
if coordinate_type == CoordinateType.POLAR:
for bus in con_set:
m.eq_vmsq[bus] = m.vmsq[bus] == m.vm[bus] ** 2
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus in con_set:
m.eq_vmsq[bus] = m.vmsq[bus] == m.vr[bus]**2 + m.vj[bus]**2
else:
raise ValueError('unexpected coordinate_type: {0}'.format(str(coordinate_type)))
def declare_var_ir_aggregation_at_bus(model, index_set, **kwargs):
"""
Create a variable for the aggregated real current at a bus
"""
decl.declare_var('ir_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_ij_aggregation_at_bus(model, index_set, **kwargs):
"""
Create a variable for the aggregated imaginary current at a bus
"""
decl.declare_var('ij_aggregation_at_bus', model=model, index_set=index_set, **kwargs)
def declare_var_pl(model, index_set, **kwargs):
"""
Create variable for the real power load at a bus
"""
decl.declare_var('pl', model=model, index_set=index_set, **kwargs)
def declare_var_ql(model, index_set, **kwargs):
"""
Create variable for the reactive power load at a bus
"""
decl.declare_var('ql', model=model, index_set=index_set, **kwargs)
def declare_var_p_nw(model, index_set, **kwargs):
"""
Create variable for the net real power withdrawals at a bus
"""
decl.declare_var('p_nw', model=model, index_set=index_set, **kwargs)
def declare_var_q_nw(model, index_set, **kwargs):
"""
Create variable for the net reactive power withdrawals at a bus
"""
decl.declare_var('q_nw', model=model, index_set=index_set, **kwargs)
def declare_expr_shunt_power_at_bus(model, index_set, shunt_attrs,
coordinate_type=CoordinateType.POLAR):
"""
Create the expression for the shunt power at the bus
"""
m = model
expr_set = decl.declare_set('_expr_shunt_at_bus_set', model, index_set)
m.shunt_p = pe.Expression(expr_set, initialize=0.0)
m.shunt_q = pe.Expression(expr_set, initialize=0.0)
if coordinate_type == CoordinateType.POLAR:
for bus_name in expr_set:
if bus_name in shunt_attrs['bus']:
vmsq = m.vm[bus_name]**2
m.shunt_p[bus_name] = shunt_attrs['gs'][bus_name]*vmsq
m.shunt_q[bus_name] = -shunt_attrs['bs'][bus_name]*vmsq
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus_name in expr_set:
if bus_name in shunt_attrs['bus']:
vmsq = m.vr[bus_name]**2 + m.vj[bus_name]**2
m.shunt_p[bus_name] = shunt_attrs['gs'][bus_name]*vmsq
m.shunt_q[bus_name] = -shunt_attrs['bs'][bus_name]*vmsq
def _get_dc_dicts(dc_inlet_branches_by_bus, dc_outlet_branches_by_bus, con_set):
if dc_inlet_branches_by_bus is None:
assert dc_outlet_branches_by_bus is None
dc_inlet_branches_by_bus = {bn:() for bn in con_set}
if dc_outlet_branches_by_bus is None:
dc_outlet_branches_by_bus = dc_inlet_branches_by_bus
return dc_inlet_branches_by_bus, dc_outlet_branches_by_bus
def declare_expr_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
vm_by_bus=None, **kwargs):
"""
    Create a named pyomo expression for the net real power withdrawal at each bus
"""
m = model
decl.declare_expr('p_nw', model, index_set)
dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
dc_outlet_branches_by_bus,
index_set)
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
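                # Shunt term linearized around the operating point vm_by_bus[b]:
                # gs * vm**2 ~= gs * (2*vm0*vm - vm0**2) (first-order Taylor expansion).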
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
for b in index_set:
m.p_nw[b] = ( bus_gs_fixed_shunts[b]
+ ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
- sum( m.pg[g] for g in gens_by_bus[b] )
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
def declare_eq_p_net_withdraw_at_bus(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts,
dc_inlet_branches_by_bus=None, dc_outlet_branches_by_bus=None,
vm_by_bus=None, **kwargs):
"""
    Create a named pyomo constraint for the net real power withdrawal at each bus
"""
m = model
con_set = decl.declare_set('_con_eq_p_net_withdraw_at_bus', model, index_set)
dc_inlet_branches_by_bus, dc_outlet_branches_by_bus = _get_dc_dicts(dc_inlet_branches_by_bus,
dc_outlet_branches_by_bus,
index_set)
m.eq_p_net_withdraw_at_bus = pe.Constraint(con_set)
constr = m.eq_p_net_withdraw_at_bus
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.pl[b] if bus_p_loads[b] != 0.0 else 0.0)
- sum(m.pg[g] for g in gens_by_bus[b])
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
return
else:
for b in index_set:
constr[b] = m.p_nw[b] == ( bus_gs_fixed_shunts[b]
+ ( m.pl[b] if bus_p_loads[b] != 0.0 else 0.0 )
- sum( m.pg[g] for g in gens_by_bus[b] )
+ sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[b])
- sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[b])
)
def declare_expr_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
vm_by_bus=None, **kwargs):
"""
    Create a named pyomo expression for the net reactive power withdrawal at each bus
"""
m = model
decl.declare_expr('q_nw', model, index_set)
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
for b in index_set:
m.q_nw[b] = (-bus_bs_fixed_shunts[b]
+ ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
- sum( m.qg[g] for g in gens_by_bus[b] )
)
def declare_eq_q_net_withdraw_at_bus(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts,
vm_by_bus=None, **kwargs):
"""
    Create a named pyomo constraint for the net reactive power withdrawal at each bus
"""
m = model
con_set = decl.declare_set('_con_eq_q_net_withdraw_at_bus', model, index_set)
m.eq_q_net_withdraw_at_bus = pe.Constraint(con_set)
constr = m.eq_q_net_withdraw_at_bus
if kwargs and vm_by_bus is not None:
for idx,val in kwargs.items():
if idx=='linearize_shunts' and val==True:
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2)
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
if idx=='linearize_shunts' and val==False:
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2
+ (m.ql[b] if bus_q_loads[b] != 0.0 else 0.0)
- sum(m.qg[g] for g in gens_by_bus[b])
)
return
for b in index_set:
constr[b] = m.q_nw[b] == (-bus_bs_fixed_shunts[b]
+ ( m.ql[b] if bus_q_loads[b] != 0.0 else 0.0 )
- sum( m.qg[g] for g in gens_by_bus[b] )
)
def declare_eq_ref_bus_nonzero(model, ref_angle, ref_bus):
"""
    Create an equality constraint enforcing tan(ref_angle) = vj/vr at the reference bus,
    which fixes the reference voltage angle in rectangular coordinates
"""
m = model
m.eq_ref_bus_nonzero = pe.Constraint(expr = tan(radians(ref_angle)) * m.vr[ref_bus] == m.vj[ref_bus])
def declare_eq_i_aggregation_at_bus(model, index_set,
bus_bs_fixed_shunts, bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus):
"""
Create the equality constraints for the aggregated real and imaginary
currents at the bus
"""
m = model
con_set = decl.declare_set('_con_eq_i_aggregation_at_bus_set', model, index_set)
m.eq_ir_aggregation_at_bus = pe.Constraint(con_set)
m.eq_ij_aggregation_at_bus = pe.Constraint(con_set)
for bus_name in con_set:
ir_expr = sum([m.ifr[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
ir_expr += sum([m.itr[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
ij_expr = sum([m.ifj[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
ij_expr += sum([m.itj[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
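        # Shunt current is the bus admittance (gs + j*bs) times the voltage (vr + j*vj):
        # real part gs*vr - bs*vj, imaginary part gs*vj + bs*vr.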
if bus_bs_fixed_shunts[bus_name] != 0.0:
ir_expr -= bus_bs_fixed_shunts[bus_name] * m.vj[bus_name]
ij_expr += bus_bs_fixed_shunts[bus_name] * m.vr[bus_name]
if bus_gs_fixed_shunts[bus_name] != 0.0:
ir_expr += bus_gs_fixed_shunts[bus_name] * m.vr[bus_name]
ij_expr += bus_gs_fixed_shunts[bus_name] * m.vj[bus_name]
ir_expr -= m.ir_aggregation_at_bus[bus_name]
ij_expr -= m.ij_aggregation_at_bus[bus_name]
m.eq_ir_aggregation_at_bus[bus_name] = ir_expr == 0
m.eq_ij_aggregation_at_bus[bus_name] = ij_expr == 0
def declare_eq_p_balance_ed(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, **rhs_kwargs):
"""
Create the equality constraints for the system-wide real power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
p_expr -= sum(bus_gs_fixed_shunts[bus_name] for bus_name in index_set if bus_gs_fixed_shunts[bus_name] != 0.0)
relaxed_balance = False
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
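            # Each value names a model attribute (presumably a slack/feasibility
            # variable); eval("m." + val) resolves it on the model at build time.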
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)
if idx == 'include_losses':
p_expr -= sum(m.pfl[branch_name] for branch_name in val)
if idx == 'relax_balance':
relaxed_balance = True
if relaxed_balance:
m.eq_p_balance = pe.Constraint(expr=p_expr >= 0.0)
else:
m.eq_p_balance = pe.Constraint(expr=p_expr == 0.0)
def declare_eq_p_balance_lopf(model, index_set, bus_p_loads, gens_by_bus, bus_gs_fixed_shunts, vm_by_bus, **rhs_kwargs):
"""
Create the equality constraints for the system-wide real power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
p_expr = sum(m.pg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
p_expr -= sum(m.pl[bus_name] for bus_name in index_set if bus_p_loads[bus_name] is not None)
relaxed_balance = False
if rhs_kwargs:
for idx,val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)
if idx == 'include_branch_losses':
pass # branch losses are added to the constraint after updating pfl constraints
if idx == 'include_system_losses':
p_expr -= m.ploss
if idx == 'relax_balance':
relaxed_balance = True
if idx == 'linearize_shunts':
if val == True:
p_expr -= sum( bus_gs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
elif val == False:
p_expr -= sum( bus_gs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
for b in index_set if bus_gs_fixed_shunts[b] != 0.0)
else:
raise Exception('linearize_shunts option is invalid.')
if relaxed_balance:
m.eq_p_balance = pe.Constraint(expr = p_expr >= 0.0)
else:
m.eq_p_balance = pe.Constraint(expr = p_expr == 0.0)
def declare_eq_q_balance_lopf(model, index_set, bus_q_loads, gens_by_bus, bus_bs_fixed_shunts, vm_by_bus, **rhs_kwargs):
"""
    Create the equality constraints for the system-wide reactive power balance.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
q_expr = sum(m.qg[gen_name] for bus_name in index_set for gen_name in gens_by_bus[bus_name])
q_expr -= sum(m.ql[bus_name] for bus_name in index_set if bus_q_loads[bus_name] is not None)
relaxed_balance = False
if rhs_kwargs:
for idx,val in rhs_kwargs.items():
if idx == 'include_reactive_load_shed':
q_expr += eval("m." + val)
if idx == 'include_reactive_over_generation':
q_expr -= eval("m." + val)
if idx == 'include_branch_losses':
pass # branch losses are added to the constraint after updating qfl constraints
if idx == 'include_system_losses':
q_expr -= m.qloss
if idx == 'relax_balance':
relaxed_balance = True
if idx == 'linearize_shunts':
if val == True:
q_expr -= sum( bus_bs_fixed_shunts[b] * (2 * vm_by_bus[b] * m.vm[b] - vm_by_bus[b] ** 2) \
for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
elif val == False:
q_expr -= sum( bus_bs_fixed_shunts[b] * vm_by_bus[b] ** 2 \
for b in index_set if bus_bs_fixed_shunts[b] != 0.0)
else:
raise Exception('linearize_shunts option is invalid.')
if relaxed_balance:
m.eq_q_balance = pe.Constraint(expr = q_expr >= 0.0)
else:
m.eq_q_balance = pe.Constraint(expr = q_expr == 0.0)
def declare_eq_ploss_sum_of_pfl(model, index_set):
"""
Create the equality constraint or expression for total real power losses (from PTDF approximation)
"""
m=model
ploss_is_var = isinstance(m.ploss, pe.Var)
if ploss_is_var:
m.eq_ploss = pe.Constraint()
else:
if not isinstance(m.ploss, pe.Expression):
raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
expr = sum(m.pfl[bn] for bn in index_set)
if ploss_is_var:
m.eq_ploss = m.ploss == expr
else:
m.ploss = expr
def declare_eq_qloss_sum_of_qfl(model, index_set):
"""
    Create the equality constraint or expression for total reactive power losses (from PTDF approximation)
"""
m=model
qloss_is_var = isinstance(m.qloss, pe.Var)
if qloss_is_var:
m.eq_qloss = pe.Constraint()
else:
if not isinstance(m.qloss, pe.Expression):
raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
expr = sum(m.qfl[bn] for bn in index_set)
if qloss_is_var:
m.eq_qloss = m.qloss == expr
else:
m.qloss = expr
def declare_eq_ploss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
"""
Create the equality constraint or expression for total real power losses (from PTDF approximation)
"""
m = model
ploss_is_var = isinstance(m.ploss, pe.Var)
if ploss_is_var:
m.eq_ploss = pe.Constraint()
else:
if not isinstance(m.ploss, pe.Expression):
raise Exception("Unrecognized type for m.ploss", m.ploss.pprint())
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
expr = get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
if ploss_is_var:
m.eq_ploss = m.ploss == expr
else:
m.ploss = expr
def get_ploss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
if not use_residuals:
const = PTDF.get_lossoffset()
iterator = PTDF.get_lossfactor_iterator()
else:
const = PTDF.get_lossoffset_resid()
iterator = PTDF.get_lossfactor_resid_iterator()
max_coef = PTDF.get_lossfactor_abs_max()
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
m_p_nw = m.p_nw
## if model.p_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
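    ## Loss-factor coefficients with magnitude below ptdf_tol (the larger of the
    ## absolute and relative tolerances) are dropped to keep the expression sparse.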
coef_list = []
var_list = []
for bus_name, coef in iterator:
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_p_nw[bus_name])
if use_residuals:
for i in m._idx_monitored:
bn = PTDF.branches_keys_masked[i]
coef_list.append(1)
var_list.append(m.pfl[bn])
if isinstance(m_p_nw, pe.Var):
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_qloss_ptdf_approx(model, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None, use_residuals=False):
"""
    Create the equality constraint or expression for total reactive power losses (from PTDF approximation)
"""
m = model
qloss_is_var = isinstance(m.qloss, pe.Var)
if qloss_is_var:
m.eq_qloss = pe.Constraint()
else:
if not isinstance(m.qloss, pe.Expression):
raise Exception("Unrecognized type for m.qloss", m.qloss.pprint())
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
expr = get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol, use_residuals=use_residuals)
if qloss_is_var:
m.eq_qloss = m.qloss == expr
else:
m.qloss = expr
def get_qloss_expr_ptdf_approx(m, PTDF, abs_ptdf_tol=None, rel_ptdf_tol=None, use_residuals=False):
if not use_residuals:
const = PTDF.get_qlossoffset()
iterator = PTDF.get_qlossfactor_iterator()
else:
const = PTDF.get_qlossoffset_resid()
iterator = PTDF.get_qlossfactor_resid_iterator()
max_coef = PTDF.get_qlossfactor_abs_max()
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
m_q_nw = m.q_nw
## if model.q_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
coef_list = []
var_list = []
for bus_name, coef in iterator:
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_q_nw[bus_name])
if use_residuals:
for i in m._idx_monitored:
bn = PTDF.branches_keys[i]
coef_list.append(1)
var_list.append(m.qfl[bn])
if isinstance(m_q_nw, pe.Var):
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_bus_vm_approx(model, index_set, PTDF=None, rel_ptdf_tol=None, abs_ptdf_tol=None):
"""
Create the equality constraints or expressions for voltage magnitude (from PTDF
approximation) at the bus
"""
m = model
con_set = decl.declare_set("_con_eq_bus_vm_approx_set", model, index_set)
vm_is_var = isinstance(m.vm, pe.Var)
if vm_is_var:
m.eq_vm_bus = pe.Constraint(con_set)
else:
if not isinstance(m.vm, pe.Expression):
raise Exception("Unrecognized type for m.vm", m.vm.pprint())
if PTDF is None:
return
for bus_name in con_set:
expr = \
get_vm_expr_ptdf_approx(m, bus_name, PTDF, rel_ptdf_tol=rel_ptdf_tol, abs_ptdf_tol=abs_ptdf_tol)
if vm_is_var:
m.eq_vm_bus[bus_name] = \
m.vm[bus_name] == expr
else:
m.vm[bus_name] = expr
def get_vm_expr_ptdf_approx(model, bus_name, PTDF, rel_ptdf_tol=None, abs_ptdf_tol=None):
"""
    Create a pyomo voltage magnitude expression from the PTDF object's voltage distribution factors (VDF)
"""
if rel_ptdf_tol is None:
rel_ptdf_tol = 0.
if abs_ptdf_tol is None:
abs_ptdf_tol = 0.
const = PTDF.get_bus_vdf_const(bus_name)
max_coef = PTDF.get_bus_vdf_abs_max(bus_name)
ptdf_tol = max(abs_ptdf_tol, rel_ptdf_tol*max_coef)
## NOTE: It would be easy to hold on to the 'ptdf' dictionary here, if we wanted to
m_q_nw = model.q_nw
qnw_is_var = isinstance(m_q_nw, pe.Var)
## if model.q_nw is Var, we can use LinearExpression
## to build these dense constraints much faster
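    ## Unlike the loss expressions, small VDF coefficients are not dropped outright:
    ## their contribution is folded into the constant using the current q_nw value.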
coef_list = []
var_list = []
for bn, coef in PTDF.get_bus_vdf_iterator(bus_name):
if abs(coef) >= ptdf_tol:
coef_list.append(coef)
var_list.append(m_q_nw[bn])
elif qnw_is_var:
const += coef * m_q_nw[bn].value
else:
const += coef * m_q_nw[bn].expr()
if qnw_is_var:
expr = LinearExpression(linear_vars=var_list, linear_coefs=coef_list, constant=const)
else:
expr = quicksum( (coef*var for coef, var in zip(coef_list, var_list)), start=const, linear=True)
return expr
def declare_eq_p_balance_dc_approx(model, index_set,
bus_p_loads,
gens_by_bus,
bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
approximation_type=ApproximationType.BTHETA,
dc_inlet_branches_by_bus=None,
dc_outlet_branches_by_bus=None,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
    at a bus using the variables for real power flows.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
if approximation_type == ApproximationType.BTHETA:
p_expr = -sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
elif approximation_type == ApproximationType.BTHETA_LOSSES:
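            # With the lossy B-theta approximation, half of each incident branch's
            # loss (m.pfl) is charged to this terminal bus.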
p_expr = -0.5*sum(m.pfl[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
p_expr -= 0.5*sum(m.pfl[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr -= sum(m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name])
p_expr += sum(m.pf[branch_name] for branch_name in inlet_branches_by_bus[bus_name])
if dc_inlet_branches_by_bus is not None:
p_expr -= sum(m.dcpf[branch_name] for branch_name in dc_outlet_branches_by_bus[bus_name])
p_expr += sum(m.dcpf[branch_name] for branch_name in dc_inlet_branches_by_bus[bus_name])
if bus_gs_fixed_shunts[bus_name] != 0.0:
p_expr -= bus_gs_fixed_shunts[bus_name]
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
k = bus_name
for idx, val in rhs_kwargs.items():
if isinstance(val, tuple):
val,key = val
k = (key,bus_name)
if not k in eval("m." + val).index_set():
continue
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[k]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[k]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_p_balance(model, index_set,
bus_p_loads,
gens_by_bus,
bus_gs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
    at a bus using the variables for real power flows.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
p_expr = -sum([m.pf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
p_expr -= sum([m.pt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
if bus_gs_fixed_shunts[bus_name] != 0.0:
vmsq = m.vmsq[bus_name]
p_expr -= bus_gs_fixed_shunts[bus_name] * vmsq
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_p_balance_with_i_aggregation(model, index_set,
bus_p_loads,
gens_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the real power balance
    at a bus using the aggregated bus current variables.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_p_balance', model, index_set)
m.eq_p_balance = pe.Constraint(con_set)
for bus_name in con_set:
p_expr = -m.vr[bus_name] * m.ir_aggregation_at_bus[bus_name] + \
-m.vj[bus_name] * m.ij_aggregation_at_bus[bus_name]
if bus_p_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
p_expr -= m.pl[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
p_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
p_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
p_expr += m.pg[gen_name]
m.eq_p_balance[bus_name] = \
p_expr == 0.0
def declare_eq_q_balance(model, index_set,
bus_q_loads,
gens_by_bus,
bus_bs_fixed_shunts,
inlet_branches_by_bus, outlet_branches_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the reactive power balance
    at a bus using the variables for reactive power flows.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
m.eq_q_balance = pe.Constraint(con_set)
for bus_name in con_set:
q_expr = -sum([m.qf[branch_name] for branch_name in outlet_branches_by_bus[bus_name]])
q_expr -= sum([m.qt[branch_name] for branch_name in inlet_branches_by_bus[bus_name]])
if bus_bs_fixed_shunts[bus_name] != 0.0:
vmsq = m.vmsq[bus_name]
q_expr += bus_bs_fixed_shunts[bus_name] * vmsq
if bus_q_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
q_expr -= m.ql[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
q_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
q_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
q_expr += m.qg[gen_name]
m.eq_q_balance[bus_name] = \
q_expr == 0.0
def declare_eq_q_balance_with_i_aggregation(model, index_set,
bus_q_loads,
gens_by_bus,
**rhs_kwargs):
"""
Create the equality constraints for the reactive power balance
    at a bus using the aggregated bus current variables.
NOTE: Equation build orientates constants to the RHS in order to compute the correct dual variable sign
"""
m = model
con_set = decl.declare_set('_con_eq_q_balance', model, index_set)
m.eq_q_balance = pe.Constraint(con_set)
for bus_name in con_set:
q_expr = m.vr[bus_name] * m.ij_aggregation_at_bus[bus_name] + \
-m.vj[bus_name] * m.ir_aggregation_at_bus[bus_name]
if bus_q_loads[bus_name] != 0.0: # only applies to fixed loads, otherwise may cause an error
q_expr -= m.ql[bus_name]
if rhs_kwargs:
for idx, val in rhs_kwargs.items():
if idx == 'include_feasibility_load_shed':
q_expr += eval("m." + val)[bus_name]
if idx == 'include_feasibility_over_generation':
q_expr -= eval("m." + val)[bus_name]
for gen_name in gens_by_bus[bus_name]:
q_expr += m.qg[gen_name]
m.eq_q_balance[bus_name] = \
q_expr == 0.0
def declare_ineq_vm_bus_lbub(model, index_set, buses, coordinate_type=CoordinateType.POLAR):
"""
Create the inequalities for the voltage magnitudes from the
voltage variables
"""
m = model
con_set = decl.declare_set('_con_ineq_vm_bus_lbub',
model=model, index_set=index_set)
m.ineq_vm_bus_lb = pe.Constraint(con_set)
m.ineq_vm_bus_ub = pe.Constraint(con_set)
if coordinate_type == CoordinateType.POLAR:
for bus_name in con_set:
m.ineq_vm_bus_lb[bus_name] = \
buses[bus_name]['v_min'] <= m.vm[bus_name]
m.ineq_vm_bus_ub[bus_name] = \
m.vm[bus_name] <= buses[bus_name]['v_max']
elif coordinate_type == CoordinateType.RECTANGULAR:
for bus_name in con_set:
m.ineq_vm_bus_lb[bus_name] = \
buses[bus_name]['v_min']**2 <= m.vr[bus_name]**2 + m.vj[bus_name]**2
m.ineq_vm_bus_ub[bus_name] = \
m.vr[bus_name]**2 + m.vj[bus_name]**2 <= buses[bus_name]['v_max']**2
|
[
"pyomo.core.expr.numeric_expr.LinearExpression",
"pyomo.environ.Expression",
"pyomo.environ.Constraint",
"math.radians",
"egret.model_library.decl.declare_expr",
"egret.model_library.decl.declare_set",
"egret.model_library.decl.declare_var"
] |
[((1025, 1091), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""vr"""'], {'model': 'model', 'index_set': 'index_set'}), "('vr', model=model, index_set=index_set, **kwargs)\n", (1041, 1091), True, 'import egret.model_library.decl as decl\n'), ((1234, 1300), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""vj"""'], {'model': 'model', 'index_set': 'index_set'}), "('vj', model=model, index_set=index_set, **kwargs)\n", (1250, 1300), True, 'import egret.model_library.decl as decl\n'), ((1441, 1507), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""vm"""'], {'model': 'model', 'index_set': 'index_set'}), "('vm', model=model, index_set=index_set, **kwargs)\n", (1457, 1507), True, 'import egret.model_library.decl as decl\n'), ((1642, 1708), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""va"""'], {'model': 'model', 'index_set': 'index_set'}), "('va', model=model, index_set=index_set, **kwargs)\n", (1658, 1708), True, 'import egret.model_library.decl as decl\n'), ((1903, 1951), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_expr_vmsq"""', 'model', 'index_set'], {}), "('_expr_vmsq', model, index_set)\n", (1919, 1951), True, 'import egret.model_library.decl as decl\n'), ((1965, 1988), 'pyomo.environ.Expression', 'pe.Expression', (['expr_set'], {}), '(expr_set)\n', (1978, 1988), True, 'import pyomo.environ as pe\n'), ((2396, 2464), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""vmsq"""'], {'model': 'model', 'index_set': 'index_set'}), "('vmsq', model=model, index_set=index_set, **kwargs)\n", (2412, 2464), True, 'import egret.model_library.decl as decl\n'), ((2642, 2692), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_vmsq"""', 'model', 'index_set'], {}), "('_con_eq_vmsq', model, index_set)\n", (2658, 2692), True, 'import egret.model_library.decl as decl\n'), ((2709, 2731), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (2722, 2731), True, 'import pyomo.environ as pe\n'), ((3275, 3364), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""ir_aggregation_at_bus"""'], {'model': 'model', 'index_set': 'index_set'}), "('ir_aggregation_at_bus', model=model, index_set=index_set,\n **kwargs)\n", (3291, 3364), True, 'import egret.model_library.decl as decl\n'), ((3518, 3607), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""ij_aggregation_at_bus"""'], {'model': 'model', 'index_set': 'index_set'}), "('ij_aggregation_at_bus', model=model, index_set=index_set,\n **kwargs)\n", (3534, 3607), True, 'import egret.model_library.decl as decl\n'), ((3727, 3793), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""pl"""'], {'model': 'model', 'index_set': 'index_set'}), "('pl', model=model, index_set=index_set, **kwargs)\n", (3743, 3793), True, 'import egret.model_library.decl as decl\n'), ((3921, 3987), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""ql"""'], {'model': 'model', 'index_set': 'index_set'}), "('ql', model=model, index_set=index_set, **kwargs)\n", (3937, 3987), True, 'import egret.model_library.decl as decl\n'), ((4123, 4191), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""p_nw"""'], {'model': 'model', 'index_set': 'index_set'}), "('p_nw', model=model, index_set=index_set, **kwargs)\n", (4139, 4191), True, 'import egret.model_library.decl as decl\n'), ((4331, 4399), 'egret.model_library.decl.declare_var', 'decl.declare_var', (['"""q_nw"""'], {'model': 'model', 
'index_set': 'index_set'}), "('q_nw', model=model, index_set=index_set, **kwargs)\n", (4347, 4399), True, 'import egret.model_library.decl as decl\n'), ((4646, 4706), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_expr_shunt_at_bus_set"""', 'model', 'index_set'], {}), "('_expr_shunt_at_bus_set', model, index_set)\n", (4662, 4706), True, 'import egret.model_library.decl as decl\n'), ((4724, 4763), 'pyomo.environ.Expression', 'pe.Expression', (['expr_set'], {'initialize': '(0.0)'}), '(expr_set, initialize=0.0)\n', (4737, 4763), True, 'import pyomo.environ as pe\n'), ((4780, 4819), 'pyomo.environ.Expression', 'pe.Expression', (['expr_set'], {'initialize': '(0.0)'}), '(expr_set, initialize=0.0)\n', (4793, 4819), True, 'import pyomo.environ as pe\n'), ((6238, 6281), 'egret.model_library.decl.declare_expr', 'decl.declare_expr', (['"""p_nw"""', 'model', 'index_set'], {}), "('p_nw', model, index_set)\n", (6255, 6281), True, 'import egret.model_library.decl as decl\n'), ((8626, 8693), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_p_net_withdraw_at_bus"""', 'model', 'index_set'], {}), "('_con_eq_p_net_withdraw_at_bus', model, index_set)\n", (8642, 8693), True, 'import egret.model_library.decl as decl\n'), ((9009, 9031), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (9022, 9031), True, 'import pyomo.environ as pe\n'), ((11106, 11149), 'egret.model_library.decl.declare_expr', 'decl.declare_expr', (['"""q_nw"""', 'model', 'index_set'], {}), "('q_nw', model, index_set)\n", (11123, 11149), True, 'import egret.model_library.decl as decl\n'), ((12499, 12566), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_q_net_withdraw_at_bus"""', 'model', 'index_set'], {}), "('_con_eq_q_net_withdraw_at_bus', model, index_set)\n", (12515, 12566), True, 'import egret.model_library.decl as decl\n'), ((12601, 12623), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (12614, 12623), True, 'import pyomo.environ as pe\n'), ((14426, 14496), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_i_aggregation_at_bus_set"""', 'model', 'index_set'], {}), "('_con_eq_i_aggregation_at_bus_set', model, index_set)\n", (14442, 14496), True, 'import egret.model_library.decl as decl\n'), ((14531, 14553), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (14544, 14553), True, 'import pyomo.environ as pe\n'), ((14587, 14609), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (14600, 14609), True, 'import pyomo.environ as pe\n'), ((26022, 26085), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_bus_vm_approx_set"""', 'model', 'index_set'], {}), "('_con_eq_bus_vm_approx_set', model, index_set)\n", (26038, 26085), True, 'import egret.model_library.decl as decl\n'), ((28782, 28837), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_p_balance"""', 'model', 'index_set'], {}), "('_con_eq_p_balance', model, index_set)\n", (28798, 28837), True, 'import egret.model_library.decl as decl\n'), ((28860, 28882), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (28873, 28882), True, 'import pyomo.environ as pe\n'), ((31361, 31416), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_p_balance"""', 'model', 'index_set'], {}), "('_con_eq_p_balance', model, index_set)\n", (31377, 31416), True, 'import egret.model_library.decl as decl\n'), ((31439, 
31461), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (31452, 31461), True, 'import pyomo.environ as pe\n'), ((32946, 33001), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_p_balance"""', 'model', 'index_set'], {}), "('_con_eq_p_balance', model, index_set)\n", (32962, 33001), True, 'import egret.model_library.decl as decl\n'), ((33024, 33046), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (33037, 33046), True, 'import pyomo.environ as pe\n'), ((34389, 34444), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_q_balance"""', 'model', 'index_set'], {}), "('_con_eq_q_balance', model, index_set)\n", (34405, 34444), True, 'import egret.model_library.decl as decl\n'), ((34467, 34489), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (34480, 34489), True, 'import pyomo.environ as pe\n'), ((35982, 36037), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_eq_q_balance"""', 'model', 'index_set'], {}), "('_con_eq_q_balance', model, index_set)\n", (35998, 36037), True, 'import egret.model_library.decl as decl\n'), ((36060, 36082), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (36073, 36082), True, 'import pyomo.environ as pe\n'), ((37077, 37152), 'egret.model_library.decl.declare_set', 'decl.declare_set', (['"""_con_ineq_vm_bus_lbub"""'], {'model': 'model', 'index_set': 'index_set'}), "('_con_ineq_vm_bus_lbub', model=model, index_set=index_set)\n", (37093, 37152), True, 'import egret.model_library.decl as decl\n'), ((37208, 37230), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (37221, 37230), True, 'import pyomo.environ as pe\n'), ((37254, 37276), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (37267, 37276), True, 'import pyomo.environ as pe\n'), ((16802, 16835), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(p_expr >= 0.0)'}), '(expr=p_expr >= 0.0)\n', (16815, 16835), True, 'import pyomo.environ as pe\n'), ((16871, 16904), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(p_expr == 0.0)'}), '(expr=p_expr == 0.0)\n', (16884, 16904), True, 'import pyomo.environ as pe\n'), ((18662, 18695), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(p_expr >= 0.0)'}), '(expr=p_expr >= 0.0)\n', (18675, 18695), True, 'import pyomo.environ as pe\n'), ((18733, 18766), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(p_expr == 0.0)'}), '(expr=p_expr == 0.0)\n', (18746, 18766), True, 'import pyomo.environ as pe\n'), ((20519, 20552), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(q_expr >= 0.0)'}), '(expr=q_expr >= 0.0)\n', (20532, 20552), True, 'import pyomo.environ as pe\n'), ((20590, 20623), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(q_expr == 0.0)'}), '(expr=q_expr == 0.0)\n', (20603, 20623), True, 'import pyomo.environ as pe\n'), ((20899, 20914), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {}), '()\n', (20912, 20914), True, 'import pyomo.environ as pe\n'), ((21467, 21482), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {}), '()\n', (21480, 21482), True, 'import pyomo.environ as pe\n'), ((22093, 22108), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {}), '()\n', (22106, 22108), True, 'import pyomo.environ as pe\n'), ((23553, 23631), 'pyomo.core.expr.numeric_expr.LinearExpression', 'LinearExpression', ([], {'linear_vars': 'var_list', 'linear_coefs': 'coef_list', 
'constant': 'const'}), '(linear_vars=var_list, linear_coefs=coef_list, constant=const)\n', (23569, 23631), False, 'from pyomo.core.expr.numeric_expr import LinearExpression\n'), ((24095, 24110), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {}), '()\n', (24108, 24110), True, 'import pyomo.environ as pe\n'), ((25553, 25631), 'pyomo.core.expr.numeric_expr.LinearExpression', 'LinearExpression', ([], {'linear_vars': 'var_list', 'linear_coefs': 'coef_list', 'constant': 'const'}), '(linear_vars=var_list, linear_coefs=coef_list, constant=const)\n', (25569, 25631), False, 'from pyomo.core.expr.numeric_expr import LinearExpression\n'), ((26169, 26191), 'pyomo.environ.Constraint', 'pe.Constraint', (['con_set'], {}), '(con_set)\n', (26182, 26191), True, 'import pyomo.environ as pe\n'), ((27735, 27813), 'pyomo.core.expr.numeric_expr.LinearExpression', 'LinearExpression', ([], {'linear_vars': 'var_list', 'linear_coefs': 'coef_list', 'constant': 'const'}), '(linear_vars=var_list, linear_coefs=coef_list, constant=const)\n', (27751, 27813), False, 'from pyomo.core.expr.numeric_expr import LinearExpression\n'), ((14013, 14031), 'math.radians', 'radians', (['ref_angle'], {}), '(ref_angle)\n', (14020, 14031), False, 'from math import tan, radians\n')]
|
import os
from pymco.test import ctxt
from . import base
class RabbitMQTestCase(base.IntegrationTestCase):
'''RabbitMQ integration test case.'''
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': '61613',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': '<PASSWORD>',
}
class TestWithRabbitMQMCo22x(base.MCollective22x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo23x(base.MCollective23x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo24x(base.MCollective24x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQSSLMCo23x(base.MCollective23x, RabbitMQTestCase):
"""MCollective integration test case."""
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': 61612,
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': '<PASSWORD>',
'plugin.rabbitmq.pool.1.ssl': 'true',
'plugin.rabbitmq.pool.1.ssl.ca': os.path.join(ctxt.ROOT,
'fixtures/ca.pem'),
'plugin.rabbitmq.pool.1.ssl.key': os.path.join(
ctxt.ROOT,
'fixtures/activemq_private.pem'),
'plugin.rabbitmq.pool.1.ssl.cert': os.path.join(
ctxt.ROOT,
'fixtures/activemq_cert.pem',
),
}
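# --- Illustrative note (not part of the original tests) ---
# These classes are integration tests: they assume a local RabbitMQ broker with
# its STOMP adapter enabled, a '/mcollective' vhost and an 'mcollective' user,
# listening on 61613 for the plain connection and on 61612 (using the fixture
# certificates referenced above) for the SSL variant.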
|
[
"os.path.join"
] |
[((1408, 1450), 'os.path.join', 'os.path.join', (['ctxt.ROOT', '"""fixtures/ca.pem"""'], {}), "(ctxt.ROOT, 'fixtures/ca.pem')\n", (1420, 1450), False, 'import os\n'), ((1549, 1605), 'os.path.join', 'os.path.join', (['ctxt.ROOT', '"""fixtures/activemq_private.pem"""'], {}), "(ctxt.ROOT, 'fixtures/activemq_private.pem')\n", (1561, 1605), False, 'import os\n'), ((1675, 1728), 'os.path.join', 'os.path.join', (['ctxt.ROOT', '"""fixtures/activemq_cert.pem"""'], {}), "(ctxt.ROOT, 'fixtures/activemq_cert.pem')\n", (1687, 1728), False, 'import os\n')]
|
import contextlib
import functools
import io
import os
from pathlib import Path
from typing import Callable, Union, Dict, Optional
import babel.localedata
from babel.core import Locale
__all__ = [
"get_locale",
"set_locale",
"reload_locales",
"cog_i18n",
"Translator",
"get_babel_locale",
]
_current_locale = "en-US"
WAITING_FOR_MSGID = 1
IN_MSGID = 2
WAITING_FOR_MSGSTR = 3
IN_MSGSTR = 4
MSGID = 'msgid "'
MSGSTR = 'msgstr "'
_translators = []
def get_locale():
return _current_locale
def set_locale(locale):
global _current_locale
_current_locale = locale
reload_locales()
def reload_locales():
for translator in _translators:
translator.load_translations()
def _parse(translation_file: io.TextIOWrapper) -> Dict[str, str]:
"""
Custom gettext parsing of translation files.
Parameters
----------
translation_file : io.TextIOWrapper
An open text file containing translations.
Returns
-------
Dict[str, str]
A dict mapping the original strings to their translations. Empty
translated strings are omitted.
"""
step = None
untranslated = ""
translated = ""
translations = {}
for line in translation_file:
line = line.strip()
if line.startswith(MSGID):
# New msgid
if step is IN_MSGSTR and translated:
# Store the last translation
translations[_unescape(untranslated)] = _unescape(translated)
step = IN_MSGID
untranslated = line[len(MSGID): -1]
elif line.startswith('"') and line.endswith('"'):
if step is IN_MSGID:
# Line continuing on from msgid
untranslated += line[1:-1]
elif step is IN_MSGSTR:
# Line continuing on from msgstr
translated += line[1:-1]
elif line.startswith(MSGSTR):
# New msgstr
step = IN_MSGSTR
translated = line[len(MSGSTR): -1]
if step is IN_MSGSTR and translated:
# Store the final translation
translations[_unescape(untranslated)] = _unescape(translated)
return translations
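# --- Illustrative note (not part of the original module) ---
# Example of what _parse() produces: given a .po fragment such as
#   msgid "Hello"
#   msgstr "Bonjour"
# it returns {"Hello": "Bonjour"}; entries whose msgstr is empty are omitted.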
def _unescape(string):
string = string.replace(r"\\", "\\")
string = string.replace(r"\t", "\t")
string = string.replace(r"\r", "\r")
string = string.replace(r"\n", "\n")
string = string.replace(r"\"", '"')
return string
def get_locale_path(cog_folder: Path, extension: str) -> Path:
"""
Gets the folder path containing localization files.
:param Path cog_folder:
The cog folder that we want localizations for.
:param str extension:
Extension of localization files.
:return:
Path of possible localization file, it may not exist.
"""
return cog_folder / "locales" / "{}.{}".format(get_locale(), extension)
class Translator(Callable[[str], str]):
"""Function to get translated strings at runtime."""
def __init__(self, name: str, file_location: Union[str, Path, os.PathLike]):
"""
Initializes an internationalization object.
Parameters
----------
name : str
Your cog name.
file_location : `str` or `pathlib.Path`
This should always be ``__file__`` otherwise your localizations
will not load.
"""
self.cog_folder = Path(file_location).resolve().parent
self.cog_name = name
self.translations = {}
_translators.append(self)
self.load_translations()
def __call__(self, untranslated: str) -> str:
"""Translate the given string.
This will look for the string in the translator's :code:`.pot` file,
with respect to the current locale.
"""
try:
return self.translations[untranslated]
except KeyError:
return untranslated
def load_translations(self):
"""
Loads the current translations.
"""
self.translations = {}
locale_path = get_locale_path(self.cog_folder, "po")
with contextlib.suppress(IOError, FileNotFoundError):
with locale_path.open(encoding="utf-8") as file:
self._parse(file)
def _parse(self, translation_file):
self.translations.update(_parse(translation_file))
def _add_translation(self, untranslated, translated):
untranslated = _unescape(untranslated)
translated = _unescape(translated)
if translated:
self.translations[untranslated] = translated
@functools.lru_cache()
def _get_babel_locale(red_locale: str) -> babel.core.Locale:
supported_locales = babel.localedata.locale_identifiers()
try: # Handles cases where red_locale is already Babel supported
babel_locale = Locale(*babel.parse_locale(red_locale))
except (ValueError, babel.core.UnknownLocaleError):
try:
babel_locale = Locale(*babel.parse_locale(red_locale, sep="-"))
except (ValueError, babel.core.UnknownLocaleError):
# ValueError is Raised by `parse_locale` when an invalid Locale is given to it
# Lets handle it silently and default to "en_US"
try:
# Try to find a babel locale that's close to the one used by red
babel_locale = Locale(Locale.negotiate([red_locale], supported_locales, sep="-"))
except (ValueError, TypeError, babel.core.UnknownLocaleError):
# If we fail to get a close match we will then default to "en_US"
babel_locale = Locale("en", "US")
return babel_locale
def get_babel_locale(locale: Optional[str] = None) -> babel.core.Locale:
"""Function to convert a locale to a ``babel.core.Locale``.
Parameters
----------
locale : Optional[str]
The locale to convert, if not specified it defaults to the bot's locale.
Returns
-------
babel.core.Locale
The babel locale object.
"""
if locale is None:
locale = get_locale()
return _get_babel_locale(locale)
# This import to be down here to avoid circular import issues.
# This will be cleaned up at a later date
# noinspection PyPep8
from Helix.utils import commands
def cog_i18n(translator: Translator):
"""Get a class decorator to link the translator to this cog."""
def decorator(cog_class: type):
cog_class.__translator__ = translator
for name, attr in cog_class.__dict__.items():
if isinstance(attr, (commands.Group, commands.Command)):
attr.translator = translator
setattr(cog_class, name, attr)
return cog_class
return decorator
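# --- Illustrative usage sketch (not part of the original module) ---
# A cog module typically wires these pieces up roughly as below; "MyCog" and its
# method are hypothetical names, and the class is kept plain here to avoid
# assuming the exact API exposed by Helix.utils.commands.
#
#     _ = Translator("MyCog", __file__)   # loads ./locales/<locale>.po next to the file
#
#     @cog_i18n(_)
#     class MyCog:
#         def describe(self):
#             return _("Some translatable text")  # falls back to the original string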
|
[
"contextlib.suppress",
"pathlib.Path",
"babel.core.Locale.negotiate",
"functools.lru_cache",
"babel.core.Locale"
] |
[((4597, 4618), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4616, 4618), False, 'import functools\n'), ((4121, 4168), 'contextlib.suppress', 'contextlib.suppress', (['IOError', 'FileNotFoundError'], {}), '(IOError, FileNotFoundError)\n', (4140, 4168), False, 'import contextlib\n'), ((3408, 3427), 'pathlib.Path', 'Path', (['file_location'], {}), '(file_location)\n', (3412, 3427), False, 'from pathlib import Path\n'), ((5368, 5426), 'babel.core.Locale.negotiate', 'Locale.negotiate', (['[red_locale]', 'supported_locales'], {'sep': '"""-"""'}), "([red_locale], supported_locales, sep='-')\n", (5384, 5426), False, 'from babel.core import Locale\n'), ((5616, 5634), 'babel.core.Locale', 'Locale', (['"""en"""', '"""US"""'], {}), "('en', 'US')\n", (5622, 5634), False, 'from babel.core import Locale\n')]
|
#!/bin/env/python3
import sys
import os
import argparse
#location = '.'
#suffix = '.py'
def get_filelist(location, suffix = None, recursive = False):
""" Get a list of files in a directory and optionally its subdirs,
with optional suffix matching requirement."""
    if recursive == False:
        # Note: paths are built by simple string concatenation, so `location`
        # is expected to end with a path separator (e.g. './data/').
        if suffix is None:
            filelist = [location+x for x in os.listdir(location)]
        else:
            filelist = [location+x for x in os.listdir(location) if x[-len(suffix):] == suffix]
elif recursive == True:
filelist = []
for path, subdirs, files in os.walk(location):
for x in files:
if suffix is None or x[-len(suffix):] == suffix:
rpath = os.path.join(path, x)
filelist.append(rpath)
return filelist
def build_to_azure_calls(files, local_location, azure_location,
keep_structure = True,
relative_paths = False,
trim_local_paths = None):
""" Take a list of local relative filepaths and build azure transfer calls
If keep_structure == true, the subfolders will be added to the azure calls.
Note for the retention of structure, only subfolders of the current working
directory will be valid (no higher levels in file heirarchy permitted)
"""
outlist = []
for f in files:
if keep_structure == False:
outstr = f'azcopy copy "{f}" "{azure_location}"\n'
else:
if relative_paths == True:
if f[:2] != './':
raise ValueError("The keep_structure argument requires relative imports (leading dotslash ('./')")
parts = f[2:].split("/")
else:
parts = f.split("/")
add_path = "/".join(parts[:-1])
add_path+="/"
#second bit of logic here is to avoid the double end slash when not
#including any subfolders
if trim_local_paths is None and add_path != "/":
outstr = f'azcopy copy "{f}" "{azure_location}{add_path}"\n'
else:
az_path = add_path.replace(local_location, '')
outstr = f'azcopy copy "{f}" "{azure_location}{az_path}"\n'
outlist.append(outstr)
return outlist
def build_from_azure_calls(files, azure_location, local_location = "."):
""" Take a list of files and their location on azure and build transfer calls
to move them to a specified local location."""
outlist = []
for f in files:
outstr = f'azcopy copy "{azure_location}{f}" "{local_location}"\n'
outlist.append(outstr)
return outlist
def read_filelist(file):
""" Read in a list of files for transfer FROM azure to local."""
dat = []
with open(file, "r") as f:
for line in f:
line = line.rstrip()
dat.append(line)
return dat
def write_calls_file(calls, outfile):
""" Take the produced azcopy file-by-file calls and write the output script """
f=open(outfile, 'w')
for line in calls:
f.write(line)
f.close()
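# --- Illustrative usage sketch (not part of the original script) ---
# Chains the helpers above into an upload script. The container URL is a
# hypothetical placeholder and would normally include a SAS token.
if __name__ == '__main__':
    files = get_filelist('./', suffix='.py', recursive=True)
    calls = build_to_azure_calls(
        files,
        local_location='./',
        azure_location='https://myaccount.blob.core.windows.net/mycontainer/',
        keep_structure=True,
        relative_paths=True)
    write_calls_file(calls, 'upload_calls.sh')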
|
[
"os.walk",
"os.path.join",
"os.listdir"
] |
[((556, 573), 'os.walk', 'os.walk', (['location'], {}), '(location)\n', (563, 573), False, 'import os\n'), ((361, 381), 'os.listdir', 'os.listdir', (['location'], {}), '(location)\n', (371, 381), False, 'import os\n'), ((428, 448), 'os.listdir', 'os.listdir', (['location'], {}), '(location)\n', (438, 448), False, 'import os\n'), ((663, 684), 'os.path.join', 'os.path.join', (['path', 'x'], {}), '(path, x)\n', (675, 684), False, 'import os\n')]
|
import csv
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from dcim.models import Site
from tenancy.models import Tenant
from sidekick.utils import MEMBER_TYPES
class Command(BaseCommand):
help = "Import existing members"
def add_arguments(self, parser):
parser.add_argument(
'--file', required=True, help='The path to the CSV file')
parser.add_argument(
'--quiet', required=False, action='store_true',
help='Suppress messages')
parser.add_argument(
'--dry-run', required=False, action='store_true',
help='Perform a dry-run and make no changes')
def handle(self, *args, **options):
quiet = options['quiet']
dry_run = options['dry_run']
f = options['file']
rows = []
with open(f) as csvfile:
r = csv.reader(csvfile)
for row in r:
rows.append(row)
for row in rows:
(name, description, member_type, comments, latitude, longitude) = row
name = name.strip()
if member_type not in MEMBER_TYPES:
self.stdout.write(f"ERROR: Incorrect member type for {name}: {member_type}. Skipping.")
continue
# See if there is an existing tenant/member.
# If there is, compare values and update as needed.
# If there isn't, create one.
try:
changed = False
tenant = Tenant.objects.get(name=name)
if tenant.description != description:
changed = True
tenant.description = description
if dry_run or not quiet:
self.stdout.write(f"Changing description of {name} to {description}")
if tenant.comments != comments:
changed = True
tenant.comments = comments
if dry_run or not quiet:
self.stdout.write(f"Changing comments of {name} to {comments}")
if 'member_type' not in tenant.cf or tenant.cf['member_type'] != member_type:
changed = True
tenant.cf['member_type'] = member_type
if dry_run or not quiet:
self.stdout.write(f"Changing member_type of {name} to {member_type}")
if not dry_run and changed:
self.stdout.write(f"Updated Tenant: {name}")
tenant.save()
except Tenant.MultipleObjectsReturned:
self.stdout.write(f"WARNING: Multiple results found for {name}. Skipping.")
continue
except Tenant.DoesNotExist:
if options['dry_run']:
self.stdout.write(f"Would have created Tenant: {name}")
continue
tenant = Tenant.objects.create(
name=name,
slug=slugify(name),
description=description,
comments=comments,
)
tenant.cf['member_type'] = member_type
tenant.save()
self.stdout.write(f"Created Tenant: {name}")
# See if there is an existing site.
# If there is, compare values and update as needed.
# If there isn't, create one.
try:
changed = False
site = Site.objects.get(name=name)
if site.latitude != latitude:
changed = True
site.latitude = latitude
if dry_run or not quiet:
self.stdout.write(f"Changing latitude of Site {name} to {latitude}")
if site.longitude != longitude:
changed = True
site.longitude = longitude
if dry_run or not quiet:
self.stdout.write(f"Changing longitude of Site {name} to {longitude}")
if not dry_run and changed:
self.stdout.write(f"Updated Site: {name}")
site.save()
except Site.MultipleObjectsReturned:
self.stdout.write(f"WARNING: Multiple sites found for {name}. Skipping.")
continue
except Site.DoesNotExist:
if options['dry_run']:
self.stdout.write(f"Would have created Site: {name}")
continue
site = Site.objects.create(
name=name,
tenant=tenant,
slug=slugify(name),
latitude=latitude,
longitude=longitude,
)
site.save()
self.stdout.write(f"Created Site: {name}")
|
[
"dcim.models.Site.objects.get",
"tenancy.models.Tenant.objects.get",
"csv.reader",
"django.utils.text.slugify"
] |
[((896, 915), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (906, 915), False, 'import csv\n'), ((1531, 1560), 'tenancy.models.Tenant.objects.get', 'Tenant.objects.get', ([], {'name': 'name'}), '(name=name)\n', (1549, 1560), False, 'from tenancy.models import Tenant\n'), ((3528, 3555), 'dcim.models.Site.objects.get', 'Site.objects.get', ([], {'name': 'name'}), '(name=name)\n', (3544, 3555), False, 'from dcim.models import Site\n'), ((3037, 3050), 'django.utils.text.slugify', 'slugify', (['name'], {}), '(name)\n', (3044, 3050), False, 'from django.utils.text import slugify\n'), ((4713, 4726), 'django.utils.text.slugify', 'slugify', (['name'], {}), '(name)\n', (4720, 4726), False, 'from django.utils.text import slugify\n')]
|
import json
import os
import shutil
import functools
import re
import requests
from unidecode import unidecode as udecode
from PIL import Image
from slugify import slugify
class ImageNotFound(Exception):
pass
def parse_string(string):
if string is None:
return None
return string.strip()
class Champion(object):
def __init__(self, pk, name, image, title=None, is_range=None, nation=None):
self.pk = pk
self.name = name
self.image = image
self.title = title
self.is_range = is_range
self.nation = nation
self.skills = []
self.translations = {}
def add_skill(self, skill):
self.skills.append(skill)
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'nation': parse_string(self.nation),
'image': parse_string(self.image),
'title': parse_string(self.title),
'is_range': self.is_range,
'skills': [s.to_dict() for s in self.skills]
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Skill(object):
def __init__(self, pk, name, image):
self.pk = pk
self.name = name
self.image = image
self.translations = {}
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'image': parse_string(self.image),
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Item(object):
def __init__(self, pk, name, image, into, _from, price):
self.pk = pk
self.name = name
self.image = image
self.into = into
self._from = _from
self.price = int(price) if price else None
self.translations = {}
def add_translation(self, field, value):
self.translations[field] = value
def to_dict(self):
data = {
'id': parse_string(self.pk),
'name': parse_string(self.name),
'image': parse_string(self.image),
'into': self.into,
'from': self._from,
'price': self.price,
}
for i18n_key, i18n_value in self.translations.items():
data['{}_i18n'.format(i18n_key)] = i18n_value
return data
class Importer(object):
export_path = './data/champions.json'
image_path = './data/images/champions/'
def run(self):
os.makedirs(self.image_path, exist_ok=True)
objects = self.get_objects()
try:
is_valid = self.validate(objects)
except Exception as e:
import ipdb; ipdb.set_trace()
is_valid = False
if not is_valid:
raise Exception('Something went wrong in the validate method.')
self.export(objects)
return objects
def get_objects(self):
return []
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump([o.to_dict() for o in objects], outfile, ensure_ascii=False)
return outfile
def slugify(self, value):
return slugify(value)
def clean_filename(self, filename):
filename = udecode(''.join(filename.split()).lower())
extension_dot = filename.rindex('.')
left_part = filename[:extension_dot]
right_part = filename[extension_dot:]
# Characters after last . can be [a-z] only
right_part = " ".join(re.findall("[a-zA-Z]+", right_part))
return "{}.{}".format(left_part, right_part)
def download_image(self, url, filename):
response = requests.get(url, stream=True)
if response.status_code != 200:
msg = 'Image at {} not found'.format(url)
print(msg)
raise ImageNotFound(msg)
filename = self.clean_filename(filename)
full_path = os.path.join(self.image_path, filename)
with open(full_path, 'wb') as outfile:
shutil.copyfileobj(response.raw, outfile)
# compress image
image = Image.open(full_path)
image.save(full_path, quality=95, optimize=True)
del response
return filename
def validate(self, objects):
return True
class ChampionImporter(Importer):
export_path = './data/champions.json'
image_path = './data/images/champions/'
def validate(self, objects):
for obj in objects:
# Validate basic fields
if not all([obj.pk, obj.name, obj.image]):
raise Exception('Champion {} missing fields.'.format(obj.pk))
# Validate skills
skills = obj.skills
if not skills:
raise Exception('Champion {} missing skills.'.format(obj.pk))
for skill in skills:
if not all([skill.pk, skill.name, skill.image]):
raise Exception('Champion {} skill {} missing fields'.format(
obj.pk, skill.pk
))
return True
class ItemImporter(Importer):
export_path = './data/items.json'
image_path = './data/images/items/'
def get_objects(self):
return []
def validate(self, objects):
flat_ids = set([i.pk for i in objects])
for obj in objects:
# Validate basic fields
if not all([obj.pk, obj.name, obj.image]):
raise Exception('Item {} missing fields.'.format(obj.pk))
# Validate recipe
components = obj._from
if not components:
continue
if not set(components).issubset(flat_ids):
raise Exception('Item {} contains invalid recipe: {}'.format(
obj.pk, components
))
return True
class SettingsImporter(Importer):
export_path = './data/settings.json'
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump(objects, outfile)
return outfile
def get_objects(self):
return {
'ios': {
'ad_small': 'ca-app-pub-4764697513834958/5120930069',
'ad_big': 'ca-app-pub-4764697513834958/7934795665',
'tracking': 'UA-77793311-8',
'store': 'itms-apps://itunes.apple.com/app/id1121065896',
'store_premium': 'com.puppybox.quizpokemon.premium_version',
},
'android': {
'ad_small': 'ca-app-pub-4764697513834958/5480856869',
'ad_big': 'ca-app-pub-4764697513834958/5062054468',
'tracking': 'UA-77793311-9',
'store': 'market://details?id=com.puppybox.quizpokemon',
'store_premium': 'com.puppybox.quizpokemon.premium_version',
},
'windows': {
'ad_small': 'ca-app-pub-4764697513834958/7883646863',
'ad_big': 'ca-app-pub-4764697513834958/7744046068',
'tracking': '',
'store': '',
'store_premium': '',
},
'legal_disclaimer': 'This application is not created, sponsored or endorsed by Niantic and doesn’t reflect the views or opinions of Niantic or anyone officially involved in producing or managing Pokemon GO. Pokemon GO is a registered trademark of Niantic. All in-game characters, locations, imagery and videos of game content are copyright and are trademarked to their respective owners. Usage for this game falls within fair use guidelines.',
'highscore_url': 'http://mobascore-puppybox.rhcloud.com/api/v1/leaderboards/pokemon/scores/',
'source_name': 'Pokemon GO',
'source_url': 'http://www.pokemongo.com/',
}
class AchievementImporter(Importer):
export_path = './data/achievements.json'
def __init__(self, items, champions):
self.items = items
self.champions = champions
def export(self, objects):
with open(self.export_path, 'w') as outfile:
json.dump(objects, outfile)
return outfile
def get_objects(self):
items = self.items
champions = self.champions
item_count = len(list(filter(lambda x: len(x._from) > 0, items)))
champion_count = len(champions)
skill_count = functools.reduce(
lambda x, y: x + len(y.skills), champions, 0
)
objects = [
{
"id": "seen_all_skills",
"name": "Watching your every move",
"description": "Open all skill levels",
"type": "array",
"goal": skill_count,
},
{
"id": "seen_all_items",
"name": "Recipe observer",
"description": "Open all recipe levels",
"type": "array",
"goal": item_count,
},
{
"id": "seen_all_champions",
"name": "High Five Everybody",
"description": "Open all champion levels",
"type": "array",
"goal": champion_count,
},
{
"id": "solved_all_skills",
"name": "Every move is mine",
"description": "Solve all skill levels",
"type": "array",
"goal": skill_count,
},
{
"id": "solved_all_items",
"name": "<NAME> blacksmith",
"description": "Solve all recipe levels",
"type": "array",
"goal": item_count,
},
{
"id": "solved_all_champions",
"name": "I know all of them",
"description": "Solve all champion levels",
"type": "array",
"goal": champion_count,
},
{
"id": "gameplay_small_strike",
"name": "<NAME>",
"description": "Make a 10x strike",
"type": "number",
"goal": 10
},
{
"id": "gameplay_medium_strike",
"name": "Unstoppable",
"description": "Make a 50x strike",
"type": "number",
"goal": 50
},
{
"id": "gameplay_big_strike",
"name": "Godlike",
"description": "Make a 150x strike",
"type": "number",
"goal": 150
},
{
"id": "gameplay_small_play_count",
"name": "Gamer",
"description": "Play the game 100 times",
"type": "increment",
"goal": 100
},
{
"id": "gameplay_medium_play_count",
"name": "<NAME>",
"description": "Play the game 250 times",
"type": "increment",
"goal": 250
},
{
"id": "gameplay_big_play_count",
"name": "<NAME>",
"description": "Play the game 1000 times",
"type": "increment",
"goal": 1000
},
]
return objects
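# --- Illustrative usage sketch (not part of the original module) ---
# How the importers above are typically chained: champions and items are built
# first and then fed to AchievementImporter. As written in this file,
# ChampionImporter and ItemImporter return empty object lists, so this only
# demonstrates the wiring; each run() writes its JSON file under ./data/.
if __name__ == '__main__':
    champions = ChampionImporter().run()
    items = ItemImporter().run()
    SettingsImporter().run()
    AchievementImporter(items, champions).run()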
|
[
"json.dump",
"slugify.slugify",
"os.makedirs",
"ipdb.set_trace",
"PIL.Image.open",
"re.findall",
"requests.get",
"shutil.copyfileobj",
"os.path.join"
] |
[((2834, 2877), 'os.makedirs', 'os.makedirs', (['self.image_path'], {'exist_ok': '(True)'}), '(self.image_path, exist_ok=True)\n', (2845, 2877), False, 'import os\n'), ((3522, 3536), 'slugify.slugify', 'slugify', (['value'], {}), '(value)\n', (3529, 3536), False, 'from slugify import slugify\n'), ((4015, 4045), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (4027, 4045), False, 'import requests\n'), ((4271, 4310), 'os.path.join', 'os.path.join', (['self.image_path', 'filename'], {}), '(self.image_path, filename)\n', (4283, 4310), False, 'import os\n'), ((4454, 4475), 'PIL.Image.open', 'Image.open', (['full_path'], {}), '(full_path)\n', (4464, 4475), False, 'from PIL import Image\n'), ((3859, 3894), 're.findall', 're.findall', (['"""[a-zA-Z]+"""', 'right_part'], {}), "('[a-zA-Z]+', right_part)\n", (3869, 3894), False, 'import re\n'), ((4370, 4411), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response.raw', 'outfile'], {}), '(response.raw, outfile)\n', (4388, 4411), False, 'import shutil\n'), ((6361, 6388), 'json.dump', 'json.dump', (['objects', 'outfile'], {}), '(objects, outfile)\n', (6370, 6388), False, 'import json\n'), ((8435, 8462), 'json.dump', 'json.dump', (['objects', 'outfile'], {}), '(objects, outfile)\n', (8444, 8462), False, 'import json\n'), ((3032, 3048), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (3046, 3048), False, 'import ipdb\n')]
|
import sys, os
sys.path.append('yolov3_detector')
from yolov3_custom_helper import yolo_detector
from darknet import Darknet
sys.path.append('pytorch-YOLOv4')
from tool.darknet2pytorch import Darknet as DarknetYolov4
import argparse
import cv2,time
import numpy as np
from tool.plateprocessing import find_coordinates, plate_to_string, padder, get_color
from tool.utils import alphanumeric_segemntor,plot_boxes_cv2
from tool.torch_utils import *
import time
from utility_codes.tsv_converter import ConverterTSV
use_cuda = True
#################### PLATE ####################
cfg_v4 = 'pytorch-YOLOv4/cfg/yolo-obj.cfg'
weight_v4 = 'weights/plate.weights'
m = DarknetYolov4(cfg_v4)
m.load_weights(weight_v4)
num_classes = m.num_classes
class_names = ['plate']
print('Loading weights from %s... Done!' % (weight_v4))
if use_cuda:
m.cuda()
# m_alpha.cuda()
# yolo_vehicle.cuda()
vehicle_save_filename = 'tsv_files/plate_tester.tsv'
vehicle_writer = ConverterTSV(vehicle_save_filename,file_type='vehicle')
image_dir = 'SIH_hackathon/Detection_Day3/Day3'
image_files = os.listdir(image_dir)
image_files.sort()
OUTPUT_SIZE = (1280, 720)
for img_name in image_files:
frame = cv2.imread(os.path.join(image_dir, img_name))
h, w = frame.shape[0:2]
sized = cv2.resize(frame, (m.width, m.height))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
confidence = 0.2
boxes = do_detect(m, sized, confidence , 0.6, use_cuda)
result_img, cls_conf_plate, coordinates_all, labels = plot_boxes_cv2(frame, boxes[0],classes_to_detect=class_names,fontScale=0.5,thick=2, savename=False, class_names=class_names)
cls_conf_plate = float(cls_conf_plate)
for i,co in enumerate(coordinates_all):
print(co)
data = [img_name, co, labels[i]]
vehicle_writer.put_vehicle(img_name, co, 'plate')
# vehicle_writer.put_vehicle(img_loc, c, 'plate')
cv2.imshow('Image', result_img)
if cv2.waitKey(1) & 0xff == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
import pandas as pd
def merge_and_save(fp1, fp2, outfile_path):
tsv_file1 = pd.read_csv(fp1, sep='\t', header=0)
tsv_file2 = pd.read_csv(fp2, sep='\t', header=0)
merged = pd.concat([tsv_file1, tsv_file2])
outfile = merged.sort_values(by='Image').reset_index(drop=True)
outfile.to_csv(outfile_path, sep='\t', index=False)
merge_and_save('tsv_files/plate_tester.tsv', 'tsv_files/vehicle_tester.tsv', 'tsv_files/IvLabs_Detection_Day3.tsv')
|
[
"sys.path.append",
"os.path.join",
"pandas.concat",
"cv2.cvtColor",
"pandas.read_csv",
"cv2.waitKey",
"cv2.imshow",
"tool.utils.plot_boxes_cv2",
"utility_codes.tsv_converter.ConverterTSV",
"cv2.destroyAllWindows",
"tool.darknet2pytorch.Darknet",
"os.listdir",
"cv2.resize"
] |
[((15, 49), 'sys.path.append', 'sys.path.append', (['"""yolov3_detector"""'], {}), "('yolov3_detector')\n", (30, 49), False, 'import sys, os\n'), ((125, 158), 'sys.path.append', 'sys.path.append', (['"""pytorch-YOLOv4"""'], {}), "('pytorch-YOLOv4')\n", (140, 158), False, 'import sys, os\n'), ((662, 683), 'tool.darknet2pytorch.Darknet', 'DarknetYolov4', (['cfg_v4'], {}), '(cfg_v4)\n', (675, 683), True, 'from tool.darknet2pytorch import Darknet as DarknetYolov4\n'), ((954, 1010), 'utility_codes.tsv_converter.ConverterTSV', 'ConverterTSV', (['vehicle_save_filename'], {'file_type': '"""vehicle"""'}), "(vehicle_save_filename, file_type='vehicle')\n", (966, 1010), False, 'from utility_codes.tsv_converter import ConverterTSV\n'), ((1073, 1094), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (1083, 1094), False, 'import sys, os\n'), ((1940, 1963), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1961, 1963), False, 'import cv2, time\n'), ((1260, 1298), 'cv2.resize', 'cv2.resize', (['frame', '(m.width, m.height)'], {}), '(frame, (m.width, m.height))\n', (1270, 1298), False, 'import cv2, time\n'), ((1308, 1346), 'cv2.cvtColor', 'cv2.cvtColor', (['sized', 'cv2.COLOR_BGR2RGB'], {}), '(sized, cv2.COLOR_BGR2RGB)\n', (1320, 1346), False, 'import cv2, time\n'), ((1479, 1611), 'tool.utils.plot_boxes_cv2', 'plot_boxes_cv2', (['frame', 'boxes[0]'], {'classes_to_detect': 'class_names', 'fontScale': '(0.5)', 'thick': '(2)', 'savename': '(False)', 'class_names': 'class_names'}), '(frame, boxes[0], classes_to_detect=class_names, fontScale=\n 0.5, thick=2, savename=False, class_names=class_names)\n', (1493, 1611), False, 'from tool.utils import alphanumeric_segemntor, plot_boxes_cv2\n'), ((2045, 2081), 'pandas.read_csv', 'pd.read_csv', (['fp1'], {'sep': '"""\t"""', 'header': '(0)'}), "(fp1, sep='\\t', header=0)\n", (2056, 2081), True, 'import pandas as pd\n'), ((2098, 2134), 'pandas.read_csv', 'pd.read_csv', (['fp2'], {'sep': '"""\t"""', 'header': '(0)'}), "(fp2, sep='\\t', header=0)\n", (2109, 2134), True, 'import pandas as pd\n'), ((2148, 2181), 'pandas.concat', 'pd.concat', (['[tsv_file1, tsv_file2]'], {}), '([tsv_file1, tsv_file2])\n', (2157, 2181), True, 'import pandas as pd\n'), ((1190, 1223), 'os.path.join', 'os.path.join', (['image_dir', 'img_name'], {}), '(image_dir, img_name)\n', (1202, 1223), False, 'import sys, os\n'), ((1840, 1871), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'result_img'], {}), "('Image', result_img)\n", (1850, 1871), False, 'import cv2, time\n'), ((1879, 1893), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1890, 1893), False, 'import cv2, time\n')]
|
#! /usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, ningh"
__credits__ = [ "<NAME>" ]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = [ "<NAME>" ]
__email__ = [ "<EMAIL>" ]
__status__ = "Beta"
import subprocess, json, socket, psutil, os, wmi
from pymongo import MongoClient
class Engine( object ) :
"""docstring for Info"""
def __init__( self ):
self.client = MongoClient( 'mongodb://127.0.0.1:27017' )
self.db = self.client[ 'machine' ]
self.conn = wmi.WMI()
def getSize( self, bytes, suffix = 'B' ) :
"""
        Return `bytes` as a human-readable string, scaled to the largest
        fitting unit (K, M, G, T, P) with the given suffix appended.
"""
factor = 1024
for unit in [ '', 'K', 'M', 'G', 'T', 'P' ]:
if bytes < factor:
return "{0} {1}{2}".format( bytes, unit, suffix )
bytes /= factor
def getIp( self ) :
"""
Return IP address.
"""
return socket.gethostbyname( socket.gethostname() )
def getUser( self ) :
"""
Return the current username.
"""
return os.environ.get( 'USERNAME' )
def getComputer( self ) :
"""
Return the current computername.
"""
return os.environ.get( 'COMPUTERNAME' )
def getCpu( self ) :
"""
Return the name of Processor.
"""
for pr in self.conn.Win32_Processor():
return pr.Name
def getCores( self ) :
"""
Return the physical cores and total cores.
"""
out = {
'PhysicalCores': psutil.cpu_count( logical=False ),
'TotalCores': psutil.cpu_count( logical=True ),
}
return out
def getRam( self ):
"""
Return the size of Ram Memory.
"""
mem = psutil.virtual_memory()
return self.getSize(mem.total)
def getBoard( self ):
"""
Return the motherboard name.
"""
cs = self.conn.Win32_ComputerSystem()[0]
return cs.Model
def getGpu( self ):
"""
Return a list of GPUs.
"""
out = []
for vc in self.conn.Win32_VideoController():
out.append(vc.Name)
return out
def getDisks( self ):
"""
Return a list of dictionaries.
"""
out = []
for ld in self.conn.Win32_logicaldisk() :
            if ld.DriveType == 3 :
                kind = 'Local Disk'
            elif ld.DriveType == 4 :
                kind = 'Network Drive'
            else :
                # Fallback so `kind` is never unbound (or stale from a previous
                # iteration) for other drive types such as removable or CD-ROM.
                kind = 'Other'
inside = {
'device': ld.DeviceID,
'type': kind,
'provider': ld.ProviderName
}
try:
inside[ 'size' ] = self.getSize( int( ld.Size ) )
inside[ 'free' ] = self.getSize( int( ld.FreeSpace ) )
except Exception as e:
pass
out.append( inside )
return out
################################################
# By SubProcess #
################################################
def getSensorBySpecs( self, hwType, snsrType, filename='bySpecs' ) :
"""
        Run GarboMonitor in a subprocess and return the sensors of the given type for the requested hardware.
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {0} {1} {2}".format(
hwType,
snsrType,
filename
),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load(json_file)
return data
def getSensorsByHardware( self, hwType, filename='byHardware' ) :
"""
        Run GarboMonitor in a subprocess and return all sensors of the requested hardware.
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {0} {1}".format( hwType, filename ),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load(json_file)
return data
def getSensors( self, filename='sensors' ) :
"""
        Run GarboMonitor in a subprocess and return all sensors of every monitored hardware component.
"""
subprocess.check_output(
os.path.abspath( os.path.dirname( __file__ ) ) + "\\monitor\\GarboMonitor {}".format( filename ),
shell = True
)
with open( "C:/bin/garbo/log/{}.json".format( filename ) ) as json_file:
data = json.load( json_file )
return data
################################################
# By Service #
################################################
def getMonitorServiceBySpecs( self, hwType, snsrType ) :
"""
        Query the hardware collection populated by the monitor service and return the sensors of the given type for the requested hardware.
"""
out = { 'name': '', 'type': '', 'sensors': [] }
try :
data = list( self.db.hardware.find( { 'type': hwType }, { '_id': 0 } ) )
for item in data :
out[ 'name' ] = item[ 'name' ]
out[ 'type' ] = item[ 'type' ]
for sensor in item['sensors'] :
if sensor['type'] == snsrType :
out['sensors'].append(sensor)
return out
except Exception as e :
return e
def getMonitorServiceByHardware( self, hwType ) :
"""
        Query the hardware collection populated by the monitor service and return all sensors of the requested hardware.
"""
try :
byHw = list( self.db.hardware.find( { 'type': hwType }, { '_id': 0 } ) )
return byHw
except Exception as e :
return e
def getMonitorService( self ) :
"""
        Query the hardware collection populated by the monitor service and return all sensors of every monitored hardware component.
"""
try :
byHw = list( self.db.hardware.find( {}, { '_id': 0 } ) )
return byHw
except Exception as e :
return e
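# --- Illustrative usage sketch (not part of the original module) ---
# Windows-only: wmi.WMI() requires a Windows host, and the getMonitorService*
# methods additionally expect mongod listening on 127.0.0.1:27017.
if __name__ == '__main__':
    engine = Engine()
    print('User:', engine.getUser())
    print('CPU:', engine.getCpu())
    print('Cores:', engine.getCores())
    print('RAM:', engine.getRam())
    for disk in engine.getDisks():
        print('Disk:', disk)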
|
[
"pymongo.MongoClient",
"psutil.virtual_memory",
"json.load",
"os.path.dirname",
"wmi.WMI",
"os.environ.get",
"socket.gethostname",
"psutil.cpu_count"
] |
[((421, 461), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://127.0.0.1:27017"""'], {}), "('mongodb://127.0.0.1:27017')\n", (432, 461), False, 'from pymongo import MongoClient\n'), ((517, 526), 'wmi.WMI', 'wmi.WMI', ([], {}), '()\n', (524, 526), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((991, 1017), 'os.environ.get', 'os.environ.get', (['"""USERNAME"""'], {}), "('USERNAME')\n", (1005, 1017), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((1112, 1142), 'os.environ.get', 'os.environ.get', (['"""COMPUTERNAME"""'], {}), "('COMPUTERNAME')\n", (1126, 1142), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((1590, 1613), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1611, 1613), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((884, 904), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (902, 904), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((1401, 1432), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (1417, 1432), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((1454, 1484), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(True)'}), '(logical=True)\n', (1470, 1484), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((3096, 3116), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3105, 3116), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((3553, 3573), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3562, 3573), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((3977, 3997), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3986, 3997), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((2860, 2885), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2875, 2885), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((3350, 3375), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3365, 3375), False, 'import subprocess, json, socket, psutil, os, wmi\n'), ((3787, 3812), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3802, 3812), False, 'import subprocess, json, socket, psutil, os, wmi\n')]
|
# -*- coding: utf-8 -*-
#
# MAPCore class: mAP Core API handling
#
# @COPYRIGHT@
#
import sys
import time
import json
import logging
import hashlib
import requests
from urllib.parse import urlencode
from django.utils import timezone
from django.db import transaction
from osf.models.user import OSFUser
from website.settings import (MAPCORE_HOSTNAME,
MAPCORE_REFRESH_PATH,
MAPCORE_API_PATH,
MAPCORE_CLIENTID,
MAPCORE_SECRET)
#
# Global settings.
#
VERIFY = True # for requests.{get,post}(verify=VERIFY)
MAPCORE_API_MEMBER_LIST_BUG_WORKAROUND = False # 2019/5/24 fixed
MAPCORE_DEBUG = False
# unicode to utf-8
def utf8(s):
return s.encode('utf-8')
class MAPCoreLogger(object):
def __init__(self, logger):
self.logger = logger
def error(self, msg, *args, **kwargs):
self.logger.error('MAPCORE: ' + msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logger.warning('MAPCORE: ' + msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.info('MAPCORE:' + msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.logger.debug('MAPCORE: ' + msg, *args, **kwargs)
def setLevel(self, level=logging.INFO):
self.logger.setLevel(level=level)
class MAPCoreLoggerDebug(object):
def __init__(self, logger):
self.logger = logger
def error(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_ERROR: ' + msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_WARNING: ' + msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_INFO:' + msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.logger.error('MAPCORE_DEBUG: ' + msg, *args, **kwargs)
def setLevel(self, level=logging.INFO):
self.logger.setLevel(level=level)
def mapcore_logger(logger):
if MAPCORE_DEBUG:
logger = MAPCoreLoggerDebug(logger)
else:
logger = MAPCoreLogger(logger)
return logger
def mapcore_api_disable_log(level=logging.CRITICAL):
logger.setLevel(level=level)
logger = mapcore_logger(logging.getLogger(__name__))
class MAPCoreException(Exception):
def __init__(self, mapcore, ext_message):
self.mapcore = mapcore
if ext_message is not None and mapcore is None:
super(MAPCoreException, self).__init__(
'ext_message={}'.format(ext_message))
else:
super(MAPCoreException, self).__init__(
'http_status_code={}, api_error_code={}, message={}, ext_message={}'.format(
mapcore.http_status_code, mapcore.api_error_code,
mapcore.error_message, ext_message))
def listing_group_member_is_not_permitted(self):
if self.mapcore.api_error_code == 206 and \
self.mapcore.error_message == 'Listing group member is not permitted':
return True
return False
def group_does_not_exist(self):
if self.mapcore.api_error_code == 208 and \
self.mapcore.error_message == 'You do not have access permission':
return True
return False
class MAPCoreTokenExpired(MAPCoreException):
def __init__(self, mapcore, ext_message):
self.caller = mapcore.user
super(MAPCoreTokenExpired, self).__init__(mapcore, ext_message)
def __str__(self):
if self.caller:
username = self.caller.username
else:
username = 'UNKNOWN USER'
return 'mAP Core Access Token (for {}) is expired'.format(username)
if MAPCORE_API_MEMBER_LIST_BUG_WORKAROUND:
OPEN_MEMBER_PRIVATE = 1
OPEN_MEMBER_PUBLIC = 0
OPEN_MEMBER_MEMBER_ONLY = 2
OPEN_MEMBER_DEFAULT = OPEN_MEMBER_MEMBER_ONLY
else:
OPEN_MEMBER_PRIVATE = 0
OPEN_MEMBER_PUBLIC = 1
OPEN_MEMBER_MEMBER_ONLY = 2
OPEN_MEMBER_DEFAULT = OPEN_MEMBER_PUBLIC
def mapcore_group_member_is_private(group_info):
return group_info['open_member'] == OPEN_MEMBER_PRIVATE
def mapcore_group_member_is_public(group_info):
return group_info['open_member'] == OPEN_MEMBER_PUBLIC
def mapcore_group_member_is_member_only(group_info):
return group_info['open_member'] == OPEN_MEMBER_MEMBER_ONLY
class MAPCore(object):
MODE_MEMBER = 0 # Ordinary member
MODE_ADMIN = 2 # Administrator member
user = False
http_status_code = None
api_error_code = None
error_message = None
#
# Constructor.
#
def __init__(self, user):
self.user = user
#
# Refresh access token.
#
def refresh_token0(self):
#logger.debug('MAPCore::refresh_token:')
url = MAPCORE_HOSTNAME + MAPCORE_REFRESH_PATH
basic_auth = (MAPCORE_CLIENTID, MAPCORE_SECRET)
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
params = {
'grant_type': 'refresh_token',
'refresh_token': self.user.map_profile.oauth_refresh_token
}
params = urlencode(params)
logger.debug('MAPCore::refresh_token: params=' + params)
r = requests.post(url, auth=basic_auth, headers=headers, data=params, verify=VERIFY)
if r.status_code != requests.codes.ok:
logger.info('MAPCore::refresh_token: Refreshing token failed: status_code=' + str(r.status_code) + ', user=' + str(self.user) + ', text=' + r.text)
return False
j = json.loads(r.content)
if 'error' in j:
logger.info('MAPCore::refresh_token: Refreshing token failed: ' + j['error'] + ', user=' + str(self.user))
if 'error_description' in j:
logger.info('MAPCore::refresh_token: Refreshing token failed: ' + j['error_description'] + ', user=' + str(self.user))
return False
logger.debug('MAPCore::refresh_token: SUCCESS: user=' + str(self.user))
#logger.debug(' New access_token: ' + j['access_token'])
#logger.debug(' New refresh_token: ' + j['refresh_token'])
self.user.map_profile.oauth_access_token = j['access_token']
self.user.map_profile.oauth_refresh_token = j['refresh_token']
#
# Update database.
#
self.user.map_profile.oauth_refresh_time = timezone.now()
self.user.map_profile.save()
self.user.save()
return True
def refresh_token(self):
try:
self.lock_refresh()
return self.refresh_token0()
finally:
self.unlock_refresh()
#
# Lock refresh process.
#
def lock_refresh(self):
while True:
#print('before transaction.atomic')
with transaction.atomic():
#print('transaction.atomic start')
u = OSFUser.objects.select_for_update().get(username=self.user.username)
if not u.mapcore_refresh_locked:
#print('before lock')
#time.sleep(5) # for debug
u.mapcore_refresh_locked = True
u.save()
logger.debug('OSFUser(' + u.username + ').mapcore_refresh_locked=True')
return
#print('cannot get lock, sleep 1')
time.sleep(1)
#
# Unlock refresh process.
#
def unlock_refresh(self):
with transaction.atomic():
u = OSFUser.objects.select_for_update().get(username=self.user.username)
u.mapcore_refresh_locked = False
u.save()
logger.debug('OSFUser(' + u.username + ').mapcore_refresh_locked=False')
#
# GET|POST|DELETE for methods.
#
def req_api(self, method_name, args, requests_method, path, parameters):
logger.debug('MAPCore(user={}).{}{}'.format(self.user.username, method_name, str(args)))
if self.user.map_profile is None:
# Access token is not issued yet.
raise self.get_token_expired()
url = MAPCORE_HOSTNAME + MAPCORE_API_PATH + path
count = 0
while count < 2: # retry once
time_stamp, signature = self.calc_signature()
if requests_method == requests.get or \
requests_method == requests.delete:
payload = {'time_stamp': time_stamp, 'signature': signature}
if parameters:
for k, v in parameters.items():
payload[k] = v
headers = {'Authorization': 'Bearer '
+ self.user.map_profile.oauth_access_token}
r = requests_method(url, headers=headers,
params=payload, verify=VERIFY)
elif requests_method == requests.post:
params = {}
params['request'] = {
'time_stamp': time_stamp,
'signature': signature
}
params['parameter'] = parameters
params = json.dumps(params).encode('utf-8')
headers = {
'Authorization':
'Bearer ' + self.user.map_profile.oauth_access_token,
'Content-Type': 'application/json; charset=utf-8',
'Content-Length': str(len(params))
}
r = requests_method(url, headers=headers,
data=params, verify=VERIFY)
else:
raise Exception('unknown requests_method')
j = self.check_result(r, method_name, args)
if j is not False:
# Function succeeded.
return j
if self.is_token_expired(r, method_name, args):
if self.refresh_token() is False:
# Automatic refreshing token failed.
raise self.get_token_expired()
else:
# Any other API error.
raise self.get_exception()
count += 1
# Could not refresh token after retries (may not occur).
raise self.get_token_expired()
#
# Get API version.
#
def get_api_version(self):
method_name = sys._getframe().f_code.co_name
return self.req_api(method_name, (), requests.get, '/version', None)
#
# Get group information by group name. (unused by mapcore.py)
#
def get_group_by_name(self, group_name):
method_name = sys._getframe().f_code.co_name
parameters = {'searchWord': group_name.encode('utf-8')}
path = '/mygroup'
j = self.req_api(method_name, (group_name,),
requests.get, path, parameters)
if len(j['result']['groups']) == 0:
self.error_message = 'Group not found'
logger.debug(' {}'.format(self.error_message))
# Group not found.
raise self.get_exception()
return j
#
# Get group information by group key.
#
def get_group_by_key(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
j = self.req_api(method_name, (group_key,), requests.get, path, None)
if len(j['result']['groups']) == 0:
self.error_message = 'Group not found'
logger.debug(' {}'.format(self.error_message))
raise self.get_exception()
return j
#
# delete group by group key.
#
def delete_group(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
j = self.req_api(method_name, (group_key,),
requests.delete, path, None)
return j
#
# Create new group, and make it public, active and open_member.
#
def create_group(self, group_name):
method_name = sys._getframe().f_code.co_name
path = '/group'
parameters = {
'group_name': group_name,
'group_name_en': group_name
}
j = self.req_api(method_name, (group_name,),
requests.post, path, parameters)
group_key = j['result']['groups'][0]['group_key']
logger.debug(' New group has been created (group_key=' + group_key + ')')
# to set description (Empty description is invalid on CG)
j = self.edit_group(group_key, group_name, group_name)
return j
#
# Change group properties.
#
def edit_group(self, group_key, group_name, introduction):
method_name = sys._getframe().f_code.co_name
path = '/group/' + group_key
parameters = {
'group_name': group_name,
'group_name_en': '',
'introduction': introduction,
'introduction_en': '',
'public': 1,
'active': 1,
'open_member': OPEN_MEMBER_DEFAULT
}
j = self.req_api(method_name, (group_key, group_name, introduction),
requests.post, path, parameters)
return j
#
# Get member of group.
#
def get_group_members(self, group_key):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key
parameters = None
j = self.req_api(method_name, (group_key,),
requests.get, path, parameters)
return j
#
# Get joined group list.
#
def get_my_groups(self):
method_name = sys._getframe().f_code.co_name
path = '/mygroup'
parameters = None
j = self.req_api(method_name, (), requests.get, path, parameters)
return j
#
# Add to group.
#
def add_to_group(self, group_key, eppn, admin):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key + '/' + eppn
parameters = {
'admin': admin
}
j = self.req_api(method_name, (group_key, eppn, admin),
requests.post, path, parameters)
return j
#
# Remove from group.
#
def remove_from_group(self, group_key, eppn):
method_name = sys._getframe().f_code.co_name
path = '/member/' + group_key + '/' + eppn
parameters = None
j = self.req_api(method_name, (group_key, eppn),
requests.delete, path, parameters)
return j
#
# Edit member.
#
def edit_member(self, group_key, eppn, admin):
#logger.debug('MAPCore::edit_member (group_key=' + group_key + ', eppn=' + eppn + ', admin=' + str(admin) + ')')
# NOTE: If error occurs, an exception will be thrown.
j = self.remove_from_group(group_key, eppn)
j = self.add_to_group(group_key, eppn, admin)
return j
#
# Get MAPCoreException.
#
def get_exception(self):
return MAPCoreException(self, None)
#
# Get MAPCoreTokenExpired.
#
def get_token_expired(self):
return MAPCoreTokenExpired(self, None)
#
# Calculate API signature.
#
def calc_signature(self):
time_stamp = str(int(time.time()))
s = MAPCORE_SECRET + self.user.map_profile.oauth_access_token + time_stamp
digest = hashlib.sha256(s.encode('utf-8')).hexdigest()
return time_stamp, digest
WWW_AUTHENTICATE = 'WWW-Authenticate'
MSG_ACCESS_TOKEN_EXPIRED = 'Access token expired'
MSG_INVALID_ACCESS_TOKEN = 'Invalid access token'
#
# Check API result status.
# If any error occurs, a False will be returned.
#
def check_result(self, result, method_name, args):
self.http_status_code = result.status_code
self.api_error_code = None
self.error_message = ''
if result.status_code != requests.codes.ok:
if self.is_token_expired(result, method_name, args):
self.error_message = self.MSG_ACCESS_TOKEN_EXPIRED
else:
self.error_message = result.headers.get(self.WWW_AUTHENTICATE)
if not self.error_message:
self.error_message = result.text
logger.info('MAPCore(user={},eppn={}).{}{}:check_result: status_code={}, error_msg={}'.format(self.user.username, self.user.eppn, method_name, args, result.status_code, self.error_message))
return False
#logger.debug('result.encoding={}'.format(result.encoding))
j = json.loads(result.content)
if j['status']['error_code'] != 0:
self.api_error_code = j['status']['error_code']
self.error_message = j['status']['error_msg']
logger.info('MAPCore(user={},eppn={}).{}{}:check_result: error_code={}, error_msg={}'.format(self.user.username, self.user.eppn, method_name, args, self.api_error_code, self.error_message))
return False
return j
def is_token_expired(self, result, method_name, args):
if result.status_code != requests.codes.ok:
s = result.headers.get(self.WWW_AUTHENTICATE)
if s is None:
return False
#if s.find(self.MSG_ACCESS_TOKEN_EXPIRED) != -1 \
# or s.find(self.MSG_INVALID_ACCESS_TOKEN) != -1:
if result.status_code == 401: # Unauthorized
logger.debug('MAPCore(user={},eppn={}).{}{}:is_token_expired: status_code={}, {}={}'.format(self.user.username, self.user.eppn, method_name, args, result.status_code, self.WWW_AUTHENTICATE, self.error_message))
return True
else:
return False
return False
def encode_recursive(o, encoding='utf-8'):
if isinstance(o, dict):
        # Use .items(); .iteritems() is Python 2 only, and this module targets Python 3.
        return {encode_recursive(key): encode_recursive(val) for key, val in o.items()}
elif isinstance(o, list):
return [encode_recursive(elem) for elem in o]
elif isinstance(o, str):
return o.encode(encoding)
else:
return o
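# --- Illustrative note (not part of the original module) ---
# Every API request is signed the way calc_signature() does above:
# sha256(MAPCORE_SECRET + access_token + unix_time). A standalone equivalent,
# with placeholder secret/token values, would be:
#
#   import time, hashlib
#   time_stamp = str(int(time.time()))
#   digest = hashlib.sha256(('secret' + 'token' + time_stamp).encode('utf-8')).hexdigest()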
|
[
"json.loads",
"urllib.parse.urlencode",
"django.utils.timezone.now",
"sys._getframe",
"time.sleep",
"time.time",
"json.dumps",
"requests.post",
"osf.models.user.OSFUser.objects.select_for_update",
"django.db.transaction.atomic",
"logging.getLogger"
] |
[((2289, 2316), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2306, 2316), False, 'import logging\n'), ((5211, 5228), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (5220, 5228), False, 'from urllib.parse import urlencode\n'), ((5307, 5392), 'requests.post', 'requests.post', (['url'], {'auth': 'basic_auth', 'headers': 'headers', 'data': 'params', 'verify': 'VERIFY'}), '(url, auth=basic_auth, headers=headers, data=params, verify=VERIFY\n )\n', (5320, 5392), False, 'import requests\n'), ((5633, 5654), 'json.loads', 'json.loads', (['r.content'], {}), '(r.content)\n', (5643, 5654), False, 'import json\n'), ((6455, 6469), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6467, 6469), False, 'from django.utils import timezone\n'), ((16577, 16603), 'json.loads', 'json.loads', (['result.content'], {}), '(result.content)\n', (16587, 16603), False, 'import json\n'), ((7433, 7446), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7443, 7446), False, 'import time\n'), ((7533, 7553), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (7551, 7553), False, 'from django.db import transaction\n'), ((6874, 6894), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (6892, 6894), False, 'from django.db import transaction\n'), ((10372, 10387), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (10385, 10387), False, 'import sys\n'), ((10626, 10641), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (10639, 10641), False, 'import sys\n'), ((11219, 11234), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (11232, 11234), False, 'import sys\n'), ((11683, 11698), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (11696, 11698), False, 'import sys\n'), ((12017, 12032), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (12030, 12032), False, 'import sys\n'), ((12710, 12725), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (12723, 12725), False, 'import sys\n'), ((13314, 13329), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (13327, 13329), False, 'import sys\n'), ((13628, 13643), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (13641, 13643), False, 'import sys\n'), ((13909, 13924), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (13922, 13924), False, 'import sys\n'), ((14300, 14315), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (14313, 14315), False, 'import sys\n'), ((15275, 15286), 'time.time', 'time.time', ([], {}), '()\n', (15284, 15286), False, 'import time\n'), ((7571, 7606), 'osf.models.user.OSFUser.objects.select_for_update', 'OSFUser.objects.select_for_update', ([], {}), '()\n', (7604, 7606), False, 'from osf.models.user import OSFUser\n'), ((6967, 7002), 'osf.models.user.OSFUser.objects.select_for_update', 'OSFUser.objects.select_for_update', ([], {}), '()\n', (7000, 7002), False, 'from osf.models.user import OSFUser\n'), ((9168, 9186), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (9178, 9186), False, 'import json\n')]
|
import openpyxl
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from datetime import datetime, timedelta
from dateutil import tz, parser
from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token
from tutorial.graph_helper import *
import dateutil.parser
# <HomeViewSnippet>
def home(request):
context = initialize_context(request)
return render(request, 'tutorial/home.html', context)
# </HomeViewSnippet>
# <InitializeContextSnippet>
def initialize_context(request):
context = {}
# Check for any errors in the session
error = request.session.pop('flash_error', None)
if error != None:
context['errors'] = []
context['errors'].append(error)
# Check for user in the session
context['user'] = request.session.get('user', {'is_authenticated': False})
return context
# </InitializeContextSnippet>
# <SignInViewSnippet>
def sign_in(request):
# Get the sign-in flow
flow = get_sign_in_flow()
# Save the expected flow so we can use it in the callback
try:
request.session['auth_flow'] = flow
except Exception as e:
print(e)
# Redirect to the Azure sign-in page
return HttpResponseRedirect(flow['auth_uri'])
# </SignInViewSnippet>
# <SignOutViewSnippet>
def sign_out(request):
# Clear out the user and token
remove_user_and_token(request)
return HttpResponseRedirect(reverse('home'))
# </SignOutViewSnippet>
# <CallbackViewSnippet>
def callback(request):
# Make the token request
result = get_token_from_code(request)
# Get the user's profile
# user = get_user(result['code'])
user = get_user(result['access_token'])
# Store user
store_user(request, user)
return HttpResponseRedirect(reverse('home'))
# </CallbackViewSnippet>
# <CalendarViewSnippet>
def calendar(request):
context = initialize_context(request)
user = context['user']
# Load the user's time zone
# Microsoft Graph can return the user's time zone as either
# a Windows time zone name or an IANA time zone identifier
# Python datetime requires IANA, so convert Windows to IANA
time_zone = get_iana_from_windows(user['timeZone'])
tz_info = tz.gettz(time_zone)
# Get midnight today in user's time zone
today = datetime.now(tz_info).replace(
hour=0,
minute=0,
second=0,
microsecond=0)
# Based on today, get the start of the week (Sunday)
if (today.weekday() != 6):
start = today - timedelta(days=today.isoweekday())
else:
start = today
end = start + timedelta(days=7)
token = get_token(request)
events = get_calendar_events(
token,
start.isoformat(timespec='seconds'),
end.isoformat(timespec='seconds'),
user['timeZone'])
if events:
# Convert the ISO 8601 date times to a datetime object
# This allows the Django template to format the value nicely
for event in events['value']:
event['start']['dateTime'] = parser.parse(event['start']['dateTime'])
event['end']['dateTime'] = parser.parse(event['end']['dateTime'])
context['events'] = events['value']
return render(request, 'tutorial/calendar.html', context)
# </CalendarViewSnippet>
# <NewEventViewSnippet>
def newevent(request):
context = initialize_context(request)
user = context['user']
if request.method == 'POST':
# Validate the form values
# Required values
if (not request.POST['ev-subject']) or \
(not request.POST['ev-start']) or \
(not request.POST['ev-end']):
context['errors'] = [
{'message': 'Invalid values', 'debug': 'The subject, start, and end fields are required.'}
]
return render(request, 'tutorial/newevent.html', context)
attendees = None
if request.POST['ev-attendees']:
attendees = request.POST['ev-attendees'].split(';')
body = request.POST['ev-body']
# Create the event
token = get_token(request)
create_event(
token,
request.POST['ev-subject'],
request.POST['ev-start'],
request.POST['ev-end'],
attendees,
request.POST['ev-body'],
user['timeZone'])
# Redirect back to calendar view
return HttpResponseRedirect(reverse('calendar'))
else:
# Render the form
return render(request, 'tutorial/newevent.html', context)
# print('hello')
# </NewEventViewSnippet>
def bulkevent(request):
context = initialize_context(request)
user = context['user']
if request.method == 'POST':
body = request.POST['ev-body']
if not request.POST['ev-subject']:
context['errors'] = [
{'message': 'Invalid values', 'debug': 'The subject, start, and end fields are required.'}
]
return render(request, 'tutorial/bulkevent.html', context)
excel_file = request.FILES["excel_file"]
# you may put validations here to check extension or file size
try:
data = read_excel(excel_file)
except Exception as e:
context['errors'] = [
{'message': 'Excel parsing failed', 'debug': 'Check the format of your file.'}
]
return render(request, 'tutorial/bulkevent.html', context)
results = []
for row in data:
start_date = row[1]
start_time = row[2]
group = row[3]
attendees = row[4:]
# '2021-05-08T11:56'
start_time = datetime.combine(dateutil.parser.parse(start_date).date(),
dateutil.parser.parse(start_time).time()
)
end_time = start_time + timedelta(minutes=int(request.POST['ev-duration']))
# Create the event
token = get_token(request)
res = create_event(
token,
request.POST['ev-subject'] + " " + group,
start_time.isoformat(),
end_time.isoformat(),
attendees,
request.POST['ev-body'],
user['timeZone'])
results.append({'result':res,'group': group})
# Redirect back to calendar view
context['messages'] = [
{'message': f'Group {res["group"]}', 'detail': res["result"].status_code} for res in results
]
return render(request, 'tutorial/bulkevent.html', context)
# return HttpResponseRedirect(reverse('calendar'))
else:
# Render the form
return render(request, 'tutorial/bulkevent.html', context)
# print('hello')
def read_excel(excel_file):
wb = openpyxl.load_workbook(excel_file)
# getting a particular sheet by name out of many sheets
worksheet = wb["schedule"]
# print(worksheet)
excel_data = list()
# iterating over the rows and
# getting value from each cell in row
for row in worksheet.iter_rows():
row_data = list()
for cell in row:
row_data.append(str(cell.value))
excel_data.append(row_data)
return excel_data
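# Note on the spreadsheet layout consumed by bulkevent() above: each row of the
# "schedule" worksheet is read as [<ignored>, start_date, start_time, group,
# attendee1, attendee2, ...], i.e. the first column is skipped and every column
# from the fifth onward is treated as an attendee address.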
|
[
"dateutil.parser.parse",
"tutorial.auth_helper.get_sign_in_flow",
"tutorial.auth_helper.remove_user_and_token",
"tutorial.auth_helper.get_token_from_code",
"tutorial.auth_helper.store_user",
"dateutil.tz.gettz",
"tutorial.auth_helper.get_token",
"openpyxl.load_workbook",
"django.urls.reverse",
"datetime.timedelta",
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"datetime.datetime.now"
] |
[((487, 533), 'django.shortcuts.render', 'render', (['request', '"""tutorial/home.html"""', 'context'], {}), "(request, 'tutorial/home.html', context)\n", (493, 533), False, 'from django.shortcuts import render\n'), ((1077, 1095), 'tutorial.auth_helper.get_sign_in_flow', 'get_sign_in_flow', ([], {}), '()\n', (1093, 1095), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((1307, 1345), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["flow['auth_uri']"], {}), "(flow['auth_uri'])\n", (1327, 1345), False, 'from django.http import HttpResponse, HttpResponseRedirect\n'), ((1457, 1487), 'tutorial.auth_helper.remove_user_and_token', 'remove_user_and_token', (['request'], {}), '(request)\n', (1478, 1487), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((1654, 1682), 'tutorial.auth_helper.get_token_from_code', 'get_token_from_code', (['request'], {}), '(request)\n', (1673, 1682), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((1817, 1842), 'tutorial.auth_helper.store_user', 'store_user', (['request', 'user'], {}), '(request, user)\n', (1827, 1842), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((2330, 2349), 'dateutil.tz.gettz', 'tz.gettz', (['time_zone'], {}), '(time_zone)\n', (2338, 2349), False, 'from dateutil import tz, parser\n'), ((2744, 2762), 'tutorial.auth_helper.get_token', 'get_token', (['request'], {}), '(request)\n', (2753, 2762), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((3330, 3380), 'django.shortcuts.render', 'render', (['request', '"""tutorial/calendar.html"""', 'context'], {}), "(request, 'tutorial/calendar.html', context)\n", (3336, 3380), False, 'from django.shortcuts import render\n'), ((6982, 7016), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['excel_file'], {}), '(excel_file)\n', (7004, 7016), False, 'import openpyxl\n'), ((1521, 1536), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (1528, 1536), False, 'from django.urls import reverse\n'), ((1875, 1890), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (1882, 1890), False, 'from django.urls import reverse\n'), ((2713, 2730), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2722, 2730), False, 'from datetime import datetime, timedelta\n'), ((4206, 4224), 'tutorial.auth_helper.get_token', 'get_token', (['request'], {}), '(request)\n', (4215, 4224), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((4621, 4671), 'django.shortcuts.render', 'render', (['request', '"""tutorial/newevent.html"""', 'context'], {}), "(request, 'tutorial/newevent.html', context)\n", (4627, 4671), False, 'from django.shortcuts import render\n'), ((6708, 6759), 'django.shortcuts.render', 'render', (['request', '"""tutorial/bulkevent.html"""', 'context'], {}), "(request, 'tutorial/bulkevent.html', context)\n", (6714, 6759), False, 'from django.shortcuts import render\n'), ((6870, 6921), 'django.shortcuts.render', 'render', (['request', '"""tutorial/bulkevent.html"""', 'context'], {}), "(request, 'tutorial/bulkevent.html', context)\n", (6876, 6921), False, 'from 
django.shortcuts import render\n'), ((2408, 2429), 'datetime.datetime.now', 'datetime.now', (['tz_info'], {}), '(tz_info)\n', (2420, 2429), False, 'from datetime import datetime, timedelta\n'), ((3154, 3194), 'dateutil.parser.parse', 'parser.parse', (["event['start']['dateTime']"], {}), "(event['start']['dateTime'])\n", (3166, 3194), False, 'from dateutil import tz, parser\n'), ((3234, 3272), 'dateutil.parser.parse', 'parser.parse', (["event['end']['dateTime']"], {}), "(event['end']['dateTime'])\n", (3246, 3272), False, 'from dateutil import tz, parser\n'), ((3941, 3991), 'django.shortcuts.render', 'render', (['request', '"""tutorial/newevent.html"""', 'context'], {}), "(request, 'tutorial/newevent.html', context)\n", (3947, 3991), False, 'from django.shortcuts import render\n'), ((4549, 4568), 'django.urls.reverse', 'reverse', (['"""calendar"""'], {}), "('calendar')\n", (4556, 4568), False, 'from django.urls import reverse\n'), ((5104, 5155), 'django.shortcuts.render', 'render', (['request', '"""tutorial/bulkevent.html"""', 'context'], {}), "(request, 'tutorial/bulkevent.html', context)\n", (5110, 5155), False, 'from django.shortcuts import render\n'), ((6132, 6150), 'tutorial.auth_helper.get_token', 'get_token', (['request'], {}), '(request)\n', (6141, 6150), False, 'from tutorial.auth_helper import get_sign_in_flow, get_token_from_code, store_user, remove_user_and_token, get_token\n'), ((5525, 5576), 'django.shortcuts.render', 'render', (['request', '"""tutorial/bulkevent.html"""', 'context'], {}), "(request, 'tutorial/bulkevent.html', context)\n", (5531, 5576), False, 'from django.shortcuts import render\n')]
|
import os
import json
import logging
import traceback
from importlib import import_module
class PluginLoader:
def __init__(self):
self.logger = logging.getLogger('newsbot.py')
self.actions = []
self.parsers = []
self.action_dir = 'actions'
self.parser_dir = 'parsers'
self.config = None
self.action_config = None
self.parser_config = None
def load_plugins(self, config):
self.config = config
self.action_config = json.loads(self.config.get('newsbot', 'action_plugins'))
self.parser_config = json.loads(self.config.get('newsbot', 'parser_plugins'))
# Load the action plugins
try:
count = 0
for plugin in self.action_config:
plugin_file = plugin + '.py'
location = os.path.join(self.action_dir, plugin_file)
if not os.path.isdir(location):
self.actions.append(import_module(self.action_dir + '.' + plugin))
count += 1
self.logger.info("Loaded {} actions.".format(count))
        except Exception:
self.logger.error(traceback.format_exc())
# Load the parser plugins
try:
count = 0
for plugin in self.parser_config:
plugin_file = plugin + '.py'
location = os.path.join(self.parser_dir, plugin_file)
if not os.path.isdir(location):
self.parsers.append(import_module(self.parser_dir + '.' + plugin))
count += 1
self.logger.info("Loaded {} parsers.".format(count))
        except Exception:
self.logger.error(traceback.format_exc())
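if __name__ == '__main__':
    # Usage sketch: the plugin names below are illustrative placeholders, not
    # modules that ship with this project; failed imports are caught and logged.
    import configparser
    config = configparser.ConfigParser()
    config.read_dict({'newsbot': {'action_plugins': '["example_action"]',
                                  'parser_plugins': '["example_parser"]'}})
    loader = PluginLoader()
    loader.load_plugins(config)
    print(loader.actions, loader.parsers)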
|
[
"importlib.import_module",
"os.path.isdir",
"traceback.format_exc",
"os.path.join",
"logging.getLogger"
] |
[((158, 189), 'logging.getLogger', 'logging.getLogger', (['"""newsbot.py"""'], {}), "('newsbot.py')\n", (175, 189), False, 'import logging\n'), ((835, 877), 'os.path.join', 'os.path.join', (['self.action_dir', 'plugin_file'], {}), '(self.action_dir, plugin_file)\n', (847, 877), False, 'import os\n'), ((1369, 1411), 'os.path.join', 'os.path.join', (['self.parser_dir', 'plugin_file'], {}), '(self.parser_dir, plugin_file)\n', (1381, 1411), False, 'import os\n'), ((902, 925), 'os.path.isdir', 'os.path.isdir', (['location'], {}), '(location)\n', (915, 925), False, 'import os\n'), ((1157, 1179), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1177, 1179), False, 'import traceback\n'), ((1436, 1459), 'os.path.isdir', 'os.path.isdir', (['location'], {}), '(location)\n', (1449, 1459), False, 'import os\n'), ((1691, 1713), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1711, 1713), False, 'import traceback\n'), ((967, 1012), 'importlib.import_module', 'import_module', (["(self.action_dir + '.' + plugin)"], {}), "(self.action_dir + '.' + plugin)\n", (980, 1012), False, 'from importlib import import_module\n'), ((1501, 1546), 'importlib.import_module', 'import_module', (["(self.parser_dir + '.' + plugin)"], {}), "(self.parser_dir + '.' + plugin)\n", (1514, 1546), False, 'from importlib import import_module\n')]
|
'''tests for admin endpoints'''
import json
from unittest import TestCase
from manage import Connection
from app import create_app
class TestOrders(TestCase):
    '''loads up all configuration settings'''
def setUp(self):
self.app = create_app("testing")
self.client = self.app.test_client()
with self.app.app_context():
Connection().drop()
Connection().create()
Connection().create_admin()
self.order_data = {
"name": "Burger",
"description": "Beef burger",
"price": 60}
def login(self):
""" test for loggin in """
login_data = {
"username": "Admin",
"password": "<PASSWORD>"
}
response = self.client.post(
"api/v2/auth/login",
data=json.dumps(login_data),
headers={'content-type': 'application/json'})
return response
def user_login(self):
""" test for signing up"""
signup_data = {
"username": "salmaa",
"email": "<EMAIL>",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>"
}
self.client.post(
"api/v2/auth/signup",
data=json.dumps(signup_data),
headers={'content-type': 'application/json'}
)
login_data = {
"username": "salmaa",
"password": "<PASSWORD>"
}
response = self.client.post(
"api/v2/auth/login",
data=json.dumps(login_data),
headers={'content-type': 'application/json'}
)
return response
def get_token(self):
""" function to get user token """
response = self.login()
token = json.loads(response.data.decode('utf-8')).get('token', None)
return token
def get_user_token(self):
""" function to get user token """
response = self.user_login()
token = json.loads(response.data.decode('utf-8')).get('token', None)
return token
def test_place_new_menu(self):
        ''' Test to create a new menu item '''
token = self.get_token()
order_data = {
"name": "Burger",
"description": "Beef burger",
"image": "Burger",
"price": 60
}
response = self.client.post(
"/api/v2/menu",
data=json.dumps(order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)}
)
response_data = json.loads(response.data.decode('utf-8'))
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response_data['message'], "Food menu created")
# def test_all_menu(self):
# '''Test get all menu'''
# token = self.get_token()
# response = self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)}
# )
# response = self.client.get(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# print(response.data)
# self.assertEqual(response.status_code, 200)
def test_empty_menu(self):
'''Test get all menu'''
token = self.get_token()
response = self.client.get(
"/api/v2/menu",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
self.assertEqual(response.status_code, 404)
# def test_get_specific_menu(self):
# '''Test to get a specific menu'''
# token = self.get_token()
# order_data = {
# "name": "Burger",
# "description": "Beef burger",
# "price": 60
# }
# response = self.client.post(
# "/api/v2/menu",
# data=json.dumps(order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# response = self.client.get(
# "/api/v2/menu/1",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
# def test_get_specific_order(self):
# '''Test to get a specific menu'''
# user_token = self.get_user_token()
# token = self.get_token()
# self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# data = {
# 'name': 'Chicken'
# }
# response = self.client.post(
# "/api/v2/users/orders/1",
# data=json.dumps(data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(user_token)})
# response = self.client.get(
# "/api/v2/orders/1",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
def test_get_non_existing_menu(self):
'''Test to get a specific menu'''
token = self.get_token()
response = self.client.post(
"/api/v2/menu",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
response = self.client.get(
"/api/v2/menu/2331",
data=json.dumps(self.order_data),
headers={"content-type": "application/json",
'Authorization': 'Bearer {}'.format(token)})
self.assertEqual(response.status_code, 404)
# def test_update_order_status(self):
# '''Test to get a specific menu'''
# user_token = self.get_user_token()
# token = self.get_token()
# self.client.post(
# "/api/v2/menu",
# data=json.dumps(self.order_data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# data = {
# 'name': 'Burger'
# }
# status = {
# "status": "accept"
# }
# self.client.post(
# "/api/v2/users/orders/1",
# data=json.dumps(data),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(user_token)})
# response = self.client.put(
# "/api/v2/update/order/1",
# data=json.dumps(status),
# headers={"content-type": "application/json",
# 'Authorization': 'Bearer {}'.format(token)})
# self.assertEqual(response.status_code, 200)
|
[
"app.create_app",
"manage.Connection",
"json.dumps"
] |
[((249, 270), 'app.create_app', 'create_app', (['"""testing"""'], {}), "('testing')\n", (259, 270), False, 'from app import create_app\n'), ((832, 854), 'json.dumps', 'json.dumps', (['login_data'], {}), '(login_data)\n', (842, 854), False, 'import json\n'), ((1262, 1285), 'json.dumps', 'json.dumps', (['signup_data'], {}), '(signup_data)\n', (1272, 1285), False, 'import json\n'), ((1547, 1569), 'json.dumps', 'json.dumps', (['login_data'], {}), '(login_data)\n', (1557, 1569), False, 'import json\n'), ((2427, 2449), 'json.dumps', 'json.dumps', (['order_data'], {}), '(order_data)\n', (2437, 2449), False, 'import json\n'), ((3600, 3627), 'json.dumps', 'json.dumps', (['self.order_data'], {}), '(self.order_data)\n', (3610, 3627), False, 'import json\n'), ((5818, 5845), 'json.dumps', 'json.dumps', (['self.order_data'], {}), '(self.order_data)\n', (5828, 5845), False, 'import json\n'), ((6056, 6083), 'json.dumps', 'json.dumps', (['self.order_data'], {}), '(self.order_data)\n', (6066, 6083), False, 'import json\n'), ((365, 377), 'manage.Connection', 'Connection', ([], {}), '()\n', (375, 377), False, 'from manage import Connection\n'), ((397, 409), 'manage.Connection', 'Connection', ([], {}), '()\n', (407, 409), False, 'from manage import Connection\n'), ((431, 443), 'manage.Connection', 'Connection', ([], {}), '()\n', (441, 443), False, 'from manage import Connection\n')]
|
import hmac
from typing import Optional
from fastapi import FastAPI, Request
from .config import get_config
import logging
import os
import subprocess
import uvicorn
from shellescape import quote
app = FastAPI()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run_hooks(repo_config, hook_type):
old_path = os.getcwd()
os.chdir(repo_config["path"])
for hook_file in repo_config.get("hooks", {}).get(hook_type, []):
subprocess.run(
command_preparation([hook_file], repo_config.get("executing_user"))
)
os.chdir(old_path)
def command_preparation(command, user):
if user is None:
return command
return ["su", user, "-s", "/bin/bash", "-c", " ".join([quote(c) for c in command])]
@app.post(path="/pull/{repo}")
async def pull(request: Request, repo: str):
c = get_config()
#X-Event-Key: diagnostics:ping
try:
event_key = request.headers["X-Event-Key"]
if event_key == 'diagnostics:ping':
return "ok"
except KeyError:
pass
try:
repo_config = c[repo]
except KeyError:
logger.error("Repo does not seem to be configured")
return
body = await request.body()
signature_local = hmac.new(
bytes(repo_config["shared_secret"], "UTF-8"), body, digestmod="SHA256"
).hexdigest()
signature_request = request.headers["X-Hub-Signature"].split("=")[1]
if signature_local != signature_request:
logger.error("Repo does not seem to be configured")
return
path = os.getcwd()
os.chdir(repo_config["path"])
if repo_config.get("git_reset"):
logging.info("Resetting the repository before pulling")
subprocess.run(
command_preparation(
["git", "reset", "--hard"], repo_config.get("executing_user")
)
)
pull_process = subprocess.run(
command_preparation(["git", "pull"], repo_config.get("executing_user"))
)
git_url_process = subprocess.run(
command_preparation(
["git", "config", "--get", "remote.origin.url"],
repo_config.get("executing_user"),
),
capture_output=True,
)
git_url = git_url_process.stdout.decode("UTF-8").split("\n")[0]
run_hooks(repo_config, "post_pull")
os.chdir(path)
if pull_process.returncode != 0 and repo_config.get("git_delete_if_pull_failed"):
subprocess.run(
command_preparation(
["rm", "-rf", repo_config["path"]], repo_config.get("executing_user")
)
)
subprocess.run(
command_preparation(
["git", "clone", git_url, repo_config["path"]],
repo_config.get("executing_user"),
)
)
return {}
def start_server():
try:
port = int(os.environ["PULLER_PORT"])
    except (KeyError, ValueError):
port = 8000
uvicorn.run("puller:app", host="0.0.0.0", port=port, log_level="info")
|
[
"logging.basicConfig",
"os.getcwd",
"logging.info",
"uvicorn.run",
"shellescape.quote",
"os.chdir",
"logging.getLogger",
"fastapi.FastAPI"
] |
[((203, 212), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (210, 212), False, 'from fastapi import FastAPI, Request\n'), ((214, 253), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (233, 253), False, 'import logging\n'), ((263, 290), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (280, 290), False, 'import logging\n'), ((347, 358), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (356, 358), False, 'import os\n'), ((363, 392), 'os.chdir', 'os.chdir', (["repo_config['path']"], {}), "(repo_config['path'])\n", (371, 392), False, 'import os\n'), ((581, 599), 'os.chdir', 'os.chdir', (['old_path'], {}), '(old_path)\n', (589, 599), False, 'import os\n'), ((1579, 1590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1588, 1590), False, 'import os\n'), ((1596, 1625), 'os.chdir', 'os.chdir', (["repo_config['path']"], {}), "(repo_config['path'])\n", (1604, 1625), False, 'import os\n'), ((2345, 2359), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (2353, 2359), False, 'import os\n'), ((2952, 3022), 'uvicorn.run', 'uvicorn.run', (['"""puller:app"""'], {'host': '"""0.0.0.0"""', 'port': 'port', 'log_level': '"""info"""'}), "('puller:app', host='0.0.0.0', port=port, log_level='info')\n", (2963, 3022), False, 'import uvicorn\n'), ((1672, 1727), 'logging.info', 'logging.info', (['"""Resetting the repository before pulling"""'], {}), "('Resetting the repository before pulling')\n", (1684, 1727), False, 'import logging\n'), ((746, 754), 'shellescape.quote', 'quote', (['c'], {}), '(c)\n', (751, 754), False, 'from shellescape import quote\n')]
|
#
# import modules
#
from ahvl.options.generate.password import OptionsGeneratePassword
from ahvl.helper import AhvlMsg, AhvlHelper
from passlib import pwd
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# GeneratePassword
#
class GeneratePassword:
def __init__(self, lookup_plugin):
# set lookup plugin
self.lookup_plugin = lookup_plugin
self.variables = lookup_plugin.variables
self.kwargs = lookup_plugin.kwargs
# set options
self.opts = OptionsGeneratePassword(lookup_plugin)
def generate(self):
# password or passphrase
if self.opts.get('pwd_type') == "phrase":
passwd = pwd.genphrase(entropy=self.opts.get('pwd_entropy'),
length=self.opts.get('pwd_length'),
returns=None,
words=self.opts.get('pwd_words'),
wordset=self.opts.get('pwd_wordset'),
sep=self.opts.get('pwd_sep'))
else:
passwd = pwd.genword(entropy=self.opts.get('pwd_entropy'),
length=self.opts.get('pwd_length'),
returns=None,
chars=self.opts.get('pwd_words'),
charset=self.opts.get('pwd_charset'))
# return result
return passwd
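# Usage sketch (assumes `lookup` is the Ansible lookup plugin instance expected
# by OptionsGeneratePassword, i.e. it exposes .variables and .kwargs):
#   passwd = GeneratePassword(lookup).generate()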
|
[
"ahvl.helper.AhvlMsg",
"ahvl.options.generate.password.OptionsGeneratePassword",
"ahvl.helper.AhvlHelper"
] |
[((184, 193), 'ahvl.helper.AhvlMsg', 'AhvlMsg', ([], {}), '()\n', (191, 193), False, 'from ahvl.helper import AhvlMsg, AhvlHelper\n'), ((200, 212), 'ahvl.helper.AhvlHelper', 'AhvlHelper', ([], {}), '()\n', (210, 212), False, 'from ahvl.helper import AhvlMsg, AhvlHelper\n'), ((522, 560), 'ahvl.options.generate.password.OptionsGeneratePassword', 'OptionsGeneratePassword', (['lookup_plugin'], {}), '(lookup_plugin)\n', (545, 560), False, 'from ahvl.options.generate.password import OptionsGeneratePassword\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 12:03:59 2017
@author: Kevin
"""
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import LeaveOneGroupOut,GridSearchCV
dataPath = 'UTDallas/'
dataName = 'UTD'
nJobs = 12 # Number of cores to use
# Load feature matrices, labels, and groups (denoting which labeled time
# segment each row of the feature matrix comes from)
featuresAll = np.loadtxt(dataPath+dataName+'_all.csv',delimiter=',')
featuresAcc = np.loadtxt(dataPath+dataName+'_acc.csv',delimiter=',')
featuresEda = np.loadtxt(dataPath+dataName+'_eda.csv',delimiter=',')
labels = np.loadtxt(dataPath+dataName+'_label.csv')
groups = np.loadtxt(dataPath+dataName+'_groups.csv')
# Indicates the subjects that have no MAs, in order to exclude them during grid search
includeRowsTrain = np.logical_and(
np.logical_and(np.where(groups!=5,True,False),
np.where(groups!=17,True,False)),np.where(groups!=18,True,False))
# Leave-one-group-out cross-validation
cv = LeaveOneGroupOut()
# Parameter tuning by grid search
solver='lbfgs'
activation='relu'
regParam = 10.0**np.arange(-3,5)
# Comment out one of the choices below (either 1 or 2 hidden layers)
# 1 hidden layer
hiddenLayerSizes = 2**np.arange(0,8)
"""
# 2 hidden layers
hidden1,hidden2 = np.meshgrid(2**np.arange(0,8),2**np.arange(0,8))
hiddenLayerSizes = np.reshape(np.stack([hidden1,hidden2]),
(2,np.size(hidden1))).T.tolist()
"""
parameters = {'alpha': regParam,
'hidden_layer_sizes': hiddenLayerSizes}
gsAll = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAll.fit(featuresAll[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAll = gsAll.best_params_['alpha']
bestHiddenSizesAll = gsAll.best_params_['hidden_layer_sizes']
gsAcc = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsAcc.fit(featuresAcc[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaAcc = gsAcc.best_params_['alpha']
bestHiddenSizesAcc = gsAcc.best_params_['hidden_layer_sizes']
gsEda = GridSearchCV(MLPClassifier(solver=solver,activation=activation),
parameters,'roc_auc',n_jobs=nJobs,cv=cv,refit=False,
verbose=1)
gsEda.fit(featuresEda[includeRowsTrain,:],labels[includeRowsTrain],
groups[includeRowsTrain])
bestAlphaEda = gsEda.best_params_['alpha']
bestHiddenSizesEda = gsEda.best_params_['hidden_layer_sizes']
predAll = np.zeros(np.shape(labels))
predAcc = np.zeros(np.shape(labels))
predEda = np.zeros(np.shape(labels))
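# Leave-one-subject-out evaluation: for each held-out subject, refit an MLP on
# the remaining subjects using the tuned hyperparameters and store the
# predicted probabilities for the held-out segments.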
for train, test in cv.split(featuresAll,labels,groups):
mlpAll = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAll,
solver=solver,alpha=bestAlphaAll)
mlpAll.fit(featuresAll[train,:],labels[train])
predAll[test] = mlpAll.predict_proba(featuresAll[test,:])[:,1]
mlpAcc = MLPClassifier(hidden_layer_sizes=bestHiddenSizesAcc,
solver=solver,alpha=bestAlphaAcc)
mlpAcc.fit(featuresAcc[train,:],labels[train])
predAcc[test] = mlpAcc.predict_proba(featuresAcc[test,:])[:,1]
mlpEda = MLPClassifier(hidden_layer_sizes=bestHiddenSizesEda,
solver=solver,alpha=bestAlphaEda)
mlpEda.fit(featuresEda[train,:],labels[train])
predEda[test] = mlpEda.predict_proba(featuresEda[test,:])[:,1]
# Save the scores for further analysis
#np.save('MLPpredAllScores_UTD',predAll)
#np.save('MLPpredAccScores_UTD',predAcc)
#np.save('MLPpredEdaScores_UTD',predEda)
print('MLP AUC ALL: %f (%s)' % (roc_auc_score(labels,predAll),gsAll.best_params_))
print('MLP AUC ACC: %f (%s)' % (roc_auc_score(labels,predAcc),gsAcc.best_params_))
print('MLP AUC EDA: %f (%s)' % (roc_auc_score(labels,predEda),gsEda.best_params_))
|
[
"sklearn.metrics.roc_auc_score",
"numpy.shape",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"sklearn.neural_network.MLPClassifier",
"sklearn.model_selection.LeaveOneGroupOut"
] |
[((480, 539), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_all.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_all.csv', delimiter=',')\n", (490, 539), True, 'import numpy as np\n'), ((549, 608), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_acc.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_acc.csv', delimiter=',')\n", (559, 608), True, 'import numpy as np\n'), ((618, 677), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_eda.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_eda.csv', delimiter=',')\n", (628, 677), True, 'import numpy as np\n'), ((682, 728), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_label.csv')"], {}), "(dataPath + dataName + '_label.csv')\n", (692, 728), True, 'import numpy as np\n'), ((734, 781), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_groups.csv')"], {}), "(dataPath + dataName + '_groups.csv')\n", (744, 781), True, 'import numpy as np\n'), ((1066, 1084), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (1082, 1084), False, 'from sklearn.model_selection import LeaveOneGroupOut, GridSearchCV\n'), ((988, 1023), 'numpy.where', 'np.where', (['(groups != 18)', '(True)', '(False)'], {}), '(groups != 18, True, False)\n', (996, 1023), True, 'import numpy as np\n'), ((1170, 1186), 'numpy.arange', 'np.arange', (['(-3)', '(5)'], {}), '(-3, 5)\n', (1179, 1186), True, 'import numpy as np\n'), ((1296, 1311), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (1305, 1311), True, 'import numpy as np\n'), ((1644, 1695), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (1657, 1695), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2033, 2084), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (2046, 2084), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2422, 2473), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': 'solver', 'activation': 'activation'}), '(solver=solver, activation=activation)\n', (2435, 2473), False, 'from sklearn.neural_network import MLPClassifier\n'), ((2809, 2825), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2817, 2825), True, 'import numpy as np\n'), ((2846, 2862), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2854, 2862), True, 'import numpy as np\n'), ((2883, 2899), 'numpy.shape', 'np.shape', (['labels'], {}), '(labels)\n', (2891, 2899), True, 'import numpy as np\n'), ((2971, 3063), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesAll', 'solver': 'solver', 'alpha': 'bestAlphaAll'}), '(hidden_layer_sizes=bestHiddenSizesAll, solver=solver, alpha=\n bestAlphaAll)\n', (2984, 3063), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3221, 3313), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesAcc', 'solver': 'solver', 'alpha': 'bestAlphaAcc'}), '(hidden_layer_sizes=bestHiddenSizesAcc, solver=solver, alpha=\n bestAlphaAcc)\n', (3234, 3313), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3467, 3559), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'bestHiddenSizesEda', 'solver': 'solver', 'alpha': 'bestAlphaEda'}), 
'(hidden_layer_sizes=bestHiddenSizesEda, solver=solver, alpha=\n bestAlphaEda)\n', (3480, 3559), False, 'from sklearn.neural_network import MLPClassifier\n'), ((919, 953), 'numpy.where', 'np.where', (['(groups != 5)', '(True)', '(False)'], {}), '(groups != 5, True, False)\n', (927, 953), True, 'import numpy as np\n'), ((955, 990), 'numpy.where', 'np.where', (['(groups != 17)', '(True)', '(False)'], {}), '(groups != 17, True, False)\n', (963, 990), True, 'import numpy as np\n'), ((3895, 3925), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predAll'], {}), '(labels, predAll)\n', (3908, 3925), False, 'from sklearn.metrics import roc_auc_score\n'), ((3978, 4008), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predAcc'], {}), '(labels, predAcc)\n', (3991, 4008), False, 'from sklearn.metrics import roc_auc_score\n'), ((4061, 4091), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'predEda'], {}), '(labels, predEda)\n', (4074, 4091), False, 'from sklearn.metrics import roc_auc_score\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# vertices: frames x meshVerNum x 3
# trifaces: facePolygonNum x 3 = 22800 x 3
def ComputeNormal(vertices, trifaces):
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
U = vertices[:,trifaces[:,1],:] - vertices[:,trifaces[:,0],:] #frames x faceNum x 3
V = vertices[:,trifaces[:,2],:] - vertices[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = U.shape #remember: frames x faceNum x 3
U = np.reshape(U, [-1,3])
V = np.reshape(V, [-1,3])
faceNormals = np.cross(U,V) #frames x 13776 x 3
from sklearn.preprocessing import normalize
if np.isnan(np.max(faceNormals)):
        print('ComputeNormal: Warning: nan detected in face normals')
return
faceNormals = normalize(faceNormals)
faceNormals = np.reshape(faceNormals, originalShape)
if False: #Slow version
vertex_normals = np.zeros(vertices.shape) #(frames x 11510) x 3
for fIdx, vIdx in enumerate(trifaces[:,0]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,1]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
for fIdx, vIdx in enumerate(trifaces[:,2]):
vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
else: #Faster version
# Computing vertex normals, much faster (and obscure) replacement
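        # For every vertex, sum the normals of all faces that reference it:
        # build (vertex index, face index) pairs, sort them by vertex index,
        # then reduce the face normals over each run of equal vertex indices.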
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = vertex_normals.astype(np.float64)
originalShape = vertex_normals.shape
vertex_normals = np.reshape(vertex_normals, [-1,3])
vertex_normals = normalize(vertex_normals)
vertex_normals = np.reshape(vertex_normals,originalShape)
return vertex_normals
def ComputeNormal_gpu(vertices, trifaces):
import torch
import torch.nn.functional as F
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
return
#compute vertex Normals for all frames
#trifaces_cuda = torch.from_numpy(trifaces.astype(np.long)).cuda()
vertices_cuda = torch.from_numpy(vertices.astype(np.float32)).cuda()
U_cuda = vertices_cuda[:,trifaces[:,1],:] - vertices_cuda[:,trifaces[:,0],:] #frames x faceNum x 3
V_cuda = vertices_cuda[:,trifaces[:,2],:] - vertices_cuda[:,trifaces[:,1],:] #frames x faceNum x 3
originalShape = list(U_cuda.size()) #remember: frames x faceNum x 3
U_cuda = torch.reshape(U_cuda, [-1,3])#.astype(np.float32)
V_cuda = torch.reshape(V_cuda, [-1,3])#.astype(np.float32)
faceNormals = U_cuda.cross(V_cuda)
faceNormals = F.normalize(faceNormals,dim=1)
faceNormals = torch.reshape(faceNormals, originalShape)
# trifaces has duplicated vertex index, so cannot be parallazied
# vertex_normals = torch.zeros(vertices.shape,dtype=torch.float32).cuda() #(frames x 11510) x 3
# for fIdx, vIdx in enumerate(trifaces[:,0]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,1]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# for fIdx, vIdx in enumerate(trifaces[:,2]):
# vertex_normals[:,vIdx,:] += faceNormals[:,fIdx,:]
# Computing vertex normals, much faster (and obscure) replacement
index = np.vstack((np.ravel(trifaces), np.repeat(np.arange(len(trifaces)), 3))).T
index_sorted = index[index[:,0].argsort()]
vertex_normals = np.add.reduceat(faceNormals[:,index_sorted[:, 1],:][0],
np.concatenate(([0], np.cumsum(np.unique(index_sorted[:, 0],
return_counts=True)[1])[:-1])))[None, :]
vertex_normals = torch.from_numpy(vertex_normals).float().cuda()
vertex_normals = F.normalize(vertex_normals,dim=2)
vertex_normals = vertex_normals.data.cpu().numpy() #(batch, chunksize, dim)
return vertex_normals
|
[
"numpy.ravel",
"numpy.cross",
"numpy.zeros",
"numpy.max",
"sklearn.preprocessing.normalize",
"numpy.reshape",
"torch.reshape",
"torch.nn.functional.normalize",
"numpy.unique",
"torch.from_numpy"
] |
[((620, 642), 'numpy.reshape', 'np.reshape', (['U', '[-1, 3]'], {}), '(U, [-1, 3])\n', (630, 642), True, 'import numpy as np\n'), ((650, 672), 'numpy.reshape', 'np.reshape', (['V', '[-1, 3]'], {}), '(V, [-1, 3])\n', (660, 672), True, 'import numpy as np\n'), ((690, 704), 'numpy.cross', 'np.cross', (['U', 'V'], {}), '(U, V)\n', (698, 704), True, 'import numpy as np\n'), ((908, 930), 'sklearn.preprocessing.normalize', 'normalize', (['faceNormals'], {}), '(faceNormals)\n', (917, 930), False, 'from sklearn.preprocessing import normalize\n'), ((950, 988), 'numpy.reshape', 'np.reshape', (['faceNormals', 'originalShape'], {}), '(faceNormals, originalShape)\n', (960, 988), True, 'import numpy as np\n'), ((2011, 2046), 'numpy.reshape', 'np.reshape', (['vertex_normals', '[-1, 3]'], {}), '(vertex_normals, [-1, 3])\n', (2021, 2046), True, 'import numpy as np\n'), ((2067, 2092), 'sklearn.preprocessing.normalize', 'normalize', (['vertex_normals'], {}), '(vertex_normals)\n', (2076, 2092), False, 'from sklearn.preprocessing import normalize\n'), ((2114, 2155), 'numpy.reshape', 'np.reshape', (['vertex_normals', 'originalShape'], {}), '(vertex_normals, originalShape)\n', (2124, 2155), True, 'import numpy as np\n'), ((2902, 2932), 'torch.reshape', 'torch.reshape', (['U_cuda', '[-1, 3]'], {}), '(U_cuda, [-1, 3])\n', (2915, 2932), False, 'import torch\n'), ((2965, 2995), 'torch.reshape', 'torch.reshape', (['V_cuda', '[-1, 3]'], {}), '(V_cuda, [-1, 3])\n', (2978, 2995), False, 'import torch\n'), ((3073, 3104), 'torch.nn.functional.normalize', 'F.normalize', (['faceNormals'], {'dim': '(1)'}), '(faceNormals, dim=1)\n', (3084, 3104), True, 'import torch.nn.functional as F\n'), ((3123, 3164), 'torch.reshape', 'torch.reshape', (['faceNormals', 'originalShape'], {}), '(faceNormals, originalShape)\n', (3136, 3164), False, 'import torch\n'), ((4154, 4188), 'torch.nn.functional.normalize', 'F.normalize', (['vertex_normals'], {'dim': '(2)'}), '(vertex_normals, dim=2)\n', (4165, 4188), True, 'import torch.nn.functional as F\n'), ((793, 812), 'numpy.max', 'np.max', (['faceNormals'], {}), '(faceNormals)\n', (799, 812), True, 'import numpy as np\n'), ((1050, 1074), 'numpy.zeros', 'np.zeros', (['vertices.shape'], {}), '(vertices.shape)\n', (1058, 1074), True, 'import numpy as np\n'), ((3758, 3776), 'numpy.ravel', 'np.ravel', (['trifaces'], {}), '(trifaces)\n', (3766, 3776), True, 'import numpy as np\n'), ((1568, 1586), 'numpy.ravel', 'np.ravel', (['trifaces'], {}), '(trifaces)\n', (1576, 1586), True, 'import numpy as np\n'), ((4084, 4116), 'torch.from_numpy', 'torch.from_numpy', (['vertex_normals'], {}), '(vertex_normals)\n', (4100, 4116), False, 'import torch\n'), ((3984, 4033), 'numpy.unique', 'np.unique', (['index_sorted[:, 0]'], {'return_counts': '(True)'}), '(index_sorted[:, 0], return_counts=True)\n', (3993, 4033), True, 'import numpy as np\n'), ((1806, 1855), 'numpy.unique', 'np.unique', (['index_sorted[:, 0]'], {'return_counts': '(True)'}), '(index_sorted[:, 0], return_counts=True)\n', (1815, 1855), True, 'import numpy as np\n')]
|
import os
import numpy as np
import cv2
import sys
#sys.path.insert(0, '/home/kumarak/Desktop/campus_temp/pred2/')
#import get_dataset_colormap
read="./all_at_100_nocol/"
gtread=open("./thinglabels.txt").readlines()
gt={}
#print(gtread)
for i in gtread:
gt[int(i.split(':')[0])]=i.split(':')[1][1:-1]
#print(gt)
#map=get_dataset_colormap.create_label_colormap()
#list=[(map[i],i) for i in range(0,len(map))]
results=[]
for filename in os.listdir(read):
    #print(filename)
    if filename.endswith('.png'):
        img=cv2.imread(read+filename)
        classes=[gt[i] for i in np.unique(img) if i!=255]
        results.append((filename,classes))
for i in sorted(results):
print(i)
|
[
"cv2.imread",
"os.listdir",
"numpy.unique"
] |
[((435, 451), 'os.listdir', 'os.listdir', (['read'], {}), '(read)\n', (445, 451), False, 'import os\n'), ((508, 535), 'cv2.imread', 'cv2.imread', (['(read + filename)'], {}), '(read + filename)\n', (518, 535), False, 'import cv2\n'), ((560, 574), 'numpy.unique', 'np.unique', (['img'], {}), '(img)\n', (569, 574), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
img = cv2.imread('imagem.jpg')
##img = cv2.imread('imagem3.jpg',0)
cv2.imshow('imagem',img)
img = cv2.GaussianBlur(img, (7, 5), 0)
cv2.imshow('imagemblur',img)
gray_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
circles = cv2.HoughCircles(gray_img,cv2.HOUGH_GRADIENT,1,30,
param1=50,param2=30,minRadius=0,maxRadius=60)
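# For HOUGH_GRADIENT the positional 1 and 30 are dp (inverse accumulator
# resolution) and minDist (minimum distance between detected centres);
# param1 is the upper Canny edge threshold, param2 the accumulator threshold,
# so lowering param2 yields more (and possibly spurious) circles.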
cimg = img
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.circle(cimg,(0,0),i[2],(0,0,255),2)
cv2.circle(cimg,(390,390),i[2],(255,0,0),2)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.GaussianBlur",
"cv2.HoughCircles",
"cv2.circle",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.around",
"cv2.imshow"
] |
[((40, 64), 'cv2.imread', 'cv2.imread', (['"""imagem.jpg"""'], {}), "('imagem.jpg')\n", (50, 64), False, 'import cv2\n'), ((103, 128), 'cv2.imshow', 'cv2.imshow', (['"""imagem"""', 'img'], {}), "('imagem', img)\n", (113, 128), False, 'import cv2\n'), ((135, 167), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(7, 5)', '(0)'], {}), '(img, (7, 5), 0)\n', (151, 167), False, 'import cv2\n'), ((169, 198), 'cv2.imshow', 'cv2.imshow', (['"""imagemblur"""', 'img'], {}), "('imagemblur', img)\n", (179, 198), False, 'import cv2\n'), ((210, 247), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (222, 247), False, 'import cv2\n'), ((262, 368), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray_img', 'cv2.HOUGH_GRADIENT', '(1)', '(30)'], {'param1': '(50)', 'param2': '(30)', 'minRadius': '(0)', 'maxRadius': '(60)'}), '(gray_img, cv2.HOUGH_GRADIENT, 1, 30, param1=50, param2=30,\n minRadius=0, maxRadius=60)\n', (278, 368), False, 'import cv2\n'), ((635, 681), 'cv2.circle', 'cv2.circle', (['cimg', '(0, 0)', 'i[2]', '(0, 0, 255)', '(2)'], {}), '(cimg, (0, 0), i[2], (0, 0, 255), 2)\n', (645, 681), False, 'import cv2\n'), ((676, 726), 'cv2.circle', 'cv2.circle', (['cimg', '(390, 390)', 'i[2]', '(255, 0, 0)', '(2)'], {}), '(cimg, (390, 390), i[2], (255, 0, 0), 2)\n', (686, 726), False, 'import cv2\n'), ((723, 759), 'cv2.imshow', 'cv2.imshow', (['"""detected circles"""', 'cimg'], {}), "('detected circles', cimg)\n", (733, 759), False, 'import cv2\n'), ((760, 774), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (771, 774), False, 'import cv2\n'), ((776, 799), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (797, 799), False, 'import cv2\n'), ((423, 441), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (432, 441), True, 'import numpy as np\n'), ((501, 553), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (511, 553), False, 'import cv2\n'), ((589, 638), 'cv2.circle', 'cv2.circle', (['cimg', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (599, 638), False, 'import cv2\n')]
|
# Generated by Django 1.11.10 on 2018-02-27 19:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("letters", "0017_auto_20180227_1908")]
operations = [
migrations.AlterField(
model_name="letter",
name="record",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="letters_letter_related",
related_query_name="letters_letters",
to="records.Record",
),
)
]
|
[
"django.db.models.OneToOneField"
] |
[((356, 528), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""letters_letter_related"""', 'related_query_name': '"""letters_letters"""', 'to': '"""records.Record"""'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='letters_letter_related', related_query_name=\n 'letters_letters', to='records.Record')\n", (376, 528), False, 'from django.db import migrations, models\n')]
|
# Author: <NAME>
# Date: Nov 13, 2018; revision: Mar 13, 2019
# License: MIT
import torch.nn as nn
import torch.nn.init as init
from torch.nn.init import kaiming_normal_, constant_
activation_functions = {
'relu': nn.ReLU,
'leaky_relu': nn.LeakyReLU,
'elu': nn.ELU,
'sigmoid': nn.Sigmoid,
'tanh': nn.Tanh,
'softplus': nn.Softplus,
'softmax': nn.Softmax
}
init_gain = {
    'relu': 1.41421,
    'leaky_relu': 1.41421,
    'elu': 1.41421,
'sigmoid': 1,
'tanh': 1.66667,
'softplus': 1,
'softmax': 1
}
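# Rough Xavier/orthogonal gains: ~sqrt(2) for the ReLU family, 5/3 for tanh,
# 1 otherwise (cf. torch.nn.init.calculate_gain); only applied for the
# 'xavier' and 'orthogonal' initializers below.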
class _LayerNd(nn.Module):
def __init__(self, kernel_initializer, activation):
super(_LayerNd, self).__init__()
if isinstance(activation, str):
self.activation = activation_functions[activation]()
else:
self.activation = activation
if isinstance(kernel_initializer, str):
if kernel_initializer == 'normal':
self.kernel_initializer = init.normal_
elif kernel_initializer == 'kaiming':
self.kernel_initializer = init.kaiming_normal_
elif kernel_initializer == 'xavier':
self.kernel_initializer = init.xavier_normal_
self.gain = init_gain.setdefault(activation, 1)
elif kernel_initializer == 'orthogonal':
self.kernel_initializer = init.orthogonal_
self.gain = init_gain.setdefault(activation, 1)
else:
self.kernel_initializer = kernel_initializer
class Conv2DNorm(nn.Module):
"""Applies 2D convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(Conv2DNorm, self).__init__()
conv_base = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# if hasattr(self, 'gain'):
# self.kernel_initializer(conv_base.weight, gain=self.gain)
# else:
# self.kernel_initializer(conv_base.weight)
if batch_norm:
if activation:
self.conv = nn.Sequential(
conv_base,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_base,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.conv = nn.Sequential(
conv_base,
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_base)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.conv(x)
return x
# reference: https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
class SeparableConv2D(nn.Module):
"""Applies depthwise separable 2D convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(SeparableConv2D, self).__init__()
conv_depthwise = nn.Conv2d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=bias)
conv_pointwise = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=bias)
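        # Depthwise separable convolution: the first Conv2d applies one spatial
        # filter per input channel (groups=in_channels), then the 1x1 pointwise
        # Conv2d mixes information across channels.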
# init.xavier_normal_(conv_depthwise.weight)
# if hasattr(self, 'gain'):
# self.kernel_initializer(conv_pointwise.weight, gain=self.gain)
# else:
# self.kernel_initializer(conv_pointwise.weight)
if batch_norm:
if activation:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise,
nn.LeakyReLU(0.1,inplace=True))
else:
self.conv = nn.Sequential(
conv_depthwise,
conv_pointwise)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.conv(x)
return x
class ConvResidual2D(Conv2DNorm):
"""Convolutional 2D residual block with batch normalization and activation."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, kernel_initializer='normal', batch_norm=False, activation=None):
super(ConvResidual2D, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, kernel_initializer=kernel_initializer, batch_norm=batch_norm, activation=activation)
def forward(self, x):
out = self.conv(x)
return x + out
class Deconv2DNorm(nn.Module):
"""Applies 2D transposed convolution over an input signal with batch normalization and activation.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, output_padding=0, groups=1, bias=True, kernel_initializer='normal', dilation=1, batch_norm=False, activation=None):
super(Deconv2DNorm, self).__init__()
deconv_base = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
bias=bias,
dilation=dilation)
# if hasattr(self, 'gain'):
# self.kernel_initializer(deconv_base.weight, gain=self.gain)
# else:
# self.kernel_initializer(deconv_base.weight)
if batch_norm:
if activation:
self.deconv = nn.Sequential(
deconv_base,
nn.BatchNorm2d(num_features=out_channels),
nn.LeakyReLU(0.1,inplace=True))
else:
self.deconv = nn.Sequential(
deconv_base,
nn.BatchNorm2d(num_features=out_channels))
else:
if activation:
self.deconv = nn.Sequential(
deconv_base,
nn.LeakyReLU(0.1,inplace=True))
else:
self.deconv = nn.Sequential(
deconv_base)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
kaiming_normal_(m.weight, 0.1)
if m.bias is not None:
constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
constant_(m.weight, 1)
constant_(m.bias, 0)
def forward(self, x):
x = self.deconv(x)
return x
def crop_like(input, target):
"""Crop input hieght and width to match target."""
if input.size()[2:] == target.size()[2:]:
return input
else:
return input[:, :, :target.size(2), :target.size(3)]
|
[
"torch.nn.init.kaiming_normal_",
"torch.nn.ConvTranspose2d",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"torch.nn.LeakyReLU"
] |
[((1895, 2068), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding, dilation=dilation, groups=\n groups, bias=bias)\n', (1904, 2068), True, 'import torch.nn as nn\n'), ((3938, 4115), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'in_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'in_channels', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=in_channels, kernel_size=\n kernel_size, stride=stride, padding=padding, dilation=dilation, groups=\n in_channels, bias=bias)\n', (3947, 4115), True, 'import torch.nn as nn\n'), ((4229, 4363), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=1,\n stride=1, padding=0, dilation=1, groups=1, bias=bias)\n', (4238, 4363), True, 'import torch.nn as nn\n'), ((7000, 7212), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'output_padding': 'output_padding', 'groups': 'groups', 'bias': 'bias', 'dilation': 'dilation'}), '(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding, output_padding\n =output_padding, groups=groups, bias=bias, dilation=dilation)\n', (7018, 7212), True, 'import torch.nn as nn\n'), ((2945, 2969), 'torch.nn.Sequential', 'nn.Sequential', (['conv_base'], {}), '(conv_base)\n', (2958, 2969), True, 'import torch.nn as nn\n'), ((3119, 3149), 'torch.nn.init.kaiming_normal_', 'kaiming_normal_', (['m.weight', '(0.1)'], {}), '(m.weight, 0.1)\n', (3134, 3149), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((5432, 5477), 'torch.nn.Sequential', 'nn.Sequential', (['conv_depthwise', 'conv_pointwise'], {}), '(conv_depthwise, conv_pointwise)\n', (5445, 5477), True, 'import torch.nn as nn\n'), ((5647, 5677), 'torch.nn.init.kaiming_normal_', 'kaiming_normal_', (['m.weight', '(0.1)'], {}), '(m.weight, 0.1)\n', (5662, 5677), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((8120, 8146), 'torch.nn.Sequential', 'nn.Sequential', (['deconv_base'], {}), '(deconv_base)\n', (8133, 8146), True, 'import torch.nn as nn\n'), ((8296, 8326), 'torch.nn.init.kaiming_normal_', 'kaiming_normal_', (['m.weight', '(0.1)'], {}), '(m.weight, 0.1)\n', (8311, 8326), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((2482, 2523), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (2496, 2523), True, 'import torch.nn as nn\n'), ((2545, 2576), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (2557, 2576), True, 'import torch.nn as nn\n'), ((2689, 2730), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (2703, 2730), True, 'import torch.nn as nn\n'), ((2867, 2898), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), 
'(0.1, inplace=True)\n', (2879, 2898), True, 'import torch.nn as nn\n'), ((3209, 3229), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3218, 3229), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((3294, 3316), 'torch.nn.init.constant_', 'constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (3303, 3316), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((3333, 3353), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3342, 3353), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((4887, 4928), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (4901, 4928), True, 'import torch.nn as nn\n'), ((4950, 4981), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (4962, 4981), True, 'import torch.nn as nn\n'), ((5135, 5176), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (5149, 5176), True, 'import torch.nn as nn\n'), ((5354, 5385), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (5366, 5385), True, 'import torch.nn as nn\n'), ((5737, 5757), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5746, 5757), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((5822, 5844), 'torch.nn.init.constant_', 'constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (5831, 5844), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((5861, 5881), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5870, 5881), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((7647, 7688), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (7661, 7688), True, 'import torch.nn as nn\n'), ((7710, 7741), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7722, 7741), True, 'import torch.nn as nn\n'), ((7858, 7899), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'out_channels'}), '(num_features=out_channels)\n', (7872, 7899), True, 'import torch.nn as nn\n'), ((8040, 8071), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (8052, 8071), True, 'import torch.nn as nn\n'), ((8386, 8406), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8395, 8406), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((8471, 8493), 'torch.nn.init.constant_', 'constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (8480, 8493), False, 'from torch.nn.init import kaiming_normal_, constant_\n'), ((8510, 8530), 'torch.nn.init.constant_', 'constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (8519, 8530), False, 'from torch.nn.init import kaiming_normal_, constant_\n')]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from unittest import mock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun
from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_trial
class DummyEnum(Enum):
DUMMY: str = "dummy"
class ChemistryMetricTest(TestCase):
def testChemistryMetric(self):
# basic test
read_csv = pd.read_csv
for problem_type in (
ChemistryProblemType.DIRECT_ARYLATION,
ChemistryProblemType.SUZUKI,
):
with mock.patch(
"ax.metrics.chemistry.pd.read_csv",
wraps=lambda filename, index_col: read_csv(
filename, index_col=index_col, nrows=1
),
) as mock_read_csv:
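                # Note: the wrapped read_csv only loads the first row (nrows=1) to keep the
                # test fast; mock_read_csv.call_count below is also used to verify that a
                # repeated fetch hits the cache instead of re-reading the CSV.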
metric = ChemistryMetric(name="test_metric", problem_type=problem_type)
self.assertFalse(metric.noiseless)
self.assertIs(metric.problem_type, problem_type)
self.assertFalse(metric.lower_is_better)
if problem_type is ChemistryProblemType.DIRECT_ARYLATION:
param_names = [
"Base_SMILES",
"Concentration",
"Ligand_SMILES",
"Solvent_SMILES",
"Temp_C",
]
param_values = (
"O=C([O-])C.[K+]",
0.1,
(
"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)"
"C4CCCCC4)C(OC)=CC=C2OC"
),
"CC(N(C)C)=O",
105,
)
obj = 5.47
else:
param_names = [
"Base_SMILES",
"Electrophile_SMILES",
"Ligand_SMILES",
"Nucleophile_SMILES",
"Solvent_SMILES",
]
param_values = (
"[Na+].[OH-]",
"ClC1=CC=C(N=CC=C2)C2=C1",
"CC(P(C(C)(C)C)C(C)(C)C)(C)C",
"CC1=CC=C(N(C2CCCCO2)N=C3)C3=C1B(O)O",
"N#CC",
)
obj = 4.76
params = dict(zip(param_names, param_values))
trial = get_trial()
trial._generator_run = GeneratorRun(
arms=[Arm(name="0_0", parameters=params)]
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(mock_read_csv.call_count, 1)
self.assertEqual(df["mean"].values[0], obj)
self.assertTrue(np.isnan(df["sem"].values[0]))
# test caching
metric.fetch_trial_data(trial)
self.assertEqual(mock_read_csv.call_count, 1)
# test noiseless
metric = ChemistryMetric(
name="test_metric", problem_type=problem_type, noiseless=True
)
df = metric.fetch_trial_data(trial).df
self.assertEqual(df["sem"].values[0], 0.0)
|
[
"ax.core.arm.Arm",
"ax.metrics.chemistry.ChemistryMetric",
"ax.utils.testing.core_stubs.get_trial",
"numpy.isnan"
] |
[((1119, 1181), 'ax.metrics.chemistry.ChemistryMetric', 'ChemistryMetric', ([], {'name': '"""test_metric"""', 'problem_type': 'problem_type'}), "(name='test_metric', problem_type=problem_type)\n", (1134, 1181), False, 'from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType\n'), ((2811, 2822), 'ax.utils.testing.core_stubs.get_trial', 'get_trial', ([], {}), '()\n', (2820, 2822), False, 'from ax.utils.testing.core_stubs import get_trial\n'), ((3395, 3473), 'ax.metrics.chemistry.ChemistryMetric', 'ChemistryMetric', ([], {'name': '"""test_metric"""', 'problem_type': 'problem_type', 'noiseless': '(True)'}), "(name='test_metric', problem_type=problem_type, noiseless=True)\n", (3410, 3473), False, 'from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType\n'), ((3165, 3194), 'numpy.isnan', 'np.isnan', (["df['sem'].values[0]"], {}), "(df['sem'].values[0])\n", (3173, 3194), True, 'import numpy as np\n'), ((2902, 2936), 'ax.core.arm.Arm', 'Arm', ([], {'name': '"""0_0"""', 'parameters': 'params'}), "(name='0_0', parameters=params)\n", (2905, 2936), False, 'from ax.core.arm import Arm\n')]
|
from unittest import mock
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
import lxml.objectify
import persistent.interfaces
import zeit.cms.interfaces
import zeit.edit.testing
import zeit.edit.tests.fixture
import zope.component
class ElementUniqueIdTest(zeit.edit.testing.FunctionalTestCase):
def setUp(self):
super(ElementUniqueIdTest, self).setUp()
xml = lxml.objectify.fromstring("""
<container
xmlns:cp="http://namespaces.zeit.de/CMS/cp"
cp:__name__="body">
<block cp:type="block" cp:__name__="foo"/>
</container>""")
content = self.repository['testcontent']
self.container = zeit.edit.tests.fixture.Container(content, xml)
self.block = zeit.edit.tests.fixture.Block(
self.container, xml.block)
# Fake traversal ability.
ExampleContentType.__getitem__ = lambda s, key: self.container
def tearDown(self):
del ExampleContentType.__getitem__
super(ElementUniqueIdTest, self).tearDown()
def test_block_ids_are_composed_of_parent_ids(self):
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de/testcontent#body',
self.container.uniqueId)
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de/testcontent#body/'
'foo',
self.block.uniqueId)
def test_resolving_block_ids_uses_traversal(self):
block = zeit.cms.interfaces.ICMSContent(self.block.uniqueId)
self.assertEqual(block, self.block)
def test_block_without_name_uses_index(self):
del self.block.xml.attrib['{http://namespaces.zeit.de/CMS/cp}__name__']
with mock.patch('zeit.edit.tests.fixture.Container.index') as index:
index.return_value = 0
self.assertEqual(
'http://block.vivi.zeit.de/http://xml.zeit.de'
'/testcontent#body/0', self.block.uniqueId)
def test_block_equality_compares_xml(self):
xml = """
<container xmlns:cp="http://namespaces.zeit.de/CMS/cp">
<block cp:type="block" cp:__name__="foo"/>
</container>"""
xml1 = lxml.objectify.fromstring(xml)
xml2 = lxml.objectify.fromstring(xml)
# CAUTION: xml1 == xml2 does not do what one might think it does,
# thus block equality uses a proper in-depth xml comparison:
self.assertNotEqual(xml1, xml2)
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertEqual(block1, block2)
def test_blocks_are_unequal_when_text_nodes_differ(self):
# Upstream xmldiff wants to write to (a copy of) text nodes, which is
# not possible with lxml.objectify.
xml1 = lxml.objectify.fromstring("""
<container>
<foo>bar</foo>
</container>""")
xml2 = lxml.objectify.fromstring("""
<container>
<foo>qux</foo>
</container>""")
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertNotEqual(block1, block2)
def test_blocks_are_unequal_when_tag_counts_differ(self):
xml1 = lxml.objectify.fromstring("""
<foo><one/></foo>
""")
xml2 = lxml.objectify.fromstring("""
<foo><one/><two/><three/></foo>
""")
block1 = zeit.edit.tests.fixture.Block(None, xml1)
block2 = zeit.edit.tests.fixture.Block(None, xml2)
self.assertNotEqual(block1, block2)
class ElementFactoryTest(zeit.edit.testing.FunctionalTestCase):
def test_factory_returns_interface_implemented_by_element(self):
context = mock.Mock()
zope.interface.alsoProvides(context, persistent.interfaces.IPersistent)
container = zeit.edit.tests.fixture.Container(
context, lxml.objectify.fromstring('<container/>'))
block_factory = zope.component.getAdapter(
container, zeit.edit.interfaces.IElementFactory, 'block')
self.assertEqual(
zeit.edit.tests.fixture.IBlock, block_factory.provided_interface)
|
[
"unittest.mock.patch",
"unittest.mock.Mock"
] |
[((3761, 3772), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3770, 3772), False, 'from unittest import mock\n'), ((1723, 1776), 'unittest.mock.patch', 'mock.patch', (['"""zeit.edit.tests.fixture.Container.index"""'], {}), "('zeit.edit.tests.fixture.Container.index')\n", (1733, 1776), False, 'from unittest import mock\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
""" setup.py
The setup for this package.
"""
# Package Header #
from src.hdf5objects.__header__ import *
# Header #
__author__ = __author__
__credits__ = __credits__
__maintainer__ = __maintainer__
__email__ = __email__
# Imports #
# Standard Libraries #
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
# Third-Party Packages #
from setuptools import find_packages
from setuptools import setup
# Definitions #
# Functions #
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
# Main #
setup(
name=__package_name__,
version=__version__,
license=__license__,
description='Extra fileobjects for handling and typing HDF5 files.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
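    # The regexes above strip the ".. start-badges"/".. end-badges" block from
    # README.rst and rewrite Sphinx roles in CHANGELOG.rst as plain literals
    # before combining both files into the long description.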
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/fonganthonym/python-hdf5objects',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
project_urls={
'Documentation': 'https://python-hdf5objects.readthedocs.io/',
'Changelog': 'https://python-hdf5objects.readthedocs.io/en/latest/changelog.html',
'Issue Tracker': 'https://github.com/fonganthonym/python-hdf5objects/issues',
},
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
python_requires='>=3.6',
install_requires=[
'baseobjects>=1.5.1', 'classversioning', 'framestructure', 'dspobjects', 'h5py>=3.2.1', 'numpy',
'multipledispatch', 'pytz', 'tzlocal', 'bidict'
],
extras_require={
"dev": ['pytest>=6.2.3'],
},
entry_points={
'console_scripts': [
'hdf5objects = hdf5objects.cli:main',
]
},
)
|
[
"os.path.basename",
"os.path.dirname",
"glob.glob",
"setuptools.find_packages",
"re.compile"
] |
[((1256, 1276), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (1269, 1276), False, 'from setuptools import find_packages\n'), ((642, 659), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (649, 659), False, 'from os.path import dirname\n'), ((1363, 1379), 'glob.glob', 'glob', (['"""src/*.py"""'], {}), "('src/*.py')\n", (1367, 1379), False, 'from glob import glob\n'), ((1332, 1346), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (1340, 1346), False, 'from os.path import basename\n'), ((966, 1025), 're.compile', 're.compile', (['"""^.. start-badges.*^.. end-badges"""', '(re.M | re.S)'], {}), "('^.. start-badges.*^.. end-badges', re.M | re.S)\n", (976, 1025), False, 'import re\n')]
|
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import PoseArray, Pose
from tf.transformations import euler_from_quaternion
import time
import math
import struct
import ctypes
from scipy import ndimage
import matplotlib.pyplot as plt
from nav_msgs.msg import Odometry
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
class identifyObstacle3D:
def __init__(self):
self.currentPosX, self.currentPosY, self.currentPosZ, self.currentPosYaw = 2, 2, 2, 0
self.count = 0
self.unic = 0
self.pub = rospy.Publisher('/build_map3D', PoseArray, queue_size=1)
self.all = []
self.obsX, self.obsY, self.obsZ = [], [], []
self.t = time.time()
self.number_of_sampling = 30
rospy.init_node("obstacle3D")
print("Start")
# _ = rospy.Subscriber("/uav1/velodyne/scan", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/rs_d435/depth/points", PointCloud2, self.callbackObstacle)
_ = rospy.Subscriber("/uav1/odometry/odom_main", Odometry, self.callbackPosicao)
def callbackPosicao(self, odom):
_, _, yaw = euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])
if self.count == 0:
self.lastYaw = yaw
self.currentPosX = odom.pose.pose.position.x
self.currentPosY = odom.pose.pose.position.y
        self.currentPosZ = odom.pose.pose.position.z
self.currentPosYaw = yaw
self.count += 1
def rotationMatrix(self, psi0, x1, y1, z1):
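        # Builds the standard rotation matrix about the z-axis for yaw angle psi0
        # and applies its transpose (the inverse rotation) to the stacked
        # coordinates, which appears to express the points in the local frame.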
r = [[np.cos(psi0), np.sin(psi0) * -1, 0], [np.sin(psi0), np.cos(psi0), 0], [0, 0, 1]]
pos_local = np.dot(np.transpose(np.asarray(r)), np.asarray([x1, y1, z1]))
return pos_local
def callbackObstacle(self, data):
print(time.time()-self.t)
if self.count > 0:
a4, a5, a6 = [], [], []
a1, a2, a3 = [], [], []
x, y, z = [], [], []
abc = []
matriz = np.zeros((101, 101))
xyz = np.array([[0,0,0]])
gen = point_cloud2.read_points(data, skip_nans=True)
int_data = list(gen)
            for point in int_data:
                if round(point[2]) > 0 and [round(point[0]), round(-point[1]), round(point[2])] not in abc:
                    a4.append(round(point[0]))
                    a5.append(round(-point[1]))
                    a6.append(round(point[2]))
                    abc.append([round(point[0]), round(-point[1]), round(point[2])])
pl = self.rotationMatrix(0, a4, a5, a6)
for i1, i2, i3 in zip(pl[0], pl[1], pl[2]):
a1.append(i2)
a2.append(i1)
a3.append(i3)
xyz = np.append(xyz,[[i2, i1, i3]], axis = 0)
self.count += 1
if 8<time.time()-self.t<13:
ax = plt.axes(projection = "3d")
ax.plot3D(a1, a2, a3, 'y.')
ax.plot3D([self.currentPosX], [self.currentPosY], [self.currentPosZ], ".r")
ax.set_xlim(0,20)
ax.set_ylim(0,20)
ax.set_zlim(0,20)
ax.set_xlabel("x (m)" + str(self.currentPosX))
ax.set_ylabel("y (m)" + str(self.currentPosY))
ax.set_zlabel("z (m)" + str(self.currentPosZ))
ax.view_init(50, -137)
plt.pause(0.01)
plt.show()
def main():
identifyObstacle3D()
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
|
[
"rospy.Subscriber",
"sensor_msgs.point_cloud2.read_points",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.asarray",
"numpy.zeros",
"rospy.Publisher",
"time.time",
"numpy.append",
"numpy.sin",
"numpy.array",
"rospy.init_node",
"numpy.cos",
"tf.transformations.euler_from_quaternion",
"rospy.spin",
"matplotlib.pyplot.pause"
] |
[((602, 658), 'rospy.Publisher', 'rospy.Publisher', (['"""/build_map3D"""', 'PoseArray'], {'queue_size': '(1)'}), "('/build_map3D', PoseArray, queue_size=1)\n", (617, 658), False, 'import rospy\n'), ((751, 762), 'time.time', 'time.time', ([], {}), '()\n', (760, 762), False, 'import time\n'), ((810, 839), 'rospy.init_node', 'rospy.init_node', (['"""obstacle3D"""'], {}), "('obstacle3D')\n", (825, 839), False, 'import rospy\n'), ((966, 1053), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/uav1/rs_d435/depth/points"""', 'PointCloud2', 'self.callbackObstacle'], {}), "('/uav1/rs_d435/depth/points', PointCloud2, self.\n callbackObstacle)\n", (982, 1053), False, 'import rospy\n'), ((1061, 1137), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/uav1/odometry/odom_main"""', 'Odometry', 'self.callbackPosicao'], {}), "('/uav1/odometry/odom_main', Odometry, self.callbackPosicao)\n", (1077, 1137), False, 'import rospy\n'), ((1196, 1344), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose\n .orientation.z, odom.pose.pose.orientation.w]'], {}), '([odom.pose.pose.orientation.x, odom.pose.pose.\n orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])\n', (1217, 1344), False, 'from tf.transformations import euler_from_quaternion\n'), ((3598, 3610), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3608, 3610), False, 'import rospy\n'), ((1834, 1858), 'numpy.asarray', 'np.asarray', (['[x1, y1, z1]'], {}), '([x1, y1, z1])\n', (1844, 1858), True, 'import numpy as np\n'), ((2132, 2152), 'numpy.zeros', 'np.zeros', (['(101, 101)'], {}), '((101, 101))\n', (2140, 2152), True, 'import numpy as np\n'), ((2171, 2192), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2179, 2192), True, 'import numpy as np\n'), ((2212, 2258), 'sensor_msgs.point_cloud2.read_points', 'point_cloud2.read_points', (['data'], {'skip_nans': '(True)'}), '(data, skip_nans=True)\n', (2236, 2258), False, 'from sensor_msgs import point_cloud2\n'), ((1697, 1709), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1703, 1709), True, 'import numpy as np\n'), ((1735, 1747), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1741, 1747), True, 'import numpy as np\n'), ((1749, 1761), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1755, 1761), True, 'import numpy as np\n'), ((1818, 1831), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (1828, 1831), True, 'import numpy as np\n'), ((1938, 1949), 'time.time', 'time.time', ([], {}), '()\n', (1947, 1949), False, 'import time\n'), ((2853, 2891), 'numpy.append', 'np.append', (['xyz', '[[i2, i1, i3]]'], {'axis': '(0)'}), '(xyz, [[i2, i1, i3]], axis=0)\n', (2862, 2891), True, 'import numpy as np\n'), ((2984, 3009), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2992, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3498, 3513), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (3507, 3513), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3538, 3540), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1723), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1717, 1723), True, 'import numpy as np\n'), ((2940, 2951), 'time.time', 'time.time', ([], {}), '()\n', (2949, 2951), False, 'import time\n')]
|
import re
class HttpRequest:
"""
Parser for HTTP requests
"""
def __init__(self, request):
"""
Accepts an HTTP request bytestring
"""
# Convert from bytes to string
self.request = request.decode("utf-8")
self.requestline = re.match("GET .* HTTP/1.1", self.request).group(0)
        self.url = re.search(r"\s.*\s", self.requestline).group(0)[1:-1]
self.params = {}
if '?' in self.url:
elems = self.url.split('?')
self.path = elems[0]
self.query = elems[1]
querylines = self.query.split('&')
for line in querylines:
linekey, lineval = line.split('=')
self.params[linekey] = lineval
else:
self.path = self.url
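# Illustrative usage sketch (not part of the original module): parsing a GET
# request with a query string populates `path` and `params`, e.g.
#
#   raw = b"GET /search?q=python&page=2 HTTP/1.1\r\nHost: example.com\r\n\r\n"
#   req = HttpRequest(raw)
#   req.path    -> "/search"
#   req.params  -> {"q": "python", "page": "2"}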
|
[
"re.search",
"re.match"
] |
[((291, 332), 're.match', 're.match', (['"""GET .* HTTP/1.1"""', 'self.request'], {}), "('GET .* HTTP/1.1', self.request)\n", (299, 332), False, 'import re\n'), ((361, 400), 're.search', 're.search', (['"""\\\\s.*\\\\s"""', 'self.requestline'], {}), "('\\\\s.*\\\\s', self.requestline)\n", (370, 400), False, 'import re\n')]
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
import numpy as np
from rlgraph import get_backend
from rlgraph.agents import Agent
from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay
from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction
from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace
from rlgraph.spaces.space_utils import sanity_check_space
from rlgraph.utils import RLGraphError
from rlgraph.utils.decorators import rlgraph_api, graph_fn
from rlgraph.utils.ops import flatten_op, DataOpTuple
from rlgraph.utils.util import strip_list, force_list
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class SyncSpecification(object):
"""Describes a synchronization schedule, used to update the target value weights. The target values are gradually
updates using exponential moving average as suggested by the paper."""
def __init__(self, sync_interval=None, sync_tau=None):
"""
Arguments:
sync_interval: How often to update the target.
sync_tau: The smoothing constant to use in the averaging. Setting to 1 replaces the values each iteration.
"""
self.sync_interval = sync_interval
self.sync_tau = sync_tau
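# Note on sync_tau (an illustrative restatement of the update applied in
# SACAgentComponent._graph_fn_sync below): every target-network variable is
# moved towards its online counterpart by an exponential moving average,
#
#     target <- sync_tau * online + (1.0 - sync_tau) * target
#
# so sync_tau = 1.0 copies the online weights outright, while a small value
# (the paper default of 0.005 used further down) nudges the target only
# slightly on every sync step.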
class SACAgentComponent(Component):
def __init__(self, agent, policy, q_function, preprocessor, memory, discount, initial_alpha, target_entropy,
optimizer, vf_optimizer, alpha_optimizer, q_sync_spec, num_q_functions=2):
super(SACAgentComponent, self).__init__(nesting_level=0)
self.agent = agent
self._policy = policy
self._preprocessor = preprocessor
self._memory = memory
self._q_functions = [q_function]
self._q_functions += [q_function.copy(scope="{}-{}".format(q_function.scope, i + 1), trainable=True)
for i in range(num_q_functions - 1)]
# Set number of return values for get_q_values graph_fn.
self.graph_fn_num_outputs["_graph_fn_get_q_values"] = num_q_functions
for q in self._q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in q.sub_components:
q.add_components(Synchronizable(), expose_apis="sync")
self._target_q_functions = [q.copy(scope="target-" + q.scope, trainable=True) for q in self._q_functions]
for target_q in self._target_q_functions:
# TODO: is there a better way to do this?
if "synchronizable" not in target_q.sub_components:
target_q.add_components(Synchronizable(), expose_apis="sync")
self._optimizer = optimizer
self.vf_optimizer = vf_optimizer
self.alpha_optimizer = alpha_optimizer
self.initial_alpha = initial_alpha
self.log_alpha = None
self.target_entropy = target_entropy
self.loss_function = SACLossFunction(target_entropy=target_entropy, discount=discount,
num_q_functions=num_q_functions)
memory_items = ["states", "actions", "rewards", "next_states", "terminals"]
self._merger = ContainerMerger(*memory_items)
q_names = ["q_{}".format(i) for i in range(len(self._q_functions))]
self._q_vars_merger = ContainerMerger(*q_names, scope="q_vars_merger")
self.add_components(policy, preprocessor, memory, self._merger, self.loss_function,
optimizer, vf_optimizer, self._q_vars_merger) # , self._q_vars_splitter)
self.add_components(*self._q_functions)
self.add_components(*self._target_q_functions)
if self.alpha_optimizer is not None:
self.add_components(self.alpha_optimizer)
self.steps_since_last_sync = None
self.q_sync_spec = q_sync_spec
self.env_action_space = None
self.episode_reward = None
def check_input_spaces(self, input_spaces, action_space=None):
for s in ["states", "actions", "env_actions", "preprocessed_states", "rewards", "terminals"]:
sanity_check_space(input_spaces[s], must_have_batch_rank=True)
self.env_action_space = input_spaces["env_actions"].flatten()
def create_variables(self, input_spaces, action_space=None):
self.steps_since_last_sync = self.get_variable("steps_since_last_sync", dtype="int", initializer=0)
self.log_alpha = self.get_variable("log_alpha", dtype="float", initializer=np.log(self.initial_alpha))
self.episode_reward = self.get_variable("episode_reward", shape=(), initializer=0.0)
@rlgraph_api
def get_policy_weights(self):
return self._policy.variables()
@rlgraph_api
def get_q_weights(self):
merged_weights = self._q_vars_merger.merge(*[q.variables() for q in self._q_functions])
return merged_weights
@rlgraph_api(must_be_complete=False)
def set_policy_weights(self, weights):
return self._policy.sync(weights)
""" TODO: need to define the input space
@rlgraph_api(must_be_complete=False)
def set_q_weights(self, q_weights):
split_weights = self._q_vars_splitter.call(q_weights)
assert len(split_weights) == len(self._q_functions)
update_ops = [q.sync(q_weights) for q_weights, q in zip(split_weights, self._q_functions)]
update_ops.extend([q.sync(q_weights) for q_weights, q in zip(split_weights, self._target_q_functions)])
return tuple(update_ops)
"""
@rlgraph_api
def preprocess_states(self, states):
return self._preprocessor.preprocess(states)
@rlgraph_api
def insert_records(self, preprocessed_states, env_actions, rewards, next_states, terminals):
records = self._merger.merge(preprocessed_states, env_actions, rewards, next_states, terminals)
return self._memory.insert_records(records)
@rlgraph_api
def update_from_memory(self, batch_size=64, time_percentage=None):
records, sample_indices, importance_weights = self._memory.get_records(batch_size)
result = self.update_from_external_batch(
records["states"], records["actions"], records["rewards"], records["terminals"],
records["next_states"], importance_weights, time_percentage
)
if isinstance(self._memory, PrioritizedReplay):
update_pr_step_op = self._memory.update_records(sample_indices, result["critic_loss_per_item"])
result["update_pr_step_op"] = update_pr_step_op
return result
@rlgraph_api
def update_from_external_batch(
self, preprocessed_states, env_actions, rewards, terminals, next_states, importance_weights,
time_percentage=None
):
actions = self._graph_fn_one_hot(env_actions)
actor_loss, actor_loss_per_item, critic_loss, critic_loss_per_item, alpha_loss, alpha_loss_per_item = \
self.get_losses(preprocessed_states, actions, rewards, terminals, next_states, importance_weights)
policy_vars = self._policy.variables()
q_vars = [q_func.variables() for q_func in self._q_functions]
merged_q_vars = self._q_vars_merger.merge(*q_vars)
critic_step_op = self.vf_optimizer.step(merged_q_vars, critic_loss, critic_loss_per_item, time_percentage)
actor_step_op = self._optimizer.step(policy_vars, actor_loss, actor_loss_per_item, time_percentage)
if self.target_entropy is not None:
alpha_step_op = self._graph_fn_update_alpha(alpha_loss, alpha_loss_per_item, time_percentage)
else:
alpha_step_op = self._graph_fn_no_op()
# TODO: optimizer for alpha
sync_op = self.sync_targets()
# Increase the global training step counter.
alpha_step_op = self._graph_fn_training_step(alpha_step_op)
return dict(
actor_step_op=actor_step_op,
critic_step_op=critic_step_op,
sync_op=sync_op,
alpha_step_op=alpha_step_op,
actor_loss=actor_loss,
actor_loss_per_item=actor_loss_per_item,
critic_loss=critic_loss,
critic_loss_per_item=critic_loss_per_item,
alpha_loss=alpha_loss,
alpha_loss_per_item=alpha_loss_per_item
)
@graph_fn(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)
def _graph_fn_one_hot(self, key, env_actions):
if isinstance(self.env_action_space[key], IntBox):
env_actions = tf.one_hot(env_actions, depth=self.env_action_space[key].num_categories, axis=-1)
return env_actions
@graph_fn(requires_variable_completeness=True)
def _graph_fn_update_alpha(self, alpha_loss, alpha_loss_per_item, time_percentage=None):
alpha_step_op = self.alpha_optimizer.step(
DataOpTuple([self.log_alpha]), alpha_loss, alpha_loss_per_item, time_percentage
)
return alpha_step_op
@rlgraph_api # `returns` are determined in ctor
def _graph_fn_get_q_values(self, preprocessed_states, actions, target=False):
backend = get_backend()
flat_actions = flatten_op(actions)
actions = []
for flat_key, action_component in self._policy.action_space.flatten().items():
actions.append(flat_actions[flat_key])
if backend == "tf":
actions = tf.concat(actions, axis=-1)
elif backend == "pytorch":
actions = torch.cat(actions, dim=-1)
q_funcs = self._q_functions if target is False else self._target_q_functions
# We do not concat states yet because we might pass states through a conv stack before merging it
# with actions.
return tuple(q.state_action_value(preprocessed_states, actions) for q in q_funcs)
@rlgraph_api
def get_losses(self, preprocessed_states, actions, rewards, terminals, next_states, importance_weights):
# TODO: internal states
samples_next = self._policy.get_action_and_log_likelihood(next_states, deterministic=False)
next_sampled_actions = samples_next["action"]
log_probs_next_sampled = samples_next["log_likelihood"]
q_values_next_sampled = self.get_q_values(
next_states, next_sampled_actions, target=True
)
q_values = self.get_q_values(preprocessed_states, actions)
samples = self._policy.get_action_and_log_likelihood(preprocessed_states, deterministic=False)
sampled_actions = samples["action"]
log_probs_sampled = samples["log_likelihood"]
q_values_sampled = self.get_q_values(preprocessed_states, sampled_actions)
alpha = self._graph_fn_compute_alpha()
return self.loss_function.loss(
alpha,
log_probs_next_sampled,
q_values_next_sampled,
q_values,
log_probs_sampled,
q_values_sampled,
rewards,
terminals
)
@rlgraph_api
def get_preprocessed_state_and_action(self, states, deterministic=False):
preprocessed_states = self._preprocessor.preprocess(states)
return self.action_from_preprocessed_state(preprocessed_states, deterministic)
@rlgraph_api
def action_from_preprocessed_state(self, preprocessed_states, deterministic=False):
out = self._policy.get_action(preprocessed_states, deterministic=deterministic)
return out["action"], preprocessed_states
@rlgraph_api(requires_variable_completeness=True)
def reset_targets(self):
ops = (target_q.sync(q.variables()) for q, target_q in zip(self._q_functions, self._target_q_functions))
return tuple(ops)
@rlgraph_api(requires_variable_completeness=True)
def sync_targets(self):
should_sync = self._graph_fn_get_should_sync()
return self._graph_fn_sync(should_sync)
@rlgraph_api
def get_memory_size(self):
return self._memory.get_size()
@graph_fn
def _graph_fn_compute_alpha(self):
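        # The temperature is stored as log_alpha (initialized to
        # np.log(initial_alpha) in create_variables) and recovered here via
        # exp(), which keeps alpha strictly positive while it is optimized.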
backend = get_backend()
if backend == "tf":
return tf.exp(self.log_alpha)
elif backend == "pytorch":
return torch.exp(self.log_alpha)
# TODO: Move this into generic AgentRootComponent.
@graph_fn
def _graph_fn_training_step(self, other_step_op=None):
if self.agent is not None:
add_op = tf.assign_add(self.agent.graph_executor.global_training_timestep, 1)
            op_list = [add_op] + ([other_step_op] if other_step_op is not None else [])
with tf.control_dependencies(op_list):
return tf.no_op() if other_step_op is None else other_step_op
else:
return tf.no_op() if other_step_op is None else other_step_op
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_get_should_sync(self):
if get_backend() == "tf":
inc_op = tf.assign_add(self.steps_since_last_sync, 1)
should_sync = inc_op >= self.q_sync_spec.sync_interval
def reset_op():
op = tf.assign(self.steps_since_last_sync, 0)
with tf.control_dependencies([op]):
return tf.no_op()
sync_op = tf.cond(
pred=inc_op >= self.q_sync_spec.sync_interval,
true_fn=reset_op,
false_fn=tf.no_op
)
with tf.control_dependencies([sync_op]):
return tf.identity(should_sync)
else:
raise NotImplementedError("TODO")
@graph_fn(returns=1, requires_variable_completeness=True)
def _graph_fn_sync(self, should_sync):
assign_ops = []
tau = self.q_sync_spec.sync_tau
if tau != 1.0:
all_source_vars = [source.get_variables(collections=None, custom_scope_separator="-") for source in self._q_functions]
all_dest_vars = [destination.get_variables(collections=None, custom_scope_separator="-") for destination in self._target_q_functions]
for source_vars, dest_vars in zip(all_source_vars, all_dest_vars):
for (source_key, source_var), (dest_key, dest_var) in zip(sorted(source_vars.items()), sorted(dest_vars.items())):
assign_ops.append(tf.assign(dest_var, tau * source_var + (1.0 - tau) * dest_var))
else:
all_source_vars = [source.variables() for source in self._q_functions]
for source_vars, destination in zip(all_source_vars, self._target_q_functions):
assign_ops.append(destination.sync(source_vars))
assert len(assign_ops) > 0
grouped_op = tf.group(assign_ops)
def assign_op():
# Make sure we are returning no_op as opposed to reference
with tf.control_dependencies([grouped_op]):
return tf.no_op()
cond_assign_op = tf.cond(should_sync, true_fn=assign_op, false_fn=tf.no_op)
with tf.control_dependencies([cond_assign_op]):
return tf.no_op()
@graph_fn
def _graph_fn_no_op(self):
return tf.no_op()
@rlgraph_api
def get_global_timestep(self):
return self.read_variable(self.agent.graph_executor.global_timestep)
@rlgraph_api
def _graph_fn_update_global_timestep(self, increment):
if get_backend() == "tf":
add_op = tf.assign_add(self.agent.graph_executor.global_timestep, increment)
return add_op
        elif get_backend() == "pytorch":
self.agent.graph_executor.global_timestep += increment
return self.agent.graph_executor.global_timestep
@rlgraph_api
def _graph_fn_get_episode_reward(self):
return self.episode_reward
@rlgraph_api
def _graph_fn_set_episode_reward(self, episode_reward):
return tf.assign(self.episode_reward, episode_reward)
class SACAgent(Agent):
def __init__(
self,
state_space,
action_space,
discount=0.98,
preprocessing_spec=None,
network_spec=None,
internal_states_space=None,
policy_spec=None,
value_function_spec=None,
execution_spec=None,
optimizer_spec=None,
value_function_optimizer_spec=None,
observe_spec=None,
update_spec=None,
summary_spec=None,
saver_spec=None,
auto_build=True,
name="sac-agent",
double_q=True,
initial_alpha=1.0,
gumbel_softmax_temperature=1.0,
target_entropy=None,
memory_spec=None,
value_function_sync_spec=None
):
"""
This is an implementation of the Soft-Actor Critic algorithm.
Paper: http://arxiv.org/abs/1801.01290
Args:
state_space (Union[dict,Space]): Spec dict for the state Space or a direct Space object.
action_space (Union[dict,Space]): Spec dict for the action Space or a direct Space object.
preprocessing_spec (Optional[list,PreprocessorStack]): The spec list for the different necessary states
preprocessing steps or a PreprocessorStack object itself.
discount (float): The discount factor (gamma).
network_spec (Optional[list,NeuralNetwork]): Spec list for a NeuralNetwork Component or the NeuralNetwork
object itself.
internal_states_space (Optional[Union[dict,Space]]): Spec dict for the internal-states Space or a direct
Space object for the Space(s) of the internal (RNN) states.
policy_spec (Optional[dict]): An optional dict for further kwargs passing into the Policy c'tor.
value_function_spec (list, dict, ValueFunction): Neural network specification for baseline or instance
of ValueFunction.
execution_spec (Optional[dict,Execution]): The spec-dict specifying execution settings.
optimizer_spec (Optional[dict,Optimizer]): The spec-dict to create the Optimizer for this Agent.
value_function_optimizer_spec (dict): Optimizer config for value function optimizer. If None, the optimizer
spec for the policy is used (same learning rate and optimizer type).
observe_spec (Optional[dict]): Spec-dict to specify `Agent.observe()` settings.
update_spec (Optional[dict]): Spec-dict to specify `Agent.update()` settings.
summary_spec (Optional[dict]): Spec-dict to specify summary settings.
saver_spec (Optional[dict]): Spec-dict to specify saver settings.
auto_build (Optional[bool]): If True (default), immediately builds the graph using the agent's
graph builder. If false, users must separately call agent.build(). Useful for debugging or analyzing
components before building.
name (str): Some name for this Agent object.
double_q (bool): Whether to train two q networks independently.
initial_alpha (float): "The temperature parameter α determines the
relative importance of the entropy term against the reward".
gumbel_softmax_temperature (float): Temperature parameter for the Gumbel-Softmax distribution used
for discrete actions.
memory_spec (Optional[dict,Memory]): The spec for the Memory to use for the DQN algorithm.
update_spec (dict): Here we can have sync_interval or sync_tau (for the value network update).
"""
# If VF spec is a network spec, wrap with SAC vf type. The VF must concatenate actions and states,
# which can require splitting the network in the case of e.g. conv-inputs.
if isinstance(value_function_spec, list):
value_function_spec = dict(type="sac_value_function", network_spec=value_function_spec)
self.logger.info("Using default SAC value function.")
elif isinstance(value_function_spec, ValueFunction):
self.logger.info("Using value function object {}".format(ValueFunction))
if policy_spec is None:
# Continuous action space: Use squashed normal.
# Discrete: Gumbel-softmax.
policy_spec = dict(deterministic=False,
distributions_spec=dict(
bounded_distribution_type="squashed",
discrete_distribution_type="gumbel_softmax",
gumbel_softmax_temperature=gumbel_softmax_temperature
))
super(SACAgent, self).__init__(
state_space=state_space,
action_space=action_space,
discount=discount,
preprocessing_spec=preprocessing_spec,
network_spec=network_spec,
internal_states_space=internal_states_space,
policy_spec=policy_spec,
value_function_spec=value_function_spec,
execution_spec=execution_spec,
optimizer_spec=optimizer_spec,
value_function_optimizer_spec=value_function_optimizer_spec,
observe_spec=observe_spec,
update_spec=update_spec,
summary_spec=summary_spec,
saver_spec=saver_spec,
auto_build=auto_build,
name=name
)
self.double_q = double_q
self.target_entropy = target_entropy
self.initial_alpha = initial_alpha
        # Check that the sync interval is a multiple of the update_interval.
if "sync_interval" in self.update_spec:
if self.update_spec["sync_interval"] / self.update_spec["update_interval"] != \
self.update_spec["sync_interval"] // self.update_spec["update_interval"]:
raise RLGraphError(
"ERROR: sync_interval ({}) must be multiple of update_interval "
"({})!".format(self.update_spec["sync_interval"], self.update_spec["update_interval"])
)
elif "sync_tau" in self.update_spec:
if self.update_spec["sync_tau"] <= 0 or self.update_spec["sync_tau"] > 1.0:
raise RLGraphError(
"sync_tau ({}) must be in interval (0.0, 1.0]!".format(self.update_spec["sync_tau"])
)
else:
self.update_spec["sync_tau"] = 0.005 # The value mentioned in the paper
# Extend input Space definitions to this Agent's specific API-methods.
preprocessed_state_space = self.preprocessed_state_space.with_batch_rank()
reward_space = FloatBox(add_batch_rank=True)
terminal_space = BoolBox(add_batch_rank=True)
#self.iterations = self.update_spec["num_iterations"]
self.batch_size = self.update_spec["batch_size"]
float_action_space = self.action_space.with_batch_rank().map(
mapping=lambda flat_key, space: space.as_one_hot_float_space() if isinstance(space, IntBox) else space
)
self.input_spaces.update(dict(
env_actions=self.action_space.with_batch_rank(),
actions=float_action_space,
preprocessed_states=preprocessed_state_space,
rewards=reward_space,
terminals=terminal_space,
next_states=preprocessed_state_space,
states=self.state_space.with_batch_rank(add_batch_rank=True),
batch_size=int,
importance_weights=FloatBox(add_batch_rank=True),
deterministic=bool,
weights="variables:{}".format(self.policy.scope)
))
if value_function_sync_spec is None:
value_function_sync_spec = SyncSpecification(
sync_interval=self.update_spec["sync_interval"] // self.update_spec["update_interval"],
sync_tau=self.update_spec["sync_tau"] if "sync_tau" in self.update_spec else 5e-3
)
self.memory = Memory.from_spec(memory_spec)
self.alpha_optimizer = self.optimizer.copy(scope="alpha-" + self.optimizer.scope) if self.target_entropy is not None else None
self.root_component = SACAgentComponent(
agent=self,
policy=self.policy,
q_function=self.value_function,
preprocessor=self.preprocessor,
memory=self.memory,
discount=self.discount,
initial_alpha=self.initial_alpha,
target_entropy=target_entropy,
optimizer=self.optimizer,
vf_optimizer=self.value_function_optimizer,
alpha_optimizer=self.alpha_optimizer,
q_sync_spec=value_function_sync_spec,
num_q_functions=2 if self.double_q is True else 1
)
extra_optimizers = [self.value_function_optimizer]
if self.alpha_optimizer is not None:
extra_optimizers.append(self.alpha_optimizer)
self.build_options = dict(optimizers=extra_optimizers)
if self.auto_build:
self._build_graph(
[self.root_component], self.input_spaces, optimizer=self.optimizer,
batch_size=self.update_spec["batch_size"],
build_options=self.build_options
)
self.graph_built = True
def set_weights(self, policy_weights, value_function_weights=None):
        # TODO: Overrides parent, but should this be policy or value function?
return self.graph_executor.execute((self.root_component.set_policy_weights, policy_weights))
def get_weights(self):
return dict(policy_weights=self.graph_executor.execute(self.root_component.get_policy_weights))
def get_action(self, states, internals=None, use_exploration=True, apply_preprocessing=True, extra_returns=None,
time_percentage=None):
# TODO: common pattern - move to Agent
"""
Args:
extra_returns (Optional[Set[str],str]): Optional string or set of strings for additional return
values (besides the actions). Possible values are:
- 'preprocessed_states': The preprocessed states after passing the given states through the
preprocessor stack.
- 'internal_states': The internal states returned by the RNNs in the NN pipeline.
- 'used_exploration': Whether epsilon- or noise-based exploration was used or not.
Returns:
tuple or single value depending on `extra_returns`:
- action
- the preprocessed states
"""
extra_returns = {extra_returns} if isinstance(extra_returns, str) else (extra_returns or set())
# States come in without preprocessing -> use state space.
if apply_preprocessing:
call_method = self.root_component.get_preprocessed_state_and_action
batched_states, remove_batch_rank = self.state_space.force_batch(states)
else:
call_method = self.root_component.action_from_preprocessed_state
batched_states = states
remove_batch_rank = False
#remove_batch_rank = batched_states.ndim == np.asarray(states).ndim + 1
# Increase timesteps by the batch size (number of states in batch).
batch_size = len(batched_states)
self.timesteps += batch_size
# Control, which return value to "pull" (depending on `additional_returns`).
return_ops = [0, 1] if "preprocessed_states" in extra_returns else [0]
ret = force_list(self.graph_executor.execute((
call_method,
[batched_states, not use_exploration], # deterministic = not use_exploration
# 0=preprocessed_states, 1=action
return_ops
)))
        # Convert Gumbel (relaxed one-hot) sample back into int type for all discrete composite actions.
if isinstance(self.action_space, ContainerSpace):
ret[0] = ret[0].map(
mapping=lambda key, action: np.argmax(action, axis=-1).astype(action.dtype)
if isinstance(self.flat_action_space[key], IntBox) else action
)
elif isinstance(self.action_space, IntBox):
ret[0] = np.argmax(ret[0], axis=-1).astype(self.action_space.dtype)
if remove_batch_rank:
ret[0] = strip_list(ret[0])
if "preprocessed_states" in extra_returns:
return ret[0], ret[1]
else:
return ret[0]
def _observe_graph(self, preprocessed_states, actions, internals, rewards, next_states, terminals):
self.graph_executor.execute((self.root_component.insert_records, [preprocessed_states, actions, rewards, next_states, terminals]))
def update(self, batch=None, time_percentage=None, **kwargs):
if batch is None:
size = self.graph_executor.execute(self.root_component.get_memory_size)
# TODO: is this necessary?
if size < self.batch_size:
return 0.0, 0.0, 0.0
ret = self.graph_executor.execute((self.root_component.update_from_memory, [self.batch_size, time_percentage]))
else:
ret = self.graph_executor.execute((self.root_component.update_from_external_batch, [
batch["states"], batch["actions"], batch["rewards"], batch["terminals"], batch["next_states"],
batch["importance_weights"], time_percentage
]))
return ret["actor_loss"], ret["actor_loss_per_item"], ret["critic_loss"], ret["alpha_loss"]
def reset(self):
"""
Resets our preprocessor, but only if it contains stateful PreprocessLayer Components (meaning
the PreprocessorStack has at least one variable defined).
"""
if self.preprocessing_required and len(self.preprocessor.variables) > 0:
self.graph_executor.execute("reset_preprocessor")
self.graph_executor.execute(self.root_component.reset_targets)
def __repr__(self):
return "SACAgent(double-q={}, initial-alpha={}, target-entropy={})".format(
self.double_q, self.initial_alpha, self.target_entropy
)
|
[
"tensorflow.cond",
"numpy.argmax",
"tensorflow.identity",
"rlgraph.get_backend",
"torch.cat",
"tensorflow.assign",
"tensorflow.one_hot",
"rlgraph.components.ContainerMerger",
"tensorflow.concat",
"tensorflow.no_op",
"torch.exp",
"tensorflow.exp",
"rlgraph.spaces.BoolBox",
"rlgraph.utils.decorators.graph_fn",
"tensorflow.control_dependencies",
"rlgraph.spaces.space_utils.sanity_check_space",
"rlgraph.utils.decorators.rlgraph_api",
"tensorflow.assign_add",
"tensorflow.group",
"rlgraph.components.Memory.from_spec",
"rlgraph.spaces.FloatBox",
"rlgraph.utils.ops.flatten_op",
"rlgraph.components.Synchronizable",
"numpy.log",
"rlgraph.components.loss_functions.sac_loss_function.SACLossFunction",
"rlgraph.utils.ops.DataOpTuple",
"rlgraph.utils.util.strip_list"
] |
[((1376, 1389), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (1387, 1389), False, 'from rlgraph import get_backend\n'), ((5665, 5700), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'must_be_complete': '(False)'}), '(must_be_complete=False)\n', (5676, 5700), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((9067, 9143), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'flatten_ops': '(True)', 'split_ops': '(True)', 'add_auto_key_as_first_param': '(True)'}), '(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)\n', (9075, 9143), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((9395, 9440), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (9403, 9440), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((12222, 12270), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (12233, 12270), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((12445, 12493), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'requires_variable_completeness': '(True)'}), '(requires_variable_completeness=True)\n', (12456, 12493), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((13512, 13568), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'returns': '(1)', 'requires_variable_completeness': '(True)'}), '(returns=1, requires_variable_completeness=True)\n', (13520, 13568), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((14302, 14358), 'rlgraph.utils.decorators.graph_fn', 'graph_fn', ([], {'returns': '(1)', 'requires_variable_completeness': '(True)'}), '(returns=1, requires_variable_completeness=True)\n', (14310, 14358), False, 'from rlgraph.utils.decorators import rlgraph_api, graph_fn\n'), ((1432, 1445), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (1443, 1445), False, 'from rlgraph import get_backend\n'), ((3710, 3812), 'rlgraph.components.loss_functions.sac_loss_function.SACLossFunction', 'SACLossFunction', ([], {'target_entropy': 'target_entropy', 'discount': 'discount', 'num_q_functions': 'num_q_functions'}), '(target_entropy=target_entropy, discount=discount,\n num_q_functions=num_q_functions)\n', (3725, 3812), False, 'from rlgraph.components.loss_functions.sac_loss_function import SACLossFunction\n'), ((3962, 3992), 'rlgraph.components.ContainerMerger', 'ContainerMerger', (['*memory_items'], {}), '(*memory_items)\n', (3977, 3992), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((4100, 4148), 'rlgraph.components.ContainerMerger', 'ContainerMerger', (['*q_names'], {'scope': '"""q_vars_merger"""'}), "(*q_names, scope='q_vars_merger')\n", (4115, 4148), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((9870, 9883), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (9881, 9883), False, 'from rlgraph import get_backend\n'), ((9908, 9927), 'rlgraph.utils.ops.flatten_op', 'flatten_op', (['actions'], {}), '(actions)\n', (9918, 9927), False, 'from rlgraph.utils.ops import flatten_op, DataOpTuple\n'), ((12785, 12798), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (12796, 12798), False, 'from rlgraph import get_backend\n'), ((15388, 15408), 
'tensorflow.group', 'tf.group', (['assign_ops'], {}), '(assign_ops)\n', (15396, 15408), True, 'import tensorflow as tf\n'), ((15622, 15680), 'tensorflow.cond', 'tf.cond', (['should_sync'], {'true_fn': 'assign_op', 'false_fn': 'tf.no_op'}), '(should_sync, true_fn=assign_op, false_fn=tf.no_op)\n', (15629, 15680), True, 'import tensorflow as tf\n'), ((15828, 15838), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15836, 15838), True, 'import tensorflow as tf\n'), ((16552, 16598), 'tensorflow.assign', 'tf.assign', (['self.episode_reward', 'episode_reward'], {}), '(self.episode_reward, episode_reward)\n', (16561, 16598), True, 'import tensorflow as tf\n'), ((23282, 23311), 'rlgraph.spaces.FloatBox', 'FloatBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (23290, 23311), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((23337, 23365), 'rlgraph.spaces.BoolBox', 'BoolBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (23344, 23365), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((24614, 24643), 'rlgraph.components.Memory.from_spec', 'Memory.from_spec', (['memory_spec'], {}), '(memory_spec)\n', (24630, 24643), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((4882, 4944), 'rlgraph.spaces.space_utils.sanity_check_space', 'sanity_check_space', (['input_spaces[s]'], {'must_have_batch_rank': '(True)'}), '(input_spaces[s], must_have_batch_rank=True)\n', (4900, 4944), False, 'from rlgraph.spaces.space_utils import sanity_check_space\n'), ((9280, 9365), 'tensorflow.one_hot', 'tf.one_hot', (['env_actions'], {'depth': 'self.env_action_space[key].num_categories', 'axis': '(-1)'}), '(env_actions, depth=self.env_action_space[key].num_categories,\n axis=-1)\n', (9290, 9365), True, 'import tensorflow as tf\n'), ((9597, 9626), 'rlgraph.utils.ops.DataOpTuple', 'DataOpTuple', (['[self.log_alpha]'], {}), '([self.log_alpha])\n', (9608, 9626), False, 'from rlgraph.utils.ops import flatten_op, DataOpTuple\n'), ((10138, 10165), 'tensorflow.concat', 'tf.concat', (['actions'], {'axis': '(-1)'}), '(actions, axis=-1)\n', (10147, 10165), True, 'import tensorflow as tf\n'), ((12846, 12868), 'tensorflow.exp', 'tf.exp', (['self.log_alpha'], {}), '(self.log_alpha)\n', (12852, 12868), True, 'import tensorflow as tf\n'), ((13134, 13202), 'tensorflow.assign_add', 'tf.assign_add', (['self.agent.graph_executor.global_training_timestep', '(1)'], {}), '(self.agent.graph_executor.global_training_timestep, 1)\n', (13147, 13202), True, 'import tensorflow as tf\n'), ((13621, 13634), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (13632, 13634), False, 'from rlgraph import get_backend\n'), ((13665, 13709), 'tensorflow.assign_add', 'tf.assign_add', (['self.steps_since_last_sync', '(1)'], {}), '(self.steps_since_last_sync, 1)\n', (13678, 13709), True, 'import tensorflow as tf\n'), ((13981, 14076), 'tensorflow.cond', 'tf.cond', ([], {'pred': '(inc_op >= self.q_sync_spec.sync_interval)', 'true_fn': 'reset_op', 'false_fn': 'tf.no_op'}), '(pred=inc_op >= self.q_sync_spec.sync_interval, true_fn=reset_op,\n false_fn=tf.no_op)\n', (13988, 14076), True, 'import tensorflow as tf\n'), ((15694, 15735), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[cond_assign_op]'], {}), '([cond_assign_op])\n', (15717, 15735), True, 'import tensorflow as tf\n'), ((15756, 15766), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15764, 15766), True, 
'import tensorflow as tf\n'), ((16057, 16070), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (16068, 16070), False, 'from rlgraph import get_backend\n'), ((16101, 16168), 'tensorflow.assign_add', 'tf.assign_add', (['self.agent.graph_executor.global_timestep', 'increment'], {}), '(self.agent.graph_executor.global_timestep, increment)\n', (16114, 16168), True, 'import tensorflow as tf\n'), ((28967, 28985), 'rlgraph.utils.util.strip_list', 'strip_list', (['ret[0]'], {}), '(ret[0])\n', (28977, 28985), False, 'from rlgraph.utils.util import strip_list, force_list\n'), ((5273, 5299), 'numpy.log', 'np.log', (['self.initial_alpha'], {}), '(self.initial_alpha)\n', (5279, 5299), True, 'import numpy as np\n'), ((10223, 10249), 'torch.cat', 'torch.cat', (['actions'], {'dim': '(-1)'}), '(actions, dim=-1)\n', (10232, 10249), False, 'import torch\n'), ((12923, 12948), 'torch.exp', 'torch.exp', (['self.log_alpha'], {}), '(self.log_alpha)\n', (12932, 12948), False, 'import torch\n'), ((13306, 13338), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['op_list'], {}), '(op_list)\n', (13329, 13338), True, 'import tensorflow as tf\n'), ((13451, 13461), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13459, 13461), True, 'import tensorflow as tf\n'), ((13827, 13867), 'tensorflow.assign', 'tf.assign', (['self.steps_since_last_sync', '(0)'], {}), '(self.steps_since_last_sync, 0)\n', (13836, 13867), True, 'import tensorflow as tf\n'), ((14152, 14186), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[sync_op]'], {}), '([sync_op])\n', (14175, 14186), True, 'import tensorflow as tf\n'), ((14211, 14235), 'tensorflow.identity', 'tf.identity', (['should_sync'], {}), '(should_sync)\n', (14222, 14235), True, 'import tensorflow as tf\n'), ((15523, 15560), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[grouped_op]'], {}), '([grouped_op])\n', (15546, 15560), True, 'import tensorflow as tf\n'), ((15585, 15595), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15593, 15595), True, 'import tensorflow as tf\n'), ((3041, 3057), 'rlgraph.components.Synchronizable', 'Synchronizable', ([], {}), '()\n', (3055, 3057), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((3401, 3417), 'rlgraph.components.Synchronizable', 'Synchronizable', ([], {}), '()\n', (3415, 3417), False, 'from rlgraph.components import Component, Synchronizable, Memory, ValueFunction, ContainerMerger, PrioritizedReplay\n'), ((13363, 13373), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13371, 13373), True, 'import tensorflow as tf\n'), ((13889, 13918), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[op]'], {}), '([op])\n', (13912, 13918), True, 'import tensorflow as tf\n'), ((13947, 13957), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (13955, 13957), True, 'import tensorflow as tf\n'), ((24136, 24165), 'rlgraph.spaces.FloatBox', 'FloatBox', ([], {'add_batch_rank': '(True)'}), '(add_batch_rank=True)\n', (24144, 24165), False, 'from rlgraph.spaces import FloatBox, BoolBox, IntBox, ContainerSpace\n'), ((15014, 15076), 'tensorflow.assign', 'tf.assign', (['dest_var', '(tau * source_var + (1.0 - tau) * dest_var)'], {}), '(dest_var, tau * source_var + (1.0 - tau) * dest_var)\n', (15023, 15076), True, 'import tensorflow as tf\n'), ((28856, 28882), 'numpy.argmax', 'np.argmax', (['ret[0]'], {'axis': '(-1)'}), '(ret[0], axis=-1)\n', (28865, 28882), True, 'import numpy as np\n'), ((28642, 28668), 
'numpy.argmax', 'np.argmax', (['action'], {'axis': '(-1)'}), '(action, axis=-1)\n', (28651, 28668), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from constants import START_TAG, STOP_TAG, DEVICE
from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor
from data import tag_vocab, max_word_len, char_vocab, word_vocab
class BiLSTM_CRF(nn.Module):
def __init__(
self,
vocab_size,
tag_to_ix,
embedding_dim,
hidden_dim,
char_cnn=False,
char_cnn_stride=2,
char_cnn_kernel=2,
char_embedding_dim=4,
loss="crf_loss",
cost=hamming_loss(),
):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.char_cnn = char_cnn
self.max_word_len = max_word_len
self.loss_type = loss
self.cost = cost
self.word_embeds = nn.Embedding(
vocab_size, embedding_dim, padding_idx=0
)
self.char_cnn_layer = CharCNN(
max_word_len=max_word_len,
embedding_dim=char_embedding_dim,
kernel=char_cnn_kernel,
stride=char_cnn_stride,
)
self.lstm_input_dim = embedding_dim
if char_cnn:
self.lstm_input_dim = (
self.embedding_dim + self.char_cnn_layer.embedding_dim
)
self.lstm = nn.LSTM(
self.lstm_input_dim,
hidden_dim // 2,
num_layers=1,
bidirectional=True,
batch_first=True,
)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size)
)
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
def init_hidden(self, batch):
# cell state and hidden state initialization
# D*num_layers x batch x hidden_dim
# D = 2 if bidirectional=True otherwise 1
return (
torch.randn(2, batch, self.hidden_dim // 2).to(DEVICE),
torch.randn(2, batch, self.hidden_dim // 2).to(DEVICE),
)
def _forward_alg(self, feats, golds=None, cost=None):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full((1, self.tagset_size), -10000.0).to(
DEVICE
) # 1 x |tag_set|
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.0
# Wrap in a variable so that we will get automatic backprop
forward_var = init_alphas
# Iterate through the sentence: the emission scores
for i, feat in enumerate(feats):
alphas_t = [] # The forward tensors at this timestep
for next_tag in range(self.tagset_size):
# broadcast the emission score: it is the same regardless of
# the previous tag
emit_score = (
feat[next_tag].view(1, -1).expand(1, self.tagset_size)
)
# the ith entry of trans_score is the score of transitioning to
# next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the
# edge (i -> next_tag) before we do log-sum-exp
next_tag_var = None
if cost is not None:
# generate log sum exp(score + cost)
next_tag_var = (
forward_var
+ trans_score
+ emit_score
+ cost(golds[i], next_tag)
)
else:
next_tag_var = forward_var + trans_score + emit_score
                assert next_tag_var is not None
# The forward variable for this tag is log-sum-exp of all the
# scores.
alphas_t.append(log_sum_exp(next_tag_var).view(1))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentences, seq_lens):
# for getting sentence features from LSTM in tag space
batch_size = len(sentences)
self.hidden = self.init_hidden(batch=batch_size)
# embeds shape: batch x seq_len x emb_dim
embeds = self.word_embeds(sentences)
# character-level embedding
if self.char_cnn:
# generate char-level embedding for each token, go over sequence
char_embeddeds = []
for i in range(sentences.size()[1]):
token_vector = sentences[:, i]
char_tensor = convert_to_char_tensor(
token_vector, word_vocab, char_vocab, self.max_word_len
).to(DEVICE)
char_embedded = self.char_cnn_layer(char_tensor)
char_embedded = torch.transpose(char_embedded, 1, 2)
char_embeddeds.append(char_embedded)
# concatenate all chars together in sequence level
char_embeddeds = torch.cat(char_embeddeds, 1)
# concatenate word and char-level embedding together in embedding dimension
embeds = torch.cat([char_embeddeds, embeds], 2)
packed_embeds = pack_padded_sequence(
embeds, seq_lens, batch_first=True
)
# LSTM output: batch x seq_len x hidden_dim
lstm_out, self.hidden = self.lstm(packed_embeds, self.hidden)
lstm_out, _ = pad_packed_sequence(lstm_out, batch_first=True)
# generate emission score with linear layer
lstm_feats = self.hidden2tag(lstm_out)
# len(sentence) x len(tag_set)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = torch.zeros(1).to(DEVICE)
tags = torch.cat(
[
torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long).to(
DEVICE
),
tags,
]
)
for i, feat in enumerate(feats):
tag_vocab.idx2token[tags[i + 1].item()]
score = (
score
+ self.transitions[tags[i + 1], tags[i]]
+ feat[tags[i + 1]]
)
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats, golds=None, cost=None):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.full((1, self.tagset_size), -10000.0).to(DEVICE)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = init_vvars
for i, feat in enumerate(feats):
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the
# previous step, plus the score of transitioning
# from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = None
if cost is not None:
# get the cost score
cost_score = torch.full(
(1, self.tagset_size), cost(golds[i], next_tag)
).to(DEVICE)
# add to the score
next_tag_var = (
forward_var + self.transitions[next_tag] + cost_score
)
else:
next_tag_var = forward_var + self.transitions[next_tag]
                assert next_tag_var is not None
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id].view(1))
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
        # Pop off the start tag (we don't want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags, seq_lens):
# loss function: negative log likelihood
# emission score: seq_len x batch_size x len(tag_set)
feats_tensor = self._get_lstm_features(sentence, seq_lens)
        loss = torch.tensor(0.0)  # accumulate the batch loss as a float tensor
        # go over the batch dimension
for i in range(feats_tensor.size()[0]):
feats = feats_tensor[i, : seq_lens[i], :]
tag_seq = tags[i, : seq_lens[i]]
current_loss = None
if self.loss_type in "softmax_margin_loss":
# soft margin loss = - gold score + normalizer(log_sum_exp (score + cost))
forward_score = self._forward_alg(feats, tag_seq, self.cost)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = forward_score - gold_score
elif self.loss_type == "svm_loss":
# svm loss = - gold score + max(score + cost)
viterbi_score, _ = self._viterbi_decode(
feats, tag_seq, self.cost
)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = viterbi_score - gold_score
elif self.loss_type == "ramp_loss":
# ramp loss = - max(score) + max(score + cost)
viterbi_score, _ = self._viterbi_decode(feats)
viterbi_score_with_cost, _ = self._viterbi_decode(
feats, tag_seq, self.cost
)
current_loss = viterbi_score_with_cost - viterbi_score
elif self.loss_type == "soft_ramp_loss":
# soft ramp loss = - log_sum_exp (score) + log_sum_exp (score + cost)
forward_score = self._forward_alg(feats)
forward_score_with_cost = self._forward_alg(
feats, tag_seq, self.cost
)
current_loss = forward_score_with_cost - forward_score
else:
# crf loss = - gold score + normalizer(log_sum_exp (score))
forward_score = self._forward_alg(feats, tag_seq)
gold_score = self._score_sentence(feats, tag_seq)
current_loss = forward_score - gold_score
            assert current_loss is not None
loss = loss + current_loss
return loss
def forward(
self, sentence, seq_lens
    ):  # don't confuse this with _forward_alg above.
scores, preds = [], []
# Get the "emission scores" from the BiLSTM
lstm_feats_tensor = self._get_lstm_features(sentence, seq_lens)
for i in range(lstm_feats_tensor.size()[0]):
lstm_feats = lstm_feats_tensor[i, : seq_lens[i], :]
# Find the best path, given the features.
score, tag_seq = self._viterbi_decode(lstm_feats)
scores += [score]
preds += [tag_seq]
return scores, preds
class CharCNN(nn.Module):
def __init__(
self,
stride=2,
kernel=2,
embedding_dim=4,
max_word_len=20,
):
super(CharCNN, self).__init__()
# Parameters regarding text preprocessing
self.embedding_dim = embedding_dim
self.max_word_len = max_word_len
self.vocab_size = len(char_vocab.token2idx)
# Dropout definition
self.dropout = nn.Dropout(0.25)
# CNN parameters definition
self.kernel = kernel
self.stride = stride
self.padding = self.kernel - 1
# Embedding layer definition:
self.embedding = nn.Embedding(
self.vocab_size,
self.embedding_dim,
padding_idx=0,
)
# Convolution layer definition
self.conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=self.kernel,
stride=self.stride,
padding=self.padding,
)
self.output_dim = (
self.max_word_len + 2 * self.padding - (self.kernel - 1) - 1
) // self.stride + 1
# Max pooling layers definition
self.pool = nn.MaxPool1d(self.output_dim, 1)
def forward(self, X):
# X: input token
embedded = self.embedding(X)
embedded = torch.transpose(embedded, 1, 2)
embedded = self.dropout(embedded)
conv_out = self.conv(embedded)
pool_out = self.pool(conv_out)
return pool_out
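# --- Illustrative usage sketch: a minimal decode pass, assuming the data/helper/
# constants modules imported above resolve. The tag map and shapes below are
# hypothetical stand-ins; training would call model.neg_log_likelihood(...) instead.
if __name__ == "__main__":
    demo_tags = {"O": 0, "B": 1, "I": 2, START_TAG: 3, STOP_TAG: 4}
    model = BiLSTM_CRF(
        vocab_size=100, tag_to_ix=demo_tags, embedding_dim=8, hidden_dim=8
    ).to(DEVICE)
    words = torch.randint(1, 100, (2, 5)).to(DEVICE)  # batch of 2 sentences, length 5
    lengths = torch.tensor([5, 5])  # kept on CPU for pack_padded_sequence
    scores, paths = model(words, lengths)  # Viterbi scores and decoded tag ids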
|
[
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Dropout",
"helper.log_sum_exp",
"helper.convert_to_char_tensor",
"torch.nn.Embedding",
"torch.nn.Conv1d",
"torch.nn.MaxPool1d",
"torch.randn",
"torch.cat",
"torch.full",
"helper.hamming_loss",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.Linear",
"torch.zeros",
"torch.tensor",
"torch.nn.LSTM",
"helper.argmax",
"torch.transpose"
] |
[((592, 606), 'helper.hamming_loss', 'hamming_loss', ([], {}), '()\n', (604, 606), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n'), ((1009, 1063), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {'padding_idx': '(0)'}), '(vocab_size, embedding_dim, padding_idx=0)\n', (1021, 1063), True, 'import torch.nn as nn\n'), ((1500, 1602), 'torch.nn.LSTM', 'nn.LSTM', (['self.lstm_input_dim', '(hidden_dim // 2)'], {'num_layers': '(1)', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(self.lstm_input_dim, hidden_dim // 2, num_layers=1, bidirectional=\n True, batch_first=True)\n', (1507, 1602), True, 'import torch.nn as nn\n'), ((1750, 1789), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'self.tagset_size'], {}), '(hidden_dim, self.tagset_size)\n', (1759, 1789), True, 'import torch.nn as nn\n'), ((4664, 4689), 'helper.log_sum_exp', 'log_sum_exp', (['terminal_var'], {}), '(terminal_var)\n', (4675, 4689), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n'), ((5928, 5984), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['embeds', 'seq_lens'], {'batch_first': '(True)'}), '(embeds, seq_lens, batch_first=True)\n', (5948, 5984), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((6152, 6199), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['lstm_out'], {'batch_first': '(True)'}), '(lstm_out, batch_first=True)\n', (6171, 6199), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((9137, 9157), 'helper.argmax', 'argmax', (['terminal_var'], {}), '(terminal_var)\n', (9143, 9157), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n'), ((9932, 9965), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.long'}), '(0, dtype=torch.long)\n', (9944, 9965), False, 'import torch\n'), ((13049, 13065), 'torch.nn.Dropout', 'nn.Dropout', (['(0.25)'], {}), '(0.25)\n', (13059, 13065), True, 'import torch.nn as nn\n'), ((13264, 13328), 'torch.nn.Embedding', 'nn.Embedding', (['self.vocab_size', 'self.embedding_dim'], {'padding_idx': '(0)'}), '(self.vocab_size, self.embedding_dim, padding_idx=0)\n', (13276, 13328), True, 'import torch.nn as nn\n'), ((13435, 13555), 'torch.nn.Conv1d', 'nn.Conv1d', (['self.embedding_dim', 'self.embedding_dim'], {'kernel_size': 'self.kernel', 'stride': 'self.stride', 'padding': 'self.padding'}), '(self.embedding_dim, self.embedding_dim, kernel_size=self.kernel,\n stride=self.stride, padding=self.padding)\n', (13444, 13555), True, 'import torch.nn as nn\n'), ((13814, 13846), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['self.output_dim', '(1)'], {}), '(self.output_dim, 1)\n', (13826, 13846), True, 'import torch.nn as nn\n'), ((13955, 13986), 'torch.transpose', 'torch.transpose', (['embedded', '(1)', '(2)'], {}), '(embedded, 1, 2)\n', (13970, 13986), False, 'import torch\n'), ((1955, 2002), 'torch.randn', 'torch.randn', (['self.tagset_size', 'self.tagset_size'], {}), '(self.tagset_size, self.tagset_size)\n', (1966, 2002), False, 'import torch\n'), ((5726, 5754), 'torch.cat', 'torch.cat', (['char_embeddeds', '(1)'], {}), '(char_embeddeds, 1)\n', (5735, 5754), False, 'import torch\n'), ((5864, 5902), 'torch.cat', 'torch.cat', (['[char_embeddeds, embeds]', '(2)'], {}), '([char_embeddeds, embeds], 2)\n', (5873, 5902), False, 'import torch\n'), ((2780, 2823), 'torch.full', 'torch.full', (['(1, self.tagset_size)', '(-10000.0)'], {}), '((1, 
self.tagset_size), -10000.0)\n', (2790, 2823), False, 'import torch\n'), ((5544, 5580), 'torch.transpose', 'torch.transpose', (['char_embedded', '(1)', '(2)'], {}), '(char_embedded, 1, 2)\n', (5559, 5580), False, 'import torch\n'), ((6479, 6493), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (6490, 6493), False, 'import torch\n'), ((7225, 7268), 'torch.full', 'torch.full', (['(1, self.tagset_size)', '(-10000.0)'], {}), '((1, self.tagset_size), -10000.0)\n', (7235, 7268), False, 'import torch\n'), ((8616, 8636), 'helper.argmax', 'argmax', (['next_tag_var'], {}), '(next_tag_var)\n', (8622, 8636), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n'), ((2496, 2539), 'torch.randn', 'torch.randn', (['(2)', 'batch', '(self.hidden_dim // 2)'], {}), '(2, batch, self.hidden_dim // 2)\n', (2507, 2539), False, 'import torch\n'), ((2564, 2607), 'torch.randn', 'torch.randn', (['(2)', 'batch', '(self.hidden_dim // 2)'], {}), '(2, batch, self.hidden_dim // 2)\n', (2575, 2607), False, 'import torch\n'), ((4536, 4555), 'torch.cat', 'torch.cat', (['alphas_t'], {}), '(alphas_t)\n', (4545, 4555), False, 'import torch\n'), ((5318, 5397), 'helper.convert_to_char_tensor', 'convert_to_char_tensor', (['token_vector', 'word_vocab', 'char_vocab', 'self.max_word_len'], {}), '(token_vector, word_vocab, char_vocab, self.max_word_len)\n', (5340, 5397), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n'), ((6561, 6620), 'torch.tensor', 'torch.tensor', (['[self.tag_to_ix[START_TAG]]'], {'dtype': 'torch.long'}), '([self.tag_to_ix[START_TAG]], dtype=torch.long)\n', (6573, 6620), False, 'import torch\n'), ((8915, 8939), 'torch.cat', 'torch.cat', (['viterbivars_t'], {}), '(viterbivars_t)\n', (8924, 8939), False, 'import torch\n'), ((4475, 4500), 'helper.log_sum_exp', 'log_sum_exp', (['next_tag_var'], {}), '(next_tag_var)\n', (4486, 4500), False, 'from helper import argmax, log_sum_exp, hamming_loss, convert_to_char_tensor\n')]
|
"""Provides a QtPluginSorter that allows the user to change plugin call order.
"""
from typing import List, Optional, Union
from qtpy.QtCore import QEvent, Qt, Signal, Slot
from qtpy.QtWidgets import (
QCheckBox,
QComboBox,
QDialog,
QFrame,
QGraphicsOpacityEffect,
QHBoxLayout,
QLabel,
QListWidget,
QListWidgetItem,
QSizePolicy,
QVBoxLayout,
QWidget,
)
from ..plugins import plugin_manager as napari_plugin_manager
from napari_plugin_engine import HookImplementation, HookCaller, PluginManager
from .utils import drag_with_pixmap
class ImplementationListItem(QFrame):
"""A Widget to render each hook implementation item in a ListWidget.
Parameters
----------
item : QListWidgetItem
An item instance from a QListWidget. This will most likely come from
:meth:`QtHookImplementationListWidget.add_hook_implementation_to_list`.
parent : QWidget, optional
The parent widget, by default None
Attributes
----------
plugin_name_label : QLabel
The name of the plugin providing the hook implementation.
enabled_checkbox : QCheckBox
Checkbox to set the ``enabled`` status of the corresponding hook
implementation.
opacity : QGraphicsOpacityEffect
The opacity of the whole widget. When self.enabled_checkbox is
unchecked, the opacity of the item is decreased.
"""
def __init__(self, item: QListWidgetItem, parent: QWidget = None):
super().__init__(parent)
self.setToolTip("Click and drag to change call order")
self.item = item
self.opacity = QGraphicsOpacityEffect(self)
self.setGraphicsEffect(self.opacity)
layout = QHBoxLayout()
self.setLayout(layout)
self.position_label = QLabel()
self.update_position_label()
self.plugin_name_label = QLabel(item.hook_implementation.plugin_name)
self.enabled_checkbox = QCheckBox(self)
self.enabled_checkbox.setToolTip("Uncheck to disable this plugin")
self.enabled_checkbox.stateChanged.connect(self._set_enabled)
self.enabled_checkbox.setChecked(
getattr(item.hook_implementation, 'enabled', True)
)
layout.addWidget(self.position_label)
layout.addWidget(self.enabled_checkbox)
layout.addWidget(self.plugin_name_label)
layout.setStretch(2, 1)
layout.setContentsMargins(0, 0, 0, 0)
def _set_enabled(self, state: Union[bool, int]):
"""Set the enabled state of this hook implementation to ``state``."""
self.item.hook_implementation.enabled = bool(state)
self.opacity.setOpacity(1 if state else 0.5)
def update_position_label(self, order=None):
"""Update the label showing the position of this item in the list.
Parameters
----------
order : list, optional
A HookOrderType list ... unused by this function, but here for ease
of signal connection, by default None.
"""
position = self.item.listWidget().indexFromItem(self.item).row() + 1
self.position_label.setText(str(position))
class QtHookImplementationListWidget(QListWidget):
"""A ListWidget to display & sort the call order of a hook implementation.
This class will usually be instantiated by a
:class:`~napari._qt.qt_plugin_sorter.QtPluginSorter`. Each item in the list
will be rendered as a :class:`ImplementationListItem`.
Parameters
----------
parent : QWidget, optional
Optional parent widget, by default None
hook : HookCaller, optional
The ``HookCaller`` for which to show implementations. by default None
(i.e. no hooks shown)
Attributes
----------
hook_caller : HookCaller or None
The current ``HookCaller`` instance being shown in the list.
"""
order_changed = Signal(list) # emitted when the user changes the order.
def __init__(
self,
parent: Optional[QWidget] = None,
hook_caller: Optional[HookCaller] = None,
):
super().__init__(parent)
self.setDefaultDropAction(Qt.MoveAction)
self.setDragEnabled(True)
self.setDragDropMode(self.InternalMove)
self.setSelectionMode(self.SingleSelection)
self.setAcceptDrops(True)
self.setSpacing(1)
self.setMinimumHeight(1)
self.setSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding
)
self.order_changed.connect(self.permute_hook)
self.hook_caller: Optional[HookCaller] = None
self.set_hook_caller(hook_caller)
def set_hook_caller(self, hook_caller: Optional[HookCaller]):
"""Set the list widget to show hook implementations for ``hook_caller``.
Parameters
----------
hook_caller : HookCaller, optional
A ``HookCaller`` for which to show implementations. by default None
(i.e. no hooks shown)
"""
self.clear()
self.hook_caller = hook_caller
if not hook_caller:
return
# _nonwrappers returns hook implementations in REVERSE call order
# so we reverse them here to show them in the list in the order in
# which they get called.
for hook_implementation in reversed(hook_caller._nonwrappers):
self.append_hook_implementation(hook_implementation)
def append_hook_implementation(
self, hook_implementation: HookImplementation
):
"""Add a list item for ``hook_implementation`` with a custom widget.
Parameters
----------
hook_implementation : HookImplementation
The hook implementation object to add to the list.
"""
item = QListWidgetItem(parent=self)
item.hook_implementation = hook_implementation
self.addItem(item)
widg = ImplementationListItem(item, parent=self)
item.setSizeHint(widg.sizeHint())
self.order_changed.connect(widg.update_position_label)
self.setItemWidget(item, widg)
def dropEvent(self, event: QEvent):
"""Triggered when the user moves & drops one of the items in the list.
Parameters
----------
event : QEvent
The event that triggered the dropEvent.
"""
super().dropEvent(event)
order = [self.item(r).hook_implementation for r in range(self.count())]
self.order_changed.emit(order)
def startDrag(self, supportedActions: Qt.DropActions):
drag = drag_with_pixmap(self)
drag.exec_(supportedActions, Qt.MoveAction)
@Slot(list)
    def permute_hook(self, order: List[HookImplementation]):
        """Rearrange the call order of the hooks for the current hook impl.
Parameters
----------
order : list
A list of str, hook_implementation, or module_or_class, with the
desired CALL ORDER of the hook implementations.
"""
if not self.hook_caller:
return
self.hook_caller.bring_to_front(order)
class QtPluginSorter(QDialog):
"""Dialog that allows a user to change the call order of plugin hooks.
A main QComboBox lets the user pick which hook specification they would
like to reorder. Then a :class:`QtHookImplementationListWidget` shows the
current call order for all implementations of the current hook
specification. The user may then reorder them, or disable them by checking
the checkbox next to each hook implementation name.
Parameters
----------
plugin_manager : PluginManager, optional
An instance of a PluginManager. by default, the main
:class:`~napari.plugins.manager.PluginManager` instance
parent : QWidget, optional
Optional parent widget, by default None
initial_hook : str, optional
If provided the QComboBox at the top of the dialog will be set to
this hook, by default None
firstresult_only : bool, optional
If True, only hook specifications that declare the "firstresult"
option will be included. (these are hooks for which only the first
non None result is returned). by default True (because it makes
less sense to sort hooks where we just collect all results anyway)
https://pluggy.readthedocs.io/en/latest/#first-result-only
Attributes
----------
hook_combo_box : QComboBox
A dropdown menu to select the current hook.
hook_list : QtHookImplementationListWidget
The list widget that displays (and allows sorting of) all of the hook
implementations for the currently selected hook.
"""
NULL_OPTION = 'select hook... '
def __init__(
self,
plugin_manager: PluginManager = napari_plugin_manager,
*,
parent: Optional[QWidget] = None,
initial_hook: Optional[str] = None,
firstresult_only: bool = True,
):
super().__init__(parent)
self.setWindowModality(Qt.NonModal)
self.plugin_manager = plugin_manager
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.hook_combo_box = QComboBox()
self.hook_combo_box.addItem(self.NULL_OPTION)
# populate comboBox with all of the hooks known by the plugin manager
hooks = []
for name, hook_caller in plugin_manager.hooks.items():
if firstresult_only:
# if the firstresult_only option is set
# we only want to include hook_specifications that declare the
# "firstresult" option as True.
if not hook_caller.spec.opts.get('firstresult', False):
continue
hooks.append(name)
self.hook_combo_box.addItems(hooks)
self.hook_combo_box.setToolTip(
"select the hook specification to reorder"
)
self.hook_combo_box.activated[str].connect(self.set_current_hook)
self.hook_list = QtHookImplementationListWidget(parent=self)
title = QLabel('Plugin Sorter')
title.setObjectName("h2")
self.layout.addWidget(title)
instructions = QLabel(
'Select a hook to rearrange, then drag and '
'drop plugins into the desired call order. '
'\nDisable plugins by unchecking their checkbox.'
)
instructions.setWordWrap(True)
self.layout.addWidget(instructions)
self.layout.addWidget(self.hook_combo_box)
self.layout.addWidget(self.hook_list)
if initial_hook is not None:
self.hook_combo_box.setCurrentText(initial_hook)
self.set_current_hook(initial_hook)
def set_current_hook(self, hook: str):
"""Change the hook specification shown in the list widget.
Parameters
----------
hook : str
Name of the new hook specification to show.
"""
if hook == self.NULL_OPTION:
hook_caller = None
else:
hook_caller = getattr(self.plugin_manager.hooks, hook)
self.hook_list.set_hook_caller(hook_caller)
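# --- Illustrative usage sketch, assuming a working Qt environment with napari
# installed. Inside napari itself the sorter is opened from the Plugins menu;
# a minimal standalone launch would look like this.
if __name__ == "__main__":
    from qtpy.QtWidgets import QApplication
    app = QApplication([])
    sorter = QtPluginSorter()  # uses the global napari plugin manager by default
    sorter.show()
    app.exec_()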
|
[
"qtpy.QtWidgets.QCheckBox",
"qtpy.QtWidgets.QHBoxLayout",
"qtpy.QtWidgets.QLabel",
"qtpy.QtWidgets.QVBoxLayout",
"qtpy.QtWidgets.QGraphicsOpacityEffect",
"qtpy.QtWidgets.QListWidgetItem",
"qtpy.QtCore.Signal",
"qtpy.QtCore.Slot",
"qtpy.QtWidgets.QComboBox"
] |
[((3900, 3912), 'qtpy.QtCore.Signal', 'Signal', (['list'], {}), '(list)\n', (3906, 3912), False, 'from qtpy.QtCore import QEvent, Qt, Signal, Slot\n'), ((6659, 6669), 'qtpy.QtCore.Slot', 'Slot', (['list'], {}), '(list)\n', (6663, 6669), False, 'from qtpy.QtCore import QEvent, Qt, Signal, Slot\n'), ((1630, 1658), 'qtpy.QtWidgets.QGraphicsOpacityEffect', 'QGraphicsOpacityEffect', (['self'], {}), '(self)\n', (1652, 1658), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((1721, 1734), 'qtpy.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1732, 1734), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((1797, 1805), 'qtpy.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (1803, 1805), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((1877, 1921), 'qtpy.QtWidgets.QLabel', 'QLabel', (['item.hook_implementation.plugin_name'], {}), '(item.hook_implementation.plugin_name)\n', (1883, 1921), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((1954, 1969), 'qtpy.QtWidgets.QCheckBox', 'QCheckBox', (['self'], {}), '(self)\n', (1963, 1969), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((5793, 5821), 'qtpy.QtWidgets.QListWidgetItem', 'QListWidgetItem', ([], {'parent': 'self'}), '(parent=self)\n', (5808, 5821), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((9129, 9142), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (9140, 9142), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((9209, 9220), 'qtpy.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (9218, 9220), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((10093, 10116), 'qtpy.QtWidgets.QLabel', 'QLabel', (['"""Plugin Sorter"""'], {}), "('Plugin Sorter')\n", (10099, 10116), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n'), ((10212, 10366), 'qtpy.QtWidgets.QLabel', 'QLabel', (['"""Select a hook to rearrange, then drag and drop plugins into the desired call order. \nDisable plugins by unchecking their checkbox."""'], {}), '(\n """Select a hook to rearrange, then drag and drop plugins into the desired call order. \nDisable plugins by unchecking their checkbox."""\n )\n', (10218, 10366), False, 'from qtpy.QtWidgets import QCheckBox, QComboBox, QDialog, QFrame, QGraphicsOpacityEffect, QHBoxLayout, QLabel, QListWidget, QListWidgetItem, QSizePolicy, QVBoxLayout, QWidget\n')]
|
import unittest
from unittest import mock
from easybill_rest import Client
from easybill_rest.resources.resource_logins import ResourceLogins
from easybill_rest.tests.test_case_abstract import EasybillRestTestCaseAbstract
class TestResourceLogins(unittest.TestCase, EasybillRestTestCaseAbstract):
def setUp(self) -> None:
mocked_object = mock.Mock()
mocked_object.call = mock.Mock(return_value={})
self.mocked_object = ResourceLogins(mocked_object)
def test_get_endpoint(self) -> None:
self.assertEqual("/logins", Client('').logins().get_resource_endpoint())
def test_get_logins(self) -> None:
self.assertTrue(isinstance(
self.mocked_object.get_logins({"page": "2"}), dict))
def test_get_login(self) -> None:
self.assertTrue(isinstance(self.mocked_object.get_login("3"), dict))
@staticmethod
def get_suite() -> unittest.TestSuite:
return unittest.TestSuite(map(TestResourceLogins, [
'test_get_endpoint',
'test_get_logins',
'test_get_login',
]))
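# --- Illustrative note: the suite above can be run with the standard unittest
# runner, e.g.:
#
#   unittest.TextTestRunner(verbosity=2).run(TestResourceLogins.get_suite())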
|
[
"easybill_rest.resources.resource_logins.ResourceLogins",
"unittest.mock.Mock",
"easybill_rest.Client"
] |
[((354, 365), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (363, 365), False, 'from unittest import mock\n'), ((395, 421), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': '{}'}), '(return_value={})\n', (404, 421), False, 'from unittest import mock\n'), ((451, 480), 'easybill_rest.resources.resource_logins.ResourceLogins', 'ResourceLogins', (['mocked_object'], {}), '(mocked_object)\n', (465, 480), False, 'from easybill_rest.resources.resource_logins import ResourceLogins\n'), ((559, 569), 'easybill_rest.Client', 'Client', (['""""""'], {}), "('')\n", (565, 569), False, 'from easybill_rest import Client\n')]
|
import os
from PIL import Image
import numpy as np
## Compute the per-channel mean and standard deviation of an image dataset
root_path = '../train_data'
_filename = os.listdir(root_path)
filename = []
for _file in _filename:
if not _file.endswith('.txt'):
filename.append(_file)
# running sums of the per-channel means
R_channel_m = 0
G_channel_m = 0
B_channel_m = 0
# running sums of the per-channel variances
R_channel_s = 0
G_channel_s = 0
B_channel_s = 0
num = len(filename)
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
    img = img[:, :, ::-1]  # convert RGB to BGR
    img = img.astype(np.float32) / 255
B_channel_m = B_channel_m + np.sum(img[:, :, 0])/(img.shape[0]* img.shape[1])
G_channel_m = G_channel_m + np.sum(img[:, :, 1])/(img.shape[0]* img.shape[1])
R_channel_m = R_channel_m + np.sum(img[:, :, 2])/(img.shape[0]* img.shape[1])
B_mean = B_channel_m / num
G_mean = G_channel_m / num
R_mean = R_channel_m / num
for i in range(len(filename)):
img = Image.open(os.path.join(root_path, filename[i]))
img = img.convert('RGB')
img = np.array(img)
img = img[:, :, ::-1]
    img = img.astype(np.float32) / 255
    B_channel_s = B_channel_s + np.sum(np.power(img[:, :, 0] - B_mean, 2)) / (img.shape[0] * img.shape[1])
    G_channel_s = G_channel_s + np.sum(np.power(img[:, :, 1] - G_mean, 2)) / (img.shape[0] * img.shape[1])
    R_channel_s = R_channel_s + np.sum(np.power(img[:, :, 2] - R_mean, 2)) / (img.shape[0] * img.shape[1])
B_std = np.sqrt(B_channel_s/num)
G_std = np.sqrt(G_channel_s/num)
R_std = np.sqrt(R_channel_s/num)
with open('mean_std.txt','w')as f:
text = "B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean) + '\n' + "B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std)
f.write(text)
print("B_mean is %f, G_mean is %f, R_mean is %f" % (B_mean, G_mean, R_mean))
print("B_std is %f, G_std is %f, R_std is %f" % (B_std, G_std, R_std))
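# --- Illustrative follow-up: the statistics above are computed on BGR images
# scaled to [0, 1], so a matching normalization step would look like this.
def normalize_bgr(img_bgr):
    """Normalize a uint8 BGR image with the dataset statistics computed above."""
    mean = np.array([B_mean, G_mean, R_mean], dtype=np.float32)
    std = np.array([B_std, G_std, R_std], dtype=np.float32)
    return (img_bgr.astype(np.float32) / 255 - mean) / std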
|
[
"numpy.sum",
"numpy.power",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.sqrt"
] |
[((112, 133), 'os.listdir', 'os.listdir', (['root_path'], {}), '(root_path)\n', (122, 133), False, 'import os\n'), ((1446, 1472), 'numpy.sqrt', 'np.sqrt', (['(B_channel_s / num)'], {}), '(B_channel_s / num)\n', (1453, 1472), True, 'import numpy as np\n'), ((1479, 1505), 'numpy.sqrt', 'np.sqrt', (['(G_channel_s / num)'], {}), '(G_channel_s / num)\n', (1486, 1505), True, 'import numpy as np\n'), ((1512, 1538), 'numpy.sqrt', 'np.sqrt', (['(R_channel_s / num)'], {}), '(R_channel_s / num)\n', (1519, 1538), True, 'import numpy as np\n'), ((498, 511), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (506, 511), True, 'import numpy as np\n'), ((1048, 1061), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1056, 1061), True, 'import numpy as np\n'), ((421, 457), 'os.path.join', 'os.path.join', (['root_path', 'filename[i]'], {}), '(root_path, filename[i])\n', (433, 457), False, 'import os\n'), ((971, 1007), 'os.path.join', 'os.path.join', (['root_path', 'filename[i]'], {}), '(root_path, filename[i])\n', (983, 1007), False, 'import os\n'), ((619, 639), 'numpy.sum', 'np.sum', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (625, 639), True, 'import numpy as np\n'), ((701, 721), 'numpy.sum', 'np.sum', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (707, 721), True, 'import numpy as np\n'), ((783, 803), 'numpy.sum', 'np.sum', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (789, 803), True, 'import numpy as np\n'), ((1167, 1201), 'numpy.power', 'np.power', (['(img[:, :, 0] - R_mean)', '(2)'], {}), '(img[:, :, 0] - R_mean, 2)\n', (1175, 1201), True, 'import numpy as np\n'), ((1270, 1304), 'numpy.power', 'np.power', (['(img[:, :, 1] - G_mean)', '(2)'], {}), '(img[:, :, 1] - G_mean, 2)\n', (1278, 1304), True, 'import numpy as np\n'), ((1373, 1407), 'numpy.power', 'np.power', (['(img[:, :, 2] - B_mean)', '(2)'], {}), '(img[:, :, 2] - B_mean, 2)\n', (1381, 1407), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from pydefect.cli.vasp.make_unitcell import make_unitcell_from_vasp
from pymatgen.io.vasp import Vasprun, Outcar
def test_unitcell(vasp_files):
"""
HEAD OF MICROSCOPIC STATIC DIELECTRIC TENSOR (INDEPENDENT PARTICLE, excluding Hartree and local field effects)
------------------------------------------------------
1.269877 0.000000 -0.000000
0.000000 1.269877 0.000000
0.000000 0.000000 1.269877
------------------------------------------------------
MACROSCOPIC STATIC DIELECTRIC TENSOR (including local field effects in DFT)
------------------------------------------------------
1.255879 0.000000 -0.000000
-0.000000 1.255879 0.000000
-0.000000 0.000000 1.255879
------------------------------------------------------
"""
path = vasp_files / "unitcell_He_solid"
unitcell = make_unitcell_from_vasp(
vasprun_band=Vasprun(path / "vasprun-band.xml"),
outcar_band=Outcar(path / "OUTCAR-band"),
outcar_dielectric_clamped=Outcar(path / "OUTCAR-dielectric"),
outcar_dielectric_ionic=Outcar(path / "OUTCAR-dielectric"),
)
assert unitcell.vbm == -10.3168
assert unitcell.cbm == 1.2042
assert unitcell.ele_dielectric_const[0][0] == 1.255879
assert unitcell.ion_dielectric_const[0][0] == 0.0
|
[
"pymatgen.io.vasp.Vasprun",
"pymatgen.io.vasp.Outcar"
] |
[((1057, 1091), 'pymatgen.io.vasp.Vasprun', 'Vasprun', (["(path / 'vasprun-band.xml')"], {}), "(path / 'vasprun-band.xml')\n", (1064, 1091), False, 'from pymatgen.io.vasp import Vasprun, Outcar\n'), ((1113, 1141), 'pymatgen.io.vasp.Outcar', 'Outcar', (["(path / 'OUTCAR-band')"], {}), "(path / 'OUTCAR-band')\n", (1119, 1141), False, 'from pymatgen.io.vasp import Vasprun, Outcar\n'), ((1177, 1211), 'pymatgen.io.vasp.Outcar', 'Outcar', (["(path / 'OUTCAR-dielectric')"], {}), "(path / 'OUTCAR-dielectric')\n", (1183, 1211), False, 'from pymatgen.io.vasp import Vasprun, Outcar\n'), ((1245, 1279), 'pymatgen.io.vasp.Outcar', 'Outcar', (["(path / 'OUTCAR-dielectric')"], {}), "(path / 'OUTCAR-dielectric')\n", (1251, 1279), False, 'from pymatgen.io.vasp import Vasprun, Outcar\n')]
|
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from skimage import color
from PIL import Image
import matplotlib.pyplot as plt
from cnn_model import Model
# from cnn_model2 import Model as Model_unet
import pickle
from keras.datasets import cifar10
from sklearn.model_selection import train_test_split
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", type=str, required=False,
help="path to input black and white image")
parser.add_argument('--use_gpu', action='store_true', default=False,
help='whether to use GPU')
return parser.parse_args()
def preprocess_training_set(train):
processed_x = []
processed_y = []
for image in train:
l, ab = preprocess_image(image)
processed_x.append(l)
processed_y.append(ab)
return processed_x, processed_y
def preprocess_image(img, height=256, width=256):
    """Split an RGB image into its L (lightness) and ab channels in Lab space."""
# image = Image.open(img).convert('RGB')
# image_r = image.resize((width, height))
image_r_np = np.array(img) / 255.0
# Convert image to Lab format
image_lab = color.rgb2lab(image_r_np)
# Extract L dimension
image_l = image_lab[:,:,0]
image_ab = image_lab[:,:,1:]
# Convert to tensor and add relevant dimensions
image_l = image_l[None,:,:]
return image_l, image_ab
def postprocess_tens(orig_img, ab, mode='bilinear'):
# orig_img 1 x 1 x H_orig x W_orig
# ab 1 x 2 x H x W
HW_orig = orig_img.shape[2:]
HW = ab.shape[2:]
# Resize if needed
if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]):
ab_orig = F.interpolate(ab, size=HW_orig, mode=mode)
else:
ab_orig = ab
out_lab_orig = torch.cat((orig_img, ab_orig), dim=1)
out_lab_orig = out_lab_orig.data.cpu().numpy()
return color.lab2rgb(out_lab_orig.transpose((0,2,3,1)))
args = parse_arguments()
# image_dict = unpickle('C:\\Users\\karee\\Desktop\\ChromaPy\\data\\cifar-10-python\\cifar-10-batches-py\\data_batch_1')
# print(image_dict[b'data'])
(X, y), (x_test, y_test) = cifar10.load_data()
# Split data into training and validation
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
og_image = x_train[0:10]
x_train, y_train = preprocess_training_set(x_train[:10])
x_val, y_val = preprocess_training_set(x_val[:10])
tensor_x_train = torch.Tensor(x_train).float()
tensor_x_val = torch.Tensor(x_val).float()
tensor_y_train = torch.Tensor(y_train).permute(0,3,1,2).float()
tensor_y_val = torch.Tensor(y_val).permute(0,3,1,2).float()
# Dataset dictionary
dsets = {
"train": data.TensorDataset(tensor_x_train,tensor_y_train),
"val": data.TensorDataset(tensor_x_val,tensor_y_val)}
dataloaders = {x : data.DataLoader(dsets[x], batch_size=6, shuffle=True)
for x in ['train', 'val']}
dataset_sizes = {x : len(dsets[x]) for x in ["train","val"]}
# model_unet = Model_unet(1,2)
# model_unet_ft = model_unet.fit(dataloaders,1)
# ab_out = model_unet_ft.forward(tensor_x_train[0:5])
model = Model()
model_ft = model.fit(dataloaders, 1)
ab_out = model_ft.forward(tensor_x_train[0:5])
image_new = postprocess_tens(tensor_x_train[0:5], ab_out)
f, axarr = plt.subplots(2,2)
axarr[0,0].imshow(og_image[0])
axarr[0,1].imshow(image_new[0])
axarr[1,0].imshow(og_image[1])
axarr[1,1].imshow(image_new[1])
plt.show()
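# --- Illustrative sanity check: recombining an L channel with its ground-truth ab
# channels should roughly reproduce the original RGB image, which validates the
# pre/post-processing round trip.
l_chan, ab_chan = preprocess_image(og_image[0])
l_tens = torch.Tensor(l_chan).unsqueeze(0).float()                    # 1 x 1 x H x W
ab_tens = torch.Tensor(ab_chan).permute(2, 0, 1).unsqueeze(0).float()  # 1 x 2 x H x W
roundtrip = postprocess_tens(l_tens, ab_tens)                       # 1 x H x W x 3 RGB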
|
[
"cnn_model.Model",
"matplotlib.pyplot.show",
"keras.datasets.cifar10.load_data",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"torch.cat",
"torch.Tensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"torch.nn.functional.interpolate",
"matplotlib.pyplot.subplots",
"skimage.color.rgb2lab"
] |
[((2261, 2280), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2278, 2280), False, 'from keras.datasets import cifar10\n'), ((2357, 2411), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (2373, 2411), False, 'from sklearn.model_selection import train_test_split\n'), ((3237, 3244), 'cnn_model.Model', 'Model', ([], {}), '()\n', (3242, 3244), False, 'from cnn_model import Model\n'), ((3400, 3418), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3412, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3552, 3554), True, 'import matplotlib.pyplot as plt\n'), ((487, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (510, 512), False, 'import argparse\n'), ((1330, 1355), 'skimage.color.rgb2lab', 'color.rgb2lab', (['image_r_np'], {}), '(image_r_np)\n', (1343, 1355), False, 'from skimage import color\n'), ((1909, 1946), 'torch.cat', 'torch.cat', (['(orig_img, ab_orig)'], {'dim': '(1)'}), '((orig_img, ab_orig), dim=1)\n', (1918, 1946), False, 'import torch\n'), ((2807, 2857), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['tensor_x_train', 'tensor_y_train'], {}), '(tensor_x_train, tensor_y_train)\n', (2825, 2857), False, 'from torch.utils import data\n'), ((2869, 2915), 'torch.utils.data.TensorDataset', 'data.TensorDataset', (['tensor_x_val', 'tensor_y_val'], {}), '(tensor_x_val, tensor_y_val)\n', (2887, 2915), False, 'from torch.utils import data\n'), ((2936, 2989), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dsets[x]'], {'batch_size': '(6)', 'shuffle': '(True)'}), '(dsets[x], batch_size=6, shuffle=True)\n', (2951, 2989), False, 'from torch.utils import data\n'), ((1258, 1271), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1266, 1271), True, 'import numpy as np\n'), ((1815, 1857), 'torch.nn.functional.interpolate', 'F.interpolate', (['ab'], {'size': 'HW_orig', 'mode': 'mode'}), '(ab, size=HW_orig, mode=mode)\n', (1828, 1857), True, 'import torch.nn.functional as F\n'), ((2565, 2586), 'torch.Tensor', 'torch.Tensor', (['x_train'], {}), '(x_train)\n', (2577, 2586), False, 'import torch\n'), ((2610, 2629), 'torch.Tensor', 'torch.Tensor', (['x_val'], {}), '(x_val)\n', (2622, 2629), False, 'import torch\n'), ((2655, 2676), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (2667, 2676), False, 'import torch\n'), ((2717, 2736), 'torch.Tensor', 'torch.Tensor', (['y_val'], {}), '(y_val)\n', (2729, 2736), False, 'import torch\n')]
|
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractUser
from django.core.validators import EmailValidator
from django.contrib.auth.validators import UnicodeUsernameValidator
class UserManager(BaseUserManager):
    def validate_email(self, email):
        """ Verify email argument and return normalised value
:param email: expect str
:returns: normalised email str if correct
:raises ValueError: invalid param email
:raises Exception: existing email
"""
if email is None:
raise ValueError("Missing email value")
elif type(email) is not str:
raise ValueError("Invalid email value, expect str")
normalized_email = self.normalize_email(email)
existing_email = \
self.model.objects.filter(email=normalized_email).first()
if existing_email:
raise Exception("This email is already assigned to another User")
return normalized_email
def create_user(self, email, name, password=None):
""" Creates and saves a User
:param email: expect str
:param name: expect str
:param password: expect str or None, default None
:returns: User model
"""
user = self.model(
email=self.validate_email(email),
name=name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password=None):
""" Creates and saves a User with superuser privileges
:param email: expect str
:param name: expect str
:param password: expect str or None, default None
:returns: User model
"""
user = self.model(
email=self.validate_email(email),
name=name
)
user.set_password(password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
""" User model class (AbstractUser with modified properties)
removes: username, first_name, last_name
adds: name
"""
email = models.EmailField(
verbose_name="email address",
error_messages={
'unique': "A user with that email already exists.",
},
help_text="Required. 150 characters or fewer.",
max_length=150,
unique=True,
validators=[EmailValidator],
)
username = None
first_name = None
last_name = None
name = models.CharField(
verbose_name="name",
max_length=150,
help_text=(
"Required. 150 characters or fewer. "
"Letters, digits and @/./+/-/_ only."
),
validators=[UnicodeUsernameValidator]
)
objects = UserManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["name"]
class Meta:
db_table = "users"
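# --- Illustrative usage (shown as comments, since creating users needs a
# configured database). The addresses and names below are placeholders:
#
#   user = User.objects.create_user(
#       email="jane@example.com", name="Jane Doe", password="s3cret",
#   )
#   admin = User.objects.create_superuser(
#       email="admin@example.com", name="Admin", password="s3cret",
#   )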
|
[
"django.db.models.CharField",
"django.db.models.EmailField"
] |
[((2185, 2420), 'django.db.models.EmailField', 'models.EmailField', ([], {'verbose_name': '"""email address"""', 'error_messages': "{'unique': 'A user with that email already exists.'}", 'help_text': '"""Required. 150 characters or fewer."""', 'max_length': '(150)', 'unique': '(True)', 'validators': '[EmailValidator]'}), "(verbose_name='email address', error_messages={'unique':\n 'A user with that email already exists.'}, help_text=\n 'Required. 150 characters or fewer.', max_length=150, unique=True,\n validators=[EmailValidator])\n", (2202, 2420), False, 'from django.db import models\n'), ((2560, 2745), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""name"""', 'max_length': '(150)', 'help_text': '"""Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only."""', 'validators': '[UnicodeUsernameValidator]'}), "(verbose_name='name', max_length=150, help_text=\n 'Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',\n validators=[UnicodeUsernameValidator])\n", (2576, 2745), False, 'from django.db import models\n')]
|
import math
# Triangle Solver
print("Welcome to the Right Triangle Solver App.")
side_a = float(input("\nWhat is the first leg of the triangle: "))
side_b = float(input("What is the second leg of the triangle: "))
# Calculations
side_c = math.sqrt(side_a**2 + side_b**2)
side_c = round(side_c, 3)
area = 0.5 * side_a * side_b
area = round(area, 3)
# Summary
print("\nFor a triangle with legs of " + str(side_a) + " and " +
str(side_b) + " the hypotenuse is " + str(side_c))
print("For a triangle with legs of " + str(side_a) + " and " +
str(side_b) + " the area is " + str(area))
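# Worked check (illustrative addition): for legs 3.0 and 4.0 the script reports a
# hypotenuse of 5.0 and an area of 6.0, since sqrt(3**2 + 4**2) = 5 and 0.5 * 3 * 4 = 6.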
|
[
"math.sqrt"
] |
[((243, 279), 'math.sqrt', 'math.sqrt', (['(side_a ** 2 + side_b ** 2)'], {}), '(side_a ** 2 + side_b ** 2)\n', (252, 279), False, 'import math\n')]
|
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if "darwin" in sys.platform.lower():
# To avoid multiple runs of the model code
# https://pythonspeed.com/articles/python-multiprocessing/
import multiprocessing
multiprocessing.set_start_method('fork')
import logging
import portpicker
import requests
import torch.distributed
import pkg_resources
import adaptdl.collective
import adaptdl.env
import semver
from .epoch import current_epoch, finished_epochs, remaining_epochs_until
from .data import current_dataloader, AdaptiveDataLoader, ElasticSampler
from .parallel import AdaptiveDataParallel
from .accumulator import Accumulator
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
def version_check(version):
if semver.VersionInfo.isvalid(version) and \
version != "0.0.0":
return True
else:
return False
def init_process_group(backend):
url = adaptdl.env.supervisor_url()
if url:
key = adaptdl.env.job_id()
group = adaptdl.env.num_restarts()
while True:
response = requests.get(url=f"{url}/discover/{key}/{group}")
if response.status_code != 408: # Timeout.
break
response.raise_for_status()
master_addr = response.json()[0]
sched_version = adaptdl.env.adaptdl_sched_version()
trainer_version = pkg_resources.get_distribution("adaptdl").version
# if version_check(sched_version) and version_check(trainer_version):
# trainer_ver_maj = semver.VersionInfo.parse(trainer_version).major
# sched_ver_maj = semver.VersionInfo.parse(sched_version).major
# if trainer_ver_maj != sched_ver_maj:
# raise Exception('adaptdl version {} is incompatible with'
# 'scheduler version {}'.format(trainer_version,
# sched_version))
else:
master_addr = adaptdl.env.master_addr()
master_port = adaptdl.env.master_port()
# Initialize collective module.
adaptdl.collective.initialize(master_addr, master_port)
# Initialize torch.distributed.
torch_port = adaptdl.collective.broadcast(portpicker.pick_unused_port())
init_method = "tcp://{}:{}?rank={}&world_size={}".format(
master_addr, torch_port, adaptdl.env.replica_rank(),
adaptdl.env.num_replicas())
LOG.info("Initializing torch.distributed using %s", init_method)
torch.distributed.init_process_group(backend, init_method)
LOG.info("torch.distributed initialized")
__all__ = [
"init_process_group",
"current_epoch",
"finished_epochs",
"remaining_epochs_until",
"current_dataloader",
"AdaptiveDataLoader",
"ElasticSampler",
"AdaptiveDataParallel",
"Accumulator",
]
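# --- Illustrative usage (shown as comments). Assuming this file is the
# adaptdl.torch package __init__, a training script calls init_process_group()
# once before wrapping its model and data loader with the adaptive classes
# re-exported above:
#
#   import adaptdl.torch as adl
#   adl.init_process_group("nccl" if torch.cuda.is_available() else "gloo")
#   model = adl.AdaptiveDataParallel(model, optimizer)
#   loader = adl.AdaptiveDataLoader(dataset, batch_size=64)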
|
[
"pkg_resources.get_distribution",
"logging.basicConfig",
"multiprocessing.set_start_method",
"portpicker.pick_unused_port",
"semver.VersionInfo.isvalid",
"sys.platform.lower",
"requests.get",
"logging.getLogger"
] |
[((1214, 1253), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1233, 1253), False, 'import logging\n'), ((1260, 1287), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1277, 1287), False, 'import logging\n'), ((626, 646), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (644, 646), False, 'import sys\n'), ((789, 829), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""fork"""'], {}), "('fork')\n", (821, 829), False, 'import multiprocessing\n'), ((1352, 1387), 'semver.VersionInfo.isvalid', 'semver.VersionInfo.isvalid', (['version'], {}), '(version)\n', (1378, 1387), False, 'import semver\n'), ((2830, 2859), 'portpicker.pick_unused_port', 'portpicker.pick_unused_port', ([], {}), '()\n', (2857, 2859), False, 'import portpicker\n'), ((1684, 1733), 'requests.get', 'requests.get', ([], {'url': 'f"""{url}/discover/{key}/{group}"""'}), "(url=f'{url}/discover/{key}/{group}')\n", (1696, 1733), False, 'import requests\n'), ((1975, 2016), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""adaptdl"""'], {}), "('adaptdl')\n", (2005, 2016), False, 'import pkg_resources\n')]
|
import myutils
from torch.nn import Module, Parameter
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
class TripletLoss(Module):
def __init__(self, instance, margin=1.0):
super(TripletLoss, self).__init__()
self.margin = margin
self.instance = instance
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
dist.addmm_(1, -2, inputs, inputs.t()).clamp_(min=1e-12)
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~adjacency
mask_ap = (adjacency.float() - torch.eye(nB).cuda()).long()
mask_an = adjacency_not.long()
dist_ap = (dist[mask_ap == 1]).view(-1, 1)
dist_an = (dist[mask_an == 1]).view(nB, -1)
dist_an = dist_an.repeat(1, self.instance - 1)
dist_an = dist_an.view(nB * (self.instance - 1), nB - self.instance)
num_loss = dist_an.size(0) * dist_an.size(1)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), self.margin + dist_ap - dist_an)) / num_loss
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
class TripletSemihardLoss(Module):
def __init__(self, margin=0.2):
super(TripletSemihardLoss, self).__init__()
self.margin = margin
def forward(self, inputs, targets, normalized=True):
norm_temp = inputs.norm(dim=1, p=2, keepdim=True)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
nB = inputs.size(0)
idx_ = torch.arange(0, nB, dtype=torch.long)
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(nB, nB)
dist = dist + dist.t()
# use squared
dist.addmm_(1, -2, inputs, inputs.t()).clamp_(min=1e-12)
temp_euclidean_score = dist * 1.0
adjacency = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
adjacency_not = ~ adjacency
dist_tile = dist.repeat(nB, 1)
mask = (adjacency_not.repeat(nB, 1)) * (dist_tile > (dist.transpose(0, 1).contiguous().view(-1, 1)))
mask_final = (mask.float().sum(dim=1, keepdim=True) > 0).view(nB, nB).transpose(0, 1)
# negatives_outside: smallest D_an where D_an > D_ap
temp1 = (dist_tile - dist_tile.max(dim=1, keepdim=True)[0]) * (mask.float())
negtives_outside = temp1.min(dim=1, keepdim=True)[0] + dist_tile.max(dim=1, keepdim=True)[0]
negtives_outside = negtives_outside.view(nB, nB).transpose(0, 1)
# negatives_inside: largest D_an
temp2 = (dist - dist.min(dim=1, keepdim=True)[0]) * (adjacency_not.float())
negtives_inside = temp2.max(dim=1, keepdim=True)[0] + dist.min(dim=1, keepdim=True)[0]
negtives_inside = negtives_inside.repeat(1, nB)
semi_hard_negtives = torch.where(mask_final, negtives_outside, negtives_inside)
loss_mat = self.margin + dist - semi_hard_negtives
mask_positives = adjacency.float() - torch.eye(nB).cuda()
mask_positives = mask_positives.detach()
num_positives = torch.sum(mask_positives)
triplet_loss = torch.sum(
torch.max(torch.tensor(0, dtype=torch.float).cuda(), loss_mat * mask_positives)) / num_positives
final_loss = triplet_loss * 1.0
with torch.no_grad():
assert normalized == True
cos_theta = torch.mm(inputs, inputs.t())
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
avg_ap = cos_theta[(mask.float() - torch.eye(nB).cuda()) == 1].mean()
avg_an = cos_theta[mask.float() == 0].mean()
return final_loss, avg_ap, avg_an
def cross_entropy(logits, target, size_average=True):
if size_average:
return torch.mean(torch.sum(- target * F.log_softmax(logits, -1), -1))
else:
return torch.sum(torch.sum(- target * F.log_softmax(logits, -1), -1))
class NpairLoss(Module):
def __init__(self):
super(NpairLoss, self).__init__()
def forward(self, inputs, targets, normalized=False):
nB = inputs.size(0)
norm_temp = inputs.norm(p=2, dim=1, keepdim=True)
inputs_n = inputs.div(norm_temp.expand_as(inputs))
mm_logits = torch.mm(inputs_n, inputs_n.t()).detach()
mask = targets.expand(nB, nB).eq(targets.expand(nB, nB).t())
cos_ap = mm_logits[(mask.float() - torch.eye(nB).float().cuda()) == 1].view(nB, -1)
cos_an = mm_logits[mask != 1].view(nB, -1)
avg_ap = torch.mean(cos_ap)
avg_an = torch.mean(cos_an)
if normalized:
inputs = inputs.div(norm_temp.expand_as(inputs))
inputs = inputs * 5.0
labels = targets.view(-1).cpu().numpy()
pids = np.unique(labels)
anchor_idx = []
positive_idx = []
for i in pids:
ap_idx = np.where(labels == i)[0]
anchor_idx.append(ap_idx[0])
positive_idx.append(ap_idx[1])
anchor = inputs[anchor_idx, :]
positive = inputs[positive_idx, :]
batch_size = anchor.size(0)
target = torch.from_numpy(pids).cuda()
target = target.view(target.size(0), 1)
target = (target == torch.transpose(target, 0, 1)).float()
target = target / torch.sum(target, dim=1, keepdim=True).float()
logit = torch.matmul(anchor, torch.transpose(positive, 0, 1))
loss_ce = cross_entropy(logit, target)
loss = loss_ce * 1.0
return loss, avg_ap, avg_an
class MultiSimilarityLoss(Module):
def __init__(self):
super(MultiSimilarityLoss, self).__init__()
self.thresh = 0.5
self.margin = 0.1
self.scale_pos = 2.0
self.scale_neg = 40.0
def forward(self, feats, labels):
norm = feats.norm(dim=1, p=2, keepdim=True)
feats = feats.div(norm.expand_as(feats))
labels = labels.view(-1)
assert feats.size(0) == labels.size(0), \
f"feats.size(0): {feats.size(0)} is not equal to labels.size(0): {labels.size(0)}"
batch_size = feats.size(0)
sim_mat = torch.matmul(feats, torch.t(feats))
epsilon = 1e-5
loss = list()
avg_aps = list()
avg_ans = list()
for i in range(batch_size):
pos_pair_ = sim_mat[i][labels == labels[i]]
pos_pair_ = pos_pair_[pos_pair_ < 1 - epsilon]
neg_pair_ = sim_mat[i][labels != labels[i]]
if len(neg_pair_) < 1 or len(pos_pair_) < 1:
continue
avg_aps.append(pos_pair_.mean())
avg_ans.append(neg_pair_.mean())
neg_pair = neg_pair_[neg_pair_ + self.margin > torch.min(pos_pair_)]
pos_pair = pos_pair_[pos_pair_ - self.margin < torch.max(neg_pair_)]
if len(neg_pair) < 1 or len(pos_pair) < 1:
continue
# weighting step
pos_loss = 1.0 / self.scale_pos * torch.log(
1 + torch.sum(torch.exp(-self.scale_pos * (pos_pair - self.thresh))))
neg_loss = 1.0 / self.scale_neg * torch.log(
1 + torch.sum(torch.exp(self.scale_neg * (neg_pair - self.thresh))))
loss.append(pos_loss + neg_loss)
if len(loss) == 0:
print('with ms loss = 0 !')
loss = torch.zeros([], requires_grad=True).cuda()
else:
loss = sum(loss) / batch_size
loss = loss.view(-1)
avg_ap = sum(avg_aps) / batch_size
avg_an = sum(avg_ans) / batch_size
return loss, avg_ap, avg_an
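# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): both loss modules expect an
# (N, D) batch of embeddings and an (N,) label vector in which every identity
# appears at least twice. NpairLoss moves its targets onto the GPU internally, so
# the sketch is guarded on CUDA being available. Shapes and labels below are
# invented for illustration.
if __name__ == '__main__' and torch.cuda.is_available():
    embeddings = torch.randn(8, 128).cuda()
    identities = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3]).cuda()
    ms_loss, ms_ap, ms_an = MultiSimilarityLoss()(embeddings, identities)
    np_loss, np_ap, np_an = NpairLoss()(embeddings, identities)
    print(ms_loss.item(), np_loss.item())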
|
[
"torch.mean",
"torch.t",
"torch.from_numpy",
"torch.eye",
"torch.where",
"torch.exp",
"numpy.where",
"torch.max",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.pow",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"torch.sum",
"torch.min",
"numpy.unique",
"torch.transpose"
] |
[((564, 601), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (576, 601), False, 'import torch\n'), ((2262, 2299), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (2274, 2299), False, 'import torch\n'), ((3520, 3578), 'torch.where', 'torch.where', (['mask_final', 'negtives_outside', 'negtives_inside'], {}), '(mask_final, negtives_outside, negtives_inside)\n', (3531, 3578), False, 'import torch\n'), ((3779, 3804), 'torch.sum', 'torch.sum', (['mask_positives'], {}), '(mask_positives)\n', (3788, 3804), False, 'import torch\n'), ((5208, 5226), 'torch.mean', 'torch.mean', (['cos_ap'], {}), '(cos_ap)\n', (5218, 5226), False, 'import torch\n'), ((5244, 5262), 'torch.mean', 'torch.mean', (['cos_an'], {}), '(cos_an)\n', (5254, 5262), False, 'import torch\n'), ((5446, 5463), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (5455, 5463), True, 'import numpy as np\n'), ((1502, 1517), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1515, 1517), False, 'import torch\n'), ((4011, 4026), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4024, 4026), False, 'import torch\n'), ((6063, 6094), 'torch.transpose', 'torch.transpose', (['positive', '(0)', '(1)'], {}), '(positive, 0, 1)\n', (6078, 6094), False, 'import torch\n'), ((6828, 6842), 'torch.t', 'torch.t', (['feats'], {}), '(feats)\n', (6835, 6842), False, 'import torch\n'), ((5559, 5580), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (5567, 5580), True, 'import numpy as np\n'), ((5806, 5828), 'torch.from_numpy', 'torch.from_numpy', (['pids'], {}), '(pids)\n', (5822, 5828), False, 'import torch\n'), ((3685, 3698), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (3694, 3698), False, 'import torch\n'), ((4497, 4522), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (4510, 4522), True, 'import torch.nn.functional as F\n'), ((4585, 4610), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (4598, 4610), True, 'import torch.nn.functional as F\n'), ((5913, 5942), 'torch.transpose', 'torch.transpose', (['target', '(0)', '(1)'], {}), '(target, 0, 1)\n', (5928, 5942), False, 'import torch\n'), ((5978, 6016), 'torch.sum', 'torch.sum', (['target'], {'dim': '(1)', 'keepdim': '(True)'}), '(target, dim=1, keepdim=True)\n', (5987, 6016), False, 'import torch\n'), ((7383, 7403), 'torch.min', 'torch.min', (['pos_pair_'], {}), '(pos_pair_)\n', (7392, 7403), False, 'import torch\n'), ((7464, 7484), 'torch.max', 'torch.max', (['neg_pair_'], {}), '(neg_pair_)\n', (7473, 7484), False, 'import torch\n'), ((8014, 8049), 'torch.zeros', 'torch.zeros', (['[]'], {'requires_grad': '(True)'}), '([], requires_grad=True)\n', (8025, 8049), False, 'import torch\n'), ((618, 638), 'torch.pow', 'torch.pow', (['inputs', '(2)'], {}), '(inputs, 2)\n', (627, 638), False, 'import torch\n'), ((2316, 2336), 'torch.pow', 'torch.pow', (['inputs', '(2)'], {}), '(inputs, 2)\n', (2325, 2336), False, 'import torch\n'), ((946, 959), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (955, 959), False, 'import torch\n'), ((1360, 1394), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.float'}), '(0, dtype=torch.float)\n', (1372, 1394), False, 'import torch\n'), ((3862, 3896), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.float'}), '(0, dtype=torch.float)\n', (3874, 3896), False, 'import torch\n'), ((7684, 7737), 'torch.exp', 'torch.exp', 
(['(-self.scale_pos * (pos_pair - self.thresh))'], {}), '(-self.scale_pos * (pos_pair - self.thresh))\n', (7693, 7737), False, 'import torch\n'), ((7827, 7879), 'torch.exp', 'torch.exp', (['(self.scale_neg * (neg_pair - self.thresh))'], {}), '(self.scale_neg * (neg_pair - self.thresh))\n', (7836, 7879), False, 'import torch\n'), ((1730, 1743), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (1739, 1743), False, 'import torch\n'), ((4239, 4252), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (4248, 4252), False, 'import torch\n'), ((5090, 5103), 'torch.eye', 'torch.eye', (['nB'], {}), '(nB)\n', (5099, 5103), False, 'import torch\n')]
|
"""
GraphQL + Relay interface to Rough Trade Calendar data.
"""
import django_filters
import graphene
import graphene.relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from rough_trade_calendar import models
class CountConnection(graphene.Connection):
"""A connection which supports Relay's totalCount field."""
total_count = graphene.Int()
def resolve_total_count(self, *args): # pylint: disable=unused-argument
return self.length # pylint: disable=no-member
class Meta:
abstract = True
class EventFilterSet(django_filters.FilterSet):
"""Filter and order events by start_at."""
start_after = django_filters.DateTimeFilter("start_at", "gt")
start_before = django_filters.DateTimeFilter("start_at", "lt")
order_by = django_filters.OrderingFilter(fields={"start_at": "startAt"})
class Meta:
model = models.Event
fields = ["start_after", "start_before"]
class Event(DjangoObjectType):
"""An event."""
class Meta:
model = models.Event
fields = [
"id",
"name",
"description",
"url",
"image_url",
"start_at",
"location",
]
filterset_class = EventFilterSet
interfaces = [graphene.relay.Node]
connection_class = CountConnection
class Location(DjangoObjectType):
"""A location."""
class Meta:
model = models.Location
fields = ["id", "name", "timezone", "events"]
interfaces = [graphene.relay.Node]
connection_class = CountConnection
filter_fields = {"name": ["exact", "contains"]}
class Query(graphene.ObjectType):
all_locations = DjangoFilterConnectionField(Location, description="All locations.")
schema = graphene.Schema(query=Query)
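# Hedged usage sketch (not part of the original module): a graphene schema can be
# executed directly, which is a quick way to exercise the totalCount field added by
# CountConnection (the name/name_Contains filter arguments generated from
# filter_fields could be added to the query in the same way). Requires a configured
# Django environment with some Location rows.
EXAMPLE_QUERY = """
{
  allLocations {
    totalCount
    edges { node { name timezone } }
  }
}
"""
# result = schema.execute(EXAMPLE_QUERY)
# print(result.errors or result.data)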
|
[
"django_filters.OrderingFilter",
"graphene_django.filter.DjangoFilterConnectionField",
"django_filters.DateTimeFilter",
"graphene.Int",
"graphene.Schema"
] |
[((1846, 1874), 'graphene.Schema', 'graphene.Schema', ([], {'query': 'Query'}), '(query=Query)\n', (1861, 1874), False, 'import graphene\n'), ((403, 417), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (415, 417), False, 'import graphene\n'), ((709, 756), 'django_filters.DateTimeFilter', 'django_filters.DateTimeFilter', (['"""start_at"""', '"""gt"""'], {}), "('start_at', 'gt')\n", (738, 756), False, 'import django_filters\n'), ((776, 823), 'django_filters.DateTimeFilter', 'django_filters.DateTimeFilter', (['"""start_at"""', '"""lt"""'], {}), "('start_at', 'lt')\n", (805, 823), False, 'import django_filters\n'), ((839, 900), 'django_filters.OrderingFilter', 'django_filters.OrderingFilter', ([], {'fields': "{'start_at': 'startAt'}"}), "(fields={'start_at': 'startAt'})\n", (868, 900), False, 'import django_filters\n'), ((1767, 1834), 'graphene_django.filter.DjangoFilterConnectionField', 'DjangoFilterConnectionField', (['Location'], {'description': '"""All locations."""'}), "(Location, description='All locations.')\n", (1794, 1834), False, 'from graphene_django.filter import DjangoFilterConnectionField\n')]
|
#!/usr/bin/env python
import rospy
import numpy
from duckietown_msgs.msg import FSMState, AprilTags, BoolStamped
from std_msgs.msg import String, Int16 #Imports msg
class SRTurnsNode(object):
def __init__(self):
# Save the name of the node
self.node_name = rospy.get_name()
self.turn_type = -1
rospy.loginfo("[%s] Initialzing." %(self.node_name))
# Setup publishers
self.pub_turn_type = rospy.Publisher("~turn_type",Int16, queue_size=1, latch=True)
# Setup subscribers
self.sub_topic_mode = rospy.Subscriber("~mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialzed." %(self.node_name))
        self.rate = rospy.Rate(30) # 30 Hz
def cbMode(self, mode_msg):
#print mode_msg
self.fsm_mode = mode_msg.state
if(self.fsm_mode == "INTERSECTION_CONTROL"):
# return only straight and right turn
availableTurns = [1,2]
#now randomly choose a possible direction
if(len(availableTurns)>0):
randomIndex = numpy.random.randint(len(availableTurns))
chosenTurn = availableTurns[randomIndex]
self.turn_type = chosenTurn
self.pub_turn_type.publish(self.turn_type)
rospy.loginfo("[%s] possible turns %s." %(self.node_name,availableTurns))
rospy.loginfo("[%s] Turn type now: %i" %(self.node_name,self.turn_type))
else:
self.turn_type = -1
self.pub_turn_type.publish(self.turn_type)
rospy.loginfo("[%s] Turn type: %i" %(self.node_name, self.turn_type))
def on_shutdown(self):
rospy.loginfo("[%s] Shutting down." %(self.node_name))
if __name__ == '__main__':
# Initialize the node with rospy
rospy.init_node('sr_turns_node', anonymous=False)
# Create the NodeName object
node = SRTurnsNode()
# Setup proper shutdown behavior
rospy.on_shutdown(node.on_shutdown)
# Keep it spinning to keep the node alive
rospy.spin()
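# Hedged usage sketch (not part of the original node): once the node is running, the
# random turn selection can be exercised by publishing an FSMState message on the
# subscribed "~mode" topic (the vehicle namespace below is invented for illustration):
#
#     rostopic pub /duckiebot/sr_turns_node/mode duckietown_msgs/FSMState \
#         '{state: "INTERSECTION_CONTROL"}'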
|
[
"rospy.Subscriber",
"rospy.Publisher",
"rospy.Rate",
"rospy.loginfo",
"rospy.on_shutdown",
"rospy.init_node",
"rospy.get_name",
"rospy.spin"
] |
[((1818, 1867), 'rospy.init_node', 'rospy.init_node', (['"""sr_turns_node"""'], {'anonymous': '(False)'}), "('sr_turns_node', anonymous=False)\n", (1833, 1867), False, 'import rospy\n'), ((1970, 2005), 'rospy.on_shutdown', 'rospy.on_shutdown', (['node.on_shutdown'], {}), '(node.on_shutdown)\n', (1987, 2005), False, 'import rospy\n'), ((2056, 2068), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2066, 2068), False, 'import rospy\n'), ((278, 294), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (292, 294), False, 'import rospy\n'), ((332, 383), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Initialzing.' % self.node_name)"], {}), "('[%s] Initialzing.' % self.node_name)\n", (345, 383), False, 'import rospy\n'), ((442, 504), 'rospy.Publisher', 'rospy.Publisher', (['"""~turn_type"""', 'Int16'], {'queue_size': '(1)', 'latch': '(True)'}), "('~turn_type', Int16, queue_size=1, latch=True)\n", (457, 504), False, 'import rospy\n'), ((563, 625), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~mode"""', 'FSMState', 'self.cbMode'], {'queue_size': '(1)'}), "('~mode', FSMState, self.cbMode, queue_size=1)\n", (579, 625), False, 'import rospy\n'), ((642, 692), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Initialzed.' % self.node_name)"], {}), "('[%s] Initialzed.' % self.node_name)\n", (655, 692), False, 'import rospy\n'), ((715, 729), 'rospy.Rate', 'rospy.Rate', (['(30)'], {}), '(30)\n', (725, 729), False, 'import rospy\n'), ((1694, 1747), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Shutting down.' % self.node_name)"], {}), "('[%s] Shutting down.' % self.node_name)\n", (1707, 1747), False, 'import rospy\n'), ((1588, 1658), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Turn type: %i' % (self.node_name, self.turn_type))"], {}), "('[%s] Turn type: %i' % (self.node_name, self.turn_type))\n", (1601, 1658), False, 'import rospy\n'), ((1312, 1387), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] possible turns %s.' % (self.node_name, availableTurns))"], {}), "('[%s] possible turns %s.' % (self.node_name, availableTurns))\n", (1325, 1387), False, 'import rospy\n'), ((1402, 1476), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Turn type now: %i' % (self.node_name, self.turn_type))"], {}), "('[%s] Turn type now: %i' % (self.node_name, self.turn_type))\n", (1415, 1476), False, 'import rospy\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.relaxation.py
#
# Copyright (C) 2012-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes NMR relaxation and related data
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
################################## FUNCTIONS ##################################
def spawn(function):
def run_function(queue_in, queue_out):
while True:
i, argument = queue_in.get()
if i is None:
break # 'None' signals that queue is empty
queue_out.put((i, function(argument)))
return run_function
def multiprocess_map(function, arguments, n_processes=1):
"""
    Runs a *function* with *arguments* using *n_processes*. Meant
    as a replacement for multiprocessing.Pool.imap_unordered,
which can only accept module-level functions.
**Arguments:**
:*function*: Function to run
:*arguments*: Iterable of arguments to pass to function
        :*n_processes*: Number of processes to use
**Returns:**
:*results*: List of results returned from *function*
.. todo:
- Does this work, or can it be made to smoothly work, with more
complex arguments?
- Accept multiple functions, in addition to arguments
- Additional improvements likely possible
"""
from multiprocessing import Queue, Process
# Initialize queues
queue_in = Queue(1)
queue_out = Queue()
# Initialize processes and link to input and output queues
processes = [Process(target=spawn(function), args=(queue_in, queue_out))
for i in range(n_processes)]
for p in processes:
p.daemon = True
p.start()
# Construct input queue, including 'None' signals to terminate
input = [queue_in.put((i, argument)) for i, argument in
enumerate(arguments)]
for i in range(n_processes):
queue_in.put((None, None))
# Retrieve output queue
output = [queue_out.get() for i in range(len(input))]
# Rejoin processes and return results
for p in processes:
p.join()
return [x for i, x in sorted(output)]
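# Hedged usage sketch (not part of the original module): the helper fans a
# single-argument callable out over worker processes and returns results in the
# original argument order, e.g.
#
#     def slow_square(x):
#         return x * x
#     squares = multiprocess_map(slow_square, range(8), n_processes=4)
#     # squares == [0, 1, 4, 9, 16, 25, 36, 49]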
def process_ired(infiles, outfile, indexfile=None, **kwargs):
    """
    Processes iRED relaxation data from cpptraj output files.
    """
from os import devnull
import re
from subprocess import Popen, PIPE
import pandas as pd
import numpy as np
r1r2noe_datasets = []
s2_datasets = []
# Load data
for i, infile in enumerate(infiles):
with open(devnull, "w") as fnull:
fields = Popen("head -n 1 {0}".format(infile), stdout=PIPE,
stderr=fnull, shell=True).stdout.read().strip()
re_t1t2noe = re.compile(
"^#Vec\s+[\w_]+\[T1\]\s+[\w_]+\[T2\]\s+[\w_]+\[NOE\]$")
re_s2 = re.compile("^#Vec\s+[\w_]+\[S2\]$")
if re.match(re_t1t2noe, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0,
names=["r1", "r2", "noe"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
raw_data["r1"] = 1 / raw_data["r1"]
raw_data["r2"] = 1 / raw_data["r2"]
r1r2noe_datasets.append(raw_data)
elif re.match(re_s2, fields):
raw_data = np.loadtxt(infile, dtype=np.float32)
read_csv_kw = kwargs.get("read_csv_kw",
dict(delim_whitespace=True, header=0, index_col=0, names=["s2"]))
raw_data = pd.read_csv(infile, **read_csv_kw)
s2_datasets.append(raw_data)
else:
            raise Exception("Unrecognized field header in '{0}'".format(infile))
if indexfile is not None:
residue = np.loadtxt(indexfile, dtype=np.str).flatten()
# Process data
items = []
fmt = []
if indexfile is not None:
items.append(("residue", residue))
fmt.append("%12s")
else:
fmt.append("%12d")
if len(r1r2noe_datasets) >= 2:
r1r2noe_mean = pd.concat(r1r2noe_datasets).groupby(level=0).mean()
r1r2noe_std = pd.concat(r1r2noe_datasets).groupby(level=0).std()
items.extend([("r1", r1r2noe_mean["r1"]), ("r1 se", r1r2noe_std["r1"]),
("r2", r1r2noe_mean["r2"]), ("r2 se", r1r2noe_std["r2"]),
("noe", r1r2noe_mean["noe"]), ("noe se", r1r2noe_std["noe"])])
fmt.extend(
["%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f", "%11.5f"])
elif len(r1r2noe_datasets) == 1:
r1r2noe_mean = r1r2noe_datasets[0]
items.extend([("r1", r1r2noe_mean["r1"]), ("r2", r1r2noe_mean["r2"]),
("noe", r1r2noe_mean["noe"])])
fmt.extend(["%11.5f", "%11.5f", "%11.5f"])
if len(s2_datasets) >= 2:
s2_mean = pd.concat(s2_datasets).groupby(level=0).mean()
s2_std = pd.concat(s2_datasets).groupby(level=0).std()
items.extend([("s2", s2_mean["s2"]), ("s2 se", s2_std["s2"])])
fmt.extend(["%11.5f", "%11.5f"])
elif len(s2_datasets) == 1:
s2_mean = s2_datasets[0]
items.extend([("s2", s2_mean["s2"])])
fmt.extend(["%11.5f"])
data = pd.DataFrame.from_items(items)
if indexfile is not None:
data.set_index("residue", inplace=True)
else:
data.index.name = "vector"
columns = [data.index.name] + list(data.columns.values)
header = "{0:<10s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
np.savetxt(outfile, np.column_stack((data.index.values, data.values)),
fmt=fmt, header=header, comments='#')
def process_error(sim_infiles, exp_infiles, outfile, **kwargs):
    """
    Calculates the error of simulated relaxation data relative to experiment.
    """
import pandas as pd
import numpy as np
if len(sim_infiles) != len(exp_infiles):
raise ValueError("""Number of simulation input files must
match number of experimental input files, as they are treated
pairwise. {0} simulation input file(s) and {1} experiment input
file(s) provided.""".format(len(sim_infiles), len(exp_infiles)))
# Work through each pair of infiles
errs = []
final_index = None
for sim_infile, exp_infile in zip(sim_infiles, exp_infiles):
print("Comparing simulation infile '{0}' ".format(
sim_infile) + "with experimental infile '{0}':".format(exp_infile))
# Load infiles and select shared indexes and columns
sim = pd.read_csv(sim_infile, delim_whitespace=True, index_col=0)
exp = pd.read_csv(exp_infile, delim_whitespace=True, index_col=0)
overlap = sim.index.intersection(exp.index)
if final_index is None:
final_index = exp.index
final_index = final_index.union(overlap)
sim = sim.loc[overlap]
exp = exp.loc[overlap]
err_cols = [c for c in sim.columns.values if
not c.endswith(" se") and c in exp.columns.values]
err_se_cols = [c + " se" for c in err_cols if
c + " se" in sim.columns.values and c + " se" in
exp.columns.values]
print(" Files share fields {0} and {1} for {2} residues".format(
str(map(str, err_cols)).replace("'", ""),
str(map(str, err_se_cols)).replace("'", ""), len(overlap)))
# Calculate error of available fields
err = pd.DataFrame(0, index=overlap,
columns=[x for t in zip(err_cols, err_se_cols) for x in t])
err[err_cols] = (
np.abs(exp[err_cols] - sim[err_cols]) / np.abs(exp[err_cols]))
# Calculate uncertainty of error of available fields
if len(err_se_cols) != 0:
err[err_se_cols] = 0
# //@formatter:off
err[err_se_cols] = np.sqrt(
(err[err_cols].values) ** 2 *
((np.sqrt(exp[err_se_cols].values ** 2 +
sim[err_se_cols].values ** 2) /
(exp[err_cols].values - sim[err_cols].values)) ** 2 +
(exp[err_se_cols].values / exp[ err_cols].values) ** 2))
# //@formatter:on
errs.append(err)
# Determine final columns and indexes
final_cols = []
final_index = sorted(final_index, key=lambda x: int(x.split(":")[1]))
for err in errs:
for col in err.columns.values:
if not col in final_cols:
final_cols.append(col)
# Sum the columns
final = pd.DataFrame(0.0, index=final_index, columns=final_cols)
counts = pd.DataFrame(0, index=final_index, columns=final_cols)
for err in errs:
for col in err.columns.values:
if not col.endswith(" se"):
final[col].loc[err.index] += err[col].loc[err.index]
else:
final[col].loc[err.index] += err[col].loc[err.index] ** 2
counts[col].loc[err.index] += 1
# Average the columns
print("Averaging fields:")
for col in final_cols:
if not col.endswith(" se"):
print(" Averaging field '{0}'".format(col))
final[col] /= counts[col]
else:
            print("    Propagating uncertainty for field '{0}'".format(col))
final[col] = np.sqrt(final[col]) / counts[col]
# Write outfile
print(
"Writing outfile '{0}' with fields ".format(outfile) + "{0} for ".format(
str(map(str, final_cols)).replace("'", "")) + "{0} residues".format(
len(final_index)))
header = "residue "
for col in final_cols:
header += "{0:>12s}".format(col)
fmt = ["%12s"] + ["%11.5f"] * len(final_cols)
np.savetxt(outfile, np.column_stack((final.index.values, final.values)),
fmt=fmt, header=header, comments='#')
def process_relax(relax_type, peaklist, infiles, delays, error_method,
        n_synth_datasets, outfile, verbose=1, debug=0, **kwargs):
    """
    Processes experimental R1 or R2 relaxation data from NMRPipe spectra.
    """
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(delays) != len(infiles):
        raise ValueError("Number of delays must match number of infiles")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
for infile, delay in zip(infiles, delays):
if verbose >= 1:
print("Loading intensities from '{0}'".format(infile))
parameters, intensity = nmrglue.pipe.read(infile)
hydrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity,
dim=0).ppm_scale()
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
relax["{0} ms".format(delay)] = relax.apply(calc_intensity, axis=1)
# Calculate relaxation rates
delays = np.array(delays, np.float64) / 1000
def calc_relax(peak, **kwargs):
if verbose >= 1:
print("Calculating relaxation for {0}".format(peak.name))
def model_function(delay, intensity, relaxation):
return intensity * np.exp(-1 * delay * relaxation)
I = np.array(peak.filter(regex=(".*ms")).values, np.float64)
I0, R = curve_fit(model_function, delays, I, p0=(I[0], 1.0))[0]
# Calculate error
if error_method == "rmse":
error = np.sqrt(np.mean((I - model_function(delays, I0, R)) ** 2))
elif error_method == "mae":
error = np.mean(np.sqrt((I - model_function(delays, I0, R)) ** 2))
# Construct synthetic relaxation profiles
synth_datasets = np.zeros((n_synth_datasets, I.size))
for i, I_mean in enumerate(model_function(delays, I0, R)):
synth_datasets[:, i] = np.random.normal(I_mean, error,
n_synth_datasets)
def synth_fit_decay(synth_intensity):
try:
synth_I0, synth_R = \
curve_fit(model_function, delays, synth_intensity,
p0=(I0, R))[0]
return synth_R
except RuntimeError:
if verbose >= 1:
print("Unable to calculate standard error for {0}".format(
peak.name))
return np.nan
# Calculate standard error
synth_Rs = multiprocess_map(synth_fit_decay, synth_datasets, 16)
R_se = np.std(synth_Rs)
return pd.Series([I0, R, R_se])
# Calculate relaxation rates and standard errors
fit = relax.apply(calc_relax, axis=1)
fit.columns = ["I0", relax_type, relax_type + " se"]
relax = relax.join(fit)
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * len(delays) + ["%11d",
"%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_hetnoe(peaklist, infiles, outfile, verbose=1, debug=0, **kwargs):
    """
    Processes experimental heteronuclear NOE relaxation data from NMRPipe spectra.
    """
from glob import glob
from os.path import expandvars
import nmrglue
import numpy as np
import pandas as pd
# Process arguments
processed_infiles = []
for infile in infiles:
processed_infiles += glob(expandvars(infile))
infiles = processed_infiles
if len(infiles) != 2:
        raise ValueError("Heteronuclear NOE processing requires exactly two infiles")
peaklist = expandvars(peaklist)
outfile = expandvars(outfile)
# Load peaklist
if verbose >= 1:
print("Loading peaklist from '{0}'".format(peaklist))
def convert_name(name):
return "{0}:{1}".format(name[-4:-1].upper(), name[2:-4])
relax = pd.read_csv(peaklist, sep="\t", usecols=[2, 3, 4], index_col=2,
converters={4: convert_name}, names=["1H", "15N", "residue"], skiprows=1)
# Load peak intensities from spectra
def calc_intensity(peak, **kwargs):
H_index = np.argmin((hydrogen - peak["1H"]) ** 2)
N_index = np.argmin((nitrogen - peak["15N"]) ** 2)
return intensity[N_index, H_index]
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[0]))
parameters, intensity = nmrglue.pipe.read(infiles[0])
hydrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=1).ppm_scale()
nitrogen = nmrglue.pipe.make_uc(parameters, intensity, dim=0).ppm_scale()
hydrogen += 0.0612858
nitrogen += 0.08399
relax["sat"] = relax.apply(calc_intensity, axis=1)
sat_se = intensity[np.logical_and(intensity > -intensity.std(),
intensity < intensity.std())].std()
print(sat_se)
sat_se = 54588.8
print(sat_se)
if verbose >= 1:
print("Loading intensities from '{0}'".format(infiles[1]))
parameters, intensity = nmrglue.pipe.read(infiles[1])
relax["nosat"] = relax.apply(calc_intensity, axis=1)
nosat_se = intensity[np.logical_and(intensity > -intensity.std(),
intensity < intensity.std())].std()
print(nosat_se)
nosat_se = 58479.8
print(nosat_se)
relax["noe"] = relax["sat"] / relax["nosat"]
relax["noe se"] = np.sqrt(
(sat_se / relax["sat"]) ** 2 + (nosat_se / relax["nosat"]) ** 2) * relax[
"noe"]
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
fmt = ["%12s", "%11.4f", "%11.4f"] + ["%11d"] * 2 + ["%11.4f", "%11.4f"]
np.savetxt(outfile, np.column_stack((relax.index.values, relax.values)),
fmt=fmt, header=header, comments='#')
def process_pre(dia_infile, para_infile, outfile, verbose=1, debug=0,
        **kwargs):
    """
    Processes experimental paramagnetic relaxation enhancement (PRE) data.
    """
from glob import glob
from os.path import expandvars
import numpy as np
import pandas as pd
# Process arguments
dia_infile = glob(expandvars(dia_infile))[0]
para_infile = glob(expandvars(para_infile))[0]
if verbose >= 1:
print(
"Loading diamagnetic relaxation rates from '{0}'".format(dia_infile))
dia_relax = pd.read_csv(dia_infile, index_col=0, delimiter=r"\s\s+")
dia_relax.index.name = "residue"
dia_relax.rename(
columns={"I0": "dia I0", "I0 se": "dia I0 se", "r2": "dia r2",
"r2 se": "dia r2 se", }, inplace=True)
if verbose >= 1:
print("Loading paramagnetic relaxation rates from '{0}'".format(
para_infile))
para_relax = pd.read_csv(para_infile, index_col=0, delimiter=r"\s\s+")
para_relax.index.name = "residue"
para_relax.rename(
columns={"I0": "para I0", "I0 se": "para I0 se", "r2": "para r2",
"r2 se": "para r2 se", }, inplace=True)
relax = dia_relax[
["1H", "15N", "dia I0", "dia I0 se", "dia r2", "dia r2 se"]]
relax = pd.concat(
(relax, para_relax[["para I0", "para I0 se", "para r2", "para r2 se"]]),
axis=1)
# //@formatter:off
relax["I/I0"] = relax["para I0"] / relax["dia I0"]
relax["I/I0 se"] = np.sqrt(relax["I/I0"] ** 2 * \
((relax["para I0 se"] / relax["para I0"]) ** 2 + \
(relax["dia I0 se"] / relax["dia I0"]) ** 2))
relax["r20/r2"] = relax["dia r2"] / relax["para r2"]
relax["r20/r2 se"] = np.sqrt(relax["r20/r2"] ** 2 * \
((relax["dia r2 se"] / relax["dia r2"]) ** 2 + \
(relax["para r2 se"] / relax["para r2"]) ** 2))
relax["rho2"] = relax["para r2"] - relax["dia r2"]
relax["rho2 se"] = np.sqrt(
relax["para r2 se"] ** 2 + relax["dia r2 se"] ** 2)
# //@formatter:on
# Write outfile
if verbose >= 1:
print("Writing outfile '{0}'".format(outfile))
columns = [relax.index.name] + list(relax.columns.values)
header = "{0:<11s}".format(columns.pop(0))
for column in columns:
header += "{0:>12s}".format(column)
with open(outfile, "w") as out:
relax["dia I0"][np.isnan(relax["dia I0"])] = 0
relax["dia I0 se"][np.isnan(relax["dia I0 se"])] = 0
relax["para I0"][np.isnan(relax["para I0"])] = 0
relax["para I0 se"][np.isnan(relax["para I0 se"])] = 0
out.write("#" + header + "\n")
for residue in relax.index:
            # This is an abomination. Why is this the least painful way to
            # write a decent text file?
row = relax.loc[residue]
out.write("{0:12s} {1:11.2f} {2:11.1f} {3:11d} {4:11d} "
"{5:11.2f} {6:11.2f} {7:11d} {8:11d} {9:11.2f} "
"{10:11.2f} {11:11.3f} {12:11.3f} {13:11.3f} "
"{14:11.3f} {15:11.2f} {16:11.2f}\n".format(residue,
row["1H"], row["15N"], int(row["dia I0"]), int(row["dia I0 se"]),
row["dia r2"], row["dia r2 se"], int(row["para I0"]),
int(row["para I0 se"]), row["para r2"], row["para r2 se"],
row["I/I0"], row["I/I0 se"], row["r20/r2"], row["r20/r2 se"],
row["rho2"], row["rho2 se"]))
#################################### MAIN #####################################
if __name__ == "__main__":
import argparse
# Prepare argument parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
subparsers = parser.add_subparsers(dest="mode", description="")
# Prepare iRED subparser
ired_subparser = subparsers.add_parser(name="ired",
help="Process iRED data")
ired_subparser.set_defaults(function=process_ired)
input_group = ired_subparser.add_argument_group("input")
action_group = ired_subparser.add_argument_group("action")
output_group = ired_subparser.add_argument_group("output")
input_group.add_argument("-infile", required=True, dest="infiles",
nargs="+", type=str, help="""cpptraj output file(s) from
which to load datasets; may be plain text or compressed""")
input_group.add_argument("-indexfile", required=False, type=str,
help="""Text file from which to load residue names; if
omitted will be taken from columns of first infile""")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare error subparser
error_subparser = subparsers.add_parser(name="error", help="""Calculates
error of simulated relaxation relative to experiment""",
description="""Calculates error of simulated relaxation relative to
experiment. The intended use case is to break down errors relative to
experimental data collected at multiple magnetic fields or by multiple
groups, error(residue, measurement, magnet/group), into a form that
is easier to visualize and communicate, error(residue, measurement).
Reads in a series of input files containing simulated data and a
series of files containing corresponding experimental data. These
files are treated in pairs and the error between all data points
        present in both (e.g. row 'GLN:2', column 'r1') is calculated. Columns
        ending in '_se' are treated as uncertainties, and are propagated into
        uncertainties in the resulting errors rather than being averaged.
        Take caution when processing datasets lacking uncertainties alongside those
that do (experimental uncertainties are not always reported), as
the resulting uncertainties in the residuals will be incorrect.""")
error_subparser.set_defaults(function=process_error)
input_group = error_subparser.add_argument_group("input")
action_group = error_subparser.add_argument_group("action")
output_group = error_subparser.add_argument_group("output")
input_group.add_argument("-sim_infile", required=True, dest="sim_infiles",
nargs="+", type=str,
help="input file(s) from which to load simulation datasets")
input_group.add_argument("-exp_infile", required=True, dest="exp_infiles",
nargs="+", type=str,
help="input file(s) from which to load experimental datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="Text file to which processed data will be output")
# Prepare relax subparser
relax_subparser = subparsers.add_parser(name="relax",
help="Process experimental R1 or R2 relaxation data")
relax_subparser.set_defaults(function=process_relax)
input_group = relax_subparser.add_argument_group("input")
action_group = relax_subparser.add_argument_group("action")
output_group = relax_subparser.add_argument_group("output")
relax_type = input_group.add_mutually_exclusive_group()
relax_type.add_argument("--r1", action="store_const", const="r1",
default="r1", dest="relax_type", help="process R1 relaxation data")
relax_type.add_argument("--r2", action="store_const", const="r2",
default="r1", dest="relax_type", help="process R2 relaxation data")
relax_type.add_argument("--pre-dia", action="store_const", const="dia",
default="r1", dest="relax_type",
help="process PRE diamagnetic relaxation data")
relax_type.add_argument("--pre-para", action="store_const", const="para",
default="r1", dest="relax_type",
help="process PRE paramagnetic relaxation data")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs="+", type=str,
help="NMR spectra (NMRPipe format)")
input_group.add_argument("-delay", required=True, dest="delays",
metavar="DELAY", nargs="+", type=str,
help="delays (ms); number of delays must match number of infiles")
action_group.add_argument("-synthetics", required=False,
dest="n_synth_datasets", default=100, type=int,
help="number of synthetic datasets to use to calculate error")
error_method = action_group.add_mutually_exclusive_group()
error_method.add_argument("--rmse", action="store_const", const="rmse",
default="rmse", dest="error_method",
help="use root mean square error to generate synthetic datasets")
error_method.add_argument("--mae", action="store_const", const="mae",
default="rmse", dest="error_method",
help="use mean absolute error to generate synthetic datasets")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare hetnoe subparser
hetnoe_subparser = subparsers.add_parser(name="hetnoe",
help="Process experimental heteronuclear NOE relaxation data")
hetnoe_subparser.set_defaults(function=process_hetnoe)
input_group = hetnoe_subparser.add_argument_group("input")
action_group = hetnoe_subparser.add_argument_group("action")
output_group = hetnoe_subparser.add_argument_group("output")
input_group.add_argument("-peaklist", required=True, type=str,
help="peak list (exported from ccpnmr)")
input_group.add_argument("-infile", required=True, dest="infiles",
metavar="INFILE", nargs=2, type=str, help="NMR spectra (NMRPipe format)")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Prepare pre subparser
pre_subparser = subparsers.add_parser(name="pre",
        help="Process experimental paramagnetic relaxation enhancement (PRE) data")
pre_subparser.set_defaults(function=process_pre)
input_group = pre_subparser.add_argument_group("input")
action_group = pre_subparser.add_argument_group("action")
output_group = pre_subparser.add_argument_group("output")
input_group.add_argument("-dia", required=True, dest="dia_infile",
metavar="DIA_INFILE", type=str, help="Diamagnetic relaxation rates")
input_group.add_argument("-para", required=True, dest="para_infile",
metavar="PARA_INFILE", type=str, help="Paramagnetic relaxation rates")
output_group.add_argument("-outfile", required=True, type=str,
help="text file to which processed data will be output")
# Verbosity
for p in subparsers.choices.values():
verbosity = p.add_mutually_exclusive_group()
verbosity.add_argument("-v", "--verbose", action="count", default=1,
help="enable verbose output, may be specified more than once")
verbosity.add_argument("-q", "--quiet", action="store_const", const=0,
default=1, dest="verbose", help="disable verbose output")
# Parse arguments and run selected function
kwargs = vars(parser.parse_args())
kwargs.pop("function")(**kwargs)
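# Hedged usage sketch (not part of the original script): a typical invocation of the
# "relax" subcommand; the module path, spectrum names and delays are invented for
# illustration:
#
#     python -m moldynplot.relaxation relax --r1 -peaklist peaks.tsv \
#         -infile t1_010ms.ft2 t1_050ms.ft2 t1_100ms.ft2 -delay 10 50 100 \
#         -outfile R1.dat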
|
[
"numpy.abs",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.argmin",
"numpy.isnan",
"numpy.exp",
"multiprocessing.Queue",
"numpy.random.normal",
"pandas.DataFrame",
"numpy.std",
"numpy.loadtxt",
"pandas.concat",
"re.match",
"scipy.optimize.curve_fit",
"os.path.expandvars",
"pandas.Series",
"re.compile",
"nmrglue.pipe.read",
"numpy.zeros",
"pandas.DataFrame.from_items",
"numpy.array",
"numpy.column_stack",
"nmrglue.pipe.make_uc",
"numpy.sqrt"
] |
[((1673, 1681), 'multiprocessing.Queue', 'Queue', (['(1)'], {}), '(1)\n', (1678, 1681), False, 'from multiprocessing import Queue, Process\n'), ((1698, 1705), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1703, 1705), False, 'from multiprocessing import Queue, Process\n'), ((5319, 5349), 'pandas.DataFrame.from_items', 'pd.DataFrame.from_items', (['items'], {}), '(items)\n', (5342, 5349), True, 'import pandas as pd\n'), ((8562, 8618), 'pandas.DataFrame', 'pd.DataFrame', (['(0.0)'], {'index': 'final_index', 'columns': 'final_cols'}), '(0.0, index=final_index, columns=final_cols)\n', (8574, 8618), True, 'import pandas as pd\n'), ((8632, 8686), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'final_index', 'columns': 'final_cols'}), '(0, index=final_index, columns=final_cols)\n', (8644, 8686), True, 'import pandas as pd\n'), ((10392, 10412), 'os.path.expandvars', 'expandvars', (['peaklist'], {}), '(peaklist)\n', (10402, 10412), False, 'from os.path import expandvars\n'), ((10427, 10446), 'os.path.expandvars', 'expandvars', (['outfile'], {}), '(outfile)\n', (10437, 10446), False, 'from os.path import expandvars\n'), ((10658, 10802), 'pandas.read_csv', 'pd.read_csv', (['peaklist'], {'sep': '"""\t"""', 'usecols': '[2, 3, 4]', 'index_col': '(2)', 'converters': '{(4): convert_name}', 'names': "['1H', '15N', 'residue']", 'skiprows': '(1)'}), "(peaklist, sep='\\t', usecols=[2, 3, 4], index_col=2, converters=\n {(4): convert_name}, names=['1H', '15N', 'residue'], skiprows=1)\n", (10669, 10802), True, 'import pandas as pd\n'), ((14293, 14313), 'os.path.expandvars', 'expandvars', (['peaklist'], {}), '(peaklist)\n', (14303, 14313), False, 'from os.path import expandvars\n'), ((14328, 14347), 'os.path.expandvars', 'expandvars', (['outfile'], {}), '(outfile)\n', (14338, 14347), False, 'from os.path import expandvars\n'), ((14559, 14703), 'pandas.read_csv', 'pd.read_csv', (['peaklist'], {'sep': '"""\t"""', 'usecols': '[2, 3, 4]', 'index_col': '(2)', 'converters': '{(4): convert_name}', 'names': "['1H', '15N', 'residue']", 'skiprows': '(1)'}), "(peaklist, sep='\\t', usecols=[2, 3, 4], index_col=2, converters=\n {(4): convert_name}, names=['1H', '15N', 'residue'], skiprows=1)\n", (14570, 14703), True, 'import pandas as pd\n'), ((15062, 15091), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infiles[0]'], {}), '(infiles[0])\n', (15079, 15091), False, 'import nmrglue\n'), ((15638, 15667), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infiles[1]'], {}), '(infiles[1])\n', (15655, 15667), False, 'import nmrglue\n'), ((17036, 17093), 'pandas.read_csv', 'pd.read_csv', (['dia_infile'], {'index_col': '(0)', 'delimiter': '"""\\\\s\\\\s+"""'}), "(dia_infile, index_col=0, delimiter='\\\\s\\\\s+')\n", (17047, 17093), True, 'import pandas as pd\n'), ((17406, 17464), 'pandas.read_csv', 'pd.read_csv', (['para_infile'], {'index_col': '(0)', 'delimiter': '"""\\\\s\\\\s+"""'}), "(para_infile, index_col=0, delimiter='\\\\s\\\\s+')\n", (17417, 17464), True, 'import pandas as pd\n'), ((17752, 17846), 'pandas.concat', 'pd.concat', (["(relax, para_relax[['para I0', 'para I0 se', 'para r2', 'para r2 se']])"], {'axis': '(1)'}), "((relax, para_relax[['para I0', 'para I0 se', 'para r2',\n 'para r2 se']]), axis=1)\n", (17761, 17846), True, 'import pandas as pd\n'), ((17958, 18085), 'numpy.sqrt', 'np.sqrt', (["(relax['I/I0'] ** 2 * ((relax['para I0 se'] / relax['para I0']) ** 2 + (\n relax['dia I0 se'] / relax['dia I0']) ** 2))"], {}), "(relax['I/I0'] ** 2 * ((relax['para I0 se'] / relax['para I0']) ** 2 +\n (relax['dia I0 se'] 
/ relax['dia I0']) ** 2))\n", (17965, 18085), True, 'import numpy as np\n'), ((18181, 18310), 'numpy.sqrt', 'np.sqrt', (["(relax['r20/r2'] ** 2 * ((relax['dia r2 se'] / relax['dia r2']) ** 2 + (\n relax['para r2 se'] / relax['para r2']) ** 2))"], {}), "(relax['r20/r2'] ** 2 * ((relax['dia r2 se'] / relax['dia r2']) ** 2 +\n (relax['para r2 se'] / relax['para r2']) ** 2))\n", (18188, 18310), True, 'import numpy as np\n'), ((18402, 18461), 'numpy.sqrt', 'np.sqrt', (["(relax['para r2 se'] ** 2 + relax['dia r2 se'] ** 2)"], {}), "(relax['para r2 se'] ** 2 + relax['dia r2 se'] ** 2)\n", (18409, 18461), True, 'import numpy as np\n'), ((20067, 20163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (20090, 20163), False, 'import argparse\n'), ((2899, 2977), 're.compile', 're.compile', (['"""^#Vec\\\\s+[\\\\w_]+\\\\[T1\\\\]\\\\s+[\\\\w_]+\\\\[T2\\\\]\\\\s+[\\\\w_]+\\\\[NOE\\\\]$"""'], {}), "('^#Vec\\\\s+[\\\\w_]+\\\\[T1\\\\]\\\\s+[\\\\w_]+\\\\[T2\\\\]\\\\s+[\\\\w_]+\\\\[NOE\\\\]$')\n", (2909, 2977), False, 'import re\n'), ((2993, 3032), 're.compile', 're.compile', (['"""^#Vec\\\\s+[\\\\w_]+\\\\[S2\\\\]$"""'], {}), "('^#Vec\\\\s+[\\\\w_]+\\\\[S2\\\\]$')\n", (3003, 3032), False, 'import re\n'), ((3040, 3068), 're.match', 're.match', (['re_t1t2noe', 'fields'], {}), '(re_t1t2noe, fields)\n', (3048, 3068), False, 'import re\n'), ((5676, 5725), 'numpy.column_stack', 'np.column_stack', (['(data.index.values, data.values)'], {}), '((data.index.values, data.values))\n', (5691, 5725), True, 'import numpy as np\n'), ((6583, 6642), 'pandas.read_csv', 'pd.read_csv', (['sim_infile'], {'delim_whitespace': '(True)', 'index_col': '(0)'}), '(sim_infile, delim_whitespace=True, index_col=0)\n', (6594, 6642), True, 'import pandas as pd\n'), ((6657, 6716), 'pandas.read_csv', 'pd.read_csv', (['exp_infile'], {'delim_whitespace': '(True)', 'index_col': '(0)'}), '(exp_infile, delim_whitespace=True, index_col=0)\n', (6668, 6716), True, 'import pandas as pd\n'), ((9745, 9796), 'numpy.column_stack', 'np.column_stack', (['(final.index.values, final.values)'], {}), '((final.index.values, final.values))\n', (9760, 9796), True, 'import numpy as np\n'), ((11015, 11040), 'nmrglue.pipe.read', 'nmrglue.pipe.read', (['infile'], {}), '(infile)\n', (11032, 11040), False, 'import nmrglue\n'), ((11566, 11594), 'numpy.array', 'np.array', (['delays', 'np.float64'], {}), '(delays, np.float64)\n', (11574, 11594), True, 'import numpy as np\n'), ((12330, 12366), 'numpy.zeros', 'np.zeros', (['(n_synth_datasets, I.size)'], {}), '((n_synth_datasets, I.size))\n', (12338, 12366), True, 'import numpy as np\n'), ((13107, 13123), 'numpy.std', 'np.std', (['synth_Rs'], {}), '(synth_Rs)\n', (13113, 13123), True, 'import numpy as np\n'), ((13140, 13164), 'pandas.Series', 'pd.Series', (['[I0, R, R_se]'], {}), '([I0, R, R_se])\n', (13149, 13164), True, 'import pandas as pd\n'), ((13750, 13801), 'numpy.column_stack', 'np.column_stack', (['(relax.index.values, relax.values)'], {}), '((relax.index.values, relax.values))\n', (13765, 13801), True, 'import numpy as np\n'), ((14803, 14842), 'numpy.argmin', 'np.argmin', (["((hydrogen - peak['1H']) ** 2)"], {}), "((hydrogen - peak['1H']) ** 2)\n", (14812, 14842), True, 'import numpy as np\n'), ((14861, 14901), 'numpy.argmin', 'np.argmin', (["((nitrogen - peak['15N']) ** 2)"], {}), "((nitrogen - peak['15N']) ** 2)\n", (14870, 14901), True, 'import 
numpy as np\n'), ((15972, 16044), 'numpy.sqrt', 'np.sqrt', (["((sat_se / relax['sat']) ** 2 + (nosat_se / relax['nosat']) ** 2)"], {}), "((sat_se / relax['sat']) ** 2 + (nosat_se / relax['nosat']) ** 2)\n", (15979, 16044), True, 'import numpy as np\n'), ((16472, 16523), 'numpy.column_stack', 'np.column_stack', (['(relax.index.values, relax.values)'], {}), '((relax.index.values, relax.values))\n', (16487, 16523), True, 'import numpy as np\n'), ((3093, 3129), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {'dtype': 'np.float32'}), '(infile, dtype=np.float32)\n', (3103, 3129), True, 'import numpy as np\n'), ((3314, 3348), 'pandas.read_csv', 'pd.read_csv', (['infile'], {}), '(infile, **read_csv_kw)\n', (3325, 3348), True, 'import pandas as pd\n'), ((3504, 3527), 're.match', 're.match', (['re_s2', 'fields'], {}), '(re_s2, fields)\n', (3512, 3527), False, 'import re\n'), ((7644, 7681), 'numpy.abs', 'np.abs', (['(exp[err_cols] - sim[err_cols])'], {}), '(exp[err_cols] - sim[err_cols])\n', (7650, 7681), True, 'import numpy as np\n'), ((7684, 7705), 'numpy.abs', 'np.abs', (['exp[err_cols]'], {}), '(exp[err_cols])\n', (7690, 7705), True, 'import numpy as np\n'), ((10272, 10290), 'os.path.expandvars', 'expandvars', (['infile'], {}), '(infile)\n', (10282, 10290), False, 'from os.path import expandvars\n'), ((11292, 11331), 'numpy.argmin', 'np.argmin', (["((hydrogen - peak['1H']) ** 2)"], {}), "((hydrogen - peak['1H']) ** 2)\n", (11301, 11331), True, 'import numpy as np\n'), ((11354, 11394), 'numpy.argmin', 'np.argmin', (["((nitrogen - peak['15N']) ** 2)"], {}), "((nitrogen - peak['15N']) ** 2)\n", (11363, 11394), True, 'import numpy as np\n'), ((11942, 11994), 'scipy.optimize.curve_fit', 'curve_fit', (['model_function', 'delays', 'I'], {'p0': '(I[0], 1.0)'}), '(model_function, delays, I, p0=(I[0], 1.0))\n', (11951, 11994), False, 'from scipy.optimize import curve_fit\n'), ((12469, 12518), 'numpy.random.normal', 'np.random.normal', (['I_mean', 'error', 'n_synth_datasets'], {}), '(I_mean, error, n_synth_datasets)\n', (12485, 12518), True, 'import numpy as np\n'), ((14183, 14201), 'os.path.expandvars', 'expandvars', (['infile'], {}), '(infile)\n', (14193, 14201), False, 'from os.path import expandvars\n'), ((15107, 15157), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(1)'}), '(parameters, intensity, dim=1)\n', (15127, 15157), False, 'import nmrglue\n'), ((15185, 15235), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(0)'}), '(parameters, intensity, dim=0)\n', (15205, 15235), False, 'import nmrglue\n'), ((16825, 16847), 'os.path.expandvars', 'expandvars', (['dia_infile'], {}), '(dia_infile)\n', (16835, 16847), False, 'from os.path import expandvars\n'), ((16875, 16898), 'os.path.expandvars', 'expandvars', (['para_infile'], {}), '(para_infile)\n', (16885, 16898), False, 'from os.path import expandvars\n'), ((18828, 18853), 'numpy.isnan', 'np.isnan', (["relax['dia I0']"], {}), "(relax['dia I0'])\n", (18836, 18853), True, 'import numpy as np\n'), ((18886, 18914), 'numpy.isnan', 'np.isnan', (["relax['dia I0 se']"], {}), "(relax['dia I0 se'])\n", (18894, 18914), True, 'import numpy as np\n'), ((18945, 18971), 'numpy.isnan', 'np.isnan', (["relax['para I0']"], {}), "(relax['para I0'])\n", (18953, 18971), True, 'import numpy as np\n'), ((19005, 19034), 'numpy.isnan', 'np.isnan', (["relax['para I0 se']"], {}), "(relax['para I0 se'])\n", (19013, 19034), True, 'import numpy as np\n'), ((3552, 3588), 'numpy.loadtxt', 'np.loadtxt', 
(['infile'], {'dtype': 'np.float32'}), '(infile, dtype=np.float32)\n', (3562, 3588), True, 'import numpy as np\n'), ((3744, 3778), 'pandas.read_csv', 'pd.read_csv', (['infile'], {}), '(infile, **read_csv_kw)\n', (3755, 3778), True, 'import pandas as pd\n'), ((3912, 3947), 'numpy.loadtxt', 'np.loadtxt', (['indexfile'], {'dtype': 'np.str'}), '(indexfile, dtype=np.str)\n', (3922, 3947), True, 'import numpy as np\n'), ((9326, 9345), 'numpy.sqrt', 'np.sqrt', (['final[col]'], {}), '(final[col])\n', (9333, 9345), True, 'import numpy as np\n'), ((11060, 11110), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(1)'}), '(parameters, intensity, dim=1)\n', (11080, 11110), False, 'import nmrglue\n'), ((11152, 11202), 'nmrglue.pipe.make_uc', 'nmrglue.pipe.make_uc', (['parameters', 'intensity'], {'dim': '(0)'}), '(parameters, intensity, dim=0)\n', (11172, 11202), False, 'import nmrglue\n'), ((11824, 11855), 'numpy.exp', 'np.exp', (['(-1 * delay * relaxation)'], {}), '(-1 * delay * relaxation)\n', (11830, 11855), True, 'import numpy as np\n'), ((12655, 12717), 'scipy.optimize.curve_fit', 'curve_fit', (['model_function', 'delays', 'synth_intensity'], {'p0': '(I0, R)'}), '(model_function, delays, synth_intensity, p0=(I0, R))\n', (12664, 12717), False, 'from scipy.optimize import curve_fit\n'), ((4201, 4228), 'pandas.concat', 'pd.concat', (['r1r2noe_datasets'], {}), '(r1r2noe_datasets)\n', (4210, 4228), True, 'import pandas as pd\n'), ((4275, 4302), 'pandas.concat', 'pd.concat', (['r1r2noe_datasets'], {}), '(r1r2noe_datasets)\n', (4284, 4302), True, 'import pandas as pd\n'), ((4943, 4965), 'pandas.concat', 'pd.concat', (['s2_datasets'], {}), '(s2_datasets)\n', (4952, 4965), True, 'import pandas as pd\n'), ((5007, 5029), 'pandas.concat', 'pd.concat', (['s2_datasets'], {}), '(s2_datasets)\n', (5016, 5029), True, 'import pandas as pd\n'), ((7968, 8036), 'numpy.sqrt', 'np.sqrt', (['(exp[err_se_cols].values ** 2 + sim[err_se_cols].values ** 2)'], {}), '(exp[err_se_cols].values ** 2 + sim[err_se_cols].values ** 2)\n', (7975, 8036), True, 'import numpy as np\n')]
|
"""
Certain periodic packets are sent by connected MUDs (is-alive, user-cache,
etc). The IMC2 protocol assumes that each connected MUD will capture these and
populate/maintain their own lists of other servers connected. This module
contains stuff like this.
"""
from time import time
class IMC2Mud(object):
"""
Stores information about other games connected to our current IMC2 network.
"""
def __init__(self, packet):
self.name = packet.origin
self.versionid = packet.optional_data.get('versionid', None)
self.networkname = packet.optional_data.get('networkname', None)
self.url = packet.optional_data.get('url', None)
self.host = packet.optional_data.get('host', None)
self.port = packet.optional_data.get('port', None)
self.sha256 = packet.optional_data.get('sha256', None)
# This is used to determine when a Mud has fallen into inactive status.
self.last_updated = time()
class IMC2MudList(object):
"""
Keeps track of other MUDs connected to the IMC network.
"""
def __init__(self):
# Mud list is stored in a dict, key being the IMC Mud name.
self.mud_list = {}
def get_mud_list(self):
"""
Returns a sorted list of connected Muds.
"""
muds = self.mud_list.items()
muds.sort()
return [value for key, value in muds]
def update_mud_from_packet(self, packet):
"""
This grabs relevant info from the packet and stuffs it in the
Mud list for later retrieval.
"""
mud = IMC2Mud(packet)
self.mud_list[mud.name] = mud
def remove_mud_from_packet(self, packet):
"""
Removes a mud from the Mud list when given a packet.
"""
mud = IMC2Mud(packet)
try:
del self.mud_list[mud.name]
except KeyError:
# No matching entry, no big deal.
pass
class IMC2Channel(object):
"""
Stores information about channels available on the network.
"""
def __init__(self, packet):
self.localname = packet.optional_data.get('localname', None)
self.name = packet.optional_data.get('channel', None)
self.level = packet.optional_data.get('level', None)
self.owner = packet.optional_data.get('owner', None)
self.policy = packet.optional_data.get('policy', None)
self.last_updated = time()
class IMC2ChanList(object):
"""
    Keeps track of channels available on the IMC network.
"""
def __init__(self):
        # Chan list is stored in a dict, key being the IMC channel name.
self.chan_list = {}
def get_channel_list(self):
"""
Returns a sorted list of cached channels.
"""
channels = self.chan_list.items()
channels.sort()
return [value for key, value in channels]
def update_channel_from_packet(self, packet):
"""
This grabs relevant info from the packet and stuffs it in the
channel list for later retrieval.
"""
channel = IMC2Channel(packet)
self.chan_list[channel.name] = channel
def remove_channel_from_packet(self, packet):
"""
Removes a channel from the Channel list when given a packet.
"""
channel = IMC2Channel(packet)
try:
del self.chan_list[channel.name]
except KeyError:
# No matching entry, no big deal.
pass
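# Hedged usage sketch (not part of the original module): any packet object exposing an
# ``origin`` attribute and an ``optional_data`` dict can be cached, e.g.
#
#     mud_list = IMC2MudList()
#     mud_list.update_mud_from_packet(is_alive_packet)
#     for mud in mud_list.get_mud_list():
#         print(mud.name, mud.last_updated)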
|
[
"time.time"
] |
[((979, 985), 'time.time', 'time', ([], {}), '()\n', (983, 985), False, 'from time import time\n'), ((2495, 2501), 'time.time', 'time', ([], {}), '()\n', (2499, 2501), False, 'from time import time\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import VoigtFit
import pickle
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800
wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)
dataset = VoigtFit.DataSet(z_DLA)
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)
### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')
### This command prepares the line regions:
# First the data are interactively normalized
# Then regions which should not be fitted are masked interactively too
dataset.prepare_dataset()
# Save the dataset so you don't have to normalize and mask every time:
VoigtFit.SaveDataSet('test.dataset', dataset)
### The dataset which was defined above can be loaded like this:
# In this case, comment out lines 18-41
#dataset = VoigtFit.LoadDataSet('test.dataset')
### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
dataset.reset_components()
### Add velocity components for each ion:
# ion z b logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components('ZnII', 'FeII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this initial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components('CrII', 'FeII', logN=13.6, ref_comp=1)
dataset.copy_components('MgI', 'FeII', logN=12.4, ref_comp=1)
dataset.prepare_dataset()
popt, chi2 = dataset.fit(verbose=True)
dataset.plot_fit()
if logNHI:
dataset.print_metallicity(*logNHI)
dataset.print_abundance()
#### Remove parameter links
#### The links may result in errors when loading the parameters later.
for par in popt.params.values():
par.expr = None
for par in dataset.pars.values():
par.expr = None
pickle.dump(popt.params, open('example_best_fit.pars','w'))
VoigtFit.SaveDataSet('example_fit.dataset', dataset)
|
[
"VoigtFit.DataSet",
"numpy.loadtxt",
"VoigtFit.SaveDataSet"
] |
[((424, 458), 'numpy.loadtxt', 'np.loadtxt', (['UVB_fname'], {'unpack': '(True)'}), '(UVB_fname, unpack=True)\n', (434, 458), True, 'import numpy as np\n'), ((487, 521), 'numpy.loadtxt', 'np.loadtxt', (['VIS_fname'], {'unpack': '(True)'}), '(VIS_fname, unpack=True)\n', (497, 521), True, 'import numpy as np\n'), ((533, 556), 'VoigtFit.DataSet', 'VoigtFit.DataSet', (['z_DLA'], {}), '(z_DLA)\n', (549, 556), False, 'import VoigtFit\n'), ((1251, 1296), 'VoigtFit.SaveDataSet', 'VoigtFit.SaveDataSet', (['"""test.dataset"""', 'dataset'], {}), "('test.dataset', dataset)\n", (1271, 1296), False, 'import VoigtFit\n'), ((3951, 4003), 'VoigtFit.SaveDataSet', 'VoigtFit.SaveDataSet', (['"""example_fit.dataset"""', 'dataset'], {}), "('example_fit.dataset', dataset)\n", (3971, 4003), False, 'import VoigtFit\n')]
|
import mock
import pytest
from nengo_spinnaker.utils import application
@pytest.mark.parametrize("app_name", ["Arthur", "Robin"])
def test_get_application(app_name):
with mock.patch.object(application, "pkg_resources") as pkg_resources:
pkg_resources.resource_filename.return_value = "Camelot"
# Get the application filename
assert application.get_application(app_name) == "Camelot"
pkg_resources.resource_filename.assert_called_once_with(
"nengo_spinnaker", "binaries/nengo_{}.aplx".format(app_name)
)
|
[
"pytest.mark.parametrize",
"mock.patch.object",
"nengo_spinnaker.utils.application.get_application"
] |
[((76, 132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""app_name"""', "['Arthur', 'Robin']"], {}), "('app_name', ['Arthur', 'Robin'])\n", (99, 132), False, 'import pytest\n'), ((178, 225), 'mock.patch.object', 'mock.patch.object', (['application', '"""pkg_resources"""'], {}), "(application, 'pkg_resources')\n", (195, 225), False, 'import mock\n'), ((364, 401), 'nengo_spinnaker.utils.application.get_application', 'application.get_application', (['app_name'], {}), '(app_name)\n', (391, 401), False, 'from nengo_spinnaker.utils import application\n')]
|
from functions import _read, write_lines
import re
a, b = _read("1.en"), _read("1.ne")
# For English
# Joins an incomplete line to the line above
i = 1
while i < len(a):
if re.match("^([a-z0-9])+[^0-9i\.\)]", a[i]):
a[i-1] = a[i-1].strip() + ' ' + a[i].strip()
del(a[i])
else:
i += 1
# Joins a numeral line to the next line
i = 0
while i < len(a)-1:
if len(a[i]) < 3 and re.match("^([a-z0-9]){1,2}[\.\)]\s*", a[i]):
a[i] = a[i].strip() + ' ' + a[i+1].strip()
del(a[i+1])
i += 1
write_lines(a, "1_bpf.en")
# For Nepali
# Removes lines with only purnabiraams
i = 0
while i < len(b):
if re.match("^\।", b[i]):
del(b[i])
i += 1
# Joins a numeral line to the next line
i = 0
while i < len(b)-1:
if len(b[i]) < 3 and re.match("^([a-z0-9]){1,2}[\.\)]\s*", b[i]):
b[i] = b[i].strip() + ' ' + b[i+1].strip()
del(b[i+1])
i += 1
write_lines(b, "1_bpf.ne")
|
[
"functions._read",
"re.match",
"functions.write_lines"
] |
[((521, 547), 'functions.write_lines', 'write_lines', (['a', '"""1_bpf.en"""'], {}), "(a, '1_bpf.en')\n", (532, 547), False, 'from functions import _read, write_lines\n'), ((894, 920), 'functions.write_lines', 'write_lines', (['b', '"""1_bpf.ne"""'], {}), "(b, '1_bpf.ne')\n", (905, 920), False, 'from functions import _read, write_lines\n'), ((62, 75), 'functions._read', '_read', (['"""1.en"""'], {}), "('1.en')\n", (67, 75), False, 'from functions import _read, write_lines\n'), ((77, 90), 'functions._read', '_read', (['"""1.ne"""'], {}), "('1.ne')\n", (82, 90), False, 'from functions import _read, write_lines\n'), ((187, 230), 're.match', 're.match', (['"""^([a-z0-9])+[^0-9i\\\\.\\\\)]"""', 'a[i]'], {}), "('^([a-z0-9])+[^0-9i\\\\.\\\\)]', a[i])\n", (195, 230), False, 'import re\n'), ((637, 659), 're.match', 're.match', (['"""^\\\\।"""', 'b[i]'], {}), "('^\\\\।', b[i])\n", (645, 659), False, 'import re\n'), ((403, 449), 're.match', 're.match', (['"""^([a-z0-9]){1,2}[\\\\.\\\\)]\\\\s*"""', 'a[i]'], {}), "('^([a-z0-9]){1,2}[\\\\.\\\\)]\\\\s*', a[i])\n", (411, 449), False, 'import re\n'), ((776, 822), 're.match', 're.match', (['"""^([a-z0-9]){1,2}[\\\\.\\\\)]\\\\s*"""', 'b[i]'], {}), "('^([a-z0-9]){1,2}[\\\\.\\\\)]\\\\s*', b[i])\n", (784, 822), False, 'import re\n')]
|
from time import time
import os
import numpy as np
from scipy.stats import multivariate_normal
from experiments.lnpdfs.create_target_lnpfs import build_Goodwin_grad
from sampler.SVGD.python.svgd import SVGD as SVGD
unknown_params = [1, 2] + np.arange(4, 12).tolist()
num_dimensions = len(unknown_params)
seed=1
target_lnpdf = build_Goodwin_grad(unknown_params, seed=seed, sigma=np.sqrt(0.2),
parameters=np.array([10., 1.97, 0.46, 0.53,
0.02878028, 0.13585575, 1.57070286, 0.75737477,
0.28929913, 1.52671658, 1.26995194, 1.89562767]))
def dlnpdf(theta):
input = np.atleast_2d(theta)
dlnpdf.counter += len(input)
return target_lnpdf(input)[1]
dlnpdf.counter = 0
def sample(n_samps, n_iter, epsilon, path):
if path is not None:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
prior = multivariate_normal(np.zeros((num_dimensions)), np.eye(num_dimensions))
x0 = prior.rvs(n_samps)
start = time()
samples = SVGD().update(x0, dlnpdf, n_iter=n_iter, stepsize=epsilon, path=path)
end = time()
np.savez(path, samples=samples, wallclocktime=end-start, nfevals=dlnpdf.counter)
print("done")
if __name__ == '__main__':
sample(100, 100, 1e-2, "/tmp/svgd_frisk_test")
|
[
"numpy.atleast_2d",
"os.makedirs",
"os.path.dirname",
"numpy.zeros",
"os.path.exists",
"time.time",
"numpy.array",
"numpy.arange",
"numpy.eye",
"numpy.savez",
"sampler.SVGD.python.svgd.SVGD",
"numpy.sqrt"
] |
[((711, 731), 'numpy.atleast_2d', 'np.atleast_2d', (['theta'], {}), '(theta)\n', (724, 731), True, 'import numpy as np\n'), ((1126, 1132), 'time.time', 'time', ([], {}), '()\n', (1130, 1132), False, 'from time import time\n'), ((1227, 1233), 'time.time', 'time', ([], {}), '()\n', (1231, 1233), False, 'from time import time\n'), ((1238, 1325), 'numpy.savez', 'np.savez', (['path'], {'samples': 'samples', 'wallclocktime': '(end - start)', 'nfevals': 'dlnpdf.counter'}), '(path, samples=samples, wallclocktime=end - start, nfevals=dlnpdf.\n counter)\n', (1246, 1325), True, 'import numpy as np\n'), ((379, 391), 'numpy.sqrt', 'np.sqrt', (['(0.2)'], {}), '(0.2)\n', (386, 391), True, 'import numpy as np\n'), ((438, 573), 'numpy.array', 'np.array', (['[10.0, 1.97, 0.46, 0.53, 0.02878028, 0.13585575, 1.57070286, 0.75737477, \n 0.28929913, 1.52671658, 1.26995194, 1.89562767]'], {}), '([10.0, 1.97, 0.46, 0.53, 0.02878028, 0.13585575, 1.57070286, \n 0.75737477, 0.28929913, 1.52671658, 1.26995194, 1.89562767])\n', (446, 573), True, 'import numpy as np\n'), ((907, 928), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (922, 928), False, 'import os\n'), ((1034, 1058), 'numpy.zeros', 'np.zeros', (['num_dimensions'], {}), '(num_dimensions)\n', (1042, 1058), True, 'import numpy as np\n'), ((1062, 1084), 'numpy.eye', 'np.eye', (['num_dimensions'], {}), '(num_dimensions)\n', (1068, 1084), True, 'import numpy as np\n'), ((242, 258), 'numpy.arange', 'np.arange', (['(4)', '(12)'], {}), '(4, 12)\n', (251, 258), True, 'import numpy as np\n'), ((944, 967), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (958, 967), False, 'import os\n'), ((981, 1001), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (992, 1001), False, 'import os\n'), ((1147, 1153), 'sampler.SVGD.python.svgd.SVGD', 'SVGD', ([], {}), '()\n', (1151, 1153), True, 'from sampler.SVGD.python.svgd import SVGD as SVGD\n')]
|
from scipy import stats
import sys
import utils.name_convention as name
from similarity.SimTopicLists import SimTopicLists
if len(sys.argv) <= 1:
src = "pp_reuters"
else:
src = sys.argv[1]
stl = SimTopicLists()
distance_list, rank_list = [], []
jtotal, ktotal, cos_total, kl_total, bha_total = [], [], [], [], []
jtotal_rank, ktotal_rank, costotal_rank, kltotal_rank, bhatotal_rank = [], [], [], [], []
for corpus_type in ["tfidf", "bow", "binary"]:
for topics_count in [10,20,30,40,50]:
dname = name.get_output_dir(corpus_type, topics_count, src)
ofile = open(dname + "/sim_jaccard.txt", "r")
jlist = stl.read_distance_list(ofile)
jtotal.extend(jlist)
ofile = open(dname + "/sim_kendall.txt", "r")
klist = stl.read_distance_list(ofile)
ktotal.extend(klist)
ofile = open(dname + "/sim_cosine.txt", "r")
cos_list = stl.read_distance_list(ofile)
cos_total.extend(cos_list)
ofile = open(dname + "/sim_kl.txt", "r")
kl_list = stl.read_distance_list(ofile)
kl_total.extend(kl_list)
ofile = open(dname + "/sim_bha.txt", "r")
bha_list = stl.read_distance_list(ofile)
bha_total.extend(bha_list)
jrank = stl.give_dist_names(jlist, topics_count, corpus_type)
jtotal_rank.extend(jrank)
krank = stl.give_dist_names(klist, topics_count, corpus_type)
ktotal_rank.extend(krank)
cos_rank = stl.give_dist_names(cos_list, topics_count, corpus_type)
costotal_rank.extend(cos_rank)
kl_rank = stl.give_dist_names(kl_list, topics_count, corpus_type)
kltotal_rank.extend(kl_rank)
bha_rank = stl.give_dist_names(bha_list, topics_count, corpus_type)
bhatotal_rank.extend(bha_rank)
distance_list.append(("jaccard", jtotal))
distance_list.append(("kendall", ktotal))
distance_list.append(("cos", cos_total))
distance_list.append(("kl", kl_total))
distance_list.append(("bha", bha_total))
jtotal_rank = list(sorted(jtotal_rank, key=lambda x:x[1]))
jtotal_rank = [v[0] for v in jtotal_rank]
ktotal_rank = list(sorted(ktotal_rank, key=lambda x:x[1]))
ktotal_rank = [v[0] for v in ktotal_rank]
costotal_rank = list(sorted(costotal_rank, key=lambda x:x[1]))
costotal_rank = [v[0] for v in costotal_rank]
kltotal_rank = list(sorted(kltotal_rank, key=lambda x:x[1]))
kltotal_rank = [v[0] for v in kltotal_rank]
bhatotal_rank = list(sorted(bhatotal_rank, key=lambda x:x[1]))
bhatotal_rank = [v[0] for v in bhatotal_rank]
rank_list.append(("jaccard", jtotal_rank))
rank_list.append(("kendall", ktotal_rank))
rank_list.append(("cos", costotal_rank))
rank_list.append(("kl", kltotal_rank))
rank_list.append(("bha", bhatotal_rank))
ofile = open("sim_correlation.txt", "w")
for index, list1 in enumerate(distance_list[1:]):
for list2 in distance_list[:index+1]:
sim_values1 = list1[1]
sim_values2 = list2[1]
ofile.write(list1[0]+" " + list2[0]+" : ")
ofile.write(str(stats.pearsonr(sim_values1, sim_values2))+"\n")
ofile = open("sim_rank.txt","w")
for index, list1 in enumerate(rank_list[1:]):
for list2 in rank_list[:index+1]:
sim_values1 = list1[1]
sim_values2 = list2[1]
ofile.write(list1[0]+" " + list2[0]+" : ")
ofile.write(str(stats.kendalltau(sim_values1, sim_values2))+"\n")
|
[
"scipy.stats.kendalltau",
"utils.name_convention.get_output_dir",
"similarity.SimTopicLists.SimTopicLists",
"scipy.stats.pearsonr"
] |
[((205, 220), 'similarity.SimTopicLists.SimTopicLists', 'SimTopicLists', ([], {}), '()\n', (218, 220), False, 'from similarity.SimTopicLists import SimTopicLists\n'), ((520, 571), 'utils.name_convention.get_output_dir', 'name.get_output_dir', (['corpus_type', 'topics_count', 'src'], {}), '(corpus_type, topics_count, src)\n', (539, 571), True, 'import utils.name_convention as name\n'), ((3003, 3043), 'scipy.stats.pearsonr', 'stats.pearsonr', (['sim_values1', 'sim_values2'], {}), '(sim_values1, sim_values2)\n', (3017, 3043), False, 'from scipy import stats\n'), ((3308, 3350), 'scipy.stats.kendalltau', 'stats.kendalltau', (['sim_values1', 'sim_values2'], {}), '(sim_values1, sim_values2)\n', (3324, 3350), False, 'from scipy import stats\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import fortytwocli.init as init_
import fortytwocli.status as status_
import fortytwocli.project as project
import fortytwocli.util as util
import fortytwocli.ipCalc as ip
@click.group()
def fourtyTwo():
pass
@fourtyTwo.command(help="initializes settings.")
def init():
init_.init()
@fourtyTwo.command(help="shows your status.")
def status():
try:
util.checkConfigExists()
status_.showStatus()
except Exception as e:
click.secho(str(e), fg='red')
@fourtyTwo.command(name="clone-project", help="clone project.")
def cloneProject():
try:
util.checkConfigExists()
project.cloneProject()
except Exception as e:
click.secho(str(e), fg='red')
@fourtyTwo.command(name="ip", help="launch ip address calculator.")
def ipCalc():
ip.calc()
def main():
fourtyTwo()
|
[
"fortytwocli.init.init",
"fortytwocli.status.showStatus",
"fortytwocli.project.cloneProject",
"fortytwocli.ipCalc.calc",
"click.group",
"fortytwocli.util.checkConfigExists"
] |
[((236, 249), 'click.group', 'click.group', ([], {}), '()\n', (247, 249), False, 'import click\n'), ((343, 355), 'fortytwocli.init.init', 'init_.init', ([], {}), '()\n', (353, 355), True, 'import fortytwocli.init as init_\n'), ((866, 875), 'fortytwocli.ipCalc.calc', 'ip.calc', ([], {}), '()\n', (873, 875), True, 'import fortytwocli.ipCalc as ip\n'), ((435, 459), 'fortytwocli.util.checkConfigExists', 'util.checkConfigExists', ([], {}), '()\n', (457, 459), True, 'import fortytwocli.util as util\n'), ((468, 488), 'fortytwocli.status.showStatus', 'status_.showStatus', ([], {}), '()\n', (486, 488), True, 'import fortytwocli.status as status_\n'), ((657, 681), 'fortytwocli.util.checkConfigExists', 'util.checkConfigExists', ([], {}), '()\n', (679, 681), True, 'import fortytwocli.util as util\n'), ((690, 712), 'fortytwocli.project.cloneProject', 'project.cloneProject', ([], {}), '()\n', (710, 712), True, 'import fortytwocli.project as project\n')]
|
from unittest import TestCase
from grid_path.string_permutations import Permutations
class TestPermutations(TestCase):
def test_get_permutations_with_empty_string(self):
self.assertEqual(Permutations('').get_permutations(), set(['']))
def test_get_permutations_with_one_letter_word(self):
self.assertEqual(Permutations('A').get_permutations(), set(['A']))
def test_get_permutations_with_two_letters_word(self):
self.assertEqual(Permutations('AB').get_permutations(), set(['AB', 'BA']))
def test_get_permutations_with_three_letters_word(self):
self.assertEqual(Permutations('ABC').get_permutations(), set(['ABC', 'ACB', 'BAC', 'BCA', 'CAB', 'CBA']))
def test_get_permutations_with_same_letter_word(self):
self.assertEqual(Permutations('AA').get_permutations(), set(['AA']))
|
[
"grid_path.string_permutations.Permutations"
] |
[((201, 217), 'grid_path.string_permutations.Permutations', 'Permutations', (['""""""'], {}), "('')\n", (213, 217), False, 'from grid_path.string_permutations import Permutations\n'), ((333, 350), 'grid_path.string_permutations.Permutations', 'Permutations', (['"""A"""'], {}), "('A')\n", (345, 350), False, 'from grid_path.string_permutations import Permutations\n'), ((468, 486), 'grid_path.string_permutations.Permutations', 'Permutations', (['"""AB"""'], {}), "('AB')\n", (480, 486), False, 'from grid_path.string_permutations import Permutations\n'), ((614, 633), 'grid_path.string_permutations.Permutations', 'Permutations', (['"""ABC"""'], {}), "('ABC')\n", (626, 633), False, 'from grid_path.string_permutations import Permutations\n'), ((789, 807), 'grid_path.string_permutations.Permutations', 'Permutations', (['"""AA"""'], {}), "('AA')\n", (801, 807), False, 'from grid_path.string_permutations import Permutations\n')]
|
#!/usr/bin/env python3
# -*- mode: python; coding: utf-8 -*-
# By HarJIT in 2020. MIT/Expat licence.
import os, xml.dom.minidom, shutil, re, glob
svgpresattrs = ("alignment-baseline", "baseline-shift", "clip", "clip-path", "clip-rule", "color",
"color-interpolation", "color-interpolation-filters", "color-profile", "color-rendering", "cursor",
"direction", "display", "dominant-baseline", "enable-background", "fill", "fill-opacity",
"fill-rule", "filter", "flood-color", "flood-opacity", "font-family", "font-size",
"font-size-adjust", "font-stretch", "font-style", "font-variant", "font-weight",
"glyph-orientation-horizontal", "glyph-orientation-vertical", "image-rendering", "kerning",
"letter-spacing", "lighting-color", "marker-end", "marker-mid", "marker-start", "mask", "opacity",
"overflow", "pointer-events", "shape-rendering", "solid-color", "solid-opacity", "stop-color",
"stop-opacity", "stroke", "stroke-dasharray", "stroke-dashoffset", "stroke-linecap",
"stroke-linejoin", "stroke-miterlimit", "stroke-opacity", "stroke-width", "text-anchor",
"text-decoration", "text-rendering", "transform", "unicode-bidi", "vector-effect",
"visibility", "word-spacing", "writing-mode")
needlessline = re.compile("(?m)^\s*\n")
def has_real_dc(document):
if document.getElementsByTagName("cc:license"):
return True
elif document.getElementsByTagName("cc:License"):
return True
elif document.getElementsByTagName("dc:contributor"):
return True
elif document.getElementsByTagName("cc:Agent"):
return True
elif document.getElementsByTagName("cc:permits"):
return True
elif document.getElementsByTagName("cc:requires"):
return True
return False
for pn in glob.glob("**/*.svg", recursive=True):
i = os.path.basename(pn)
if "draft" in i.casefold():
continue
document = xml.dom.minidom.parse(pn)
changed = False
keep_metadata = has_real_dc(document)
retain_ns = ["xmlns:xlink"]
if keep_metadata:
retain_ns.extend(["xmlns:rdf", "xmlns:cc", "xmlns:dc"])
for element in document.getElementsByTagName("*"):
if element.nodeName == "metadata" and not keep_metadata:
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
elif element.nodeName == "defs":
if (not element.childNodes) or (len(element.childNodes) == 1 and
element.firstChild.nodeName == "#text" and
not element.firstChild.wholeText.strip()):
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
elif element.nodeName.startswith(("inkscape:", "sodipodi:")):
print(i, "removing", element.nodeName)
changed = True
element.parentNode.removeChild(element)
#
if element.hasAttribute("style"):
# Rip SVG pres. attributes out of inline CSS, replacing any overridden attributes
# Note: this will bork on quoted ; in values, which I don't expect to occur.
stylelist = element.getAttribute("style").strip(";").split(";")
styleout = ""
for style in stylelist:
if ":" not in style:
continue # nvm
name, val = style.split(":", 1)
if name in svgpresattrs:
print(i, "attributising", name)
changed = True
element.setAttribute(name.strip(), val.strip())
elif "inkscape" in name:
print(i, "removing", name)
changed = True
pass
else:
print(i, "retaining", name)
changed = True
styleout += style + ";"
if not styleout:
element.removeAttribute("style")
else:
element.setAttribute("style", styleout)
for attr in list(element.attributes.keys())[:]:
if attr.startswith("stroke-") and not element.hasAttribute("stroke") and not (element.nodeName == "g"):
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif attr.startswith("inkscape:") or attr.startswith("sodipodi:"):
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif attr.startswith("xmlns:") and attr not in retain_ns:
print(i, "removing", attr)
changed = True
element.removeAttribute(attr)
elif (element.nodeName == "svg") and (attr == "version"):
print(i, "removing", attr)
changed = True
element.removeAttribute("version")
elif attr == "fill-opacity" and element.getAttribute("fill-opacity") == "1":
print(i, "removing", attr)
changed = True
element.removeAttribute("fill-opacity")
if element.hasAttribute("stroke"):
print(i, "has stroke")
if element.hasAttribute("id") and ((not element.parentNode) or
element.parentNode.nodeName != "defs"):
# Autogenerated ID rubbish
if re.compile(r"^{}\d+$".format(element.nodeName)).match(element.getAttribute("id")):
print(i, "removing ID", element.getAttribute("id"))
changed = True
element.removeAttribute("id")
if changed:
shutil.move(pn, pn + "~")
with open(pn, "w") as f:
x = document.toxml().replace("<?xml version=\"1.0\" ?>", "")
f.write("".join(needlessline.split(x)))
os.unlink(pn + "~")
|
[
"os.unlink",
"os.path.basename",
"shutil.move",
"glob.glob",
"re.compile"
] |
[((1217, 1242), 're.compile', 're.compile', (['"""(?m)^\\\\s*\n"""'], {}), "('(?m)^\\\\s*\\n')\n", (1227, 1242), False, 'import os, xml.dom.minidom, shutil, re, glob\n'), ((1743, 1780), 'glob.glob', 'glob.glob', (['"""**/*.svg"""'], {'recursive': '(True)'}), "('**/*.svg', recursive=True)\n", (1752, 1780), False, 'import os, xml.dom.minidom, shutil, re, glob\n'), ((1790, 1810), 'os.path.basename', 'os.path.basename', (['pn'], {}), '(pn)\n', (1806, 1810), False, 'import os, xml.dom.minidom, shutil, re, glob\n'), ((5661, 5686), 'shutil.move', 'shutil.move', (['pn', "(pn + '~')"], {}), "(pn, pn + '~')\n", (5672, 5686), False, 'import os, xml.dom.minidom, shutil, re, glob\n'), ((5857, 5876), 'os.unlink', 'os.unlink', (["(pn + '~')"], {}), "(pn + '~')\n", (5866, 5876), False, 'import os, xml.dom.minidom, shutil, re, glob\n')]
|
from sets import Set
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.forms import widgets, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from snapboard.models import Category, UserSettings
class PostForm(forms.Form):
post = forms.CharField(
label = '',
widget=forms.Textarea(attrs={
'rows':'8',
'cols':'120',
}),
)
private = forms.CharField(
label=_("Recipients"),
max_length=150,
widget=forms.TextInput(),
required=False,
)
def clean_private(self):
recipients = self.cleaned_data['private']
if len(recipients.strip()) < 1:
return []
recipients = filter(lambda x: len(x.strip()) > 0, recipients.split(','))
recipients = Set([x.strip() for x in recipients]) # string of usernames
u = User.objects.filter(username__in=recipients).order_by('username')
if len(u) != len(recipients):
u_set = Set([str(x.username) for x in u])
u_diff = recipients.difference(u_set)
raise ValidationError(ungettext(
"The following is not a valid user:", "The following are not valid user(s): ",
len(u_diff)) + ' '.join(u_diff))
return u
class ThreadForm(forms.Form):
# def __init__( self, *args, **kwargs ):
# super( ThreadForm, self ).__init__( *args, **kwargs )
# self.fields['category'] = forms.ChoiceField(
# label = _('Category'),
# choices = [(str(x.id), x.label) for x in Category.objects.all()]
# )
# # this is here to set the order
# category = forms.CharField(label=_('Category'))
subject = forms.CharField(max_length=80,
label=_('Subject'),
widget=forms.TextInput(
attrs={
'size': '80',
})
)
post = forms.CharField(widget=forms.Textarea(
attrs={
'rows':'8',
'cols': '80',
}),
label=_('Message')
)
# def clean_category(self):
# id = int(self.cleaned_data['category'])
# return id
class UserSettingsForm(forms.ModelForm):
def __init__(self, *pa, **ka):
user = ka.pop('user')
self.user = user
super(UserSettingsForm, self).__init__(*pa, **ka)
self.fields['frontpage_filters'].choices = [
(cat.id, cat.label) for cat in Category.objects.all() if
cat.can_read(user)
]
frontpage_filters = forms.MultipleChoiceField(label=_('Front page categories'))
class Meta:
model = UserSettings
exclude = ('user',)
def clean_frontpage_filters(self):
frontpage_filters = [cat for cat in (Category.objects.get(pk=id) for id in
self.cleaned_data['frontpage_filters']) if cat.can_read(self.user)]
return frontpage_filters
class LoginForm(forms.Form):
username = forms.CharField(max_length=30, label=_("Username"))
password = forms.CharField(widget=widgets.PasswordInput, label=_("Password"))
def clean_password(self):
scd = self.cleaned_data
self.user = authenticate(username=scd['username'], password=scd['password'])
if self.user is not None:
if self.user.is_active:
return self.cleaned_data['password']
else:
raise ValidationError(_('Your account has been disabled.'))
else:
raise ValidationError(_('Your username or password were incorrect.'))
class InviteForm(forms.Form):
user = forms.CharField(max_length=30, label=_('Username'))
def clean_user(self):
user = self.cleaned_data['user']
try:
user = User.objects.get(username=user)
except User.DoesNotExist:
raise ValidationError(_('Unknown username'))
return user
class AnwserInvitationForm(forms.Form):
decision = forms.ChoiceField(label=_('Answer'), choices=((0, _('Decline')), (1, _('Accept'))))
# vim: ai ts=4 sts=4 et sw=4
|
[
"django.contrib.auth.models.User.objects.get",
"snapboard.models.Category.objects.all",
"django.forms.TextInput",
"django.contrib.auth.models.User.objects.filter",
"snapboard.models.Category.objects.get",
"django.contrib.auth.authenticate",
"django.utils.translation.ugettext_lazy",
"django.forms.Textarea"
] |
[((3394, 3458), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': "scd['username']", 'password': "scd['password']"}), "(username=scd['username'], password=scd['password'])\n", (3406, 3458), False, 'from django.contrib.auth import authenticate\n'), ((442, 492), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': '8', 'cols': '120'}"}), "(attrs={'rows': '8', 'cols': '120'})\n", (456, 492), False, 'from django import forms\n'), ((598, 613), 'django.utils.translation.ugettext_lazy', '_', (['"""Recipients"""'], {}), "('Recipients')\n", (599, 613), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((662, 679), 'django.forms.TextInput', 'forms.TextInput', ([], {}), '()\n', (677, 679), False, 'from django import forms\n'), ((1952, 1964), 'django.utils.translation.ugettext_lazy', '_', (['"""Subject"""'], {}), "('Subject')\n", (1953, 1964), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1985, 2022), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'size': '80'}"}), "(attrs={'size': '80'})\n", (2000, 2022), False, 'from django import forms\n'), ((2127, 2176), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': '8', 'cols': '80'}"}), "(attrs={'rows': '8', 'cols': '80'})\n", (2141, 2176), False, 'from django import forms\n'), ((2255, 2267), 'django.utils.translation.ugettext_lazy', '_', (['"""Message"""'], {}), "('Message')\n", (2256, 2267), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2790, 2816), 'django.utils.translation.ugettext_lazy', '_', (['"""Front page categories"""'], {}), "('Front page categories')\n", (2791, 2816), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3214, 3227), 'django.utils.translation.ugettext_lazy', '_', (['"""Username"""'], {}), "('Username')\n", (3215, 3227), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3296, 3309), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (3297, 3309), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3852, 3865), 'django.utils.translation.ugettext_lazy', '_', (['"""Username"""'], {}), "('Username')\n", (3853, 3865), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3967, 3998), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'user'}), '(username=user)\n', (3983, 3998), False, 'from django.contrib.auth.models import User\n'), ((4190, 4201), 'django.utils.translation.ugettext_lazy', '_', (['"""Answer"""'], {}), "('Answer')\n", (4191, 4201), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1039, 1083), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username__in': 'recipients'}), '(username__in=recipients)\n', (1058, 1083), False, 'from django.contrib.auth.models import User\n'), ((2665, 2687), 'snapboard.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (2685, 2687), False, 'from snapboard.models import Category, UserSettings\n'), ((3725, 3771), 'django.utils.translation.ugettext_lazy', '_', (['"""Your username or password were incorrect."""'], {}), "('Your username or password were incorrect.')\n", (3726, 3771), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2977, 3004), 'snapboard.models.Category.objects.get', 'Category.objects.get', ([], {'pk': 'id'}), '(pk=id)\n', (2997, 3004), False, 'from snapboard.models import Category, UserSettings\n'), ((3639, 3675), 
'django.utils.translation.ugettext_lazy', '_', (['"""Your account has been disabled."""'], {}), "('Your account has been disabled.')\n", (3640, 3675), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4067, 4088), 'django.utils.translation.ugettext_lazy', '_', (['"""Unknown username"""'], {}), "('Unknown username')\n", (4068, 4088), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4216, 4228), 'django.utils.translation.ugettext_lazy', '_', (['"""Decline"""'], {}), "('Decline')\n", (4217, 4228), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4235, 4246), 'django.utils.translation.ugettext_lazy', '_', (['"""Accept"""'], {}), "('Accept')\n", (4236, 4246), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
#!/usr/bin/env python
"""
Reads and writes weight data files.
!!! Weight data files must be in Image row ordering (0, 0) in the top-left. !!!
"""
from __future__ import print_function, division
from numpy import *
import gzip
def read_tex_from_file(ioFile):
'''
Reads a .data file into memory.
Inputs:
ioFile: a file for the .data file
Returns:
width-by-height-by-#channels numpy float32 array of data
        width-by-height numpy boolean array where True values correspond to
        pixels with a non-zero weight in at least one channel.
'''
f = gzip.GzipFile(fileobj=ioFile, mode='rb')
# fromfile() is a numpy function
# UPDATE: We can't use fromfile() on a gzip file object. We have to read
# it first and then use frombuffer().
    # http://stackoverflow.com/questions/15966335/efficient-numpy-fromfile-on-zipped-files
# NOTE: I should make a dtype('')
header = f.read(3 * uint32().itemsize)
width, height, channels = frombuffer(header, uint32, 3)
# Make a mask.
# Since every pixel in the model should have some weight, the mask can
    # be True if any non-zero weight ever appears for a pixel.
mask = zeros((width, height), dtype = bool)
# This is inefficient. We could read it at once, but I don't want to think
# about making sure the channel-wise memory layout is what numpy wants.
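    # (Untested sketch of such a bulk read, kept as a comment only: the channels
    #  are stored one after another, so one could do
    #      raw = f.read(width * height * channels * float32().itemsize)
    #      result = frombuffer(raw, float32).reshape(channels, width, height).transpose(1, 2, 0)
    #  The explicit per-channel loop below is retained for clarity.)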
result = zeros((width, height, channels), dtype = float32)
for chan in range(channels):
data = f.read(width * height * float32().itemsize)
data = frombuffer(data, float32, width * height).reshape(width, height)
# Update the mask with any nonzero entries.
mask = logical_or(mask, data != 0)
result[:, :, chan] = data
result = result[::-1]
return result, mask
def read_tex_from_path(path):
'''
Reads a .data file into memory.
Inputs:
path: a path to the .data file
Returns:
width-by-height-by-#channels numpy float32 array of data
        width-by-height numpy boolean array where True values correspond to
        pixels with a non-zero weight in at least one channel.
'''
print('+ Loading:', path)
    with open(path, 'rb') as f:
        result, mask = read_tex_from_file(f)
print('- Loaded:', path)
return result, mask
def write_tex_to_file(ioFile, data):
'''
Saves a .data to the given file.
Inputs:
ioFile: a File at which to save the .data file
data: width-by-height-by-#channels numpy float32 array of data
'''
data = data[::-1]
f = gzip.GzipFile(fileobj=ioFile, mode='wb')
header = zeros(3, dtype = uint32)
header[:] = data.shape
f.write(getbuffer(header))
channel = zeros((data.shape[0], data.shape[1]), dtype = float32)
for ch in range(data.shape[2]):
channel[:] = data[:, :, ch]
f.write(getbuffer(channel))
def write_tex_to_path(path, data):
'''
Saves a .data to disk.
Inputs:
path: a path at which to save the .data file
data: width-by-height-by-#channels numpy float32 array of data
'''
print('+ Saving:', path)
    with open(path, 'wb') as f:
write_tex_to_file(f, data)
print('- Saved:', path)
def normalize_data(data, mask = None):
'''
    Normalize the width-by-height-by-#channels array `data`, optionally
    restricting the normalization to the values for which `mask` is True.
    Modifies `data` in place and returns None.
'''
if mask is None:
data /= data.sum(axis = 2)[:, :, newaxis]
else:
assert mask.shape == data.shape[:2]
data[mask] /= data.sum(axis = 2)[mask][..., newaxis]
if __name__ == '__main__':
import sys
def usage():
print("Usage:", sys.argv[0], "path/to/tex1.data path/to/tex2.data",
file = sys.stderr)
sys.exit(-1)
if len(sys.argv) != 3:
usage()
path1, path2 = sys.argv[1:]
tex1, mask1 = read_tex_from_path(path1)
tex2, mask2 = read_tex_from_path(path2)
assert tex1.shape == tex2.shape
assert mask1.shape == mask2.shape
assert all(mask1 == mask2)
tex1 = tex1[mask1]
tex2 = tex2[mask2]
# This is pretty memory intensive, so let's be efficient.
# diff:
# diff = tex1 - tex2
diff = tex1
subtract(tex1, tex2, diff)
# Don't use tex1 anymore, it's been reused as diff.
del tex1
# absolute difference:
# abs_diff = abs(tex1-tex2)
abs_diff = diff
absolute(diff, abs_diff)
# Don't use diff anymore, it's been reused as abs_diff.
del diff
total_abs_diff = abs_diff.sum()
print('Total absolute difference:', total_abs_diff)
print('Average absolute difference:',
total_abs_diff / prod(abs_diff.shape))
print('Median absolute difference:', median(abs_diff))
print('Maximum absolute difference:', abs_diff.max())
print('Minimum absolute difference:', abs_diff.min())
# difference, squared:
# abs_diff2 = abs_diff**2
abs_diff2 = abs_diff
square(abs_diff, abs_diff2)
# Don't use abs_diff anymore, it's been reused as abs_diff2.
del abs_diff
avg_abs_diff2 = average(abs_diff2)
print('Mean squared error:', avg_abs_diff2)
print('Root mean squared error:', sqrt(avg_abs_diff2))
|
[
"sys.exit",
"gzip.GzipFile"
] |
[((592, 632), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'ioFile', 'mode': '"""rb"""'}), "(fileobj=ioFile, mode='rb')\n", (605, 632), False, 'import gzip\n'), ((2608, 2648), 'gzip.GzipFile', 'gzip.GzipFile', ([], {'fileobj': 'ioFile', 'mode': '"""wb"""'}), "(fileobj=ioFile, mode='wb')\n", (2621, 2648), False, 'import gzip\n'), ((3856, 3868), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3864, 3868), False, 'import sys\n')]
|
# Generated by Django 2.0 on 2018-01-20 23:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('customer', '0013_auto_20180120_2322'),
]
operations = [
migrations.AddField(
model_name='pspuser',
name='pending_deposit',
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='customer.Deposit'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((376, 489), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""customer.Deposit"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='customer.Deposit')\n", (393, 489), False, 'from django.db import migrations, models\n')]
|
"""
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from discord.ext import commands
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
characters = "!@#$%&*"
numbers = "1234567890"
email_fun = [
'69420', '8008135', 'eatsA$$', 'PeekABoo',
'TheShire', 'isFAT', 'Dumb_man', 'Ruthless_gamer',
'Sexygirl69', 'Loyalboy69', 'likesButts'
]
passwords = [
'<PASSWORD>', '<PASSWORD>', '<PASSWORD>',
'<PASSWORD>', '<PASSWORD>',
'<PASSWORD>', '<PASSWORD>',
'SayHelloToMyLittleFriend', 'ImUnderYourBed',
'TellMyWifeILoveHer', '<PASSWORD>', '<PASSWORD>', 'IKnewYouWouldHackIntoMyAccount',
'BestPasswordE<PASSWORD>', '<PASSWORD>', 'VoteMikuniUwU'
]
DMs = [
"send nudes please", "i invited Mikuni and i got a cookie",
"i hope my mum doesn't find my nudes folder",
"please dont bully me", "https://youtu.be/oHg5SJYRHA0",
"i like bananas", "i use discord in light mode",
"if you are reading this u shud vote Mikuni", "send feet pics when",
"sUbScRiBe To mY yOuTuBe ChAnNeL", "the impostor is sus", "python makes me horny"
]
discord_servers = [
"Sons of Virgins", "Small Benis Gang", "Gamers United",
"Anime Server 69420", "Cornhub", "<NAME>"
]
def gen_random_string(l_: int):
uwu = ""
for i in range(l_ + 1):
uwu += random.choice((letters + numbers))
return uwu
async def send_random_tip(ctx: commands.Context, msg: str, chances: int):
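    # Sends the tip with probability 1/chances, e.g. chances=3 means roughly one in three calls.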
if random.randint(1, chances) == chances:
return await ctx.send(f"**Pro Tip:** {msg}")
else:
pass
|
[
"random.randint",
"random.choice"
] |
[((1828, 1860), 'random.choice', 'random.choice', (['(letters + numbers)'], {}), '(letters + numbers)\n', (1841, 1860), False, 'import random\n'), ((1961, 1987), 'random.randint', 'random.randint', (['(1)', 'chances'], {}), '(1, chances)\n', (1975, 1987), False, 'import random\n')]
|
import numpy as np
import hypers as hp
class TestLearning:
def setup(self):
self.n3 = np.random.rand(10, 10, 30)
self.n4 = np.random.rand(10, 10, 10, 30)
self.n5 = np.random.rand(10, 10, 10, 2, 30)
self.h3 = hp.hparray(self.n3)
self.h4 = hp.hparray(self.n4)
self.h5 = hp.hparray(self.n5)
self.arrays = (self.h3, self.h4, self.h5)
def test_abundance(self):
for array in self.arrays:
ucls = array.abundance.ucls
nnls = array.abundance.nnls
fcls = array.abundance.fcls
for amethod in (ucls, nnls, fcls):
spec1d = np.random.rand(array.shape[-1])
_ = amethod.calculate(spec1d)
assert amethod.map.shape == array.shape[:-1] + (1,)
spec2d = np.random.rand(array.shape[-1], 3)
_ = amethod.calculate(spec2d)
assert amethod.map.shape == array.shape[:-1] + (3,)
|
[
"numpy.random.rand",
"hypers.hparray"
] |
[((100, 126), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(30)'], {}), '(10, 10, 30)\n', (114, 126), True, 'import numpy as np\n'), ((145, 175), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(30)'], {}), '(10, 10, 10, 30)\n', (159, 175), True, 'import numpy as np\n'), ((194, 227), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(2)', '(30)'], {}), '(10, 10, 10, 2, 30)\n', (208, 227), True, 'import numpy as np\n'), ((247, 266), 'hypers.hparray', 'hp.hparray', (['self.n3'], {}), '(self.n3)\n', (257, 266), True, 'import hypers as hp\n'), ((285, 304), 'hypers.hparray', 'hp.hparray', (['self.n4'], {}), '(self.n4)\n', (295, 304), True, 'import hypers as hp\n'), ((323, 342), 'hypers.hparray', 'hp.hparray', (['self.n5'], {}), '(self.n5)\n', (333, 342), True, 'import hypers as hp\n'), ((652, 683), 'numpy.random.rand', 'np.random.rand', (['array.shape[-1]'], {}), '(array.shape[-1])\n', (666, 683), True, 'import numpy as np\n'), ((824, 858), 'numpy.random.rand', 'np.random.rand', (['array.shape[-1]', '(3)'], {}), '(array.shape[-1], 3)\n', (838, 858), True, 'import numpy as np\n')]
|
import pytest
from page_object.page.MainPage import MainPage
class TestSelfChoice(object):
def test_price(self):
main = MainPage()
assert main.click_self_choice()
|
[
"page_object.page.MainPage.MainPage"
] |
[((135, 145), 'page_object.page.MainPage.MainPage', 'MainPage', ([], {}), '()\n', (143, 145), False, 'from page_object.page.MainPage import MainPage\n')]
|
#! /usr/bin/env python
"""
runcalsaa.py - Module to perform SAA correction in the CALNIC pipeline
(After CALNICA, before CALNICB) by running the PEDSUB, BEP, and SAACLEAN tasks.
PEDSUB is run only to improve the calculations of the SAA persistence and BEP
signature; no pedestal correction is actually applied to the final output
image.
USAGE: runcalsaa.py [-d] ipppssoot_raw.fits
Alternative USAGE: python
import runcalsaa
status=runcalsaa.run('ipppssoot_raw.fits')
RETURN VALUES:
It will return status codes to indicate completion status:
0 = successful completion with correction applied
4 = successful completion with no correction applied
1 = failed gracefully with exception
3 = aborted gracefully based on self-diagnostic
REQUIRED INPUT FILES:
Although these files are not specified on the command line, they
must be available for the script to succeed.
In the working directory:
ipppssoot_cal.fits
The association file specified in SAA_DARK
The _raw files specified in that association file
As specified in the _cal file header:
SAACNTAB
PEDSBTAB
FLATFILE
As specified in the post-SAA exposure file headers:
MASKFILE
SAADFILE
OUTPUT FILES & EFFECTS:
The ipppssoot_cal.fits file may be replaced.
The SAADONE keyword in the ipppssoot_cal.fits file is updated.
The BEPDONE keyword in the ipppssoot_cal.fits file is updated.
The ipppssoot_trl.txt file is appended to.
INTERIM FILES:
A _psb.fits file is created temporarily, but removed by the script.
A _ped2.fits file is created temporarily, but removed by the script.
@author: <NAME>, <NAME>
@version: 0.4 (3-Jul-2006)
0.5 (13-Aug-2008)
1.0 (26-Jan-2009)
1.1 (29-Jan-2009)
1.2 (25-Mar-2009)
1.3 (15-Jun-2010)
1.4.2 (5-NOv-2013) MLS: changed return codes for opus
"""
from __future__ import print_function
import os,time,sys
from pyraf import iraf
from iraf import stsdas, hst_calib, nicmos,ctools
from iraf import saaclean
from nictools import nic_rem_persist
from astropy.io import fits as pyfits
import numpy as N
__version__ = '1.4.2'
__vdate__ = '25-Nov-2013'
__trlmarker__ = '*** CALNIC RUNCALSAA Processing Version %s %s ***\n'%(__version__,__vdate__)
"""
These return codes have been changed as requested by opus so that they can detect a return
value of 1 as a real error for the shell script, see #1078
"""
_success = 0
_none = 4
_error = 1
_abort = 3
# Constants relevant to saaclean
statdict_saaclean = {'none':_none,'low only':_success,'high only':_success,
'both':_success,'n/a':_none,'aborted':_abort}
donestring = {_none:'OMITTED',_success:'PERFORMED',_abort:'SKIPPED',
_error:'SKIPPED'}
def run(rawname,debug=False):
#............................................................
# Setup
#............................................................
saadone = _none
bepdone = _none
if '_raw' not in rawname:
print("""ERROR: this script takes ipppssoot_raw.fits file as input:
you provided %s"""%rawname)
return
# Define file names
calname = rawname.replace('_raw','_cal')
pedname = rawname.replace('_raw','_ped')
pedname2 = rawname.replace('_raw','_ped2')
outname = rawname.replace('_raw','_scn_applied')
saapername = rawname.replace('_raw','_spr')
pedtrlname = rawname.replace('_raw.fits','_pedsb_trl.txt')
F_A = calname
F_B = pedname
F_C = outname
F_D = pedname2
# Establish connection to the trailer file
trlname = rawname.replace('_raw.fits','_trl.txt')
Trl = open( trlname,'a')
Trl.write(_timestamp('RUNCALSAA starting'))
Trl.write(__trlmarker__)
# Open the calfile header and determine whether the script should run
f = pyfits.open(calname)
prihdr = f[0].header
# Get some things from the calfile header
saaparname = f[0].header['saacntab']
pedparname = f[0].header['pedsbtab']
camera = f[0].header['camera']
# Trap the case where no PEDSBTAB was provided, as this reference file is
# required for running PEDSUB.
if pedparname == 'N/A':
# No PEDSUB reference file, so turn off all processing.
dosaa=False
saadone=_abort
dobep=False
bepdone=_abort
else:
if 'saacorr' in prihdr:
dosaa = (prihdr['saacorr'] == 'PERFORM')
else:
dosaa = False
saadone = _abort
if 'bepcorr' in prihdr:
dobep = (prihdr['bepcorr'] == 'PERFORM')
else:
dobep = False
bepdone = _abort
if ((dosaa or dobep) and (f[0].header['flatdone'] == 'PERFORMED') and (f[0].header['flatfile'] != 'N/A')):
pass # keep running
else:
Trl.write(_timestamp('RUNCALSAA omitted'))
Trl.close()
set_keys_final( _abort, _abort, F_A, donestring, saapername)
# No files to delete
f.close()
return _none
f.close()
try: # get pedsub pars for SAACLEAN, BEP, or both
kwpars = get_pedsub_pars( camera, pedparname, Trl, F_A, saapername, debug=debug)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring, saapername )
# no copy to final as it already is cal, no files to delete
return _abort
if (dosaa):
if (f[0].header['saadone'] == 'PERFORMED'):
saadone = _abort
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
else: # f[0].header['saadone'] != 'PERFORMED'):
try: # for do_pedsub
do_pedsub(pedparname, Trl, pedtrlname, F_A, F_B, kwpars, saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final( _abort, _abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
saadone, F_S1 = do_saaclean(F_B, F_A, F_C, trlname, saaparname, camera, saapername, Trl, debug=debug)
else: # dosaa is False
F_S1 = F_A # set file that is the final for 'stage 1' to file F_A
if (dobep):
try:
do_pedsub(pedparname, Trl, pedtrlname, F_S1, F_D, kwpars,saapername)
except Exception as e:
handle_exception(e, Trl, [], debug = debug)
set_keys_final(_abort,_abort, F_A, donestring,saapername )
# no copy to final as it already is cal, no files to delete
return _abort
bepdone, F_Final = do_bright_ep( F_D, F_S1, Trl, donestring, debug=debug )
else: # dobep is False
F_Final = F_S1
set_keys_final(saadone, bepdone, F_S1, donestring, saapername)
os.rename( F_Final, calname)
Trl.write(_timestamp('RUNCALSAA completed'))
Trl.close()
return _success
def set_keys_final(saadone, bepdone, F_Final, donestring, saapername):
""" Set values for saadone and bepdone in the final cal file
@param saadone: value of key SAADONE
@type saadone: string
@param bepdone: value of key BEPDONE
@type bepdone: string
@param F_Final: name of final cal file
@type F_Final: string
@param donestring: mapping of strings for done keys
@type donestring: dict
@param saapername: name of persistence model created by SAACLEAN
@type saapername: string
"""
fh = pyfits.open( F_Final, mode = 'update' )
fh[0].header.update('saadone',donestring[saadone])
fh[0].header.update('bepdone',donestring[bepdone])
if saapername != None:
fh[0].header.update('SAACRMAP',saapername)
fh.close()
def get_pedsub_pars( camera, pedparname, Trl, pedsub_file, saapername, debug=False ):
""" Get keyword parameter values for pedsub
@param camera: camera number
@type camera: int
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedsub_file: name of file with pedsub pars
@type pedsub_file: string
@param saapername: name of file for SAA persistence image
@type saapername: string
@return: kwpars
@rtype: dict
"""
# Get params from the pedsubtab
try:
kwpars = getkwpars(camera,iraf.osfn(pedparname))
except Exception as e:
set_keys_final(_error,_error, pedsub_file, donestring,saapername)
handle_exception(e, Trl, [], debug = debug)
return _error
return kwpars
def do_pedsub( pedparname, Trl, pedtrlname, file_1, file_2, kwpars, saapername):
""" Call pedsub
@param pedparname: parameter file name
@type pedparname: string
@param Trl: trailer file name
@type Trl: string
@param pedtrlname: pedsub's trailer file name
@type pedtrlname: string
@param file_1: name of input cal file
@type file_1: string
@param file_2: name of output ped file
@type file_2: string
@param kwpars: keyword params for pedsub
@type kwpars: dict
@param saapername: name of file for SAA persistence image
@type saapername: string
"""
pedsub_complete='=== PEDSUB finished'
# Timestamp the trailer file
Trl.write(_timestamp('PEDSUB starting with paramas from %s'%pedparname))
# Run pedsub with output directed to special file
iraf.flprcache()
iraf.pedsub.unlearn()
iraf.pedsub(input = file_1, output = file_2, Stdout = pedtrlname, **kwpars)
# Examine task output & append to trailer file
pedout = open( pedtrlname )
for line in pedout:
Trl.write( line )
pedout.close()
os.remove(pedtrlname)
if not line.startswith(pedsub_complete):
raise PedsubError
def do_saaclean( calcimage, targimage, output, trlname, saaparname, camera, saapername, Trl, debug=False):
""" Call saaclean
@param calcimage: calc file name
    @type calcimage: string
@param targimage: target file name
@type targimage: string
@param trlname: trailer file name
@type trlname: string
@param saaparname: file name for SAACLEAN pars
@type saaparname: string
@param camera: camera number
@type camera: int
@param saapername: file name for SAACLEAN persistence
@type saapername: string
@param Trl: trailer file
@type Trl: string
@return: saadone, stage 1 file
@rtype: int, string
"""
Trl.write(_timestamp('SAACLEAN starting from pars in %s'%saaparname))
# Get the task parameters from the saacntab
try:
kwpars = getkwpars( camera,iraf.osfn(saaparname) )
except Exception as e:
handle_exception( e, Trl, [calcimage], debug=debug )
saadone = _error
return saadone, targimage
#
# Run the saaclean task
try:
iraf.saaclean.unlearn()
iraf.saaclean(calcimage = calcimage,
targimage = targimage,
output = output,
saaperfile = saapername,
Stderr = Trl, **kwpars)
retstat = statdict_saaclean[ iraf.saaclean.applied ]
if not debug:
if retstat == _abort:
saadone = _abort
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN aborted'))
if os.path.exists(output): os.remove(output)
elif retstat == _none:
saadone = _none
F_S1 = targimage # set file that is the final for 'stage 1' to file targimage
Trl.write(_timestamp('SAACLEAN omitted'))
if os.path.exists(output): os.remove(output)
else: # retstat is SUCCESS
saadone = _success
F_S1 = output # set file that is the final for 'stage 1'
Trl.write(_timestamp('SAACLEAN completed'))
fh_targ = pyfits.open(targimage, mode='update')
fh_targ[0].header.update(key = 'SAACRMAP', value = saapername )
fh_targ.close()
else:
saadone = retstat
if retstat == _abort or retstat == _none:
F_S1 = targimage
else:
F_S1 = output
os.rename( targimage,targimage.replace('_cal.','_orig_cal.'))
os.rename( output,targimage )
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
# Return end of phase 1 final file
return saadone, F_S1
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage) because 2nd pedsub will need to write to it
handle_exception(e, Trl, [calcimage, output], debug = debug)
saadone = _error
F_S1 = targimage
return saadone, targimage
def do_bright_ep( calcimage, targimage, Trl, donestring, debug=False):
""" Do bright earth persistence correction
@param calcimage: calc file name
    @type calcimage: string
@param targimage: target file name
@type targimage: string
@param Trl: trailer file name
@type Trl: string
@return: bepdone, final cal file
@rtype: int, string
"""
Trl.write(_timestamp('BEP starting' ))
# Run the nic_rem_persist task
try:
# When nic_rem_persist reset sys.stdout, IPython did not pick up on the
# change back when nrp.persist() completed, and shut down the entire IPython
# session when Trl.close() was called.
# We need to manage sys.stdout here to allow IPython to recognize that
# we are resetting it back before closing the Trl file.
sys.orig_stdout = sys.stdout
sys.stdout = Trl
nrp = nic_rem_persist.NicRemPersist( calcfile = calcimage, targfile = targimage, run_stdout = None) # set task's stdout to trailer file
nrp_stat = nrp.persist()
bepdone = nrp_stat
if (donestring[nrp_stat] == 'OMITTED'):
Trl.write(_timestamp('BEP aborted'))
elif (donestring[nrp_stat] == 'PERFORMED'):
Trl.write(_timestamp('BEP completed'))
else:
Trl.write(_timestamp('BEP skipped'))
# Set sys.stdout back to normal now that all Trl messages have been written out
sys.stdout = sys.orig_stdout
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
return bepdone, targimage
# If nic_rem_persist fails, we can't proceed. End with an error.
except Exception as e:
if os.path.exists( calcimage ):
os.remove( calcimage) # remove ped file (calcimage)
handle_exception(e, Trl, [calcimage], debug = debug)
# Reset sys.stdout back to normal...
sys.stdout = sys.orig_stdout
bepdone = _none
return bepdone, targimage
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PedsubError(Exception):
def __str__(self):
return "PEDSUB ended with error"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Utility functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def handle_exception(e,trl,files_to_delete,debug=False):
""" Print various useful information to various useful places """
print(str(e))
trl.write(_timestamp("Encountered exception"))
trl.write(str(e))
if not debug:
trl.write('\n Cleaning up interim files \n')
#Clean up files
for fname in files_to_delete:
if os.path.isfile(fname):
os.remove(fname)
trl.write(_timestamp('RUNCALSAA completed with errors'))
def getkwpars(camera,parname):
"""Extract the correct row of the parameter file based on the
value of CAMERA. Parameters are returned as a keyword:value
dictionary."""
d={}
f=pyfits.open(parname)
t=f[1].data
cols=f[1].columns
# Pick out the matching row of the "camera" column.
cams = t.field('camera')
idx = N.where(cams == camera)[0][0]
#..........................^^^^^^
    # (The ugly [0][0] syntax is because numpy.where returns
# a tuple of arrays, and in this case we just want the
# actual scalar value that can be used to index the other
# columns in the table).
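    # (For example, N.where(N.array([1, 2, 3]) == 2) returns (array([1]),),
    #  so the trailing [0][0] extracts the plain integer index 1.)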
for k in cols:
d[k.name] = t.field(k.name)[idx]
del d['camera']
f.close()
return d
def _timestamp(_process_name):
"""Create formatted time string recognizable by OPUS."""
_prefix = time.strftime("\n%Y%j%H%M%S-I-----",time.localtime())
_lenstr = 60 - len(_process_name)
return _prefix+_process_name+(_lenstr*'-')+'\n'
def _getTime():
# Format time values for keywords IRAF-TLM, and DATE
_ltime = time.localtime(time.time())
time_str = time.strftime('%H:%M:%S (%d-%b-%Y)',_ltime)
return time_str
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Run from the shell.
if __name__ == '__main__':
# Look for debug flag
debug = " -d " in sys.argv
# Handle arguments
if len(sys.argv) > 3 or len(sys.argv) < 2:
print("syntax: runcalsaa.py [-d] inputfilename")
sys.exit(_error)
rawname = sys.argv[-1]
# Run script with error checking
try:
retstat = run(rawname,debug=debug)
except Exception as e:
print(str(e))
print("ERROR: RUNCALSAA failed on %s"%rawname)
retstat = _error
# Return status
sys.exit(retstat)
|
[
"pyraf.iraf.flprcache",
"os.remove",
"os.rename",
"nictools.nic_rem_persist.NicRemPersist",
"os.path.exists",
"time.strftime",
"pyraf.iraf.pedsub",
"pyraf.iraf.saaclean.unlearn",
"time.time",
"os.path.isfile",
"numpy.where",
"time.localtime",
"astropy.io.fits.open",
"pyraf.iraf.pedsub.unlearn",
"pyraf.iraf.osfn",
"pyraf.iraf.saaclean",
"sys.exit"
] |
[((3892, 3912), 'astropy.io.fits.open', 'pyfits.open', (['calname'], {}), '(calname)\n', (3903, 3912), True, 'from astropy.io import fits as pyfits\n'), ((6895, 6922), 'os.rename', 'os.rename', (['F_Final', 'calname'], {}), '(F_Final, calname)\n', (6904, 6922), False, 'import os, time, sys\n'), ((7553, 7588), 'astropy.io.fits.open', 'pyfits.open', (['F_Final'], {'mode': '"""update"""'}), "(F_Final, mode='update')\n", (7564, 7588), True, 'from astropy.io import fits as pyfits\n'), ((9460, 9476), 'pyraf.iraf.flprcache', 'iraf.flprcache', ([], {}), '()\n', (9474, 9476), False, 'from pyraf import iraf\n'), ((9481, 9502), 'pyraf.iraf.pedsub.unlearn', 'iraf.pedsub.unlearn', ([], {}), '()\n', (9500, 9502), False, 'from pyraf import iraf\n'), ((9507, 9576), 'pyraf.iraf.pedsub', 'iraf.pedsub', ([], {'input': 'file_1', 'output': 'file_2', 'Stdout': 'pedtrlname'}), '(input=file_1, output=file_2, Stdout=pedtrlname, **kwpars)\n', (9518, 9576), False, 'from pyraf import iraf\n'), ((9746, 9767), 'os.remove', 'os.remove', (['pedtrlname'], {}), '(pedtrlname)\n', (9755, 9767), False, 'import os, time, sys\n'), ((15999, 16019), 'astropy.io.fits.open', 'pyfits.open', (['parname'], {}), '(parname)\n', (16010, 16019), True, 'from astropy.io import fits as pyfits\n'), ((16925, 16969), 'time.strftime', 'time.strftime', (['"""%H:%M:%S (%d-%b-%Y)"""', '_ltime'], {}), "('%H:%M:%S (%d-%b-%Y)', _ltime)\n", (16938, 16969), False, 'import os, time, sys\n'), ((17587, 17604), 'sys.exit', 'sys.exit', (['retstat'], {}), '(retstat)\n', (17595, 17604), False, 'import os, time, sys\n'), ((10896, 10919), 'pyraf.iraf.saaclean.unlearn', 'iraf.saaclean.unlearn', ([], {}), '()\n', (10917, 10919), False, 'from pyraf import iraf\n'), ((10928, 11047), 'pyraf.iraf.saaclean', 'iraf.saaclean', ([], {'calcimage': 'calcimage', 'targimage': 'targimage', 'output': 'output', 'saaperfile': 'saapername', 'Stderr': 'Trl'}), '(calcimage=calcimage, targimage=targimage, output=output,\n saaperfile=saapername, Stderr=Trl, **kwpars)\n', (10941, 11047), False, 'from pyraf import iraf\n'), ((12489, 12509), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (12498, 12509), False, 'import os, time, sys\n'), ((13888, 13978), 'nictools.nic_rem_persist.NicRemPersist', 'nic_rem_persist.NicRemPersist', ([], {'calcfile': 'calcimage', 'targfile': 'targimage', 'run_stdout': 'None'}), '(calcfile=calcimage, targfile=targimage,\n run_stdout=None)\n', (13917, 13978), False, 'from nictools import nic_rem_persist\n'), ((14484, 14509), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (14498, 14509), False, 'import os, time, sys\n'), ((16687, 16703), 'time.localtime', 'time.localtime', ([], {}), '()\n', (16701, 16703), False, 'import os, time, sys\n'), ((16897, 16908), 'time.time', 'time.time', ([], {}), '()\n', (16906, 16908), False, 'import os, time, sys\n'), ((17299, 17315), 'sys.exit', 'sys.exit', (['_error'], {}), '(_error)\n', (17307, 17315), False, 'import os, time, sys\n'), ((8418, 8439), 'pyraf.iraf.osfn', 'iraf.osfn', (['pedparname'], {}), '(pedparname)\n', (8427, 8439), False, 'from pyraf import iraf\n'), ((10673, 10694), 'pyraf.iraf.osfn', 'iraf.osfn', (['saaparname'], {}), '(saaparname)\n', (10682, 10694), False, 'from pyraf import iraf\n'), ((12698, 12723), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (12712, 12723), False, 'import os, time, sys\n'), ((14525, 14545), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (14534, 14545), False, 'import os, time, sys\n'), ((14717, 
14742), 'os.path.exists', 'os.path.exists', (['calcimage'], {}), '(calcimage)\n', (14731, 14742), False, 'import os, time, sys\n'), ((15686, 15707), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (15700, 15707), False, 'import os, time, sys\n'), ((16154, 16177), 'numpy.where', 'N.where', (['(cams == camera)'], {}), '(cams == camera)\n', (16161, 16177), True, 'import numpy as N\n'), ((11467, 11489), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (11481, 11489), False, 'import os, time, sys\n'), ((12450, 12478), 'os.rename', 'os.rename', (['output', 'targimage'], {}), '(output, targimage)\n', (12459, 12478), False, 'import os, time, sys\n'), ((12739, 12759), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (12748, 12759), False, 'import os, time, sys\n'), ((14758, 14778), 'os.remove', 'os.remove', (['calcimage'], {}), '(calcimage)\n', (14767, 14778), False, 'import os, time, sys\n'), ((15725, 15741), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (15734, 15741), False, 'import os, time, sys\n'), ((11491, 11508), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (11500, 11508), False, 'import os, time, sys\n'), ((11750, 11772), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (11764, 11772), False, 'import os, time, sys\n'), ((12027, 12064), 'astropy.io.fits.open', 'pyfits.open', (['targimage'], {'mode': '"""update"""'}), "(targimage, mode='update')\n", (12038, 12064), True, 'from astropy.io import fits as pyfits\n'), ((11774, 11791), 'os.remove', 'os.remove', (['output'], {}), '(output)\n', (11783, 11791), False, 'import os, time, sys\n')]
|
"""
MongoKeyValueStorageImpl
"""
from waves_gateway.common import Injectable, KEY_VALUE_STORAGE_COLLECTION
from waves_gateway.model import PollingState
from waves_gateway.serializer import PollingStateSerializer
from waves_gateway.storage.key_value_storage import KeyValueStorage
from pymongo.collection import Collection # type: ignore
from typing import Optional, Any
from doc_inherit import method_doc_inherit # type: ignore
@Injectable(deps=[KEY_VALUE_STORAGE_COLLECTION, PollingStateSerializer], provides=KeyValueStorage)
class MongoKeyValueStorageImpl(KeyValueStorage):
"""
Implements a key value storage with a MongoDB collection.
"""
_COIN_BLOCK_HEIGHT_KEY = 'coin_block_height'
_WAVES_BLOCK_HEIGHT_KEY = 'waves_block_height'
_VALUE_PROPERTY_KEY = 'value'
_KEY_PROPERTY_KEY = 'key'
_COIN_POLLING_STATE_KEY = 'coin_polling_state'
_WAVES_POLLING_STATE_KEY = 'waves_polling_state'
def _set_value(self, key: str, value: Any) -> None:
"""
Inserts the key/value pair. Overwrites existing entries.
"""
query = dict()
query[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
replacement = dict()
replacement[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
replacement[MongoKeyValueStorageImpl._VALUE_PROPERTY_KEY] = value
self._collection.replace_one(filter=query, replacement=replacement, upsert=True)
def _get_value(self, key: str) -> Any:
"""
Returns the value or None if no value was found.
"""
query = dict()
query[MongoKeyValueStorageImpl._KEY_PROPERTY_KEY] = key
query_result = self._collection.find_one(filter=query)
if query_result is None:
return None
else:
return query_result[MongoKeyValueStorageImpl._VALUE_PROPERTY_KEY]
@method_doc_inherit
def set_last_checked_waves_block_height(self, block_height: int) -> None:
self._set_value(MongoKeyValueStorageImpl._WAVES_BLOCK_HEIGHT_KEY, block_height)
@method_doc_inherit
def get_last_checked_waves_block_height(self) -> Optional[int]:
return self._get_value(MongoKeyValueStorageImpl._WAVES_BLOCK_HEIGHT_KEY)
def __init__(self, collection: Collection, polling_state_serializer: PollingStateSerializer) -> None:
self._collection = collection
self._polling_state_serializer = polling_state_serializer
@method_doc_inherit
def set_last_checked_coin_block_height(self, block_height: int) -> None:
self._set_value(MongoKeyValueStorageImpl._COIN_BLOCK_HEIGHT_KEY, block_height)
@method_doc_inherit
def get_last_checked_coin_block_height(self) -> Optional[int]:
return self._get_value(MongoKeyValueStorageImpl._COIN_BLOCK_HEIGHT_KEY)
def set_waves_polling_state(self, polling_state: PollingState) -> None:
self._set_value(MongoKeyValueStorageImpl._WAVES_POLLING_STATE_KEY,
self._polling_state_serializer.as_dict(polling_state))
def get_coin_polling_state(self) -> Optional[PollingState]:
data = self._get_value(MongoKeyValueStorageImpl._COIN_POLLING_STATE_KEY)
if data is None:
return None
else:
return self._polling_state_serializer.from_dict(data)
def get_waves_polling_state(self) -> Optional[PollingState]:
data = self._get_value(MongoKeyValueStorageImpl._WAVES_POLLING_STATE_KEY)
if data is None:
return None
else:
return self._polling_state_serializer.from_dict(data)
def set_coin_polling_state(self, polling_state: PollingState) -> None:
self._set_value(MongoKeyValueStorageImpl._COIN_POLLING_STATE_KEY,
self._polling_state_serializer.as_dict(polling_state))
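# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how this storage might be wired up. The MongoDB URI, the
# database/collection names, and the no-argument PollingStateSerializer()
# constructor are assumptions for illustration only.
#
#   from pymongo import MongoClient
#   collection = MongoClient('mongodb://localhost:27017')['gateway']['key_value_storage']
#   storage = MongoKeyValueStorageImpl(collection, PollingStateSerializer())
#   storage.set_last_checked_coin_block_height(501234)
#   assert storage.get_last_checked_coin_block_height() == 501234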
|
[
"waves_gateway.common.Injectable"
] |
[((433, 534), 'waves_gateway.common.Injectable', 'Injectable', ([], {'deps': '[KEY_VALUE_STORAGE_COLLECTION, PollingStateSerializer]', 'provides': 'KeyValueStorage'}), '(deps=[KEY_VALUE_STORAGE_COLLECTION, PollingStateSerializer],\n provides=KeyValueStorage)\n', (443, 534), False, 'from waves_gateway.common import Injectable, KEY_VALUE_STORAGE_COLLECTION\n')]
|
"""
Author: <NAME>
Date : 12/4/19
Brief : Handles Pareto frontier dictionary updates and access
Notes :
Copyright 2019 California Institute of Technology. ALL RIGHTS RESERVED.
U.S. Government Sponsorship acknowledged.
"""
import collections
from collections import Mapping
import copy
import json
import numpy
from operator import add
import os
import threading
import string
import random
import yaml
class DataDict(object):
_FLAG_FIRST = object()
def __init__(self, fitness_metrics=[], maximize=True, amount_per_bin=1, history_log=""):
self.fitness_metrics = fitness_metrics
self.maximize = maximize
self.amount_per_bin = amount_per_bin
self.dictionary = self.create_initial()
self.trial_count = 0
self.trial_count_lock = threading.Lock()
self.track_history = history_log is not None and len(history_log) > 0
self.history_log = history_log
def get_dictionary(self):
"""
:return:
"""
return self.dictionary
def update_from_datadict(self, other):
"""
:param other:
:return:
"""
self.deep_update(self.dictionary, other)
def add_trials(self, trials):
self.trial_count_lock.acquire()
self.trial_count += trials
self.trial_count_lock.release()
def update_from_population(self, population=[]):
"""
:param population:
:return:
"""
updated_individuals = []
def update(_dict={}, key_path=[], value=None):
_sub = _dict
for index, item in enumerate(key_path):
if item in _sub:
if index == len(key_path) - 1:
items = _sub[item]
if not items:
_sub[item] = [value]
else:
items.append(value)
items.sort(key=lambda x: x['metrics'][key_path[-1]], reverse=self.maximize)
_sub[item] = items[:self.amount_per_bin]
if any(x['uuid'] == value['uuid'] for x in _sub[item]):
updated_individuals.append(value)
else:
_sub = _sub[item]
return _dict
for individual in population:
if self.has_metrics(individual):
key_path = self.get_corresponding_bin(individual)
self.dictionary = update(_dict=self.dictionary, key_path=key_path, value=individual)
if self.track_history and len(updated_individuals) > 0:
for new_item in updated_individuals:
with open(self.history_log, "a") as f:
f.write(str(self.trial_count) + ": " + str(new_item['metrics']) + "\n")
return self.dictionary, updated_individuals
def has_metrics(self, individual):
"""
:param individual:
:return:
"""
individual_metrics = individual.get('metrics')
if not individual_metrics:
return False
for metrics in self.fitness_metrics:
if individual_metrics.get(metrics.name) is None:
return False
return True
def update_from_previous_run(self, files):
"""
:param files:
:return:
"""
population = []
for file in files:
population.append(yaml.safe_load(open(file)))
self.update_from_population(population)
def create_initial(self):
"""
name, fixed_axis, axis_range, index
fitness_metrics = [Metric(name='banana', axis_range=[0, 1],index=0, partitions=10),
Metric(name='sinc', axis_range=[0,100], index=1, partitions=20),
Metric(name='foo', axis_range=[2.5, math.pi], index=2, partitions=20)] <-- last in list is free axis
datadict = {'banana':
{0:{'sinc':{
0: {'foo': []},
100: {'foo': []}
}
},
1:{'sinc:{
0: {'foo': []},
100: {'foo': []}}
}
}
"""
input_arr = copy.deepcopy(self.fitness_metrics)
if not input_arr:
raise Exception("No metrics exist\nName metrics inside the Metrics: fitness: section in the run_config yml")
def helper(dictionary, array):
_dict = {}
if not array:
return dictionary
_ = array[-1]
if not _.fixed_axis:
_dict[_.name] = []
return helper(_dict, array[:-1])
else:
_range = _.axis_range
partitions = array[-1].partitions
#Solve fencepost problem here
#We need N bins so we create N+1 evenly spaced fenceposts with numpy.linspace
#Only need left endpoint of each bin, so throwaway the last one
bin_labels = numpy.linspace(min(_range), max(_range), num=partitions+1)[:-1]
_dict[_.name] = {round(el, 2): dictionary for el in bin_labels}
return helper(_dict, array[:-1])
return json.loads(json.dumps(helper({}, input_arr)))
def serialize(self, basedir):
"""
:param basedir:
:return:
"""
population = []
def walk(node, best_dir):
for key, item in node.items():
if isinstance(item, dict):
walk(item, best_dir)
else:
if item:
for i in item:
population.append(i)
walk(self.dictionary, basedir)
return population
def deep_update(self, source, overrides):
"""
:param source:
:param overrides:
:return:
"""
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = self.deep_update(source.get(key, {}), value)
source[key] = returned
else:
items = []
if source.get(key):
items = source[key]
items.extend(overrides[key])
items = sorted(items, key=lambda x: x['metrics'][key], reverse=self.maximize)
items = items[:self.amount_per_bin]
source[key] = items
return source
def get_corresponding_bin(self, individual):
"""
:param individual:
:return:
"""
key_path = []
_dict = self.dictionary
for metric in self.fitness_metrics:
_dict = _dict[metric.name]
key_path.append(metric.name)
if metric.fixed_axis:
# get the bins for this value and sort by float if they're stored as strings for some reason
bins = sorted([float(i) for i in list(_dict.keys())])
_bin = bins[0]
for _ in bins:
if individual['metrics'][metric.name] > _:
_bin = _
_dict = _dict[str(_bin)]
key_path.append(str(_bin))
else:
return key_path
def flatten_dict(self, d):
"""
        :param d: nested dictionary to flatten
        :return: list of (key_path, value) tuples
"""
results = []
def visit(subdict, results, partialKey):
for k, v in subdict.items():
newKey = partialKey + (k,)
if isinstance(v, Mapping):
visit(v, results, newKey)
else:
results.append((newKey, v))
empty_key = ()
visit(d, results, empty_key)
return results
def get_non_empty_bins(self):
"""
:return:
"""
self._FLAG_FIRST = object()
original = dict(self.flatten_dict(self.dictionary))
filtered = {k: v for k, v in original.items() if len(v) > 0}
return filtered
def _get_best_metric(self, trials):
trials = sorted(trials, key=lambda x : x['metrics'][self.fitness_metrics[-1].name], reverse=self.maximize)
best = trials[0]
return best['metrics'][self.fitness_metrics[-1].name]
def get_points(self):
"""
:return:
"""
self._FLAG_FIRST = object()
flattened = self.flatten_dict(self.dictionary)
points = []
for key, trials in flattened:
if trials:
i = self._get_best_metric(trials)
else:
i = None
if len(key) > 1:
points.append((key[-2], i))
else:
points.append((key[-1], i))
return points
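# --- Illustrative usage sketch (not part of the original module) ---
# The Metric objects referenced in the create_initial() docstring are defined
# elsewhere; the namedtuple stand-in below is an assumption for illustration.
#
#   from collections import namedtuple
#   Metric = namedtuple('Metric', ['name', 'fixed_axis', 'axis_range', 'partitions'])
#   metrics = [Metric('banana', True, [0, 1], 10),     # fixed axis, 10 bins
#              Metric('sinc', False, [0, 100], None)]  # last metric is the free axis
#   dd = DataDict(fitness_metrics=metrics, maximize=True, amount_per_bin=1)
#   dd.update_from_population([{'uuid': 'a', 'metrics': {'banana': 0.42, 'sinc': 3.1}}])
#   print(dd.get_points())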
|
[
"threading.Lock",
"copy.deepcopy"
] |
[((797, 813), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (811, 813), False, 'import threading\n'), ((4253, 4288), 'copy.deepcopy', 'copy.deepcopy', (['self.fitness_metrics'], {}), '(self.fitness_metrics)\n', (4266, 4288), False, 'import copy\n')]
|
import gym
import numpy as np
from gym_UR3.envs.mujoco import MujocoUR3Env
import time
def main():
env = gym.make('UR3-v0')
Da = env.action_space.shape[0]
obs=env.reset()
start = time.time()
for i in range(100):
env.reset()
print('{}th episode'.format(i+1))
for j in range(100):
env.render()
# env.step(env.action_space.sample())
a = np.zeros(8)
a[:6] = 0.01*np.random.uniform(size = 6)
a[-1] = 1
a[-2] = 1
env.step(a)
end = time.time()
print('Done! {}'.format(end-start))
#action[0] : qpos[0] radian
#action[4] : qpos[4] radian
#action[5] : qpos[5] radian
#action[6] : qpos[7] radian, probably? anyway, the lower finger
#action[7] : qpos[11] radian, probably? anyway, the upper finger
#action[8] : qpos[15] radian, probably? anyway, the middle finger
#action[9] : qpos[6] qpos[10] radian, probably? anyway, the lower and upper fingers spread apart vertically
if __name__=="__main__":
main()
|
[
"numpy.random.uniform",
"numpy.zeros",
"gym.make",
"time.time"
] |
[((115, 133), 'gym.make', 'gym.make', (['"""UR3-v0"""'], {}), "('UR3-v0')\n", (123, 133), False, 'import gym\n'), ((201, 212), 'time.time', 'time.time', ([], {}), '()\n', (210, 212), False, 'import time\n'), ((581, 592), 'time.time', 'time.time', ([], {}), '()\n', (590, 592), False, 'import time\n'), ((425, 436), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (433, 436), True, 'import numpy as np\n'), ((462, 487), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(6)'}), '(size=6)\n', (479, 487), True, 'import numpy as np\n')]
|
import datetime
from decimal import Decimal
import unittest
from qstrader.event import FillEvent, OrderEvent, SignalEvent
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.price_handler.base import AbstractTickPriceHandler
from qstrader.compat import queue
class PriceHandlerMock(AbstractTickPriceHandler):
def __init__(self):
pass
def get_best_bid_ask(self, ticker):
prices = {
"MSFT": (Decimal("50.28"), Decimal("50.31")),
"GOOG": (Decimal("705.46"), Decimal("705.46")),
"AMZN": (Decimal("564.14"), Decimal("565.14")),
}
return prices[ticker]
class PositionSizerMock(object):
def __init__(self):
pass
def size_order(self, portfolio, initial_order):
"""
This PositionSizerMock object simply modifies
the quantity to be 100 of any share transacted.
"""
initial_order.quantity = 100
return initial_order
class RiskManagerMock(object):
def __init__(self):
pass
def refine_orders(self, portfolio, sized_order):
"""
This RiskManagerMock object simply lets the
sized order through, creates the corresponding
OrderEvent object and adds it to a list.
"""
order_event = OrderEvent(
sized_order.ticker,
sized_order.action,
sized_order.quantity
)
return [order_event]
class TestSimpleSignalOrderFillCycleForPortfolioHandler(unittest.TestCase):
"""
Tests a simple Signal, Order and Fill cycle for the
PortfolioHandler. This is, in effect, a sanity check.
"""
def setUp(self):
"""
Set up the PortfolioHandler object supplying it with
$500,000.00 USD in initial cash.
"""
initial_cash = Decimal("500000.00")
events_queue = queue.Queue()
price_handler = PriceHandlerMock()
position_sizer = PositionSizerMock()
risk_manager = RiskManagerMock()
# Create the PortfolioHandler object from the rest
self.portfolio_handler = PortfolioHandler(
initial_cash, events_queue, price_handler,
position_sizer, risk_manager
)
def test_create_order_from_signal_basic_check(self):
"""
Tests the "_create_order_from_signal" method
as a basic sanity check.
"""
signal_event = SignalEvent("MSFT", "BOT")
order = self.portfolio_handler._create_order_from_signal(signal_event)
self.assertEqual(order.ticker, "MSFT")
self.assertEqual(order.action, "BOT")
self.assertEqual(order.quantity, 0)
def test_place_orders_onto_queue_basic_check(self):
"""
Tests the "_place_orders_onto_queue" method
as a basic sanity check.
"""
order = OrderEvent("MSFT", "BOT", 100)
order_list = [order]
self.portfolio_handler._place_orders_onto_queue(order_list)
ret_order = self.portfolio_handler.events_queue.get()
self.assertEqual(ret_order.ticker, "MSFT")
self.assertEqual(ret_order.action, "BOT")
self.assertEqual(ret_order.quantity, 100)
def test_convert_fill_to_portfolio_update_basic_check(self):
"""
Tests the "_convert_fill_to_portfolio_update" method
as a basic sanity check.
"""
fill_event_buy = FillEvent(
datetime.datetime.utcnow(), "MSFT", "BOT",
100, "ARCA", Decimal("50.25"), Decimal("1.00")
)
self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_buy)
# Check the Portfolio values within the PortfolioHandler
port = self.portfolio_handler.portfolio
self.assertEqual(port.cur_cash, Decimal("494974.00"))
# TODO: Finish this off and check it works via Interactive Brokers
fill_event_sell = FillEvent(
datetime.datetime.utcnow(), "MSFT", "SLD",
100, "ARCA", Decimal("50.25"), Decimal("1.00")
)
self.portfolio_handler._convert_fill_to_portfolio_update(fill_event_sell)
def test_on_signal_basic_check(self):
"""
Tests the "on_signal" method as a basic sanity check.
"""
signal_event = SignalEvent("MSFT", "BOT")
self.portfolio_handler.on_signal(signal_event)
ret_order = self.portfolio_handler.events_queue.get()
self.assertEqual(ret_order.ticker, "MSFT")
self.assertEqual(ret_order.action, "BOT")
self.assertEqual(ret_order.quantity, 100)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"qstrader.event.SignalEvent",
"qstrader.event.OrderEvent",
"decimal.Decimal",
"qstrader.portfolio_handler.PortfolioHandler",
"datetime.datetime.utcnow",
"qstrader.compat.queue.Queue"
] |
[((4581, 4596), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4594, 4596), False, 'import unittest\n'), ((1296, 1368), 'qstrader.event.OrderEvent', 'OrderEvent', (['sized_order.ticker', 'sized_order.action', 'sized_order.quantity'], {}), '(sized_order.ticker, sized_order.action, sized_order.quantity)\n', (1306, 1368), False, 'from qstrader.event import FillEvent, OrderEvent, SignalEvent\n'), ((1822, 1842), 'decimal.Decimal', 'Decimal', (['"""500000.00"""'], {}), "('500000.00')\n", (1829, 1842), False, 'from decimal import Decimal\n'), ((1866, 1879), 'qstrader.compat.queue.Queue', 'queue.Queue', ([], {}), '()\n', (1877, 1879), False, 'from qstrader.compat import queue\n'), ((2101, 2194), 'qstrader.portfolio_handler.PortfolioHandler', 'PortfolioHandler', (['initial_cash', 'events_queue', 'price_handler', 'position_sizer', 'risk_manager'], {}), '(initial_cash, events_queue, price_handler, position_sizer,\n risk_manager)\n', (2117, 2194), False, 'from qstrader.portfolio_handler import PortfolioHandler\n'), ((2416, 2442), 'qstrader.event.SignalEvent', 'SignalEvent', (['"""MSFT"""', '"""BOT"""'], {}), "('MSFT', 'BOT')\n", (2427, 2442), False, 'from qstrader.event import FillEvent, OrderEvent, SignalEvent\n'), ((2841, 2871), 'qstrader.event.OrderEvent', 'OrderEvent', (['"""MSFT"""', '"""BOT"""', '(100)'], {}), "('MSFT', 'BOT', 100)\n", (2851, 2871), False, 'from qstrader.event import FillEvent, OrderEvent, SignalEvent\n'), ((4253, 4279), 'qstrader.event.SignalEvent', 'SignalEvent', (['"""MSFT"""', '"""BOT"""'], {}), "('MSFT', 'BOT')\n", (4264, 4279), False, 'from qstrader.event import FillEvent, OrderEvent, SignalEvent\n'), ((3414, 3440), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3438, 3440), False, 'import datetime\n'), ((3482, 3498), 'decimal.Decimal', 'Decimal', (['"""50.25"""'], {}), "('50.25')\n", (3489, 3498), False, 'from decimal import Decimal\n'), ((3500, 3515), 'decimal.Decimal', 'Decimal', (['"""1.00"""'], {}), "('1.00')\n", (3507, 3515), False, 'from decimal import Decimal\n'), ((3760, 3780), 'decimal.Decimal', 'Decimal', (['"""494974.00"""'], {}), "('494974.00')\n", (3767, 3780), False, 'from decimal import Decimal\n'), ((3907, 3933), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3931, 3933), False, 'import datetime\n'), ((3975, 3991), 'decimal.Decimal', 'Decimal', (['"""50.25"""'], {}), "('50.25')\n", (3982, 3991), False, 'from decimal import Decimal\n'), ((3993, 4008), 'decimal.Decimal', 'Decimal', (['"""1.00"""'], {}), "('1.00')\n", (4000, 4008), False, 'from decimal import Decimal\n'), ((448, 464), 'decimal.Decimal', 'Decimal', (['"""50.28"""'], {}), "('50.28')\n", (455, 464), False, 'from decimal import Decimal\n'), ((466, 482), 'decimal.Decimal', 'Decimal', (['"""50.31"""'], {}), "('50.31')\n", (473, 482), False, 'from decimal import Decimal\n'), ((506, 523), 'decimal.Decimal', 'Decimal', (['"""705.46"""'], {}), "('705.46')\n", (513, 523), False, 'from decimal import Decimal\n'), ((525, 542), 'decimal.Decimal', 'Decimal', (['"""705.46"""'], {}), "('705.46')\n", (532, 542), False, 'from decimal import Decimal\n'), ((566, 583), 'decimal.Decimal', 'Decimal', (['"""564.14"""'], {}), "('564.14')\n", (573, 583), False, 'from decimal import Decimal\n'), ((585, 602), 'decimal.Decimal', 'Decimal', (['"""565.14"""'], {}), "('565.14')\n", (592, 602), False, 'from decimal import Decimal\n')]
|
import os
import numpy as np
from . import __file__ as filepath
__all__ = ["Inoue14"]
class Inoue14(object):
def __init__(self, scale_tau=1.):
"""
IGM absorption from Inoue et al. (2014)
Parameters
----------
scale_tau : float
Parameter multiplied to the IGM :math:`\tau` values (exponential
in the linear absorption fraction).
I.e., :math:`f_\mathrm{igm} = e^{-\mathrm{scale\_tau} \tau}`.
"""
self._load_data()
self.scale_tau = scale_tau
def _load_data(self):
path = os.path.join(os.path.dirname(filepath),'data')
#print path
LAF_file = os.path.join(path, 'LAFcoeff.txt')
DLA_file = os.path.join(path, 'DLAcoeff.txt')
data = np.loadtxt(LAF_file, unpack=True)
ix, lam, ALAF1, ALAF2, ALAF3 = data
self.lam = lam[:,np.newaxis]
self.ALAF1 = ALAF1[:,np.newaxis]
self.ALAF2 = ALAF2[:,np.newaxis]
self.ALAF3 = ALAF3[:,np.newaxis]
data = np.loadtxt(DLA_file, unpack=True)
ix, lam, ADLA1, ADLA2 = data
self.ADLA1 = ADLA1[:,np.newaxis]
self.ADLA2 = ADLA2[:,np.newaxis]
return True
@property
def NA(self):
"""
Number of Lyman-series lines
"""
return self.lam.shape[0]
def tLSLAF(self, zS, lobs):
"""
Lyman series, Lyman-alpha forest
"""
z1LAF = 1.2
z2LAF = 4.7
l2 = self.lam #[:, np.newaxis]
tLSLAF_value = np.zeros_like(lobs*l2).T
x0 = (lobs < l2*(1+zS))
x1 = x0 & (lobs < l2*(1+z1LAF))
x2 = x0 & ((lobs >= l2*(1+z1LAF)) & (lobs < l2*(1+z2LAF)))
x3 = x0 & (lobs >= l2*(1+z2LAF))
tLSLAF_value = np.zeros_like(lobs*l2)
tLSLAF_value[x1] += ((self.ALAF1/l2**1.2)*lobs**1.2)[x1]
tLSLAF_value[x2] += ((self.ALAF2/l2**3.7)*lobs**3.7)[x2]
tLSLAF_value[x3] += ((self.ALAF3/l2**5.5)*lobs**5.5)[x3]
return tLSLAF_value.sum(axis=0)
def tLSDLA(self, zS, lobs):
"""
Lyman Series, DLA
"""
z1DLA = 2.0
l2 = self.lam #[:, np.newaxis]
tLSDLA_value = np.zeros_like(lobs*l2)
x0 = (lobs < l2*(1+zS)) & (lobs < l2*(1.+z1DLA))
x1 = (lobs < l2*(1+zS)) & ~(lobs < l2*(1.+z1DLA))
tLSDLA_value[x0] += ((self.ADLA1/l2**2)*lobs**2)[x0]
tLSDLA_value[x1] += ((self.ADLA2/l2**3)*lobs**3)[x1]
return tLSDLA_value.sum(axis=0)
def tLCDLA(self, zS, lobs):
"""
Lyman continuum, DLA
"""
z1DLA = 2.0
lamL = 911.8
tLCDLA_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1DLA:
tLCDLA_value[x0] = 0.2113 * _pow(1.0+zS, 2) - 0.07661 * _pow(1.0+zS, 2.3) * _pow(lobs[x0]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0]/lamL, 2)
else:
x1 = lobs >= lamL*(1.+z1DLA)
tLCDLA_value[x0 & x1] = 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & x1]/lamL, (-3e-1)) - 0.02916 * _pow(lobs[x0 & x1]/lamL, 3)
tLCDLA_value[x0 & ~x1] =0.6340 + 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[x0 & ~x1]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[x0 & ~x1]/lamL, 2) - 0.2905 * _pow(lobs[x0 & ~x1]/lamL, (-3e-1))
return tLCDLA_value
def tLCLAF(self, zS, lobs):
"""
Lyman continuum, LAF
"""
z1LAF = 1.2
z2LAF = 4.7
lamL = 911.8
tLCLAF_value = np.zeros_like(lobs)
x0 = lobs < lamL*(1.+zS)
if zS < z1LAF:
tLCLAF_value[x0] = 0.3248 * (_pow(lobs[x0]/lamL, 1.2) - _pow(1.0+zS, -9e-1) * _pow(lobs[x0]/lamL, 2.1))
elif zS < z2LAF:
x1 = lobs >= lamL*(1+z1LAF)
tLCLAF_value[x0 & x1] = 2.545e-2 * (_pow(1.0+zS, 1.6) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 3.7))
tLCLAF_value[x0 & ~x1] = 2.545e-2 * _pow(1.0+zS, 1.6) * _pow(lobs[x0 & ~x1]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & ~x1]/lamL, 1.2) - 0.2496 * _pow(lobs[x0 & ~x1]/lamL, 2.1)
else:
x1 = lobs > lamL*(1.+z2LAF)
x2 = (lobs >= lamL*(1.+z1LAF)) & (lobs < lamL*(1.+z2LAF))
x3 = lobs < lamL*(1.+z1LAF)
tLCLAF_value[x0 & x1] = 5.221e-4 * (_pow(1.0+zS, 3.4) * _pow(lobs[x0 & x1]/lamL, 2.1) - _pow(lobs[x0 & x1]/lamL, 5.5))
tLCLAF_value[x0 & x2] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x2]/lamL, 2.1) + 0.2182 * _pow(lobs[x0 & x2]/lamL, 2.1) - 2.545e-2 * _pow(lobs[x0 & x2]/lamL, 3.7)
tLCLAF_value[x0 & x3] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[x0 & x3]/lamL, 2.1) + 0.3248 * _pow(lobs[x0 & x3]/lamL, 1.2) - 3.140e-2 * _pow(lobs[x0 & x3]/lamL, 2.1)
return tLCLAF_value
def full_IGM(self, z, lobs):
"""Get full Inoue IGM absorption
Parameters
----------
z : float
Redshift to evaluate IGM absorption
lobs : array
Observed-frame wavelength(s) in Angstroms.
Returns
-------
abs : array
IGM absorption
"""
tau_LS = self.tLSLAF(z, lobs) + self.tLSDLA(z, lobs)
tau_LC = self.tLCLAF(z, lobs) + self.tLCDLA(z, lobs)
### Upturn at short wavelengths, low-z
#k = 1./100
#l0 = 600-6/k
#clip = lobs/(1+z) < 600.
#tau_clip = 100*(1-1./(1+np.exp(-k*(lobs/(1+z)-l0))))
tau_clip = 0.
return np.exp(-self.scale_tau*(tau_LC + tau_LS + tau_clip))
def build_grid(self, zgrid, lrest):
"""Build a spline interpolation object for fast IGM models
Returns: self.interpolate
"""
from scipy.interpolate import CubicSpline
igm_grid = np.zeros((len(zgrid), len(lrest)))
for iz in range(len(zgrid)):
igm_grid[iz,:] = self.full_IGM(zgrid[iz], lrest*(1+zgrid[iz]))
self.interpolate = CubicSpline(zgrid, igm_grid)
def _pow(a, b):
"""C-like power, a**b
"""
return a**b
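# --- Illustrative usage sketch (not part of the original module) ---
# Evaluates the IGM transmission at a few arbitrary observed-frame wavelengths;
# the redshift and wavelength values below are example inputs only.
#
#   igm = Inoue14(scale_tau=1.)
#   lobs = numpy.linspace(3000., 9000., 5)              # observed-frame Angstroms
#   transmission = igm.full_IGM(3.0, lobs)              # multiplicative transmission in (0, 1]
#   igm.build_grid(zgrid=numpy.linspace(2.5, 3.5, 11),
#                  lrest=numpy.linspace(800., 1300., 50))
#   print(igm.interpolate(3.0))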
|
[
"numpy.zeros_like",
"scipy.interpolate.CubicSpline",
"os.path.dirname",
"numpy.exp",
"numpy.loadtxt",
"os.path.join"
] |
[((692, 726), 'os.path.join', 'os.path.join', (['path', '"""LAFcoeff.txt"""'], {}), "(path, 'LAFcoeff.txt')\n", (704, 726), False, 'import os\n'), ((746, 780), 'os.path.join', 'os.path.join', (['path', '"""DLAcoeff.txt"""'], {}), "(path, 'DLAcoeff.txt')\n", (758, 780), False, 'import os\n'), ((801, 834), 'numpy.loadtxt', 'np.loadtxt', (['LAF_file'], {'unpack': '(True)'}), '(LAF_file, unpack=True)\n', (811, 834), True, 'import numpy as np\n'), ((1063, 1096), 'numpy.loadtxt', 'np.loadtxt', (['DLA_file'], {'unpack': '(True)'}), '(DLA_file, unpack=True)\n', (1073, 1096), True, 'import numpy as np\n'), ((1829, 1853), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (1842, 1853), True, 'import numpy as np\n'), ((2263, 2287), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (2276, 2287), True, 'import numpy as np\n'), ((2758, 2777), 'numpy.zeros_like', 'np.zeros_like', (['lobs'], {}), '(lobs)\n', (2771, 2777), True, 'import numpy as np\n'), ((3663, 3682), 'numpy.zeros_like', 'np.zeros_like', (['lobs'], {}), '(lobs)\n', (3676, 3682), True, 'import numpy as np\n'), ((5708, 5762), 'numpy.exp', 'np.exp', (['(-self.scale_tau * (tau_LC + tau_LS + tau_clip))'], {}), '(-self.scale_tau * (tau_LC + tau_LS + tau_clip))\n', (5714, 5762), True, 'import numpy as np\n'), ((6186, 6214), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['zgrid', 'igm_grid'], {}), '(zgrid, igm_grid)\n', (6197, 6214), False, 'from scipy.interpolate import CubicSpline\n'), ((614, 639), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (629, 639), False, 'import os\n'), ((1583, 1607), 'numpy.zeros_like', 'np.zeros_like', (['(lobs * l2)'], {}), '(lobs * l2)\n', (1596, 1607), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
try:
from datetime import datetime, timezone
import pytz
except Exception as e:
print('Import error {}, check requirements.txt'.format(e))
sys.exit(1)
DATEFORMAT_MISCAN = '%Y-%m-%d %H:%M:%S'
DATEFORMAT_UTC = '%Y-%m-%dT%H:%M:%SZ'
LAST_TIMESTAMP = str(datetime.today().strftime(DATEFORMAT_UTC))
mi_timestamp = "{}-{}-{} {}:{}:{}".format(
2000 + 20,
9, 23,
12, 10,
5)
# current timestamp from the mi scale
mi_datetime = datetime.strptime(mi_timestamp,DATEFORMAT_MISCAN)
print(mi_datetime)
# convert this to utc time
utc = pytz.utc
mytz = pytz.timezone('Europe/Vaduz')
utc_dt = mytz.localize(mi_datetime)
print(utc_dt.astimezone(utc).strftime(DATEFORMAT_UTC))
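# --- Illustrative note (not part of the original script) ---
# With the hard-coded timestamp above (2020-09-23 12:10:05) and Europe/Vaduz on
# CEST (UTC+2) in late September, the final line should print 2020-09-23T10:10:05Z.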
|
[
"sys.path.append",
"datetime.datetime.today",
"datetime.datetime.strptime",
"pytz.timezone",
"sys.exit"
] |
[((59, 80), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (74, 80), False, 'import sys\n'), ((586, 636), 'datetime.datetime.strptime', 'datetime.strptime', (['mi_timestamp', 'DATEFORMAT_MISCAN'], {}), '(mi_timestamp, DATEFORMAT_MISCAN)\n', (603, 636), False, 'from datetime import datetime, timezone\n'), ((705, 734), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Vaduz"""'], {}), "('Europe/Vaduz')\n", (718, 734), False, 'import pytz\n'), ((237, 248), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (245, 248), False, 'import sys\n'), ((352, 368), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (366, 368), False, 'from datetime import datetime, timezone\n')]
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from converter.models import Transformation
from .literals import SOURCE_UNCOMPRESS_CHOICE_ASK
from .models import POP3Email, IMAPEmail, WatchFolderSource, WebFormSource
def create_default_document_source(sender, **kwargs):
if not WebFormSource.objects.count():
WebFormSource.objects.create(
label=_('Default'), uncompress=SOURCE_UNCOMPRESS_CHOICE_ASK
)
def copy_transformations_to_version(sender, **kwargs):
instance = kwargs['instance']
# TODO: Fix this, source should be previous version
# TODO: Fix this, shouldn't this be at the documents app
Transformation.objects.copy(
source=instance.document, targets=instance.pages.all()
)
def initialize_periodic_tasks(**kwargs):
for source in POP3Email.objects.filter(enabled=True):
source.save()
for source in IMAPEmail.objects.filter(enabled=True):
source.save()
for source in WatchFolderSource.objects.filter(enabled=True):
source.save()
|
[
"django.utils.translation.ugettext_lazy"
] |
[((423, 435), 'django.utils.translation.ugettext_lazy', '_', (['"""Default"""'], {}), "('Default')\n", (424, 435), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
import fileinput
import re
data = ''.join(fileinput.input()).split('\n')
def searchData(target):
return [d for d in data if re.search(target, d) is not None]
# part 1
targets = ['shiny gold']
searched = []
all_bags = []
converged = False
while not converged:
new_targets = []
for t in targets:
if t not in searched:
# search data
bags = searchData(t)
bags = [" ".join(b.split()[:2]) for b in bags]
# remove target
while t in bags: bags.remove(t)
searched.append(t)
if len(bags) > 0:
new_targets.extend(bags)
all_bags.extend(bags)
targets = new_targets
if len(targets) == 0:
converged = True
print(len(set(all_bags)))
# part 2
pattern1 = ' bags contain'
pattern2 = r'([0-9]+)\s([a-z]+\s[a-z]+)\sbag'
targets = [(1, 'shiny gold')]
n_bags = 0
converged = False
d_bags = {}
while not converged:
new_targets = []
for t in targets:
for d in searchData(t[1] + pattern1):
bags = d.split("contain ")[1].split(', ')
for b in bags:
m = re.match(pattern2, b)
if m:
n_bag, type_bag = m.groups()
n_bags += t[0]*int(n_bag)
new_targets.append((t[0]*int(n_bag), type_bag))
if len(new_targets) == 0:
converged = True
else:
targets = new_targets
print(n_bags)
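# --- Illustrative note (not part of the original script) ---
# pattern2 pulls the count and colour out of fragments such as "2 shiny gold bags":
#   >>> re.match(r'([0-9]+)\s([a-z]+\s[a-z]+)\sbag', '2 shiny gold bags').groups()
#   ('2', 'shiny gold')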
|
[
"fileinput.input",
"re.search",
"re.match"
] |
[((46, 63), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (61, 63), False, 'import fileinput\n'), ((136, 156), 're.search', 're.search', (['target', 'd'], {}), '(target, d)\n', (145, 156), False, 'import re\n'), ((1214, 1235), 're.match', 're.match', (['pattern2', 'b'], {}), '(pattern2, b)\n', (1222, 1235), False, 'import re\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from icalendar import Calendar, Event
from datetime import datetime, timedelta
from lxml import html
import requests
import argparse
import logging
import secrets
def get_holidays_grouped_by_months(year):
page = requests.get(
"http://www.consultant.ru/law/ref/calendar/proizvodstvennye/{0}/".format(year)
)
if "404 Ресурс не найден!" in page.text:
return None
tree = html.fromstring(page.content)
months = tree.xpath("//th[@class='month']/../../..")
if len(months) != 12:
logging.warning(f"Number of months in {year} don't equal to 12")
holidays = []
for m in months:
holidays_in_month = m.xpath(
".//td[@class='holiday weekend' or @class='weekend' or @class='nowork']/text()"
)
holidays.append([int(day) for day in holidays_in_month])
return holidays
def create_dayoff_event(year, month, day_start, day_end):
event = Event()
event.add("summary", "Выходной")
event.add("dtstart", datetime(year, month, day_start, 0, 0, 0).date())
event.add(
"dtend", datetime(year, month, day_end, 0, 0, 0).date() + timedelta(days=1)
)
# UID is REQUIRED https://tools.ietf.org/html/rfc5545#section-3.6.1
uid = secrets.token_hex(64)
event.add("uid", uid)
return event
def generate_events(year, holidays_by_months):
import more_itertools as mit
events = []
for month, holidays in enumerate(holidays_by_months, start=1):
holidays_groups = [list(group) for group in mit.consecutive_groups(holidays)]
for g in holidays_groups:
e = create_dayoff_event(year, month, g[0], g[-1])
events.append(e)
return events
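# --- Illustrative note (not part of the original script) ---
# mit.consecutive_groups splits the sorted holiday days of a month into runs of
# consecutive days; each run becomes a single multi-day event above:
#   >>> import more_itertools as mit
#   >>> [list(g) for g in mit.consecutive_groups([1, 2, 3, 8, 9])]
#   [[1, 2, 3], [8, 9]]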
def parse_args():
parser = argparse.ArgumentParser(
description="This script fetches data about production calendar and generates .ics file with it."
)
default_output_file = "test.ics"
parser.add_argument(
"-o",
dest="output_file",
metavar="out",
default=default_output_file,
help="output file (default: {0})".format(default_output_file),
)
parser.add_argument(
"--start-year",
metavar="yyyy",
type=int,
default=datetime.today().year,
help="year calendar starts (default: current year)",
)
parser.add_argument(
"--end-year",
metavar="yyyy",
type=int,
default=(datetime.today().year + 1),
help="year calendar ends (default: next year)",
)
parser.add_argument("--log-level", metavar="level", default="INFO")
return parser.parse_args()
def generate_calendar(events):
cal = Calendar()
cal.add("prodid", "-//My calendar product//mxm.dk//")
cal.add("version", "2.0")
cal.add("NAME", "Производственный календарь")
cal.add("X-WR-CALNAME", "Производственный календарь")
for e in events:
cal.add_component(e)
return cal
def setup_logging(log_level):
logging_level = getattr(logging, log_level.upper(), None)
if not isinstance(logging_level, int):
raise ValueError("Invalid log level: {0}".format(log_level))
logging.basicConfig(
level=logging_level,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="[%d/%m/%Y:%H:%M:%S %z]",
)
if __name__ == "__main__":
args = parse_args()
setup_logging(args.log_level)
events = []
# (args.end_year + 1) because range() function doesn't include right margin
for year in range(args.start_year, args.end_year + 1, 1):
holidays_by_months = get_holidays_grouped_by_months(year)
if not holidays_by_months:
break
events += generate_events(year, holidays_by_months)
cal = generate_calendar(events)
with open(args.output_file, "w") as f:
f.write(cal.to_ical().decode("utf-8"))
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"datetime.datetime.today",
"logging.warning",
"more_itertools.consecutive_groups",
"secrets.token_hex",
"icalendar.Event",
"lxml.html.fromstring",
"datetime.datetime",
"datetime.timedelta",
"icalendar.Calendar"
] |
[((451, 480), 'lxml.html.fromstring', 'html.fromstring', (['page.content'], {}), '(page.content)\n', (466, 480), False, 'from lxml import html\n'), ((976, 983), 'icalendar.Event', 'Event', ([], {}), '()\n', (981, 983), False, 'from icalendar import Calendar, Event\n'), ((1284, 1305), 'secrets.token_hex', 'secrets.token_hex', (['(64)'], {}), '(64)\n', (1301, 1305), False, 'import secrets\n'), ((1781, 1913), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This script fetches data about production calendar and generates .ics file with it."""'}), "(description=\n 'This script fetches data about production calendar and generates .ics file with it.'\n )\n", (1804, 1913), False, 'import argparse\n'), ((2703, 2713), 'icalendar.Calendar', 'Calendar', ([], {}), '()\n', (2711, 2713), False, 'from icalendar import Calendar, Event\n'), ((3189, 3323), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging_level', 'format': '"""%(asctime)s [%(levelname)s] %(message)s"""', 'datefmt': '"""[%d/%m/%Y:%H:%M:%S %z]"""'}), "(level=logging_level, format=\n '%(asctime)s [%(levelname)s] %(message)s', datefmt='[%d/%m/%Y:%H:%M:%S %z]'\n )\n", (3208, 3323), False, 'import logging\n'), ((573, 637), 'logging.warning', 'logging.warning', (['f"""Number of months in {year} don\'t equal to 12"""'], {}), '(f"Number of months in {year} don\'t equal to 12")\n', (588, 637), False, 'import logging\n'), ((1177, 1194), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1186, 1194), False, 'from datetime import datetime, timedelta\n'), ((1046, 1087), 'datetime.datetime', 'datetime', (['year', 'month', 'day_start', '(0)', '(0)', '(0)'], {}), '(year, month, day_start, 0, 0, 0)\n', (1054, 1087), False, 'from datetime import datetime, timedelta\n'), ((1569, 1601), 'more_itertools.consecutive_groups', 'mit.consecutive_groups', (['holidays'], {}), '(holidays)\n', (1591, 1601), True, 'import more_itertools as mit\n'), ((2268, 2284), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2282, 2284), False, 'from datetime import datetime, timedelta\n'), ((1128, 1167), 'datetime.datetime', 'datetime', (['year', 'month', 'day_end', '(0)', '(0)', '(0)'], {}), '(year, month, day_end, 0, 0, 0)\n', (1136, 1167), False, 'from datetime import datetime, timedelta\n'), ((2465, 2481), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2479, 2481), False, 'from datetime import datetime, timedelta\n')]
|
"""
Work in progress for reading some other kind of complex NITF.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
from typing import Union, Tuple, List, Optional, Callable, Sequence
import copy
from datetime import datetime
import numpy
from scipy.constants import foot
from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf
from sarpy.geometry.latlon import num as lat_lon_parser
from sarpy.io.general.base import SarpyIOError
from sarpy.io.general.data_segment import DataSegment, SubsetSegment
from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction
from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader
from sarpy.io.general.nitf_elements.security import NITFSecurityTags
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0
from sarpy.io.general.nitf_elements.base import TREList
from sarpy.io.general.nitf_elements.tres.unclass.CMETAA import CMETAA
from sarpy.io.general.utils import is_file_like
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
TxFrequencyType, WaveformParametersType, ChanParametersType
from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.PFA import PFAType
logger = logging.getLogger(__name__)
_iso_date_format = '{}-{}-{}T{}:{}:{}'
# NB: DO NOT implement is_a() here.
# This will explicitly happen after other readers
########
# Define sicd structure from image sub-header information
def extract_sicd(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
        transpose: bool,
nitf_header: Optional[Union[NITFHeader, NITFHeader0]] = None) -> SICDType:
"""
Extract the best available SICD structure from relevant nitf header structures.
Parameters
----------
img_header : ImageSegmentHeader|ImageSegmentHeader0
transpose : bool
nitf_header : None|NITFHeader|NITFHeader0
Returns
-------
SICDType
"""
def get_collection_info() -> CollectionInfoType:
isorce = img_header.ISORCE.strip()
collector_name = None if len(isorce) < 1 else isorce
iid2 = img_header.IID2.strip()
core_name = img_header.IID1.strip() if len(iid2) < 1 else iid2
class_str = img_header.Security.CLAS
if class_str == 'T':
classification = 'TOPSECRET'
elif class_str == 'S':
classification = 'SECRET'
elif class_str == 'C':
classification = 'CONFIDENTIAL'
elif class_str == 'U':
classification = 'UNCLASSIFIED'
else:
classification = ''
ctlh = img_header.Security.CTLH.strip()
        if len(ctlh) > 0:
classification += '//' + ctlh
code = img_header.Security.CODE.strip()
        if len(code) > 0:
classification += '//' + code
return CollectionInfoType(
CollectorName=collector_name,
CoreName=core_name,
Classification=classification)
def get_image_data() -> ImageDataType:
pvtype = img_header.PVTYPE
if pvtype == 'C':
if img_header.NBPP != 64:
logger.warning(
'This NITF has complex bands that are not 64-bit.\n\t'
'This is not currently supported.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'R':
if img_header.NBPP == 64:
logger.warning(
'The real/imaginary data in the NITF are stored as 64-bit floating point.\n\t'
'The closest Pixel Type, RE32F_IM32F, will be used,\n\t'
'but there may be overflow issues if converting this file.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'SI':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pvtype))
if transpose:
rows = img_header.NCOLS
cols = img_header.NROWS
else:
rows = img_header.NROWS
cols = img_header.NCOLS
return ImageDataType(
PixelType=pixel_type,
NumRows=rows,
NumCols=cols,
FirstRow=0,
FirstCol=0,
FullImage=(rows, cols),
SCPPixel=(0.5 * rows, 0.5 * cols))
def append_country_code(cc) -> None:
if len(cc) > 0:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CountryCodes=[cc, ])
elif the_sicd.CollectionInfo.CountryCodes is None:
the_sicd.CollectionInfo.CountryCodes = [cc, ]
elif cc not in the_sicd.CollectionInfo.CountryCodes:
the_sicd.CollectionInfo.CountryCodes.append(cc)
def set_image_corners(icps: numpy.ndarray, override: bool = False) -> None:
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(ImageCorners=icps)
elif the_sicd.GeoData.ImageCorners is None or override:
the_sicd.GeoData.ImageCorners = icps
def set_arp_position(arp_ecf: numpy.ndarray, override: bool = False) -> None:
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType(ARPPos=arp_ecf)
elif override:
# prioritize this information first - it should be more reliable than other sources
the_sicd.SCPCOA.ARPPos = arp_ecf
def set_scp(scp_ecf: numpy.ndarray, scp_pixel: Union[numpy.ndarray, list, tuple], override: bool = False) -> None:
def set_scppixel():
if the_sicd.ImageData is None:
the_sicd.ImageData = ImageDataType(SCPPixel=scp_pixel)
else:
the_sicd.ImageData.SCPPixel = scp_pixel
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(SCP=SCPType(ECF=scp_ecf))
set_scppixel()
elif the_sicd.GeoData.SCP is None or override:
the_sicd.GeoData.SCP = SCPType(ECF=scp_ecf)
set_scppixel()
def set_collect_start(
collect_start: Union[str, datetime, numpy.datetime64], override: bool = False) -> None:
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType(CollectStart=collect_start)
elif the_sicd.Timeline.CollectStart is None or override:
the_sicd.Timeline.CollectStart = collect_start
def set_uvects(row_unit: numpy.ndarray, col_unit: numpy.ndarray) -> None:
if the_sicd.Grid is None:
the_sicd.Grid = GridType(
Row=DirParamType(UVectECF=row_unit),
Col=DirParamType(UVectECF=col_unit))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(UVectECF=row_unit)
elif the_sicd.Grid.Row.UVectECF is None:
the_sicd.Grid.Row.UVectECF = row_unit
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(UVectECF=col_unit)
elif the_sicd.Grid.Col.UVectECF is None:
the_sicd.Grid.Col.UVectECF = col_unit
def try_CMETAA() -> None:
# noinspection PyTypeChecker
tre = None if tres is None else tres['CMETAA'] # type: CMETAA
if tre is None:
return
cmetaa = tre.DATA
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType()
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType()
if the_sicd.Grid is None:
the_sicd.Grid = GridType()
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType()
if the_sicd.RadarCollection is None:
the_sicd.RadarCollection = RadarCollectionType()
if the_sicd.ImageFormation is None:
the_sicd.ImageFormation = ImageFormationType()
the_sicd.SCPCOA.SCPTime = 0.5*float(cmetaa.WF_CDP)
the_sicd.GeoData.SCP = SCPType(ECF=tre.get_scp())
the_sicd.SCPCOA.ARPPos = tre.get_arp()
the_sicd.SCPCOA.SideOfTrack = cmetaa.CG_LD.strip().upper()
the_sicd.SCPCOA.SlantRange = float(cmetaa.CG_SRAC)
the_sicd.SCPCOA.DopplerConeAng = float(cmetaa.CG_CAAC)
the_sicd.SCPCOA.GrazeAng = float(cmetaa.CG_GAAC)
the_sicd.SCPCOA.IncidenceAng = 90 - float(cmetaa.CG_GAAC)
if hasattr(cmetaa, 'CG_TILT'):
the_sicd.SCPCOA.TwistAng = float(cmetaa.CG_TILT)
if hasattr(cmetaa, 'CG_SLOPE'):
the_sicd.SCPCOA.SlopeAng = float(cmetaa.CG_SLOPE)
the_sicd.ImageData.SCPPixel = [int(cmetaa.IF_DC_IS_COL), int(cmetaa.IF_DC_IS_ROW)]
img_corners = tre.get_image_corners()
if img_corners is not None:
the_sicd.GeoData.ImageCorners = img_corners
if cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'S':
the_sicd.Grid.ImagePlane = 'SLANT'
elif cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'G':
the_sicd.Grid.ImagePlane = 'GROUND'
else:
logger.warning(
'Got unexpected CMPLX_SIGNAL_PLANE value {},\n\t'
'setting ImagePlane to SLANT'.format(cmetaa.CMPLX_SIGNAL_PLANE))
the_sicd.Grid.Row = DirParamType(
SS=float(cmetaa.IF_RSS),
ImpRespWid=float(cmetaa.IF_RGRES),
Sgn=1 if cmetaa.IF_RFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_RFFT_SAMP)/(float(cmetaa.IF_RSS)*float(cmetaa.IF_RFFT_TOT)))
the_sicd.Grid.Col = DirParamType(
SS=float(cmetaa.IF_AZSS),
ImpRespWid=float(cmetaa.IF_AZRES),
Sgn=1 if cmetaa.IF_AFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_AZFFT_SAMP)/(float(cmetaa.IF_AZSS)*float(cmetaa.IF_AZFFT_TOT)))
cmplx_weight = cmetaa.CMPLX_WEIGHT.strip().upper()
if cmplx_weight == 'UWT':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='UNIFORM')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='UNIFORM')
elif cmplx_weight == 'HMW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HAMMING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HAMMING')
elif cmplx_weight == 'HNW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HANNING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HANNING')
elif cmplx_weight == 'TAY':
the_sicd.Grid.Row.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_RNG_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_RNG_TAY_NBAR))})
the_sicd.Grid.Col.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_AZ_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_AZ_TAY_NBAR))})
else:
logger.warning(
'Got unsupported CMPLX_WEIGHT value {}.\n\tThe resulting SICD will '
'not have valid weight array populated'.format(cmplx_weight))
the_sicd.Grid.Row.define_weight_function()
the_sicd.Grid.Col.define_weight_function()
# noinspection PyBroadException
try:
date_str = cmetaa.T_UTC_YYYYMMMDD
time_str = cmetaa.T_HHMMSSUTC
date_time = _iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
time_str[:2], time_str[2:4], time_str[4:6])
the_sicd.Timeline.CollectStart = numpy.datetime64(date_time, 'us')
except Exception:
logger.info('Failed extracting start time from CMETAA')
pass
the_sicd.Timeline.CollectDuration = float(cmetaa.WF_CDP)
the_sicd.Timeline.IPP = [
IPPSetType(TStart=0,
TEnd=float(cmetaa.WF_CDP),
IPPStart=0,
IPPEnd=numpy.floor(float(cmetaa.WF_CDP)*float(cmetaa.WF_PRF)),
IPPPoly=[0, float(cmetaa.WF_PRF)])]
the_sicd.RadarCollection.TxFrequency = TxFrequencyType(
Min=float(cmetaa.WF_SRTFR),
Max=float(cmetaa.WF_ENDFR))
the_sicd.RadarCollection.TxPolarization = cmetaa.POL_TR.upper()
the_sicd.RadarCollection.Waveform = [WaveformParametersType(
TxPulseLength=float(cmetaa.WF_WIDTH),
TxRFBandwidth=float(cmetaa.WF_BW),
TxFreqStart=float(cmetaa.WF_SRTFR),
TxFMRate=float(cmetaa.WF_CHRPRT)*1e12)]
tx_rcv_pol = '{}:{}'.format(cmetaa.POL_TR.upper(), cmetaa.POL_RE.upper())
the_sicd.RadarCollection.RcvChannels = [
ChanParametersType(TxRcvPolarization=tx_rcv_pol)]
the_sicd.ImageFormation.TxRcvPolarizationProc = tx_rcv_pol
if_process = cmetaa.IF_PROCESS.strip().upper()
if if_process == 'PF':
the_sicd.ImageFormation.ImageFormAlgo = 'PFA'
scp_ecf = tre.get_scp()
fpn_ned = numpy.array(
[float(cmetaa.CG_FPNUV_X), float(cmetaa.CG_FPNUV_Y), float(cmetaa.CG_FPNUV_Z)], dtype='float64')
ipn_ned = numpy.array(
[float(cmetaa.CG_IDPNUVX), float(cmetaa.CG_IDPNUVY), float(cmetaa.CG_IDPNUVZ)], dtype='float64')
fpn_ecf = ned_to_ecf(fpn_ned, scp_ecf, absolute_coords=False)
ipn_ecf = ned_to_ecf(ipn_ned, scp_ecf, absolute_coords=False)
the_sicd.PFA = PFAType(FPN=fpn_ecf, IPN=ipn_ecf)
elif if_process in ['RM', 'CD']:
the_sicd.ImageFormation.ImageFormAlgo = 'RMA'
# the remainder of this is guesswork to define required fields
the_sicd.ImageFormation.TStartProc = 0 # guess work
the_sicd.ImageFormation.TEndProc = float(cmetaa.WF_CDP)
the_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=float(cmetaa.WF_SRTFR), MaxProc=float(cmetaa.WF_ENDFR))
# all remaining guess work
the_sicd.ImageFormation.STBeamComp = 'NO'
the_sicd.ImageFormation.ImageBeamComp = 'SV' if cmetaa.IF_BEAM_COMP[0] == 'Y' else 'NO'
the_sicd.ImageFormation.AzAutofocus = 'NO' if cmetaa.AF_TYPE[0] == 'N' else 'SV'
the_sicd.ImageFormation.RgAutofocus = 'NO'
def try_AIMIDA() -> None:
tre = None if tres is None else tres['AIMIDA']
if tre is None:
return
aimida = tre.DATA
append_country_code(aimida.COUNTRY.strip())
create_time = datetime.strptime(aimida.CREATION_DATE, '%d%b%y')
if the_sicd.ImageCreation is None:
the_sicd.ImageCreation = ImageCreationType(DateTime=create_time)
elif the_sicd.ImageCreation.DateTime is None:
the_sicd.ImageCreation.DateTime = create_time
collect_start = datetime.strptime(aimida.MISSION_DATE+aimida.TIME, '%d%b%y%H%M')
set_collect_start(collect_start, override=False)
def try_AIMIDB() -> None:
tre = None if tres is None else tres['AIMIDB']
if tre is None:
return
aimidb = tre.DATA
append_country_code(aimidb.COUNTRY.strip())
if the_sicd.ImageFormation is not None and the_sicd.ImageFormation.SegmentIdentifier is None:
the_sicd.ImageFormation.SegmentIdentifier = aimidb.CURRENT_SEGMENT.strip()
date_str = aimidb.ACQUISITION_DATE
collect_start = numpy.datetime64(_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
set_collect_start(collect_start, override=False)
def try_ACFT() -> None:
if tres is None:
return
tre = tres['ACFTA']
if tre is None:
tre = tres['ACFTB']
if tre is None:
return
acft = tre.DATA
sensor_id = acft.SENSOR_ID.strip()
if len(sensor_id) > 1:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CollectorName=sensor_id)
elif the_sicd.CollectionInfo.CollectorName is None:
the_sicd.CollectionInfo.CollectorName = sensor_id
row_ss = float(acft.ROW_SPACING)
col_ss = float(acft.COL_SPACING)
if hasattr(acft, 'ROW_SPACING_UNITS') and acft.ROW_SPACING_UNITS.strip().lower() == 'f':
row_ss *= foot
if hasattr(acft, 'COL_SPACING_UNITS') and acft.COL_SPACING_UNITS.strip().lower() == 'f':
col_ss *= foot
# NB: these values are actually ground plane values, and should be
# corrected to slant plane if possible
if the_sicd.SCPCOA is not None:
if the_sicd.SCPCOA.GrazeAng is not None:
col_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.GrazeAng))
if the_sicd.SCPCOA.TwistAng is not None:
row_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.TwistAng))
if the_sicd.Grid is None:
the_sicd.Grid = GridType(Row=DirParamType(SS=row_ss), Col=DirParamType(SS=col_ss))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(SS=row_ss)
elif the_sicd.Grid.Row.SS is None:
the_sicd.Grid.Row.SS = row_ss
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(SS=col_ss)
elif the_sicd.Grid.Col.SS is None:
the_sicd.Grid.Col.SS = col_ss
def try_BLOCKA() -> None:
tre = None if tres is None else tres['BLOCKA']
if tre is None:
return
blocka = tre.DATA
icps = []
for fld_name in ['FRFC_LOC', 'FRLC_LOC', 'LRLC_LOC', 'LRFC_LOC']:
value = getattr(blocka, fld_name)
# noinspection PyBroadException
try:
lat_val = float(value[:10])
lon_val = float(value[10:21])
except ValueError:
lat_val = lat_lon_parser(value[:10])
lon_val = lat_lon_parser(value[10:21])
icps.append([lat_val, lon_val])
set_image_corners(icps, override=False)
def try_MPDSRA() -> None:
def valid_array(arr):
return numpy.all(numpy.isfinite(arr)) and numpy.any(arr != 0)
tre = None if tres is None else tres['MPDSRA']
if tre is None:
return
mpdsra = tre.DATA
scp_ecf = foot*numpy.array(
[float(mpdsra.ORO_X), float(mpdsra.ORO_Y), float(mpdsra.ORO_Z)], dtype='float64')
if valid_array(scp_ecf):
set_scp(scp_ecf, (int(mpdsra.ORP_COLUMN) - 1, int(mpdsra.ORP_ROW) - 1), override=False)
arp_pos_ned = foot*numpy.array(
[float(mpdsra.ARP_POS_N), float(mpdsra.ARP_POS_E), float(mpdsra.ARP_POS_D)], dtype='float64')
arp_vel_ned = foot*numpy.array(
[float(mpdsra.ARP_VEL_N), float(mpdsra.ARP_VEL_E), float(mpdsra.ARP_VEL_D)], dtype='float64')
arp_acc_ned = foot*numpy.array(
[float(mpdsra.ARP_ACC_N), float(mpdsra.ARP_ACC_E), float(mpdsra.ARP_ACC_D)], dtype='float64')
arp_pos = ned_to_ecf(arp_pos_ned, scp_ecf, absolute_coords=True) if valid_array(arp_pos_ned) else None
set_arp_position(arp_pos, override=False)
arp_vel = ned_to_ecf(arp_vel_ned, scp_ecf, absolute_coords=False) if valid_array(arp_vel_ned) else None
if the_sicd.SCPCOA.ARPVel is None:
the_sicd.SCPCOA.ARPVel = arp_vel
arp_acc = ned_to_ecf(arp_acc_ned, scp_ecf, absolute_coords=False) if valid_array(arp_acc_ned) else None
if the_sicd.SCPCOA.ARPAcc is None:
the_sicd.SCPCOA.ARPAcc = arp_acc
if the_sicd.PFA is not None and the_sicd.PFA.FPN is None:
# TODO: is this already in meters?
fpn_ecf = numpy.array(
[float(mpdsra.FOC_X), float(mpdsra.FOC_Y), float(mpdsra.FOC_Z)], dtype='float64') # *foot
if valid_array(fpn_ecf):
the_sicd.PFA.FPN = fpn_ecf
def try_MENSRB() -> None:
tre = None if tres is None else tres['MENSRB']
if tre is None:
return
mensrb = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensrb.ACFT_LOC[:12]),
lat_lon_parser(mensrb.ACFT_LOC[12:25]),
foot*float(mensrb.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensrb.RP_LOC[:12]),
lat_lon_parser(mensrb.RP_LOC[12:25]),
foot*float(mensrb.RP_ELV)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
set_scp(scp_ecf, (int(mensrb.RP_COL)-1, int(mensrb.RP_ROW)-1), override=False)
row_unit_ned = numpy.array(
[float(mensrb.C_R_NC), float(mensrb.C_R_EC), float(mensrb.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensrb.C_AZ_NC), float(mensrb.C_AZ_EC), float(mensrb.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def try_MENSRA() -> None:
tre = None if tres is None else tres['MENSRA']
if tre is None:
return
mensra = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensra.ACFT_LOC[:10]),
lat_lon_parser(mensra.ACFT_LOC[10:21]),
foot*float(mensra.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensra.CP_LOC[:10]),
lat_lon_parser(mensra.CP_LOC[10:21]),
foot*float(mensra.CP_ALT)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
# TODO: is this already zero based?
set_scp(geodetic_to_ecf(scp_llh), (int(mensra.CCRP_COL), int(mensra.CCRP_ROW)), override=False)
row_unit_ned = numpy.array(
[float(mensra.C_R_NC), float(mensra.C_R_EC), float(mensra.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensra.C_AZ_NC), float(mensra.C_AZ_EC), float(mensra.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def extract_corners() -> None:
icps = extract_image_corners(img_header)
if icps is None:
return
# TODO: include symmetry transform issue
set_image_corners(icps, override=False)
def extract_start() -> None:
# noinspection PyBroadException
try:
date_str = img_header.IDATIM
collect_start = numpy.datetime64(
_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
except Exception:
logger.info('failed extracting start time from IDATIM tre')
return
set_collect_start(collect_start, override=False)
# noinspection PyUnresolvedReferences
tres = None if img_header.ExtendedHeader.data is None \
else img_header.ExtendedHeader.data # type: Union[None, TREList]
collection_info = get_collection_info()
image_data = get_image_data()
the_sicd = SICDType(
CollectionInfo=collection_info,
ImageData=image_data)
# apply the various tres and associated logic
# NB: this should generally be in order of preference
try_CMETAA()
try_AIMIDB()
try_AIMIDA()
try_ACFT()
try_BLOCKA()
try_MPDSRA()
try_MENSRA()
try_MENSRB()
extract_corners()
extract_start()
return the_sicd
# Helper methods for transforming data
def get_linear_magnitude_scaling(scale_factor: float):
"""
Get a linear magnitude scaling function, to correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
return data/scale_factor
return scaler
def get_linear_power_scaling(scale_factor):
"""
Get a linear power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
def scaler(data):
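        # power (intensity) data: the corrected magnitude is the square root of the linearly scaled value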
return numpy.sqrt(data/scale_factor)
return scaler
def get_log_magnitude_scaling(scale_factor, db_per_step):
"""
Gets the log magnitude scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
lin_scaler = get_linear_magnitude_scaling(scale_factor)
def scaler(data):
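        # each count of data represents db_per_step dB; convert back from dB to a linear value
        # before applying the linear magnitude scaling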
return lin_scaler(numpy.exp(0.05*numpy.log(10)*db_per_step*data))
return scaler
def get_log_power_scaling(scale_factor, db_per_step):
"""
Gets the log power scaling function, to derive correct magnitude.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
db_per_step : float
        The db_per_step factor, according to the definition given in STDI-0002.
Returns
-------
callable
"""
power_scaler = get_linear_power_scaling(scale_factor)
def scaler(data):
return power_scaler(numpy.exp(0.1*numpy.log(10)*db_per_step*data))
return scaler
def get_linlog_magnitude_scaling(scale_factor, tipping_point):
"""
Gets the magnitude scaling function for the model which
is initially linear, and then switches to logarithmic beyond a fixed
tipping point.
Parameters
----------
scale_factor : float
The scale factor, according to the definition given in STDI-0002.
tipping_point : float
The tipping point between the two models.
Returns
-------
callable
"""
db_per_step = 20*numpy.log10(tipping_point)/tipping_point
log_scaler = get_log_magnitude_scaling(scale_factor, db_per_step)
def scaler(data):
out = data/scale_factor
above_tipping = (out > tipping_point)
out[above_tipping] = log_scaler(data[above_tipping])
return out
return scaler
class ApplyAmplitudeScalingFunction(ComplexFormatFunction):
__slots__ = ('_scaling_function', )
_allowed_ordering = ('MP', 'PM')
has_inverse = False
def __init__(
self,
raw_dtype: Union[str, numpy.dtype],
order: str,
scaling_function: Optional[Callable] = None,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
band_dimension: int = -1):
"""
Parameters
----------
raw_dtype : str|numpy.dtype
The raw datatype. Valid options dependent on the value of order.
order : str
One of `('MP', 'PM')`, with allowable raw_dtype
`('uint8', 'uint16', 'uint32', 'float32', 'float64')`.
scaling_function : Optional[Callable]
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
band_dimension : int
Which band is the complex dimension, **after** the transpose operation.
"""
self._scaling_function = None
ComplexFormatFunction.__init__(
self, raw_dtype, order, raw_shape=raw_shape, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes, band_dimension=band_dimension)
self._set_scaling_function(scaling_function)
@property
def scaling_function(self) -> Optional[Callable]:
"""
The magnitude scaling function.
Returns
-------
None|Callable
"""
return self._scaling_function
def _set_scaling_function(self, value: Optional[Callable]):
if value is None:
self._scaling_function = None
return
if not isinstance(value, Callable):
raise TypeError('scaling_function must be callable')
self._scaling_function = value
def _forward_magnitude_theta(
self,
data: numpy.ndarray,
out: numpy.ndarray,
magnitude: numpy.ndarray,
theta: numpy.ndarray,
subscript: Tuple[slice, ...]) -> None:
if self._scaling_function is not None:
magnitude = self._scaling_function(magnitude)
ComplexFormatFunction._forward_magnitude_theta(
self, data, out, magnitude, theta, subscript)
def _extract_transform_data(
image_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
band_dimension: int):
"""
    Helper function for defining the transform_data definition needed for
    interpreting image segment data.
Parameters
----------
image_header : ImageSegmentHeader|ImageSegmentHeader0
Returns
-------
None|str|callable
"""
if len(image_header.Bands) != 2:
raise ValueError('Got unhandled case of {} image bands'.format(len(image_header.Bands)))
complex_order = image_header.Bands[0].ISUBCAT+image_header.Bands[1].ISUBCAT
if complex_order not in ['IQ', 'QI', 'MP', 'PM']:
raise ValueError('Got unhandled complex order `{}`'.format(complex_order))
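    # NBPP is the number of bits per pixel (per band); convert to bytes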
bpp = int(image_header.NBPP/8)
pv_type = image_header.PVTYPE
if pv_type == 'INT':
raw_dtype = '>u{}'.format(bpp)
elif pv_type == 'SI':
raw_dtype = '>i{}'.format(bpp)
elif pv_type == 'R':
raw_dtype = '>f{}'.format(bpp)
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pv_type))
# noinspection PyUnresolvedReferences
tre = None if img_header.ExtendedHeader.data is None else \
img_header.ExtendedHeader.data['CMETAA'] # type: Optional[CMETAA]
if tre is None:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
cmetaa = tre.DATA
if cmetaa.CMPLX_PHASE_SCALING_TYPE.strip() != 'NS':
raise ValueError(
'Got unsupported CMPLX_PHASE_SCALING_TYPE {}'.format(
cmetaa.CMPLX_PHASE_SCALING_TYPE))
remap_type = cmetaa.CMPLX_MAG_REMAP_TYPE.strip()
if remap_type == 'NS':
if complex_order in ['IQ', 'QI']:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
else:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is "NS",\n\t '
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(complex_order))
elif remap_type not in ['LINM', 'LINP', 'LOGM', 'LOGP', 'LLM']:
raise ValueError('Got unsupported CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
if complex_order not in ['MP', 'PM']:
raise ValueError(
'Got unexpected state where cmetaa.CMPLX_MAG_REMAP_TYPE is `{}`,\n\t'
'but Band[0].ISUBCAT/Band[1].ISUBCAT = `{}`'.format(
remap_type, complex_order))
scale_factor = float(cmetaa.CMPLX_LIN_SCALE)
if remap_type == 'LINM':
scaling_function = get_linear_magnitude_scaling(scale_factor)
elif remap_type == 'LINP':
scaling_function = get_linear_power_scaling(scale_factor)
elif remap_type == 'LOGM':
# NB: there is nowhere in the CMETAA structure to define
# the db_per_step value. Strangely, the use of this value is laid
# out in the STDI-0002 standards document, which defines CMETAA
# structure. We will generically use a value which maps the
# max uint8 value to the max int16 value.
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_magnitude_scaling(scale_factor, db_per_step)
elif remap_type == 'LOGP':
db_per_step = 300*numpy.log(2)/255.0
scaling_function = get_log_power_scaling(scale_factor, db_per_step)
elif remap_type == 'LLM':
scaling_function = get_linlog_magnitude_scaling(
scale_factor, int(cmetaa.CMPLX_LINLOG_TP))
else:
raise ValueError('Got unhandled CMETAA.CMPLX_MAG_REMAP_TYPE {}'.format(remap_type))
return ApplyAmplitudeScalingFunction(raw_dtype, complex_order, scaling_function, band_dimension=band_dimension)
######
# The interpreter and reader objects
class ComplexNITFDetails(NITFDetails):
"""
Details object for NITF file containing complex data.
"""
__slots__ = (
'_segment_status', '_segment_bands', '_sicd_meta', '_reverse_axes', '_transpose_axes')
def __init__(
self,
file_name: str,
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
file_name : str
file name for a NITF file containing a complex SICD
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
self._reverse_axes = reverse_axes
self._transpose_axes = transpose_axes
self._segment_status = None
self._sicd_meta = None
self._segment_bands = None
NITFDetails.__init__(self, file_name)
self._find_complex_image_segments()
if len(self.sicd_meta) == 0:
raise SarpyIOError(
'No complex valued image segments found in file {}'.format(file_name))
@property
def reverse_axes(self) -> Union[None, int, Sequence[int]]:
return self._reverse_axes
@property
def transpose_axes(self) -> Optional[Tuple[int, ...]]:
return self._transpose_axes
@property
def segment_status(self) -> Tuple[bool, ...]:
"""
Tuple[bool, ...]: Where each image segment is viable for use.
"""
return self._segment_status
@property
def sicd_meta(self) -> Tuple[SICDType, ...]:
"""
Tuple[SICDType, ...]: The best inferred sicd structures.
"""
return self._sicd_meta
@property
def segment_bands(self) -> Tuple[Tuple[int, Optional[int]], ...]:
"""
This describes the structure for the output data segments from the NITF,
with each entry of the form `(image_segment, output_band)`, where
`output_band` will be `None` if the image segment has exactly one
complex band.
Returns
-------
Tuple[Tuple[int, Optional[int]], ...]
The band details for use.
"""
return self._segment_bands
def _check_band_details(
self,
index: int,
sicd_meta: List,
segment_status: List,
segment_bands: List):
if len(segment_status) != index:
raise ValueError('Inconsistent status checking state')
image_header = self.img_headers[index]
if image_header.ICAT.strip() not in ['SAR', 'SARIQ']:
segment_status.append(False)
return
# construct a preliminary sicd
sicd = extract_sicd(image_header, self._transpose_axes is not None)
bands = image_header.Bands
pvtype = image_header.PVTYPE
# handle odd bands
if (len(bands) % 2) == 1:
if image_header.PVTYPE != 'C':
# it's not complex, so we're done
segment_status.append(False)
return
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, len(bands)))
return
# we have an even number of bands - ensure that the bands are marked
# IQ/QI/MP/PM
order = bands[0].ISUBCAT + bands[1].ISUBCAT
if order not in ['IQ', 'QI', 'MP', 'PM']:
segment_status.append(False)
return
if len(bands) == 2:
# this should be the most common by far
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, 1))
return
for i in range(2, len(bands), 2):
if order != bands[i].ISUBCAT + bands[i+1].ISUBCAT:
logging.error(
'Image segment appears to multiband with switch complex ordering')
segment_status.append(False)
return
if order in ['IQ', 'QI']:
if pvtype not in ['SI', 'R']:
logging.error(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
segment_status.append(False)
if order in ['MP', 'PM']:
if pvtype not in ['INT', 'R']:
logging.error(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
segment_status.append(False)
segment_status.append(True)
sicd_meta.append(sicd)
segment_bands.append((index, int(len(bands)/2)))
def _find_complex_image_segments(self):
"""
Find complex image segments.
Returns
-------
None
"""
sicd_meta = []
segment_status = []
segment_bands = []
for index in range(len(self.img_headers)):
self._check_band_details(index, sicd_meta, segment_status, segment_bands)
self._segment_status = tuple(segment_status)
use_sicd_meta = []
use_segment_bands = []
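        # expand multi-band image segments: each complex band gets its own sicd copy and (segment, band) entry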
for (the_index, out_bands), sicd in zip(segment_bands, sicd_meta):
if out_bands == 1:
use_sicd_meta.append(sicd)
use_segment_bands.append((the_index, None))
else:
for j in range(out_bands):
use_sicd_meta.append(sicd.copy())
use_segment_bands.append((the_index, j))
self._sicd_meta = tuple(use_sicd_meta)
self._segment_bands = tuple(use_segment_bands)
class ComplexNITFReader(NITFReader, SICDTypeReader):
"""
    A reader for complex valued NITF elements. This should be explicitly tried AFTER
    the SICDReader.
"""
def __init__(
self,
nitf_details: Union[str, ComplexNITFDetails],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
nitf_details : str|ComplexNITFDetails
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
If presented this should be only `(1, 0)`.
"""
if isinstance(nitf_details, str):
nitf_details = ComplexNITFDetails(
nitf_details, reverse_axes=reverse_axes, transpose_axes=transpose_axes)
if not isinstance(nitf_details, ComplexNITFDetails):
raise TypeError('The input argument for ComplexNITFReader must be a filename or '
'ComplexNITFDetails object.')
SICDTypeReader.__init__(self, None, nitf_details.sicd_meta)
NITFReader.__init__(
self,
nitf_details,
reader_type="SICD",
reverse_axes=nitf_details.reverse_axes,
transpose_axes=nitf_details.transpose_axes)
self._check_sizes()
@property
def nitf_details(self) -> ComplexNITFDetails:
"""
ComplexNITFDetails: The NITF details object.
"""
# noinspection PyTypeChecker
return self._nitf_details
def get_nitf_dict(self):
"""
Populate a dictionary with the pertinent NITF header information. This
is for use in more faithful preservation of NITF header information
in copying or rewriting sicd files.
Returns
-------
dict
"""
out = {}
security = {}
security_obj = self.nitf_details.nitf_header.Security
# noinspection PyProtectedMember
for field in NITFSecurityTags._ordering:
value = getattr(security_obj, field).strip()
if value != '':
security[field] = value
if len(security) > 0:
out['Security'] = security
out['OSTAID'] = self.nitf_details.nitf_header.OSTAID
out['FTITLE'] = self.nitf_details.nitf_header.FTITLE
return out
def populate_nitf_information_into_sicd(self):
"""
Populate some pertinent NITF header information into the SICD structure.
This provides more faithful copying or rewriting options.
"""
nitf_dict = self.get_nitf_dict()
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = copy.deepcopy(nitf_dict)
def depopulate_nitf_information(self):
"""
Eliminates the NITF information dict from the SICD structure.
"""
for sicd_meta in self._sicd_meta:
sicd_meta.NITF = {}
def get_format_function(
self,
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int,
image_segment_index: Optional[int] = None,
**kwargs) -> Optional[FormatFunction]:
image_header = self.nitf_details.img_headers[image_segment_index]
bands = len(image_header.Bands)
if complex_order is not None and bands == 2:
return _extract_transform_data(image_header, band_dimension)
# TODO: strange nonstandard float16 handling?
return NITFReader.get_format_function(
self, raw_dtype, complex_order, lut, band_dimension, image_segment_index, **kwargs)
def _check_image_segment_for_compliance(
self,
index: int,
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> bool:
return self.nitf_details.segment_status[index]
def find_image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
return tuple((entry[0], ) for entry in self.nitf_details.segment_bands)
def create_data_segment_for_collection_element(self, collection_index: int) -> DataSegment:
the_index, the_band = self.nitf_details.segment_bands[collection_index]
if the_index not in self._image_segment_data_segments:
data_segment = self.create_data_segment_for_image_segment(the_index, apply_format=True)
else:
data_segment = self._image_segment_data_segments[the_index]
if the_band is None:
return data_segment
else:
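            # slice the single requested complex band out of the formatted data segment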
return SubsetSegment(data_segment, (slice(None, None, 1), slice(None, None, 1), slice(the_band, the_band+1, 1)), 'formatted', close_parent=True)
def final_attempt(file_name: str) -> Optional[ComplexNITFReader]:
"""
    Contingency check to open the file as some other complex NITF type.
Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
ComplexNITFReader|None
"""
if is_file_like(file_name):
return None
try:
nitf_details = ComplexNITFDetails(file_name)
        logger.info('File {} is determined to be some other format complex NITF.'.format(file_name))
return ComplexNITFReader(nitf_details)
except (SarpyIOError, ValueError):
return None
|
[
"sarpy.io.general.format_function.ComplexFormatFunction._forward_magnitude_theta",
"sarpy.io.complex.sicd_elements.GeoData.SCPType",
"sarpy.io.complex.sicd_elements.RadarCollection.RadarCollectionType",
"sarpy.io.general.format_function.ComplexFormatFunction",
"logging.error",
"sarpy.io.complex.sicd_elements.ImageCreation.ImageCreationType",
"sarpy.io.general.nitf.NITFDetails.__init__",
"sarpy.io.complex.sicd_elements.Timeline.TimelineType",
"sarpy.io.general.nitf.NITFReader.__init__",
"sarpy.io.complex.sicd_elements.Grid.DirParamType",
"numpy.isfinite",
"sarpy.io.complex.sicd_elements.GeoData.GeoDataType",
"sarpy.io.complex.sicd_elements.Grid.WgtTypeType",
"sarpy.io.general.nitf.extract_image_corners",
"sarpy.io.general.utils.is_file_like",
"numpy.log10",
"sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType",
"sarpy.geometry.geocoords.ned_to_ecf",
"sarpy.geometry.latlon.num",
"sarpy.io.complex.sicd_elements.ImageData.ImageDataType",
"sarpy.io.complex.sicd_elements.SICD.SICDType",
"copy.deepcopy",
"sarpy.io.general.nitf.NITFReader.get_format_function",
"sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType",
"datetime.datetime.strptime",
"sarpy.io.complex.sicd_elements.RadarCollection.ChanParametersType",
"sarpy.geometry.geocoords.geodetic_to_ecf",
"sarpy.io.complex.base.SICDTypeReader.__init__",
"sarpy.io.complex.sicd_elements.PFA.PFAType",
"sarpy.io.complex.sicd_elements.ImageFormation.ImageFormationType",
"numpy.log",
"numpy.deg2rad",
"numpy.datetime64",
"numpy.any",
"sarpy.io.general.format_function.ComplexFormatFunction.__init__",
"sarpy.io.complex.sicd_elements.Grid.GridType",
"logging.getLogger",
"numpy.sqrt"
] |
[((2044, 2071), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2061, 2071), False, 'import logging\n'), ((24348, 24410), 'sarpy.io.complex.sicd_elements.SICD.SICDType', 'SICDType', ([], {'CollectionInfo': 'collection_info', 'ImageData': 'image_data'}), '(CollectionInfo=collection_info, ImageData=image_data)\n', (24356, 24410), False, 'from sarpy.io.complex.sicd_elements.SICD import SICDType\n'), ((45138, 45161), 'sarpy.io.general.utils.is_file_like', 'is_file_like', (['file_name'], {}), '(file_name)\n', (45150, 45161), False, 'from sarpy.io.general.utils import is_file_like\n'), ((3655, 3758), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CollectorName': 'collector_name', 'CoreName': 'core_name', 'Classification': 'classification'}), '(CollectorName=collector_name, CoreName=core_name,\n Classification=classification)\n', (3673, 3758), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((4880, 5030), 'sarpy.io.complex.sicd_elements.ImageData.ImageDataType', 'ImageDataType', ([], {'PixelType': 'pixel_type', 'NumRows': 'rows', 'NumCols': 'cols', 'FirstRow': '(0)', 'FirstCol': '(0)', 'FullImage': '(rows, cols)', 'SCPPixel': '(0.5 * rows, 0.5 * cols)'}), '(PixelType=pixel_type, NumRows=rows, NumCols=cols, FirstRow=0,\n FirstCol=0, FullImage=(rows, cols), SCPPixel=(0.5 * rows, 0.5 * cols))\n', (4893, 5030), False, 'from sarpy.io.complex.sicd_elements.ImageData import ImageDataType\n'), ((15291, 15340), 'datetime.datetime.strptime', 'datetime.strptime', (['aimida.CREATION_DATE', '"""%d%b%y"""'], {}), "(aimida.CREATION_DATE, '%d%b%y')\n", (15308, 15340), False, 'from datetime import datetime\n'), ((15598, 15664), 'datetime.datetime.strptime', 'datetime.strptime', (['(aimida.MISSION_DATE + aimida.TIME)', '"""%d%b%y%H%M"""'], {}), "(aimida.MISSION_DATE + aimida.TIME, '%d%b%y%H%M')\n", (15615, 15664), False, 'from datetime import datetime\n'), ((21388, 21412), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['arp_llh'], {}), '(arp_llh)\n', (21403, 21412), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21431, 21455), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (21446, 21455), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22630, 22654), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['arp_llh'], {}), '(arp_llh)\n', (22645, 22654), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22673, 22697), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (22688, 22697), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23371, 23404), 'sarpy.io.general.nitf.extract_image_corners', 'extract_image_corners', (['img_header'], {}), '(img_header)\n', (23392, 23404), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((25492, 25523), 'numpy.sqrt', 'numpy.sqrt', (['(data / scale_factor)'], {}), '(data / scale_factor)\n', (25502, 25523), False, 'import numpy\n'), ((28851, 29056), 'sarpy.io.general.format_function.ComplexFormatFunction.__init__', 'ComplexFormatFunction.__init__', (['self', 'raw_dtype', 'order'], {'raw_shape': 'raw_shape', 'formatted_shape': 'formatted_shape', 'reverse_axes': 'reverse_axes', 'transpose_axes': 'transpose_axes', 'band_dimension': 'band_dimension'}), '(self, raw_dtype, order, 
raw_shape=raw_shape,\n formatted_shape=formatted_shape, reverse_axes=reverse_axes,\n transpose_axes=transpose_axes, band_dimension=band_dimension)\n', (28881, 29056), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((30008, 30104), 'sarpy.io.general.format_function.ComplexFormatFunction._forward_magnitude_theta', 'ComplexFormatFunction._forward_magnitude_theta', (['self', 'data', 'out', 'magnitude', 'theta', 'subscript'], {}), '(self, data, out, magnitude,\n theta, subscript)\n', (30054, 30104), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((31419, 31497), 'sarpy.io.general.format_function.ComplexFormatFunction', 'ComplexFormatFunction', (['raw_dtype', 'complex_order'], {'band_dimension': 'band_dimension'}), '(raw_dtype, complex_order, band_dimension=band_dimension)\n', (31440, 31497), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((35010, 35047), 'sarpy.io.general.nitf.NITFDetails.__init__', 'NITFDetails.__init__', (['self', 'file_name'], {}), '(self, file_name)\n', (35030, 35047), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((41100, 41159), 'sarpy.io.complex.base.SICDTypeReader.__init__', 'SICDTypeReader.__init__', (['self', 'None', 'nitf_details.sicd_meta'], {}), '(self, None, nitf_details.sicd_meta)\n', (41123, 41159), False, 'from sarpy.io.complex.base import SICDTypeReader\n'), ((41168, 41316), 'sarpy.io.general.nitf.NITFReader.__init__', 'NITFReader.__init__', (['self', 'nitf_details'], {'reader_type': '"""SICD"""', 'reverse_axes': 'nitf_details.reverse_axes', 'transpose_axes': 'nitf_details.transpose_axes'}), "(self, nitf_details, reader_type='SICD', reverse_axes=\n nitf_details.reverse_axes, transpose_axes=nitf_details.transpose_axes)\n", (41187, 41316), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((43632, 43750), 'sarpy.io.general.nitf.NITFReader.get_format_function', 'NITFReader.get_format_function', (['self', 'raw_dtype', 'complex_order', 'lut', 'band_dimension', 'image_segment_index'], {}), '(self, raw_dtype, complex_order, lut,\n band_dimension, image_segment_index, **kwargs)\n', (43662, 43750), False, 'from sarpy.io.general.nitf import extract_image_corners, NITFDetails, NITFReader\n'), ((5712, 5742), 'sarpy.io.complex.sicd_elements.GeoData.GeoDataType', 'GeoDataType', ([], {'ImageCorners': 'icps'}), '(ImageCorners=icps)\n', (5723, 5742), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((6005, 6031), 'sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType', 'SCPCOAType', ([], {'ARPPos': 'arp_ecf'}), '(ARPPos=arp_ecf)\n', (6015, 6031), False, 'from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType\n'), ((7001, 7041), 'sarpy.io.complex.sicd_elements.Timeline.TimelineType', 'TimelineType', ([], {'CollectStart': 'collect_start'}), '(CollectStart=collect_start)\n', (7013, 7041), False, 'from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType\n'), ((7513, 7544), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'row_unit'}), '(UVectECF=row_unit)\n', (7525, 7544), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((7715, 7746), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'col_unit'}), '(UVectECF=col_unit)\n', (7727, 7746), False, 'from 
sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((8124, 8137), 'sarpy.io.complex.sicd_elements.GeoData.GeoDataType', 'GeoDataType', ([], {}), '()\n', (8135, 8137), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((8204, 8216), 'sarpy.io.complex.sicd_elements.SCPCOA.SCPCOAType', 'SCPCOAType', ([], {}), '()\n', (8214, 8216), False, 'from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType\n'), ((8279, 8289), 'sarpy.io.complex.sicd_elements.Grid.GridType', 'GridType', ([], {}), '()\n', (8287, 8289), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((8360, 8374), 'sarpy.io.complex.sicd_elements.Timeline.TimelineType', 'TimelineType', ([], {}), '()\n', (8372, 8374), False, 'from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType\n'), ((8459, 8480), 'sarpy.io.complex.sicd_elements.RadarCollection.RadarCollectionType', 'RadarCollectionType', ([], {}), '()\n', (8478, 8480), False, 'from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, TxFrequencyType, WaveformParametersType, ChanParametersType\n'), ((8563, 8583), 'sarpy.io.complex.sicd_elements.ImageFormation.ImageFormationType', 'ImageFormationType', ([], {}), '()\n', (8581, 8583), False, 'from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, TxFrequencyProcType\n'), ((10653, 10686), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""UNIFORM"""'}), "(WindowName='UNIFORM')\n", (10664, 10686), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((10727, 10760), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""UNIFORM"""'}), "(WindowName='UNIFORM')\n", (10738, 10760), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((12360, 12393), 'numpy.datetime64', 'numpy.datetime64', (['date_time', '"""us"""'], {}), "(date_time, 'us')\n", (12376, 12393), False, 'import numpy\n'), ((13493, 13541), 'sarpy.io.complex.sicd_elements.RadarCollection.ChanParametersType', 'ChanParametersType', ([], {'TxRcvPolarization': 'tx_rcv_pol'}), '(TxRcvPolarization=tx_rcv_pol)\n', (13511, 13541), False, 'from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, TxFrequencyType, WaveformParametersType, ChanParametersType\n'), ((14109, 14160), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['fpn_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(fpn_ned, scp_ecf, absolute_coords=False)\n', (14119, 14160), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((14183, 14234), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['ipn_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(ipn_ned, scp_ecf, absolute_coords=False)\n', (14193, 14234), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((14262, 14295), 'sarpy.io.complex.sicd_elements.PFA.PFAType', 'PFAType', ([], {'FPN': 'fpn_ecf', 'IPN': 'ipn_ecf'}), '(FPN=fpn_ecf, IPN=ipn_ecf)\n', (14269, 14295), False, 'from sarpy.io.complex.sicd_elements.PFA import PFAType\n'), ((15421, 15460), 'sarpy.io.complex.sicd_elements.ImageCreation.ImageCreationType', 'ImageCreationType', ([], {'DateTime': 'create_time'}), '(DateTime=create_time)\n', (15438, 15460), False, 'from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType\n'), ((17948, 17971), 
'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'row_ss'}), '(SS=row_ss)\n', (17960, 17971), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((18128, 18151), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'col_ss'}), '(SS=col_ss)\n', (18140, 18151), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((19895, 19949), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_pos_ned', 'scp_ecf'], {'absolute_coords': '(True)'}), '(arp_pos_ned, scp_ecf, absolute_coords=True)\n', (19905, 19949), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((20057, 20112), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_vel_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(arp_vel_ned, scp_ecf, absolute_coords=False)\n', (20067, 20112), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((20257, 20312), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['arp_acc_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(arp_acc_ned, scp_ecf, absolute_coords=False)\n', (20267, 20312), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21882, 21938), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['row_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(row_unit_ned, scp_ecf, absolute_coords=False)\n', (21892, 21938), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((21959, 22015), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['col_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(col_unit_ned, scp_ecf, absolute_coords=False)\n', (21969, 22015), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((22808, 22832), 'sarpy.geometry.geocoords.geodetic_to_ecf', 'geodetic_to_ecf', (['scp_llh'], {}), '(scp_llh)\n', (22823, 22832), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23185, 23241), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['row_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(row_unit_ned, scp_ecf, absolute_coords=False)\n', (23195, 23241), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((23262, 23318), 'sarpy.geometry.geocoords.ned_to_ecf', 'ned_to_ecf', (['col_unit_ned', 'scp_ecf'], {'absolute_coords': '(False)'}), '(col_unit_ned, scp_ecf, absolute_coords=False)\n', (23272, 23318), False, 'from sarpy.geometry.geocoords import geodetic_to_ecf, ned_to_ecf\n'), ((27224, 27250), 'numpy.log10', 'numpy.log10', (['tipping_point'], {}), '(tipping_point)\n', (27235, 27250), False, 'import numpy\n'), ((31861, 31939), 'sarpy.io.general.format_function.ComplexFormatFunction', 'ComplexFormatFunction', (['raw_dtype', 'complex_order'], {'band_dimension': 'band_dimension'}), '(raw_dtype, complex_order, band_dimension=band_dimension)\n', (31882, 31939), False, 'from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction\n'), ((42778, 42802), 'copy.deepcopy', 'copy.deepcopy', (['nitf_dict'], {}), '(nitf_dict)\n', (42791, 42802), False, 'import copy\n'), ((5268, 5305), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CountryCodes': '[cc]'}), '(CountryCodes=[cc])\n', (5286, 5305), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((6424, 6457), 
'sarpy.io.complex.sicd_elements.ImageData.ImageDataType', 'ImageDataType', ([], {'SCPPixel': 'scp_pixel'}), '(SCPPixel=scp_pixel)\n', (6437, 6457), False, 'from sarpy.io.complex.sicd_elements.ImageData import ImageDataType\n'), ((6755, 6775), 'sarpy.io.complex.sicd_elements.GeoData.SCPType', 'SCPType', ([], {'ECF': 'scp_ecf'}), '(ECF=scp_ecf)\n', (6762, 6775), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((10837, 10870), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HAMMING"""'}), "(WindowName='HAMMING')\n", (10848, 10870), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((10911, 10944), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HAMMING"""'}), "(WindowName='HAMMING')\n", (10922, 10944), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((16799, 16842), 'sarpy.io.complex.sicd_elements.CollectionInfo.CollectionInfoType', 'CollectionInfoType', ([], {'CollectorName': 'sensor_id'}), '(CollectorName=sensor_id)\n', (16817, 16842), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType\n'), ((19029, 19048), 'numpy.any', 'numpy.any', (['(arr != 0)'], {}), '(arr != 0)\n', (19038, 19048), False, 'import numpy\n'), ((20975, 21011), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.ACFT_LOC[:12]'], {}), '(mensrb.ACFT_LOC[:12])\n', (20989, 21011), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21026, 21064), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.ACFT_LOC[12:25]'], {}), '(mensrb.ACFT_LOC[12:25])\n', (21040, 21064), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21170, 21204), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.RP_LOC[:12]'], {}), '(mensrb.RP_LOC[:12])\n', (21184, 21204), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((21219, 21255), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensrb.RP_LOC[12:25]'], {}), '(mensrb.RP_LOC[12:25])\n', (21233, 21255), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22217, 22253), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.ACFT_LOC[:10]'], {}), '(mensra.ACFT_LOC[:10])\n', (22231, 22253), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22268, 22306), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.ACFT_LOC[10:21]'], {}), '(mensra.ACFT_LOC[10:21])\n', (22282, 22306), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22412, 22446), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.CP_LOC[:10]'], {}), '(mensra.CP_LOC[:10])\n', (22426, 22446), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((22461, 22497), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['mensra.CP_LOC[10:21]'], {}), '(mensra.CP_LOC[10:21])\n', (22475, 22497), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((37975, 38060), 'logging.error', 'logging.error', (['"""Image segment appears to multiband with switch complex ordering"""'], {}), "('Image segment appears to multiband with switch complex ordering'\n )\n", (37988, 38060), False, 'import logging\n'), ((6616, 6636), 'sarpy.io.complex.sicd_elements.GeoData.SCPType', 'SCPType', ([], {'ECF': 'scp_ecf'}), '(ECF=scp_ecf)\n', (6623, 6636), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((7337, 7368), 
'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'row_unit'}), '(UVectECF=row_unit)\n', (7349, 7368), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((7390, 7421), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'UVectECF': 'col_unit'}), '(UVectECF=col_unit)\n', (7402, 7421), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((11021, 11054), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HANNING"""'}), "(WindowName='HANNING')\n", (11032, 11054), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((11095, 11128), 'sarpy.io.complex.sicd_elements.Grid.WgtTypeType', 'WgtTypeType', ([], {'WindowName': '"""HANNING"""'}), "(WindowName='HANNING')\n", (11106, 11128), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((17557, 17596), 'numpy.deg2rad', 'numpy.deg2rad', (['the_sicd.SCPCOA.GrazeAng'], {}), '(the_sicd.SCPCOA.GrazeAng)\n', (17570, 17596), False, 'import numpy\n'), ((17687, 17726), 'numpy.deg2rad', 'numpy.deg2rad', (['the_sicd.SCPCOA.TwistAng'], {}), '(the_sicd.SCPCOA.TwistAng)\n', (17700, 17726), False, 'import numpy\n'), ((17804, 17827), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'row_ss'}), '(SS=row_ss)\n', (17816, 17827), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((17833, 17856), 'sarpy.io.complex.sicd_elements.Grid.DirParamType', 'DirParamType', ([], {'SS': 'col_ss'}), '(SS=col_ss)\n', (17845, 17856), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((18739, 18765), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['value[:10]'], {}), '(value[:10])\n', (18753, 18765), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((18792, 18820), 'sarpy.geometry.latlon.num', 'lat_lon_parser', (['value[10:21]'], {}), '(value[10:21])\n', (18806, 18820), True, 'from sarpy.geometry.latlon import num as lat_lon_parser\n'), ((19004, 19023), 'numpy.isfinite', 'numpy.isfinite', (['arr'], {}), '(arr)\n', (19018, 19023), False, 'import numpy\n'), ((33217, 33229), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (33226, 33229), False, 'import numpy\n'), ((26085, 26098), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (26094, 26098), False, 'import numpy\n'), ((26673, 26686), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (26682, 26686), False, 'import numpy\n'), ((33373, 33385), 'numpy.log', 'numpy.log', (['(2)'], {}), '(2)\n', (33382, 33385), False, 'import numpy\n')]
|
import threading
import time
import airobot.utils.common as arutil
from airobot.ee_tool.simple_gripper_pybullet import SimpleGripperPybullet
from airobot.utils.arm_util import wait_to_reach_jnt_goal
class SimpleGripperMimicPybullet(SimpleGripperPybullet):
"""
A base class for gripper with mimic joints in pybullet.
Args:
cfgs (YACS CfgNode): configurations for the gripper.
pb_client (BulletClient): pybullet client.
Attributes:
cfgs (YACS CfgNode): configurations for the gripper.
gripper_close_angle (float): position value corresponding to the
fully closed position of the gripper.
gripper_open_angle (float): position value corresponding to the
fully open position of the gripper.
jnt_names (list): names of the gripper joints.
gripper_jnt_ids (list): pybullet joint ids of the gripper joints.
robot_id (int): robot id in Pybullet.
jnt_to_id (dict): mapping from the joint name to joint id.
"""
def __init__(self, cfgs, pb_client):
super(SimpleGripperMimicPybullet, self).__init__(cfgs=cfgs, pb_client=pb_client)
self._gripper_mimic_coeff = self.cfgs.EETOOL.MIMIC_COEFF
self._mthread_started = False
def feed_robot_info(self, robot_id, jnt_to_id):
"""
Setup the gripper, pass the robot info from the arm to the gripper.
Args:
robot_id (int): robot id in Pybullet.
jnt_to_id (dict): mapping from the joint name to joint id.
"""
super().feed_robot_info(robot_id, jnt_to_id)
# if the gripper has been activated once,
# the following code is used to prevent starting
# a new thread after the arm reset if a thread has been started
if not self._mthread_started:
self._mthread_started = True
# gripper thread
self._th_gripper = threading.Thread(target=self._th_mimic_gripper)
self._th_gripper.daemon = True
self._th_gripper.start()
else:
return
def set_jpos(self, pos, wait=True, ignore_physics=False):
"""
Set the gripper position.
Args:
pos (float): joint position.
wait (bool): wait until the joint position is set
to the target position.
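            ignore_physics (bool): if True, bypass the physics simulation and
                hard-reset the gripper joints directly to the target position.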
Returns:
bool: A boolean variable representing if the action is
successful at the moment when the function exits.
"""
joint_name = self.jnt_names[0]
tgt_pos = arutil.clamp(
pos,
min(self.gripper_open_angle, self.gripper_close_angle),
max(self.gripper_open_angle, self.gripper_close_angle))
jnt_id = self.jnt_to_id[joint_name]
if ignore_physics:
self._zero_vel_mode()
mic_pos = self._mimic_gripper(pos)
self._hard_reset(mic_pos)
success = True
else:
self._pb.setJointMotorControl2(self.robot_id,
jnt_id,
self._pb.POSITION_CONTROL,
targetPosition=tgt_pos,
force=self._max_torque)
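            # in stepped (non-realtime) simulation the mimic thread does not run,
            # so update the follower joints here as well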
if not self._pb.in_realtime_mode():
self._set_rest_joints(tgt_pos)
success = False
if self._pb.in_realtime_mode() and wait:
success = wait_to_reach_jnt_goal(
tgt_pos,
get_func=self.get_jpos,
joint_name=joint_name,
get_func_derv=self.get_jvel,
timeout=self.cfgs.ARM.TIMEOUT_LIMIT,
max_error=self.cfgs.ARM.MAX_JOINT_ERROR
)
return success
def get_jpos(self):
"""
Return the joint position(s) of the gripper.
Returns:
float: joint position.
"""
if not self._is_activated:
raise RuntimeError('Call activate function first!')
jnt_id = self.jnt_to_id[self.jnt_names[0]]
pos = self._pb.getJointState(self.robot_id, jnt_id)[0]
return pos
def get_jvel(self):
"""
Return the joint velocity of the gripper.
Returns:
float: joint velocity.
"""
if not self._is_activated:
raise RuntimeError('Call activate function first!')
jnt_id = self.jnt_to_id[self.jnt_names[0]]
vel = self._pb.getJointState(self.robot_id, jnt_id)[1]
return vel
def _mimic_gripper(self, joint_val):
"""
Given the value for the first joint,
        mimic the joint values for the rest of the joints.
"""
jnt_vals = [joint_val]
for i in range(1, len(self.jnt_names)):
jnt_vals.append(joint_val * self._gripper_mimic_coeff[i])
return jnt_vals
def _th_mimic_gripper(self):
"""
Make all the other joints of the gripper
follow the motion of the first joint of the gripper.
"""
while True:
if self._is_activated and self._pb.in_realtime_mode():
self._set_rest_joints()
time.sleep(0.005)
def _set_rest_joints(self, gripper_pos=None):
max_torq = self._max_torque
max_torques = [max_torq] * (len(self.jnt_names) - 1)
if gripper_pos is None:
gripper_pos = self.get_jpos()
gripper_poss = self._mimic_gripper(gripper_pos)[1:]
gripper_vels = [0.0] * len(max_torques)
self._pb.setJointMotorControlArray(self.robot_id,
self.gripper_jnt_ids[1:],
self._pb.POSITION_CONTROL,
targetPositions=gripper_poss,
targetVelocities=gripper_vels,
forces=max_torques)
|
[
"threading.Thread",
"airobot.utils.arm_util.wait_to_reach_jnt_goal",
"time.sleep"
] |
[((1917, 1964), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._th_mimic_gripper'}), '(target=self._th_mimic_gripper)\n', (1933, 1964), False, 'import threading\n'), ((5237, 5254), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (5247, 5254), False, 'import time\n'), ((3476, 3671), 'airobot.utils.arm_util.wait_to_reach_jnt_goal', 'wait_to_reach_jnt_goal', (['tgt_pos'], {'get_func': 'self.get_jpos', 'joint_name': 'joint_name', 'get_func_derv': 'self.get_jvel', 'timeout': 'self.cfgs.ARM.TIMEOUT_LIMIT', 'max_error': 'self.cfgs.ARM.MAX_JOINT_ERROR'}), '(tgt_pos, get_func=self.get_jpos, joint_name=\n joint_name, get_func_derv=self.get_jvel, timeout=self.cfgs.ARM.\n TIMEOUT_LIMIT, max_error=self.cfgs.ARM.MAX_JOINT_ERROR)\n', (3498, 3671), False, 'from airobot.utils.arm_util import wait_to_reach_jnt_goal\n')]
|
#
# This file is part of Bakefile (http://bakefile.org)
#
# Copyright (C) 2008-2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module contains the very core of Bakefile -- the interpreter,
:class:`bkl.interpreter.Interpreter`, and its supporting classes.
"""
import logging
import bkl.parser
import bkl.model
import bkl.api
import bkl.expr
import passes
from builder import Builder
from bkl.error import Error, warning
from bkl.parser import parse_file
logger = logging.getLogger("bkl.interpreter")
class Interpreter(object):
"""
The interpreter is responsible for doing everything necessary to
"translate" input ``.bkl`` files into generated native makefiles. This
includes building a project model from the input, checking it for
correctness, optimizing it and creating outputs for all enabled toolsets.
    :class:`Interpreter` provides both a high-level interface for single-call
    usage (see :meth:`process`) and other methods with finer granularity that
    allow you to inspect individual steps (most useful for the test suite).
.. attribute:: model
        Model of the project, as :class:`bkl.model.Project`. Its state always
        reflects the current state of processing.
.. attribute:: toolsets_to_use
Set of toolsets to generate for. This list may contain only a subset of
toolsets the bakefile is written for and may even contain toolsets not
specified in the bakefile.
If :const:`None` (the default), then the toolsets listed in the bakefile
are used.
"""
def __init__(self):
self.model = bkl.model.Project()
self.toolsets_to_use = None
def limit_toolsets(self, toolsets):
"""Sets :attr:`toolsets_to_use`."""
self.toolsets_to_use = set(toolsets)
def process(self, ast):
"""
Interprets input file and generates the outputs.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
Processing is done in several phases:
1. Basic model is built (see :class:`bkl.interpreter.builder.Builder`).
No optimizations or checks are performed at this point.
2. Several generic optimization and checking passes are run on the
model. Among other things, types correctness and other constraints
are checked, variables are substituted and evaluated.
3. The model is split into several copies, one per output toolset.
4. Further optimization passes are done.
5. Output files are generated.
Step 1 is done by :meth:`add_module`. Steps 2-4 are done by
:meth:`finalize` and step 5 is implemented in :meth:`generate`.
"""
self.add_module(ast, self.model)
self.finalize()
self.generate()
def process_file(self, filename):
"""Like :meth:`process()`, but takes filename as its argument."""
self.process(parse_file(filename))
def add_module(self, ast, parent):
"""
Adds parsed AST to the model, without doing any optimizations. May be
called more than once, with different parsed files.
:param ast: AST of the input file, as returned by
:func:`bkl.parser.parse_file`.
"""
logger.info("processing %s", ast.filename)
submodules = []
b = Builder(on_submodule=lambda fn, pos: submodules.append((fn,pos)))
module = b.create_model(ast, parent)
while submodules:
sub_filename, sub_pos = submodules[0]
submodules.pop(0)
try:
sub_ast = parse_file(sub_filename)
except IOError as e:
if e.filename:
msg = "%s: %s" % (e.strerror, e.filename)
else:
msg = e.strerror
raise Error(msg, pos=sub_pos)
self.add_module(sub_ast, module)
def _call_custom_steps(self, model, func):
for step in bkl.api.CustomStep.all():
logger.debug("invoking custom step %s.%s()", step.name, func)
getattr(step, func)(model)
def finalize(self):
"""
Finalizes the model, i.e. checks it for validity, optimizes, creates
per-toolset models etc.
"""
logger.debug("finalizing the model")
# call any custom steps first:
self._call_custom_steps(self.model, "finalize")
# then apply standard processing:
passes.detect_potential_problems(self.model)
passes.normalize_and_validate_bool_subexpressions(self.model)
passes.normalize_vars(self.model)
passes.validate_vars(self.model)
passes.normalize_paths_in_model(self.model, toolset=None)
passes.simplify_exprs(self.model)
def finalize_for_toolset(self, toolset_model, toolset):
"""
Finalizes after "toolset" variable was set.
"""
passes.remove_disabled_model_parts(toolset_model, toolset)
# TODO: do this in finalize() instead
passes.make_variables_for_missing_props(toolset_model, toolset)
passes.eliminate_superfluous_conditionals(toolset_model)
# This is done second time here (in addition to finalize()) to deal
# with paths added by make_variables_for_missing_props() and paths with
# @builddir (which is toolset specific and couldn't be resolved
# earlier). Ideally we wouldn't do it, but hopefully it's not all that
# inefficient, as no real work is done for paths that are already
# normalized:
passes.normalize_paths_in_model(toolset_model, toolset)
def make_toolset_specific_model(self, toolset, skip_making_copy=False):
"""
Returns toolset-specific model, i.e. one that works only with
*toolset*, has the ``toolset`` property set to it. The caller
still needs to call finalize_for_toolset() on it.
"""
if skip_making_copy:
model = self.model
else:
model = self.model.clone()
# don't use Variable.from_property(), because it's read-only
model.add_variable(bkl.model.Variable.from_property(
model.get_prop("toolset"),
bkl.expr.LiteralExpr(toolset)))
return model
def generate(self):
"""
Generates output files.
"""
# collect all requested toolsets:
toolsets = set()
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
toolsets.update(module_toolsets.value.as_py())
if self.toolsets_to_use:
for t in self.toolsets_to_use:
if t not in toolsets:
try:
bkl.api.Toolset.get(t)
except KeyError:
raise Error("unknown toolset \"%s\" given on command line" % t)
warning("toolset \"%s\" is not supported by the project, there may be issues", t)
# Add the forced toolset to all submodules:
for module in self.model.modules:
module_toolsets = module.get_variable("toolsets")
if module_toolsets:
module_toolsets.value.items.append(bkl.expr.LiteralExpr(t))
toolsets = self.toolsets_to_use
toolsets = list(toolsets)
logger.debug("toolsets to generate for: %s", toolsets)
if not toolsets:
raise Error("nothing to generate, \"toolsets\" property is empty")
# call any custom steps first:
self._call_custom_steps(self.model, "generate")
# and generate the outputs (notice that we can avoid making a
# (expensive!) deepcopy of the model for one of the toolsets and can
# reuse the current model):
for toolset in toolsets[:-1]:
self.generate_for_toolset(toolset)
self.generate_for_toolset(toolsets[-1], skip_making_copy=True)
def generate_for_toolset(self, toolset, skip_making_copy=False):
"""
Generates output for given *toolset*.
"""
logger.debug("****** preparing model for toolset %s ******", toolset)
model = self.make_toolset_specific_model(toolset, skip_making_copy)
self.finalize_for_toolset(model, toolset)
logger.debug("****** generating for toolset %s ********", toolset)
bkl.api.Toolset.get(toolset).generate(model)
|
[
"bkl.error.warning",
"passes.remove_disabled_model_parts",
"passes.make_variables_for_missing_props",
"passes.eliminate_superfluous_conditionals",
"bkl.error.Error",
"passes.detect_potential_problems",
"passes.validate_vars",
"passes.normalize_paths_in_model",
"passes.normalize_vars",
"passes.simplify_exprs",
"bkl.parser.parse_file",
"logging.getLogger",
"passes.normalize_and_validate_bool_subexpressions"
] |
[((1515, 1551), 'logging.getLogger', 'logging.getLogger', (['"""bkl.interpreter"""'], {}), "('bkl.interpreter')\n", (1532, 1551), False, 'import logging\n'), ((5532, 5576), 'passes.detect_potential_problems', 'passes.detect_potential_problems', (['self.model'], {}), '(self.model)\n', (5564, 5576), False, 'import passes\n'), ((5585, 5646), 'passes.normalize_and_validate_bool_subexpressions', 'passes.normalize_and_validate_bool_subexpressions', (['self.model'], {}), '(self.model)\n', (5634, 5646), False, 'import passes\n'), ((5655, 5688), 'passes.normalize_vars', 'passes.normalize_vars', (['self.model'], {}), '(self.model)\n', (5676, 5688), False, 'import passes\n'), ((5697, 5729), 'passes.validate_vars', 'passes.validate_vars', (['self.model'], {}), '(self.model)\n', (5717, 5729), False, 'import passes\n'), ((5738, 5795), 'passes.normalize_paths_in_model', 'passes.normalize_paths_in_model', (['self.model'], {'toolset': 'None'}), '(self.model, toolset=None)\n', (5769, 5795), False, 'import passes\n'), ((5804, 5837), 'passes.simplify_exprs', 'passes.simplify_exprs', (['self.model'], {}), '(self.model)\n', (5825, 5837), False, 'import passes\n'), ((5984, 6042), 'passes.remove_disabled_model_parts', 'passes.remove_disabled_model_parts', (['toolset_model', 'toolset'], {}), '(toolset_model, toolset)\n', (6018, 6042), False, 'import passes\n'), ((6098, 6161), 'passes.make_variables_for_missing_props', 'passes.make_variables_for_missing_props', (['toolset_model', 'toolset'], {}), '(toolset_model, toolset)\n', (6137, 6161), False, 'import passes\n'), ((6171, 6227), 'passes.eliminate_superfluous_conditionals', 'passes.eliminate_superfluous_conditionals', (['toolset_model'], {}), '(toolset_model)\n', (6212, 6227), False, 'import passes\n'), ((6641, 6696), 'passes.normalize_paths_in_model', 'passes.normalize_paths_in_model', (['toolset_model', 'toolset'], {}), '(toolset_model, toolset)\n', (6672, 6696), False, 'import passes\n'), ((3992, 4012), 'bkl.parser.parse_file', 'parse_file', (['filename'], {}), '(filename)\n', (4002, 4012), False, 'from bkl.parser import parse_file\n'), ((8684, 8742), 'bkl.error.Error', 'Error', (['"""nothing to generate, "toolsets" property is empty"""'], {}), '(\'nothing to generate, "toolsets" property is empty\')\n', (8689, 8742), False, 'from bkl.error import Error, warning\n'), ((4672, 4696), 'bkl.parser.parse_file', 'parse_file', (['sub_filename'], {}), '(sub_filename)\n', (4682, 4696), False, 'from bkl.parser import parse_file\n'), ((4904, 4927), 'bkl.error.Error', 'Error', (['msg'], {'pos': 'sub_pos'}), '(msg, pos=sub_pos)\n', (4909, 4927), False, 'from bkl.error import Error, warning\n'), ((8092, 8171), 'bkl.error.warning', 'warning', (['"""toolset "%s" is not supported by the project, there may be issues"""', 't'], {}), '(\'toolset "%s" is not supported by the project, there may be issues\', t)\n', (8099, 8171), False, 'from bkl.error import Error, warning\n'), ((8014, 8069), 'bkl.error.Error', 'Error', (['(\'unknown toolset "%s" given on command line\' % t)'], {}), '(\'unknown toolset "%s" given on command line\' % t)\n', (8019, 8069), False, 'from bkl.error import Error, warning\n')]
|
import time
import threading
import serial
import parameter
class Rs232Connection(threading.Thread):
exit = True
stop = True
try:
__ser = serial.Serial(
            port='/dev/ttyS0',  # Open RPI built-in serial port
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
except:
print ("RS232-Port could not be opened!")
def __init__(self):
threading.Thread.__init__(self)
if parameter.printMessages:
print("init rs232")
threading.Thread.start(self)
def run(self):
#self.lock.acquire()
        while self.exit:  # thread only terminates once we leave this while loop
if self.stop:
self.request()
time.sleep(parameter.timeTriggerPowerAnalayser)
#self.lock.release()
def request(self):
pass
def getSerialPort(self):
return self.__ser
def setStop(self):
self.stop = False
def setStart(self):
self.stop = True
def setExit(self):
self.exit = False
        self.__ser.close()
def __exit__(self):
pass
|
[
"serial.Serial",
"threading.Thread.__init__",
"threading.Thread.start",
"time.sleep"
] |
[((170, 316), 'serial.Serial', 'serial.Serial', ([], {'port': '"""/dev/ttyS0"""', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS', 'timeout': '(1)'}), "(port='/dev/ttyS0', baudrate=9600, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1)\n", (183, 316), False, 'import serial\n'), ((529, 560), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (554, 560), False, 'import threading\n'), ((637, 665), 'threading.Thread.start', 'threading.Thread.start', (['self'], {}), '(self)\n', (659, 665), False, 'import threading\n'), ((877, 924), 'time.sleep', 'time.sleep', (['parameter.timeTriggerPowerAnalayser'], {}), '(parameter.timeTriggerPowerAnalayser)\n', (887, 924), False, 'import time\n')]
|
from flask import render_template, session, redirect, url_for
from flask_login import login_required
from . import core
@core.route('/', methods=['GET', 'POST'])
@login_required
def index():
return redirect(url_for('aurora.aurora_overview'))
@core.route('/offline.html')
def offline():
return core.send_static_file('offline.html')
@core.route('/service-worker.js')
def sw():
return core.send_static_file('service-worker.js')
|
[
"flask.url_for"
] |
[((213, 246), 'flask.url_for', 'url_for', (['"""aurora.aurora_overview"""'], {}), "('aurora.aurora_overview')\n", (220, 246), False, 'from flask import render_template, session, redirect, url_for\n')]
|
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import torch
import time
from torch.autograd import Variable
import captcha_setting
import my_dataset
from captcha_cnn_model import CNN
def main():
    print('Starting prediction on the images')
cnn = CNN()
cnn.eval()
cnn.load_state_dict(torch.load('model.pkl'))
print("加载神经网络训练的模型.")
result = []
predict_dataloader = my_dataset.get_predict_data_loader()
for i, (image_name, images, labels) in enumerate(predict_dataloader):
start = time.time()
image = images
vimage = Variable(image)
predict_label = cnn(vimage)
c0 = captcha_setting.ALL_CHAR_SET[np.argmax(predict_label[0, 0:captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c1 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, captcha_setting.ALL_CHAR_SET_LEN:2 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c2 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 2 * captcha_setting.ALL_CHAR_SET_LEN:3 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
c3 = captcha_setting.ALL_CHAR_SET[np.argmax(
predict_label[0, 3 * captcha_setting.ALL_CHAR_SET_LEN:4 * captcha_setting.ALL_CHAR_SET_LEN].data.numpy())]
res = '%s%s%s%s' % (c0, c1, c2, c3)
cost = '%.2f ms' % ((time.time() - start) * 1000)
result.append([image_name[0],res, cost])
    print('Prediction results of the trained neural network:')
data = np.hstack([result])
res = pd.DataFrame(data, columns=['图片名称', '预测结果', '耗费时间'])
print(res)
if __name__ == '__main__':
main()
|
[
"pandas.DataFrame",
"torch.autograd.Variable",
"torch.load",
"numpy.hstack",
"time.time",
"captcha_cnn_model.CNN",
"my_dataset.get_predict_data_loader"
] |
[((246, 251), 'captcha_cnn_model.CNN', 'CNN', ([], {}), '()\n', (249, 251), False, 'from captcha_cnn_model import CNN\n'), ((383, 419), 'my_dataset.get_predict_data_loader', 'my_dataset.get_predict_data_loader', ([], {}), '()\n', (417, 419), False, 'import my_dataset\n'), ((1442, 1461), 'numpy.hstack', 'np.hstack', (['[result]'], {}), '([result])\n', (1451, 1461), True, 'import numpy as np\n'), ((1472, 1524), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['图片名称', '预测结果', '耗费时间']"}), "(data, columns=['图片名称', '预测结果', '耗费时间'])\n", (1484, 1524), True, 'import pandas as pd\n'), ((291, 314), 'torch.load', 'torch.load', (['"""model.pkl"""'], {}), "('model.pkl')\n", (301, 314), False, 'import torch\n'), ((510, 521), 'time.time', 'time.time', ([], {}), '()\n', (519, 521), False, 'import time\n'), ((562, 577), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (570, 577), False, 'from torch.autograd import Variable\n'), ((1320, 1331), 'time.time', 'time.time', ([], {}), '()\n', (1329, 1331), False, 'import time\n')]
|
import os
import yaml
import tornado.ioloop
import tornado.gen
import tornado.web
from job_server.context import JobServerContext
from job_server.routes import PostJobHandler, RunJobHandler
from job_server.db import init_db
def job_server(context):
return tornado.web.Application([
(r'/job/run', RunJobHandler, dict(
context=context
)),
(r'/job/post/([A-z]+)', PostJobHandler, dict(
context=context
))
])
if __name__ == "__main__":
    config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.yaml')
    with open(config_path, 'r') as config_file:
        context = JobServerContext(yaml.safe_load(config_file))
init_db(context)
app = job_server(context)
app.listen(8080)
tornado.ioloop.IOLoop.current().start()
|
[
"os.path.realpath",
"job_server.db.init_db"
] |
[((738, 754), 'job_server.db.init_db', 'init_db', (['context'], {}), '(context)\n', (745, 754), False, 'from job_server.db import init_db\n'), ((577, 603), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (593, 603), False, 'import os\n')]
|
#!/usr/bin/env python3
import requests
import sys
import argparse
import uuid
from time import sleep
from string import Template
def Spray(domain, users, target_url, output_file, wait, verbose, more_verbose, debug):
i = 0
results = []
if verbose or more_verbose:
print("Targeting: " + target_url + "\n")
for user in users:
if more_verbose:
print("\ntesting " + user)
body = '{"Username": "%s@%s"}' % (user, domain)
r = requests.post(target_url, data=body)
#print(target_url)
if debug:
print("Time elapsed: " + str(r.elapsed) + "\n")
if more_verbose:
print("Status: " + str(r.status_code))
print(r.headers)
print(r.text)
if 'ThrottleStatus' in r.headers.keys():
print("Throttling detected => ThrottleStatus: " + r.headers('ThrottleStatus'))
if '"IfExistsResult":0' in r.content.decode('UTF-8'):
output_file.write(user + "@" + domain +" - VALID\n")
if verbose or more_verbose:
print("Found " + user + "@" + domain)
continue
sleep(wait)
i = i + 1
if i % 50 == 0:
print("Tested " + str(i) + " possible users")
return results
def main():
parser = argparse.ArgumentParser(description="Enumerate users against Office365")
target_group = parser.add_argument_group(title="Attack Target")
target_group.add_argument('-d', dest='domain', type=str, help='Target domain - required')
target_group.add_argument('-l', dest='user_list', type=argparse.FileType('r'), help='File with list of target usernames (without domain)')
target_group.add_argument('-u', '--url', type=str, dest='url', help='Target URL if using something like fireprox; otherwise will directly call the O365 login endpoint')
target_group.add_argument('-w', '--wait', type=int, dest='wait', help='Number of seconds to sleep between individual user attempts', default=0)
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False)
parser.add_argument('-vv', '--more-verbose', action='store_true', dest='more_verbose', default=False)
parser.add_argument('-D', '--debug', action='store_true', dest='debug', default=False)
parser.add_argument('-o', '--output', type=argparse.FileType('w'), dest='output_file', default='spray_results.txt', help='Output file for results (txt). Default is spray_results.txt')
args = parser.parse_args()
if not args.domain:
parser.print_help()
print('\nNo target domain provided')
sys.exit()
if not args.user_list:
parser.print_help()
print('\nNo list of target users provided')
sys.exit()
if not args.url:
target_url = 'https://login.microsoftonline.com/common/GetCredentialType'
else:
target_url = args.url + 'common/GetCredentialType'
if args.debug:
print("*** DEBUG MESSAGING ENABLED ***")
users = []
for line in args.user_list:
users.append(line.split('@')[0].strip())
results = Spray(args.domain, users, target_url, args.output_file, args.wait, args.verbose, args.more_verbose, args.debug)
if __name__ == '__main__':
main()
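# Example invocation (hypothetical script name and values; adjust for your own target):
#   python3 o365_spray.py -d example.com -l users.txt -w 2 -v -o found_users.txt
# Without -u/--url the script posts directly to login.microsoftonline.com; if a
# fireprox-style base URL is given it should end with '/', since the string
# 'common/GetCredentialType' is appended to it verbatim.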
|
[
"argparse.ArgumentParser",
"time.sleep",
"requests.post",
"argparse.FileType",
"sys.exit"
] |
[((1129, 1201), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Enumerate users against Office365"""'}), "(description='Enumerate users against Office365')\n", (1152, 1201), False, 'import argparse\n'), ((442, 478), 'requests.post', 'requests.post', (['target_url'], {'data': 'body'}), '(target_url, data=body)\n', (455, 478), False, 'import requests\n'), ((994, 1005), 'time.sleep', 'sleep', (['wait'], {}), '(wait)\n', (999, 1005), False, 'from time import sleep\n'), ((2398, 2408), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2406, 2408), False, 'import sys\n'), ((2504, 2514), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2512, 2514), False, 'import sys\n'), ((1415, 1437), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1432, 1437), False, 'import argparse\n'), ((2143, 2165), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (2160, 2165), False, 'import argparse\n')]
|
#!/usr/bin/python3
# Script to shape the desired output to be processed (MMODES)
# the datatable way
# @author: <NAME>
# Creation: 09/06/2019
import os
import re
import numpy as np
import datatable as dt
from datatable import f
def log(cons, media):
'''
Writes information of consortium object to file
'''
logf = 'simulations.txt'
p = re.compile(r'#+ SIMULATION (\d+) #+')
    head = " SIMULATION 1 "
    if os.path.isfile(logf): # parse last simulation number
        with open(logf) as l:
            for line in l.readlines():
                num_sim = p.search(line)
                if num_sim:
                    head = " SIMULATION " + str(int(num_sim.group(1)) + 1) + " "
lines = '{:{fill}{align}{width}}'.format(head,
fill = '#',
align = '^',
width = 30) + "\n"
lines += cons.__str__()
pers = ', '.join([per["PERTURBATION"] for per in media])
lines += "\nPERTURBATIONS: " + pers + "\n\n"
with open(logf, "a") as l:
l.write(lines)
return
def equidistant(df, n):
sample = np.linspace(df.nrows-1,1,n).astype('int')
sample.sort()
return df[sample, :]
def tsv_filter(medium = "", flux = "", txpers = {}, inplace = False, v = 0, equif = True, bin = False):
'''
Function that filters medium and fluxes TSV files based on perturbation times.
INPUTS -> medium: string, path to medium file;
flux: string, path to medium file;
txpers: dictionary, time : perturbation;
inplace: bool, whether overwrite input paths (default False);
v: float, volume magnitude to obtain medium concentrations;
equif: bool, whether write an additional fluxes filtered file,
with 100 equidistant points (default True)
OUTPUT -> it returns None, writes 2(3) TSV files
'''
dfs = []
if not medium:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(medium), medium, 0])
if v != 0:
for i in range(1,dfs[0][0].ncols): dfs[0][0][:,i] = dfs[0][0][:,f[i]/v]
if not flux:
print("Medium parameter wasn't supplied, it won't be generated.")
else:
dfs.append([dt.fread(flux), flux, 1])
    if not txpers:
        print("You must supply a txpers parameter. Exiting function...")
return
for log, path, n in dfs:
log[:,'Perturbations'] = "FALSE" # now last column (-1)
log[-1,-1] = "END"
if len(txpers) > 1:
for tp, per in txpers.items():
if tp == 0:
log[0,-1] = per
else:
# take last time that matches <= perturbation time
log[f.time == log[f.time < tp, f.time][-1,-1], -1] = per
# if per == 'START':
# log[0,-1] = 'START'
# else:
# # take last index that matches <= perturbation time
# log[f.time == log[f.time <= tp, f.time][-1,-1], -1] = per
else:
log[0, -1] = 'START'
if n != 0 and equif:
log_equif = equidistant(log,100) # take 100 equidistant rows
log_equif.to_csv(path[:-4] + '_equi' + '.tsv')
del(log_equif)
# TODO: I don't know how to implement a condroll with datatable
        # We aren't currently using it, anyway
log = log[f.Perturbations != "FALSE", :]
if inplace:
log.to_csv(path)
else:
log.to_csv(path[:-4] + '_filtered' + '.tsv')
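# A minimal usage sketch (hypothetical file names and perturbation times; the
# function is not called here, this is for illustration only):
def _example_tsv_filter():
    txpers = {0: 'START', 5.0: 'ADD_GLUCOSE'}  # time -> perturbation label
    tsv_filter(medium='media_log.tsv', flux='flux_log.tsv',
               txpers=txpers, inplace=False, v=0.05, equif=True)
    # expected outputs: media_log_filtered.tsv, flux_log_filtered.tsv and flux_log_equi.tsv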
|
[
"os.path.isfile",
"datatable.fread",
"numpy.linspace",
"re.compile"
] |
[((359, 396), 're.compile', 're.compile', (['"""#+ SIMULATION (\\\\d+) #+"""'], {}), "('#+ SIMULATION (\\\\d+) #+')\n", (369, 396), False, 'import re\n'), ((404, 424), 'os.path.isfile', 'os.path.isfile', (['logf'], {}), '(logf)\n', (418, 424), False, 'import os\n'), ((1072, 1103), 'numpy.linspace', 'np.linspace', (['(df.nrows - 1)', '(1)', 'n'], {}), '(df.nrows - 1, 1, n)\n', (1083, 1103), True, 'import numpy as np\n'), ((1986, 2002), 'datatable.fread', 'dt.fread', (['medium'], {}), '(medium)\n', (1994, 2002), True, 'import datatable as dt\n'), ((2240, 2254), 'datatable.fread', 'dt.fread', (['flux'], {}), '(flux)\n', (2248, 2254), True, 'import datatable as dt\n')]
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type inference.
This analysis annotates all symbols nodes of an AST with type information
extracted from static sources:
* type annotations
* global and local symbols visible to the function at analysis time
* literals
Requires reaching function definitions analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Tuple
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import annos
class Resolver(object):
"""Resolver objects handle the process of looking up actual names and types.
All resolve_* methods:
* have a first namespace argument, mapping string to actual values
* specify names as QN objects
* specify types as a Set of inferred types
All resolve_* methods must return either:
* a set of `type` objects
* None
"""
def res_name(self, ns, name):
"""Resolves the type an external (e.g. closure, global) variable."""
raise NotImplementedError('subclasses must implement')
def res_value(self, ns, value):
"""Resolves the type a literal value."""
raise NotImplementedError('subclasses must implement')
# TODO(mdan): Allow caller to model side effects.
def res_call(self, ns, name, target, args, keywords, starargs, kwargs):
"""Resolves the return type an external function or method call.
Args:
ns: namespace
name: str, the function name
target: if this is a method call, the types of the method target, None
otherwise
      args: list of argument types
keywords: dict of name to argument types
starargs: list of types of the *args arguments (should be at most one)
kwargs: list of types of the **kwargs arguments (in order of appearance)
"""
raise NotImplementedError('subclasses must implement')
def res_arg(self, ns, f_name, arg_name, type_anno):
"""Resolves the type of a (possibly annotated) function argument."""
raise NotImplementedError('subclasses must implement')
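# A minimal illustrative resolver (a sketch only; real resolvers typically encode
# framework-specific typing rules rather than plain namespace lookups):
class _NaiveResolver(Resolver):
  """Resolves literals by their Python type and names by looking them up in ns."""
  def res_name(self, ns, name):
    if str(name) in ns:
      return {type(ns[str(name)])}
    return None
  def res_value(self, ns, value):
    return {type(value)}
  def res_call(self, ns, name, target, args, keywords, starargs, kwargs):
    return None  # return type unknown
  def res_arg(self, ns, f_name, arg_name, type_anno):
    if type_anno is not None and str(type_anno) in ns:
      return {ns[str(type_anno)]}
    return None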
class _SymbolTable(object):
"""Abstraction for the state of the CFG walk for type inference.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of
possible types.
"""
def __init__(self, init_from=None):
if init_from:
assert isinstance(init_from, _SymbolTable)
self.value = {
s: set(other_types) for s, other_types in init_from.value.items()
}
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _SymbolTable)
result = _SymbolTable(self)
for s, other_types in other.value.items():
if s not in result.value:
self_types = set()
result.value[s] = self_types
else:
self_types = result.value[s]
self_types.update(other_types)
return result
def __repr__(self):
return 'SymbolTable {}'.format(self.value)
_GETITEM = qual_names.QN('__getitem__')
_HANDLERS = {
gast.Eq: qual_names.QN('__eq__'),
gast.NotEq: qual_names.QN('__ne__'),
gast.Lt: qual_names.QN('__lt__'),
gast.LtE: qual_names.QN('__le__'),
gast.Gt: qual_names.QN('__gt__'),
gast.GtE: qual_names.QN('__ge__'),
gast.In: qual_names.QN('__contains__'),
# TODO(mdan): Is this actually correct?
# NotIn(*) = Not(In(*))
gast.NotIn: qual_names.QN('__not__'),
gast.Add: qual_names.QN('__add__'),
gast.Sub: qual_names.QN('__sub__'),
gast.Mult: qual_names.QN('__mul__'),
gast.Div: qual_names.QN('__div__'),
gast.FloorDiv: qual_names.QN('__floordiv__'),
gast.Mod: qual_names.QN('__mod__'),
gast.Pow: qual_names.QN('__pow__'),
gast.LShift: qual_names.QN('__lshift__'),
gast.RShift: qual_names.QN('__rshift__'),
gast.BitOr: qual_names.QN('__or__'),
gast.BitXor: qual_names.QN('__xor__'),
gast.BitAnd: qual_names.QN('__and__'),
gast.MatMult: qual_names.QN('__matmul__'),
}
_FIXED_RETTYPES = {
gast.Is: bool,
gast.IsNot: bool,
}
class StmtInferrer(gast.NodeVisitor):
"""Runs type inference on a single AST statement.
This visitor annotates most nodes with type information. It also sets types
for the symbols modified by this statement in its types_out property.
"""
def __init__(self, resolver, scope, namespace, closure_types, types_in):
self.resolver = resolver
self.scope = scope
self.namespace = namespace
self.closure_types = closure_types
self.types_in = types_in
self.new_symbols = {}
def visit(self, node):
types = super().visit(node)
if types is not None:
# TODO(mdan): Normalize by removing subtypes.
anno.setanno(node, anno.Static.TYPES, tuple(types))
return types
def visit_FunctionDef(self, node):
# Skip local function definitions. They are analyzed separately.
return None
def visit_Constant(self, node):
return self.resolver.res_value(self.namespace, node.value)
def visit_Tuple(self, node):
if isinstance(node.ctx, gast.Load):
for elt in node.elts:
self.visit(elt)
# TODO(mdan): Parameterize it.
return {Tuple}
assert isinstance(node.ctx, gast.Store)
# TODO(mdan): Implement tuple unpacking.
return None
def visit_List(self, node):
if isinstance(node.ctx, gast.Load):
el_types = []
for elt in node.elts:
el_types.append(self.visit(elt))
return {list}
raise NotImplementedError('list unpacking')
def visit_Set(self, node):
raise NotImplementedError()
def visit_Name(self, node):
name = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
types = self.types_in.value.get(name, None)
if (types is None) and (name not in self.scope.bound):
if name in self.closure_types:
types = self.closure_types[name]
else:
types = self.resolver.res_name(self.namespace, name)
return types
elif isinstance(node.ctx, gast.Param):
type_name = anno.getanno(node.annotation, anno.Basic.QN, None)
types = self.resolver.res_arg(self.namespace, self.scope.function_name,
name, type_name)
if types is not None:
self.new_symbols[name] = types
return types
elif isinstance(node.ctx, gast.Store):
if self.rvalue is not None:
self.new_symbols[name] = self.rvalue
else:
# No type information, assume Any.
self.new_symbols[name] = {Any}
return self.rvalue
assert False, 'unknown ctx'
def visit_Call(self, node):
f_name = anno.getanno(node.func, anno.Basic.QN)
kwargs = [self.visit(kw.value) for kw in node.keywords if kw.arg is None]
keywords = {
kw.arg: self.visit(kw.value)
for kw in node.keywords
if kw.arg is not None
}
is_starred = [isinstance(a, gast.Starred) for a in node.args]
args = [
self.visit(a)
for a, starred in zip(node.args, is_starred)
if not starred
]
starargs = [
self.visit(a.value)
for a, starred in zip(node.args, is_starred)
if starred
]
if f_name in self.scope.bound:
# Don't attempt external resolution of local functions.
# TODO(mdan): Use type annotations of the local definition.
return None
return self.resolver.res_call(
self.namespace, f_name, None, args, keywords, starargs, kwargs)
def visit_Index(self, node):
return self.visit(node.value)
def visit_Assign(self, node):
self.rvalue = self.visit(node.value)
for t in node.targets:
self.visit(t)
self.rvalue = None
def visit_Subscript(self, node):
val_type = self.visit(node.value)
slice_type = self.visit(node.slice)
if val_type is None or slice_type is None:
return None
return self.resolver.res_call(self.namespace, _GETITEM, val_type,
(slice_type,), {}, (), ())
def visit_Compare(self, node):
right_types = [self.visit(c) for c in node.comparators]
op_types = [type(o) for o in node.ops]
if len(op_types) > 1:
raise NotImplementedError('chained comparisons')
assert len(right_types) == 1
left_type = self.visit(node.left)
right_type, = right_types
op_type, = op_types
if left_type is None or right_type is None:
return None
f_name = _HANDLERS.get(op_type, None)
if f_name is None:
# Python doesn't allow overriding these operators. Their return types are
# fixed.
return {_FIXED_RETTYPES[op_type]}
return self.resolver.res_call(self.namespace, _HANDLERS[op_type],
left_type, (right_type,), {}, (), ())
def visit_BinOp(self, node):
left_type = self.visit(node.left)
right_type = self.visit(node.right)
if left_type is None or right_type is None:
return None
# TODO(mdan): This does not fully follow Python operator semantics.
# For example, in `a + b` Python will try `a.__add__`, but also `b.__radd__`
return self.resolver.res_call(self.namespace, _HANDLERS[type(node.op)],
left_type, (right_type,), {}, (), ())
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that propagates type information across statements."""
def __init__(self, graph, resolver, namespace, scope, closure_types):
"""Creates a new analyzer.
Args:
graph: cfg.Graph
resolver: Resolver
namespace: Dict[str, Any]
scope: activity.Scope
closure_types: Dict[QN, Set]
"""
super(Analyzer, self).__init__(graph)
self.resolver = resolver
self.namespace = namespace
self.scope = scope
self.closure_types = closure_types
def init_state(self, _):
return _SymbolTable()
def _update_closure_types(self, ast_node, types):
existing_types = anno.getanno(ast_node, anno.Static.CLOSURE_TYPES, None)
if existing_types is None:
existing_types = {}
anno.setanno(ast_node, anno.Static.CLOSURE_TYPES, existing_types)
for k, v in types.value.items():
if k in existing_types:
existing_types[k].update(v)
else:
existing_types[k] = set(v)
def visit_node(self, node):
prev_types_out = self.out[node]
types_in = _SymbolTable()
for n in node.prev:
types_in |= self.out[n]
types_out = _SymbolTable(types_in)
ast_node = node.ast_node
inferrer = StmtInferrer(
self.resolver, self.scope, self.namespace, self.closure_types, types_in)
inferrer.visit(ast_node)
types_out.value.update(inferrer.new_symbols)
reaching_fndefs = anno.getanno(ast_node, anno.Static.DEFINED_FNS_IN)
node_scope = anno.getanno(ast_node, anno.Static.SCOPE, None)
if node_scope is not None:
# TODO(mdan): Check that it's actually safe to skip nodes without scope.
reads = {str(qn) for qn in node_scope.read}
for def_node in reaching_fndefs:
if def_node.name in reads:
self._update_closure_types(def_node, types_out)
self.in_[node] = types_in
self.out[node] = types_out
return prev_types_out != types_out
class FunctionVisitor(transformer.Base):
"""AST visitor that applies type inference to each function separately."""
def __init__(self, source_info, graphs, resolver):
super(FunctionVisitor, self).__init__(source_info)
self.graphs = graphs
self.resolver = resolver
def visit_FunctionDef(self, node):
subgraph = self.graphs[node]
scope = anno.getanno(node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)
closure_types = anno.getanno(node, anno.Static.CLOSURE_TYPES, {})
analyzer = Analyzer(
subgraph, self.resolver, self.ctx.info.namespace, scope, closure_types)
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
node.body = self.visit_block(node.body)
return node
def resolve(node, source_info, graphs, resolver):
"""Performs type inference.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
resolver: Resolver
Returns:
ast.AST
"""
visitor = FunctionVisitor(source_info, graphs, resolver)
node = visitor.visit(node)
return node
|
[
"tensorflow.python.autograph.pyct.anno.getanno",
"tensorflow.python.autograph.pyct.anno.setanno",
"tensorflow.python.autograph.pyct.qual_names.QN"
] |
[((4158, 4186), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__getitem__"""'], {}), "('__getitem__')\n", (4171, 4186), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4215, 4238), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__eq__"""'], {}), "('__eq__')\n", (4228, 4238), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4256, 4279), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__ne__"""'], {}), "('__ne__')\n", (4269, 4279), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4294, 4317), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__lt__"""'], {}), "('__lt__')\n", (4307, 4317), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4333, 4356), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__le__"""'], {}), "('__le__')\n", (4346, 4356), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4371, 4394), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__gt__"""'], {}), "('__gt__')\n", (4384, 4394), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4410, 4433), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__ge__"""'], {}), "('__ge__')\n", (4423, 4433), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4448, 4477), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__contains__"""'], {}), "('__contains__')\n", (4461, 4477), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4567, 4591), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__not__"""'], {}), "('__not__')\n", (4580, 4591), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4608, 4632), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__add__"""'], {}), "('__add__')\n", (4621, 4632), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4648, 4672), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__sub__"""'], {}), "('__sub__')\n", (4661, 4672), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4689, 4713), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__mul__"""'], {}), "('__mul__')\n", (4702, 4713), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4729, 4753), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__div__"""'], {}), "('__div__')\n", (4742, 4753), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4774, 4803), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__floordiv__"""'], {}), "('__floordiv__')\n", (4787, 4803), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4819, 4843), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__mod__"""'], {}), "('__mod__')\n", (4832, 4843), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4859, 4883), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__pow__"""'], {}), "('__pow__')\n", (4872, 4883), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4902, 4929), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__lshift__"""'], {}), "('__lshift__')\n", (4915, 4929), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4948, 
4975), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__rshift__"""'], {}), "('__rshift__')\n", (4961, 4975), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((4993, 5016), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__or__"""'], {}), "('__or__')\n", (5006, 5016), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((5035, 5059), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__xor__"""'], {}), "('__xor__')\n", (5048, 5059), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((5078, 5102), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__and__"""'], {}), "('__and__')\n", (5091, 5102), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((5122, 5149), 'tensorflow.python.autograph.pyct.qual_names.QN', 'qual_names.QN', (['"""__matmul__"""'], {}), "('__matmul__')\n", (5135, 5149), False, 'from tensorflow.python.autograph.pyct import qual_names\n'), ((6772, 6805), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['node', 'anno.Basic.QN'], {}), '(node, anno.Basic.QN)\n', (6784, 6805), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((7784, 7822), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['node.func', 'anno.Basic.QN'], {}), '(node.func, anno.Basic.QN)\n', (7796, 7822), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((11043, 11098), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['ast_node', 'anno.Static.CLOSURE_TYPES', 'None'], {}), '(ast_node, anno.Static.CLOSURE_TYPES, None)\n', (11055, 11098), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((11813, 11863), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['ast_node', 'anno.Static.DEFINED_FNS_IN'], {}), '(ast_node, anno.Static.DEFINED_FNS_IN)\n', (11825, 11863), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((11881, 11928), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['ast_node', 'anno.Static.SCOPE', 'None'], {}), '(ast_node, anno.Static.SCOPE, None)\n', (11893, 11928), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((12689, 12743), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['node', 'annos.NodeAnno.ARGS_AND_BODY_SCOPE'], {}), '(node, annos.NodeAnno.ARGS_AND_BODY_SCOPE)\n', (12701, 12743), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((12764, 12813), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['node', 'anno.Static.CLOSURE_TYPES', '{}'], {}), '(node, anno.Static.CLOSURE_TYPES, {})\n', (12776, 12813), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((11163, 11228), 'tensorflow.python.autograph.pyct.anno.setanno', 'anno.setanno', (['ast_node', 'anno.Static.CLOSURE_TYPES', 'existing_types'], {}), '(ast_node, anno.Static.CLOSURE_TYPES, existing_types)\n', (11175, 11228), False, 'from tensorflow.python.autograph.pyct import anno\n'), ((7197, 7247), 'tensorflow.python.autograph.pyct.anno.getanno', 'anno.getanno', (['node.annotation', 'anno.Basic.QN', 'None'], {}), '(node.annotation, anno.Basic.QN, None)\n', (7209, 7247), False, 'from tensorflow.python.autograph.pyct import anno\n')]
|
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from maya import cmds
from maya import standalone
import os
import unittest
from pxr import Usd, UsdGeom
class testUsdMayaAdaptorGeom(unittest.TestCase):
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
cmds.loadPlugin('pxrUsd')
usdFile = os.path.abspath('UsdAttrs.usda')
cmds.usdImport(file=usdFile, shadingMode='none')
def testImportImageable(self):
"""
Tests that UsdGeomImageable.purpose is properly imported.
"""
# Testing for the different purpose attributes
self.assertEqual(cmds.getAttr('pCube1.USD_ATTR_purpose'), 'default')
self.assertEqual(cmds.getAttr('pCube2.USD_ATTR_purpose'), 'render')
self.assertEqual(cmds.getAttr('pCube3.USD_ATTR_purpose'), 'proxy')
# pCube4 does not have a purpose attribute
self.assertFalse(cmds.objExists('pCube4.USD_ATTR_purpose'))
self.assertFalse(cmds.objExists('pCube4.USD_purpose')) # alias
def testExportImageable(self):
"""
Test that UsdGeomImageable.purpose is properly exported.
"""
newUsdFilePath = os.path.abspath('UsdAttrsNew.usda')
cmds.usdExport(file=newUsdFilePath, shadingMode='none')
newUsdStage = Usd.Stage.Open(newUsdFilePath)
# Testing the exported purpose attributes
geom1 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube1'))
self.assertEqual(geom1.GetPurposeAttr().Get(), 'default')
geom2 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube2'))
self.assertEqual(geom2.GetPurposeAttr().Get(), 'render')
geom3 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube3'))
self.assertEqual(geom3.GetPurposeAttr().Get(), 'proxy')
# Testing that there is no authored attribute
geom4 = UsdGeom.Imageable(newUsdStage.GetPrimAtPath('/World/pCube4'))
self.assertFalse(geom4.GetPurposeAttr().HasAuthoredValue())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"unittest.main",
"maya.cmds.loadPlugin",
"os.path.abspath",
"maya.standalone.uninitialize",
"maya.cmds.getAttr",
"maya.cmds.usdExport",
"maya.cmds.usdImport",
"pxr.Usd.Stage.Open",
"maya.cmds.objExists",
"maya.standalone.initialize"
] |
[((3166, 3192), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (3179, 3192), False, 'import unittest\n'), ((1289, 1314), 'maya.standalone.uninitialize', 'standalone.uninitialize', ([], {}), '()\n', (1312, 1314), False, 'from maya import standalone\n'), ((1366, 1394), 'maya.standalone.initialize', 'standalone.initialize', (['"""usd"""'], {}), "('usd')\n", (1387, 1394), False, 'from maya import standalone\n'), ((1403, 1428), 'maya.cmds.loadPlugin', 'cmds.loadPlugin', (['"""pxrUsd"""'], {}), "('pxrUsd')\n", (1418, 1428), False, 'from maya import cmds\n'), ((1448, 1480), 'os.path.abspath', 'os.path.abspath', (['"""UsdAttrs.usda"""'], {}), "('UsdAttrs.usda')\n", (1463, 1480), False, 'import os\n'), ((1489, 1537), 'maya.cmds.usdImport', 'cmds.usdImport', ([], {'file': 'usdFile', 'shadingMode': '"""none"""'}), "(file=usdFile, shadingMode='none')\n", (1503, 1537), False, 'from maya import cmds\n'), ((2292, 2327), 'os.path.abspath', 'os.path.abspath', (['"""UsdAttrsNew.usda"""'], {}), "('UsdAttrsNew.usda')\n", (2307, 2327), False, 'import os\n'), ((2336, 2391), 'maya.cmds.usdExport', 'cmds.usdExport', ([], {'file': 'newUsdFilePath', 'shadingMode': '"""none"""'}), "(file=newUsdFilePath, shadingMode='none')\n", (2350, 2391), False, 'from maya import cmds\n'), ((2414, 2444), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', (['newUsdFilePath'], {}), '(newUsdFilePath)\n', (2428, 2444), False, 'from pxr import Usd, UsdGeom\n'), ((1744, 1783), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""pCube1.USD_ATTR_purpose"""'], {}), "('pCube1.USD_ATTR_purpose')\n", (1756, 1783), False, 'from maya import cmds\n'), ((1821, 1860), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""pCube2.USD_ATTR_purpose"""'], {}), "('pCube2.USD_ATTR_purpose')\n", (1833, 1860), False, 'from maya import cmds\n'), ((1897, 1936), 'maya.cmds.getAttr', 'cmds.getAttr', (['"""pCube3.USD_ATTR_purpose"""'], {}), "('pCube3.USD_ATTR_purpose')\n", (1909, 1936), False, 'from maya import cmds\n'), ((2024, 2065), 'maya.cmds.objExists', 'cmds.objExists', (['"""pCube4.USD_ATTR_purpose"""'], {}), "('pCube4.USD_ATTR_purpose')\n", (2038, 2065), False, 'from maya import cmds\n'), ((2092, 2128), 'maya.cmds.objExists', 'cmds.objExists', (['"""pCube4.USD_purpose"""'], {}), "('pCube4.USD_purpose')\n", (2106, 2128), False, 'from maya import cmds\n')]
|
"""
Utility routines for the maximum entropy module.
Most of them are either Python replacements for the corresponding Fortran
routines or wrappers around matrices to allow the maxent module to
manipulate ndarrays, scipy sparse matrices, and PySparse matrices through a
common interface.
Perhaps the logsumexp() function belongs under the utils/ branch where other
modules can access it more easily.
Copyright: <NAME>, 2003-2006
License: BSD-style (see LICENSE.txt in main source directory)
"""
# Future imports must come before any code in 2.5
from __future__ import division
from __future__ import print_function
from builtins import range
__author__ = "<NAME>"
__version__ = '2.0'
import random
import math
import cmath
import numpy as np
#from numpy import log, exp, asarray, ndarray, empty
import scipy.sparse
from scipy.misc import logsumexp
def feature_sampler(vec_f, auxiliary_sampler):
"""
A generator function for tuples (F, log_q_xs, xs)
Parameters
----------
vec_f : function
Pass `vec_f` as a (vectorized) function that operates on a vector of
samples xs = {x1,...,xn} and returns a feature matrix (m x n), where m
is some number of feature components.
auxiliary_sampler : function
Pass `auxiliary_sampler` as a function that returns a tuple
(xs, log_q_xs) representing a sample to use for sampling (e.g.
importance sampling) on the sample space of the model.
xs : list, 1d ndarray, or 2d matrix (n x d)
We require len(xs) == n.
Yields
------
tuples (F, log_q_xs, xs)
F : matrix (m x n)
log_q_xs : as returned by auxiliary_sampler
xs : as returned by auxiliary_sampler
"""
while True:
xs, log_q_xs = auxiliary_sampler()
F = vec_f(xs) # compute feature matrix from points
yield F, log_q_xs, xs
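def _example_feature_sampler(n=10 ** 4):
    """A usage sketch (illustrative only, with assumed toy inputs): two polynomial
    features of a Gaussian auxiliary sample, built via auxiliary_sampler_scipy below."""
    import scipy.stats
    aux = auxiliary_sampler_scipy(scipy.stats.norm(0, 2), dimensions=1, n=n)
    vec_f = lambda xs: np.vstack([xs.ravel(), xs.ravel() ** 2])  # (2 x n) feature matrix
    sampler = feature_sampler(vec_f, aux)
    F, log_q_xs, xs = next(sampler)
    return F, log_q_xs, xs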
def dictsample(freq, size=None, return_probs=None):
"""
Create a sample of the given size from the specified discrete distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> dictsample(freq, size=10)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
n = len(freq)
probs = np.fromiter(freq.values(), float)
probs /= probs.sum()
indices = np.random.choice(np.arange(n), size=size, p=probs)
labels = np.empty(n, dtype=object)
for i, label in enumerate(freq.keys()):
labels[i] = label
sample = labels[indices]
if return_probs is None:
return sample
sampleprobs = probs[indices]
if return_probs == 'prob':
return sample, sampleprobs
elif return_probs == 'logprob':
return sample, np.log(sampleprobs)
else:
raise ValueError('return_probs must be "prob", "logprob", or None')
def dictsampler(freq, size=None, return_probs=None):
"""
A generator of samples of the given size from the specified discrete
distribution.
Parameters
----------
freq : a dictionary
A mapping from values x_j in the sample space to probabilities (or
unnormalized frequencies).
size : a NumPy size parameter (like a shape tuple)
Something passable to NumPy as a size argument to np.random.choice(...)
    return_probs : str or None, optional (default None)
None: don't return pmf values at each sample point
'prob': return pmf values at each sample point
'logprob': return log pmf values at each sample point
Returns
-------
Returns a sample of the given size from the keys of the given
dictionary `freq` with probabilities given according to the
values (normalized to 1). Optionally returns the probabilities
under the distribution of each observation.
Example
-------
>>> freq = {'a': 10, 'b': 15, 'c': 20}
    >>> g = dictsampler(freq, size=10)
>>> next(g)
array([c, b, b, b, b, b, c, b, b, b], dtype=object)
"""
while True:
yield dictsample(freq, size=size, return_probs=return_probs)
def auxiliary_sampler_scipy(auxiliary, dimensions=1, n=10**5):
"""
Sample (once) from the given scipy.stats distribution
Parameters
----------
auxiliary : a scipy.stats distribution object (rv_frozen)
Returns
-------
sampler : function
sampler(), when called with no parameters, returns a tuple
(xs, log_q_xs), where:
xs : matrix (n x d): [x_1, ..., x_n]: a sample
log_q_xs: log pdf values under the auxiliary sampler for each x_j
"""
def sampler():
xs = auxiliary.rvs(size=(n, dimensions))
log_q_xs = np.log(auxiliary.pdf(xs.T)).sum(axis=0)
return (xs, log_q_xs)
return sampler
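# Usage sketch (illustrative values): sampling from a frozen scipy.stats distribution.
#   sampler = auxiliary_sampler_scipy(scipy.stats.uniform(-5, 10), dimensions=2, n=1000)
#   xs, log_q_xs = sampler()   # xs: (1000 x 2) sample, log_q_xs: 1000 log pdf values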
def _logsumexpcomplex(values):
"""A version of logsumexp that should work if the values passed are
complex-numbered, such as the output of robustarraylog(). So we
expect:
cmath.exp(logsumexpcomplex(robustarraylog(values))) ~= sum(values,axis=0)
except for a small rounding error in both real and imag components.
The output is complex. (To recover just the real component, use
A.real, where A is the complex return value.)
"""
if len(values) == 0:
return 0.0
iterator = iter(values)
# Get the first element
while True:
# Loop until we have a value greater than -inf
try:
b_i = next(iterator) + 0j
except StopIteration:
# empty
return float('-inf')
if b_i.real != float('-inf'):
break
# Now the rest
for a_i in iterator:
a_i += 0j
if b_i.real > a_i.real:
increment = robustlog(1.+cmath.exp(a_i - b_i))
# print "Increment is " + str(increment)
b_i = b_i + increment
else:
increment = robustlog(1.+cmath.exp(b_i - a_i))
# print "Increment is " + str(increment)
b_i = a_i + increment
return b_i
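def _check_logsumexpcomplex(values=(0.5, -2.0, 3.0)):
    """Small self-check of the property stated above (illustrative only): the
    complex exp of the combined log should recover sum(values) up to rounding."""
    logs = _robustarraylog(np.asarray(values, float))
    recovered = cmath.exp(_logsumexpcomplex(logs))
    return recovered, sum(values)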
def logsumexp_naive(values):
"""For testing logsumexp(). Subject to numerical overflow for large
values (e.g. 720).
"""
s = 0.0
for x in values:
s += math.exp(x)
return math.log(s)
def robustlog(x):
"""Returns log(x) if x > 0, the complex log cmath.log(x) if x < 0,
or float('-inf') if x == 0.
"""
if x == 0.:
return float('-inf')
elif type(x) is complex or (type(x) is float and x < 0):
return cmath.log(x)
else:
return math.log(x)
def _robustarraylog(x):
""" An array version of robustlog. Operates on a real array x.
"""
    arraylog = np.empty(len(x), np.complex64)
for i in range(len(x)):
xi = x[i]
if xi > 0:
arraylog[i] = math.log(xi)
elif xi == 0.:
arraylog[i] = float('-inf')
else:
arraylog[i] = cmath.log(xi)
return arraylog
# def arrayexp(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the real array x.
#
# We try to exponentiate with np.exp() and, if that fails, with
# python's math.exp(). np.exp() is about 10 times faster but throws
# an OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
# """
# try:
# ex = np.exp(x)
# except OverflowError:
# print("Warning: OverflowError using np.exp(). Using slower Python"\
# " routines instead!")
# ex = np.empty(len(x), float)
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# return ex
#
# def arrayexpcomplex(x):
# """
# OBSOLETE?
#
# Returns the elementwise antilog of the vector x.
#
# We try to exponentiate with np.exp() and, if that fails, with python's
# math.exp(). np.exp() is about 10 times faster but throws an
# OverflowError exception for numerical underflow (e.g. exp(-800),
# whereas python's math.exp() just returns zero, which is much more
# helpful.
#
# """
# try:
# ex = np.exp(x).real
# except OverflowError:
# ex = np.empty(len(x), float)
# try:
# for j in range(len(x)):
# ex[j] = math.exp(x[j])
# except TypeError:
# # Perhaps x[j] is complex. If so, try using the complex
# # exponential and returning the real part.
# for j in range(len(x)):
# ex[j] = cmath.exp(x[j]).real
# return ex
def sample_wr(population, k):
"""Chooses k random elements (with replacement) from a population.
(From the Python Cookbook).
"""
n = len(population)
_random, _int = random.random, int # speed hack
return [population[_int(_random() * n)] for i in range(k)]
def evaluate_feature_matrix(feature_functions,
xs,
vectorized=True,
format='csc_matrix',
dtype=float,
verbose=False):
"""Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
F[i, :] = f_i(xs[:])
if xs is 1D, or as:
F[i, j] = f_i(xs[:, j])
if xs is 2D, for each feature function `f_i` in `feature_functions`.
Parameters
----------
feature_functions : a list of m feature functions f_i.
xs : either:
1. a (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
vectorized : bool (default True)
If True, the feature functions f_i are assumed to be vectorized;
then these will be passed all observations xs at once, in turn.
If False, the feature functions f_i will be evaluated one at a time.
format : str (default 'csc_matrix')
Options: 'ndarray', 'csc_matrix', 'csr_matrix', 'dok_matrix'.
If you have enough memory, it may be faster to create a dense
ndarray and then construct a e.g. CSC matrix from this.
Returns
-------
F : (m x n) matrix (in the given format: ndarray / csc_matrix / etc.)
Matrix of evaluated features.
"""
m = len(feature_functions)
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
if d == 1 and vectorized:
# xs may be a column vector, i.e. (n x 1) array.
# In this case, reshape it to a 1d array. This
# makes it easier to define functions that
# operate on only one variable (the usual case)
# given that sklearn's interface now forces 2D
# arrays X when calling .transform(X) and .fit(X).
xs = np.reshape(xs, n)
else:
n, d = len(xs), 1
if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
F = scipy.sparse.dok_matrix((m, n), dtype=dtype)
elif format == 'ndarray':
F = np.empty((m, n), dtype=dtype)
else:
raise ValueError('matrix format not recognized')
for i, f_i in enumerate(feature_functions):
if verbose:
print('Computing feature {i} of {m} ...'.format(i=i, m=m))
if vectorized:
F[i::m, :] = f_i(xs)
else:
for j in range(n):
f_i_x = f_i(xs[j])
if f_i_x != 0:
F[i,j] = f_i_x
if format == 'csc_matrix':
return F.tocsc()
elif format == 'csr_matrix':
return F.tocsr()
else:
return F
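def _example_evaluate_feature_matrix():
    """Usage sketch (illustrative only): two vectorized features over five scalar
    observations, returned as a dense ndarray of shape (2, 5)."""
    fs = [lambda xs: xs, lambda xs: xs ** 2]
    xs = np.arange(5.0)
    F = evaluate_feature_matrix(fs, xs, vectorized=True, format='ndarray')
    return F  # F[0] == xs, F[1] == xs**2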
# def densefeatures(f, x):
# """Returns a dense array of non-zero evaluations of the vector
# functions fi in the list f at the point x.
# """
#
# return np.array([fi(x) for fi in f])
# def densefeaturematrix(f, sample, verbose=False):
# """Compute an (m x n) dense array of non-zero evaluations of the
# scalar functions fi in the list f at the points x_1,...,x_n in the
# list sample.
# """
#
# # Was: return np.array([[fi(x) for fi in f] for x in sample])
#
# m = len(f)
# n = len(sample)
#
# F = np.empty((m, n), float)
# for i in range(m):
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# F[i,j] = f_i(x)
# return F
# def sparsefeatures(f, x, format='csc_matrix'):
# """Compute an mx1 sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the point x.
#
# """
# m = len(f)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparsef = scipy.sparse.dok_matrix((m, 1))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# f_i_x = f[i](x)
# if f_i_x != 0:
# sparsef[i, 0] = f_i_x
#
# if format == 'csc_matrix':
# print("Converting to CSC matrix ...")
# return sparsef.tocsc()
# elif format == 'csr_matrix':
# print("Converting to CSR matrix ...")
# return sparsef.tocsr()
# else:
# return sparsef
# def sparsefeaturematrix(f, sample, format='csc_matrix', verbose=False):
# """Compute an (m x n) sparse matrix of non-zero evaluations of the
# scalar functions f_1,...,f_m in the list f at the points x_1,...,x_n
# in the sequence 'sample'.
#
# """
# m = len(f)
# n = len(sample)
# if format in ('dok_matrix', 'csc_matrix', 'csr_matrix'):
# sparseF = scipy.sparse.dok_matrix((m, n))
# else:
# raise ValueError("sparse matrix format not recognized")
#
# for i in range(m):
# if verbose:
# print('Computing feature {i} of {m}'.format(i=i, m=m))
# f_i = f[i]
# for j in range(n):
# x = sample[j]
# f_i_x = f_i(x)
# if f_i_x != 0:
# sparseF[i,j] = f_i_x
#
# if format == 'csc_matrix':
# return sparseF.tocsc()
# elif format == 'csr_matrix':
# return sparseF.tocsr()
# else:
# return sparseF
# def sparsefeaturematrix_vectorized(feature_functions, xs, format='csc_matrix'):
# """
# Evaluate a (m x n) matrix of features `F` of the sample `xs` as:
#
# F[i, j] = f_i(xs[:, j])
#
# Parameters
# ----------
# feature_functions : a list of feature functions f_i.
#
# xs : either:
# 1. a (d x n) matrix representing n d-dimensional
# observations xs[: ,j] for j=1,...,n.
# 2. a 1d array or sequence (e.g list) of observations xs[j]
# for j=1,...,n.
#
# The feature functions f_i are assumed to be vectorized. These will be
# passed all observations xs at once, in turn.
#
# Note: some samples may be more efficient / practical to compute
# features one sample observation at a time (e.g. generated). For these
# cases, use sparsefeaturematrix().
#
# Only pass sparse=True if you need the memory savings. If you want a
# sparse matrix but have enough memory, it may be faster to
# pass dense=True and then construct a CSC matrix from the dense NumPy
# array.
#
# """
# m = len(feature_functions)
#
# if isinstance(xs, np.ndarray) and xs.ndim == 2:
# d, n = xs.shape
# else:
# n = len(xs)
# if not sparse:
# F = np.empty((m, n), float)
# else:
# import scipy.sparse
# F = scipy.sparse.lil_matrix((m, n), dtype=float)
#
# for i, f_i in enumerate(feature_functions):
# F[i::m, :] = f_i(xs)
#
# if format == 'csc_matrix':
# return F.tocsc()
# elif format == 'csr_matrix':
# return F.tocsr()
# else:
# return F
def old_vec_feature_function(feature_functions, sparse=False):
"""
Create and return a vectorized function `features(xs)` that
evaluates an (n x m) matrix of features `F` of the sample `xs` as:
F[j, i] = f_i(xs[:, j])
Parameters
----------
feature_functions : a list of feature functions f_i.
`xs` will be passed to these functions as either:
1. an (n x d) matrix representing n d-dimensional
observations xs[j, :] for j=1,...,n.
2. a 1d array or sequence (e.g list) of observations xs[j]
for j=1,...,n.
The feature functions f_i are assumed to be vectorized. These will be
passed all observations xs at once, in turn.
Note: some samples may be more efficient / practical to compute
features of one sample observation at a time (e.g. generated).
Only pass sparse=True if you need the memory savings. If you want a
sparse matrix but have enough memory, it may be faster to
pass sparse=False and then construct a CSC matrix from the dense NumPy
array.
"""
if sparse:
import scipy.sparse
m = len(feature_functions)
def vectorized_features(xs):
if isinstance(xs, np.ndarray) and xs.ndim == 2:
n, d = xs.shape
else:
n = len(xs)
if not sparse:
F = np.empty((n, m), float)
else:
F = scipy.sparse.lil_matrix((n, m), dtype=float)
# Equivalent:
# for i, f_i in enumerate(feature_functions):
# for k in range(len(xs)):
# F[len(feature_functions)*k+i, :] = f_i(xs[k])
for i, f_i in enumerate(feature_functions):
F[:, i::m] = f_i(xs)
if not sparse:
return F
else:
return scipy.sparse.csc_matrix(F)
return vectorized_features
def dotprod(u,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the dot product of the (1 x m) sparse array u with the
(m x 1) (dense) numpy array v.
"""
#print "Taking the dot product u.v, where"
#print "u has shape " + str(u.shape)
#print "v = " + str(v)
try:
dotprod = np.array([0.0]) # a 1x1 array. Required by spmatrix.
u.matvec(v, dotprod)
return dotprod[0] # extract the scalar
except AttributeError:
# Assume u is a dense array.
return np.dot(u,v)
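# Example with dense inputs (illustrative): plain ndarrays fall through to np.dot, so
#   dotprod(np.array([1., 2.]), np.array([3., 4.]))  ->  11.0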
def innerprod(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Returns the inner product of the (m x n) dense or sparse matrix A
with the n-element dense array v. This is a wrapper for A.dot(v) for
dense arrays and spmatrix objects, and for A.matvec(v, result) for
PySparse matrices.
"""
# We assume A is sparse.
(m, n) = A.shape
vshape = v.shape
try:
(p,) = vshape
except ValueError:
(p, q) = vshape
if n != p:
raise TypeError("matrix dimensions are incompatible")
if isinstance(v, np.ndarray):
try:
# See if A is sparse
A.matvec
except AttributeError:
# It looks like A is dense
return np.dot(A, v)
else:
# Assume A is sparse
if scipy.sparse.isspmatrix(A):
innerprod = A.matvec(v) # This returns a float32 type. Why???
return innerprod
else:
# Assume PySparse format
innerprod = np.empty(m, float)
A.matvec(v, innerprod)
return innerprod
elif scipy.sparse.isspmatrix(v):
return A * v
else:
raise TypeError("unsupported types for inner product")
def innerprodtranspose(A,v):
"""
This is a wrapper around general dense or sparse dot products.
It is not necessary except as a common interface for supporting
ndarray, scipy spmatrix, and PySparse arrays.
Computes A^T V, where A is a dense or sparse matrix and V is a numpy
array. If A is sparse, V must be a rank-1 array, not a matrix. This
function is efficient for large matrices A. This is a wrapper for
A.T.dot(v) for dense arrays and spmatrix objects, and for
A.matvec_transp(v, result) for pysparse matrices.
"""
(m, n) = A.shape
#pdb.set_trace()
if hasattr(A, 'matvec_transp'):
# A looks like a PySparse matrix
if len(v.shape) == 1:
innerprod = np.empty(n, float)
A.matvec_transp(v, innerprod)
else:
raise TypeError("innerprodtranspose(A,v) requires that v be "
"a vector (rank-1 dense array) if A is sparse.")
return innerprod
elif scipy.sparse.isspmatrix(A):
return (A.conj().transpose() * v).transpose()
else:
# Assume A is dense
if isinstance(v, np.ndarray):
# v is also dense
if len(v.shape) == 1:
# We can't transpose a rank-1 matrix into a row vector, so
# we reshape it.
vm = v.shape[0]
vcolumn = np.reshape(v, (1, vm))
x = np.dot(vcolumn, A)
return np.reshape(x, (n,))
else:
#(vm, vn) = v.shape
# Assume vm == m
x = np.dot(np.transpose(v), A)
return np.transpose(x)
else:
raise TypeError("unsupported types for inner product")
def rowmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (m x 1) vector representing the mean of the rows of A,
    which can be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]], float)
>>> rowmeans(a)
array([ 1.5, 3.5])
"""
if type(A) is np.ndarray:
return A.mean(1)
else:
# Assume it's sparse
try:
n = A.shape[1]
except AttributeError:
raise TypeError("rowmeans() only works with sparse and dense "
"arrays")
rowsum = innerprod(A, np.ones(n, float))
return rowsum / float(n)
def columnmeans(A):
"""
This is a wrapper for general dense or sparse dot products.
It is only necessary as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with the column averages of A, which can
be an (m x n) sparse or dense matrix.
>>> a = np.array([[1,2],[3,4]],'d')
>>> columnmeans(a)
array([ 2., 3.])
"""
if type(A) is np.ndarray:
return A.mean(0)
else:
# Assume it's sparse
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnmeans() only works with sparse and dense "
"arrays")
columnsum = innerprodtranspose(A, np.ones(m, float))
return columnsum / float(m)
def columnvariances(A):
"""
This is a wrapper for general dense or sparse dot products.
It is not necessary except as a common interface for supporting ndarray,
scipy spmatrix, and PySparse arrays.
Returns a dense (1 x n) vector with unbiased estimators for the column
variances for each column of the (m x n) sparse or dense matrix A. (The
normalization is by (m - 1).)
>>> a = np.array([[1,2], [3,4]], 'd')
>>> columnvariances(a)
array([ 2., 2.])
"""
if type(A) is np.ndarray:
        # ddof=1 gives the unbiased (m - 1) normalization promised in the docstring.
        return np.std(A, 0, ddof=1)**2
else:
try:
m = A.shape[0]
except AttributeError:
raise TypeError("columnvariances() only works with sparse "
"and dense arrays")
means = columnmeans(A)
return columnmeans((A-means)**2) * (m/(m-1.0))
def flatten(a):
"""Flattens the sparse matrix or dense array/matrix 'a' into a
1-dimensional array
"""
if scipy.sparse.isspmatrix(a):
return a.A.flatten()
else:
return np.asarray(a).flatten()
class DivergenceError(Exception):
"""Exception raised if the entropy dual has no finite minimum.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self)
def __str__(self):
return repr(self.message)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
[
"math.exp",
"numpy.log",
"numpy.std",
"numpy.empty",
"numpy.asarray",
"cmath.log",
"numpy.transpose",
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.reshape",
"cmath.exp",
"numpy.dot",
"math.log",
"builtins.range",
"doctest.testmod"
] |
[((3174, 3199), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'object'}), '(n, dtype=object)\n', (3182, 3199), True, 'import numpy as np\n'), ((6991, 7002), 'math.log', 'math.log', (['s'], {}), '(s)\n', (6999, 7002), False, 'import math\n'), ((25128, 25145), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (25143, 25145), False, 'import doctest\n'), ((3126, 3138), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (3135, 3138), True, 'import numpy as np\n'), ((6968, 6979), 'math.exp', 'math.exp', (['x'], {}), '(x)\n', (6976, 6979), False, 'import math\n'), ((18779, 18794), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (18787, 18794), True, 'import numpy as np\n'), ((7255, 7267), 'cmath.log', 'cmath.log', (['x'], {}), '(x)\n', (7264, 7267), False, 'import cmath\n'), ((7293, 7304), 'math.log', 'math.log', (['x'], {}), '(x)\n', (7301, 7304), False, 'import math\n'), ((7541, 7553), 'math.log', 'math.log', (['xi'], {}), '(xi)\n', (7549, 7553), False, 'import math\n'), ((9588, 9596), 'builtins.range', 'range', (['k'], {}), '(k)\n', (9593, 9596), False, 'from builtins import range\n'), ((11566, 11583), 'numpy.reshape', 'np.reshape', (['xs', 'n'], {}), '(xs, n)\n', (11576, 11583), True, 'import numpy as np\n'), ((11781, 11810), 'numpy.empty', 'np.empty', (['(m, n)'], {'dtype': 'dtype'}), '((m, n), dtype=dtype)\n', (11789, 11810), True, 'import numpy as np\n'), ((12109, 12117), 'builtins.range', 'range', (['n'], {}), '(n)\n', (12114, 12117), False, 'from builtins import range\n'), ((17811, 17834), 'numpy.empty', 'np.empty', (['(n, m)', 'float'], {}), '((n, m), float)\n', (17819, 17834), True, 'import numpy as np\n'), ((19003, 19015), 'numpy.dot', 'np.dot', (['u', 'v'], {}), '(u, v)\n', (19009, 19015), True, 'import numpy as np\n'), ((21164, 21182), 'numpy.empty', 'np.empty', (['n', 'float'], {}), '(n, float)\n', (21172, 21182), True, 'import numpy as np\n'), ((22896, 22913), 'numpy.ones', 'np.ones', (['n', 'float'], {}), '(n, float)\n', (22903, 22913), True, 'import numpy as np\n'), ((23691, 23708), 'numpy.ones', 'np.ones', (['m', 'float'], {}), '(m, float)\n', (23698, 23708), True, 'import numpy as np\n'), ((24295, 24307), 'numpy.std', 'np.std', (['A', '(0)'], {}), '(A, 0)\n', (24301, 24307), True, 'import numpy as np\n'), ((3509, 3528), 'numpy.log', 'np.log', (['sampleprobs'], {}), '(sampleprobs)\n', (3515, 3528), True, 'import numpy as np\n'), ((7657, 7670), 'cmath.log', 'cmath.log', (['xi'], {}), '(xi)\n', (7666, 7670), False, 'import cmath\n'), ((19896, 19908), 'numpy.dot', 'np.dot', (['A', 'v'], {}), '(A, v)\n', (19902, 19908), True, 'import numpy as np\n'), ((20199, 20217), 'numpy.empty', 'np.empty', (['m', 'float'], {}), '(m, float)\n', (20207, 20217), True, 'import numpy as np\n'), ((24802, 24815), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (24812, 24815), True, 'import numpy as np\n'), ((6501, 6521), 'cmath.exp', 'cmath.exp', (['(a_i - b_i)'], {}), '(a_i - b_i)\n', (6510, 6521), False, 'import cmath\n'), ((6661, 6681), 'cmath.exp', 'cmath.exp', (['(b_i - a_i)'], {}), '(b_i - a_i)\n', (6670, 6681), False, 'import cmath\n'), ((21804, 21826), 'numpy.reshape', 'np.reshape', (['v', '(1, vm)'], {}), '(v, (1, vm))\n', (21814, 21826), True, 'import numpy as np\n'), ((21847, 21865), 'numpy.dot', 'np.dot', (['vcolumn', 'A'], {}), '(vcolumn, A)\n', (21853, 21865), True, 'import numpy as np\n'), ((21889, 21908), 'numpy.reshape', 'np.reshape', (['x', '(n,)'], {}), '(x, (n,))\n', (21899, 21908), True, 'import numpy as np\n'), ((22066, 22081), 'numpy.transpose', 
'np.transpose', (['x'], {}), '(x)\n', (22078, 22081), True, 'import numpy as np\n'), ((22023, 22038), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (22035, 22038), True, 'import numpy as np\n')]
|
import six
if six.PY2:
from backports import tempfile
else:
import tempfile
import pytest as pt
import os
from ditto.readers.opendss.read import Reader as Reader_opendss
from ditto.readers.cyme.read import Reader as Reader_cyme
from ditto.writers.json.write import Writer
from ditto.store import Store
import logging
import json_tricks
logger = logging.getLogger(__name__)
test_list = os.walk('data')
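# Walk every persistence-test case under data/, re-parse it with the matching
# reader, re-serialize it to JSON, and compare the result against the stored
# reference model for that case.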
for (dirpath, dirname, files) in test_list:
    if files != []:
reader_type = dirpath.split('\\')[2]
m = Store()
if reader_type == 'opendss':
reader = Reader_opendss(master_file = os.path.join('..',dirpath,'master.dss'), buscoordinates_file = os.path.join('..',dirpath,'buscoord.dss'))
elif reader_type == 'cyme':
reader = Reader_cyme(data_folder_path=os.path.join('..',dirpath))
else:
#Update with other tests if they get added to the persistence tests
continue
reader.parse(m)
m.set_names()
output_path = tempfile.TemporaryDirectory()
w = Writer(output_path=output_path.name, log_path=output_path)
w.write(m)
original = json_tricks.load(open(os.path.join(dirpath,files[0]),'r'))
update = json_tricks.load(open(os.path.join(output_path.name,'Model.json'),'r'))
try:
assert update["model"] == original["model"]
except AssertionError as e:
logger.error("Model differs for usecase {loc}".format(loc = dirpath))
e.args += ("Model differs for usecase {loc}".format(loc = dirpath),)
raise
|
[
"ditto.store.Store",
"tempfile.TemporaryDirectory",
"os.walk",
"ditto.writers.json.write.Writer",
"os.path.join",
"logging.getLogger"
] |
[((355, 382), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (372, 382), False, 'import logging\n'), ((395, 410), 'os.walk', 'os.walk', (['"""data"""'], {}), "('data')\n", (402, 410), False, 'import os\n'), ((531, 538), 'ditto.store.Store', 'Store', ([], {}), '()\n', (536, 538), False, 'from ditto.store import Store\n'), ((1029, 1058), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1056, 1058), False, 'import tempfile\n'), ((1071, 1129), 'ditto.writers.json.write.Writer', 'Writer', ([], {'output_path': 'output_path.name', 'log_path': 'output_path'}), '(output_path=output_path.name, log_path=output_path)\n', (1077, 1129), False, 'from ditto.writers.json.write import Writer\n'), ((1190, 1221), 'os.path.join', 'os.path.join', (['dirpath', 'files[0]'], {}), '(dirpath, files[0])\n', (1202, 1221), False, 'import os\n'), ((1266, 1310), 'os.path.join', 'os.path.join', (['output_path.name', '"""Model.json"""'], {}), "(output_path.name, 'Model.json')\n", (1278, 1310), False, 'import os\n'), ((626, 667), 'os.path.join', 'os.path.join', (['""".."""', 'dirpath', '"""master.dss"""'], {}), "('..', dirpath, 'master.dss')\n", (638, 667), False, 'import os\n'), ((689, 732), 'os.path.join', 'os.path.join', (['""".."""', 'dirpath', '"""buscoord.dss"""'], {}), "('..', dirpath, 'buscoord.dss')\n", (701, 732), False, 'import os\n'), ((818, 845), 'os.path.join', 'os.path.join', (['""".."""', 'dirpath'], {}), "('..', dirpath)\n", (830, 845), False, 'import os\n')]
|
import unittest
from pyatlas import identifier_converters
class IdentifierConvertersTest(unittest.TestCase):
def setUp(self):
pass
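    # The expected values below follow the Atlas identifier layout
    # osm_identifier * 1_000_000 + country_code * 1_000 + way_section_index
    # (inferred from these test cases; see the pyatlas documentation for the
    # authoritative definition).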
def test_osm_conversion(self):
atlas_id = 222222000000
osm_id = 222222
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = 123001002
osm_id = 123
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = 3101220
osm_id = 3
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
atlas_id = -222222000001
osm_id = 222222
self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))
def test_country_code_conversion(self):
atlas_id = 222222000000
country_code = 0
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = 123001002
country_code = 1
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = 3101220
country_code = 101
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
atlas_id = -222222002001
country_code = 2
self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))
def test_way_section_conversion(self):
atlas_id = 222222000000
way_section = 0
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = 123001002
way_section = 2
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = 3101220
way_section = 220
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
atlas_id = -222222002001
way_section = 1
self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
|
[
"pyatlas.identifier_converters.get_country_code",
"pyatlas.identifier_converters.get_way_section_index",
"pyatlas.identifier_converters.get_osm_identifier"
] |
[((271, 321), 'pyatlas.identifier_converters.get_osm_identifier', 'identifier_converters.get_osm_identifier', (['atlas_id'], {}), '(atlas_id)\n', (311, 321), False, 'from pyatlas import identifier_converters\n'), ((407, 457), 'pyatlas.identifier_converters.get_osm_identifier', 'identifier_converters.get_osm_identifier', (['atlas_id'], {}), '(atlas_id)\n', (447, 457), False, 'from pyatlas import identifier_converters\n'), ((539, 589), 'pyatlas.identifier_converters.get_osm_identifier', 'identifier_converters.get_osm_identifier', (['atlas_id'], {}), '(atlas_id)\n', (579, 589), False, 'from pyatlas import identifier_converters\n'), ((682, 732), 'pyatlas.identifier_converters.get_osm_identifier', 'identifier_converters.get_osm_identifier', (['atlas_id'], {}), '(atlas_id)\n', (722, 732), False, 'from pyatlas import identifier_converters\n'), ((875, 923), 'pyatlas.identifier_converters.get_country_code', 'identifier_converters.get_country_code', (['atlas_id'], {}), '(atlas_id)\n', (913, 923), False, 'from pyatlas import identifier_converters\n'), ((1019, 1067), 'pyatlas.identifier_converters.get_country_code', 'identifier_converters.get_country_code', (['atlas_id'], {}), '(atlas_id)\n', (1057, 1067), False, 'from pyatlas import identifier_converters\n'), ((1163, 1211), 'pyatlas.identifier_converters.get_country_code', 'identifier_converters.get_country_code', (['atlas_id'], {}), '(atlas_id)\n', (1201, 1211), False, 'from pyatlas import identifier_converters\n'), ((1311, 1359), 'pyatlas.identifier_converters.get_country_code', 'identifier_converters.get_country_code', (['atlas_id'], {}), '(atlas_id)\n', (1349, 1359), False, 'from pyatlas import identifier_converters\n'), ((1499, 1552), 'pyatlas.identifier_converters.get_way_section_index', 'identifier_converters.get_way_section_index', (['atlas_id'], {}), '(atlas_id)\n', (1542, 1552), False, 'from pyatlas import identifier_converters\n'), ((1646, 1699), 'pyatlas.identifier_converters.get_way_section_index', 'identifier_converters.get_way_section_index', (['atlas_id'], {}), '(atlas_id)\n', (1689, 1699), False, 'from pyatlas import identifier_converters\n'), ((1793, 1846), 'pyatlas.identifier_converters.get_way_section_index', 'identifier_converters.get_way_section_index', (['atlas_id'], {}), '(atlas_id)\n', (1836, 1846), False, 'from pyatlas import identifier_converters\n'), ((1944, 1997), 'pyatlas.identifier_converters.get_way_section_index', 'identifier_converters.get_way_section_index', (['atlas_id'], {}), '(atlas_id)\n', (1987, 1997), False, 'from pyatlas import identifier_converters\n')]
|
import numpy as np
def softmax(x, axis=None):
    """Numerically stable softmax of x along the given axis.

    Subtracting the per-axis maximum before exponentiating avoids overflow
    for large inputs; the result is unchanged because the constant cancels.
    """
    x_max = np.max(x, axis=axis, keepdims=True)
    e_x = np.exp(x - x_max)
    e_sum = np.sum(e_x, axis=axis, keepdims=True)
    return e_x / e_sum
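# Example (illustrative): row-wise softmax of a 2x2 array --
#     softmax(np.array([[1.0, 2.0], [3.0, 4.0]]), axis=1)
#     -> array([[0.26894142, 0.73105858],
#               [0.26894142, 0.73105858]])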
|
[
"numpy.max",
"numpy.sum",
"numpy.exp"
] |
[((57, 92), 'numpy.max', 'np.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (63, 92), True, 'import numpy as np\n'), ((101, 116), 'numpy.exp', 'np.exp', (['(x - max)'], {}), '(x - max)\n', (107, 116), True, 'import numpy as np\n'), ((127, 164), 'numpy.sum', 'np.sum', (['e_x'], {'axis': 'axis', 'keepdims': '(True)'}), '(e_x, axis=axis, keepdims=True)\n', (133, 164), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import statistics as stat
class optical_braille_recognition():
def __init__(self) -> None:
pass
def make_histogram_y(self, img):
        '''
        Builds the horizontal projection histogram of the image.
        Input:
            img -> image array
        Output:
            hist -> array with the horizontal projection histogram values
        '''
height, width = img.shape
hist = np.zeros(height)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[x] += 1
return hist
def make_histogram_x(self, img):
        '''
        Builds the vertical projection histogram of the image. This projection
        should only be used when the input image contains a single line of
        braille characters.
        Input:
            img -> image array
        Output:
            hist -> array with the vertical projection histogram values
        '''
height, width = img.shape
hist = np.zeros(width)
for x in range(height):
for y in range(width):
if (img[x][y] == 1):
hist[y] += 1
return hist
def get_delimiters(self, hist):
        '''
        Finds the vertical and horizontal delimiters of the positions where the
        dots of the braille characters are located, using the histogram.
        Input:
            hist --> array with the histogram values
        Output:
            delimiters --> array with the dot position delimiters
        '''
delimiters = list()
for i in range(1, len(hist)-1):
if (hist[i] > 0) and (hist[i-1] == 0) and (hist[i+1] > 0):
delimiters.append(i-1)
if (hist[i] > 0) and (hist[i-1] > 0) and (hist[i+1] == 0):
delimiters.append(i+1)
return delimiters
def get_line_delimiters(self, delimiters):
        '''
        Finds the delimiters that determine where the lines of braille text in
        the image begin and end.
        Input:
            delimiters --> array with the dot position delimiters
        Output:
            line_delimiters --> array with the line delimiters
        '''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
# print(distances)
        min = distances.min()  # Distance between dot rows of the same character
        mode = stat.mode(distances)  # Dot diameter
# print(mode)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
line_delimiters = list()
for i in range(1, len(delimiters)-2):
if (distances[i] > mode and distances[i+1] > limiar and distances[i-1] > limiar):
line_delimiters.append(delimiters[i])
line_delimiters.append(delimiters[i+1])
if i-1 == 0:
line_delimiters.append(delimiters[i-1])
if i+1 == len(delimiters)-2:
line_delimiters.append(delimiters[i+2])
return line_delimiters
def get_character_delimiters(self, delimiters):
        '''
        Uses the dot position delimiters to determine the braille character
        delimiters by computing the distances between them.
        Input:
            delimiters --> array with the dot position delimiters
        Output:
            character_delimiters --> array with the character delimiters
        '''
distances = list()
for i in range(len(delimiters)-1):
distances.append(delimiters[i+1] - delimiters[i])
# print(f"{delimiters[i+1]} - {delimiters[i]}", end='\n')
distances = np.array(distances)
min = distances.min()
mode=stat.mode(distances)
if (mode - min) > 2:
limiar = min+2
else:
limiar = min+1
# print(limiar)
# print(distances)
character_delimiters = list()
for i in range(len(delimiters)-1):
            # Delimit characters that have dots in both columns
diameter = mode
if (distances[i] <= limiar and distances[i] != mode-1 ):
if i != 0:
diameter = delimiters[i] - delimiters[i-1]
character_delimiters.append(delimiters[i] - diameter)
character_delimiters.append(delimiters[i+1] + diameter)
            # Delimit the characters at the start and end of the line
elif i == 0 and distances[i+1] > limiar:
                # Case where the character has dots only in the left column
if (distances[i+1] > mode+limiar):
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
                # Case where the character has dots only in the right column
else:
character_delimiters.append(delimiters[i] - min - mode)
character_delimiters.append(delimiters[i+1])
elif (i == len(distances)-1) and distances[i-1] > limiar:
                # Case where the character has dots only in the right column
if (distances[i-1] > mode+limiar and distances[i-3] > limiar):
character_delimiters.append(delimiters[i-1] - min - mode)
character_delimiters.append(delimiters[i])
                # Case where the character has dots only in the left column
else:
character_delimiters.append(delimiters[i+1] + min + mode)
character_delimiters.append(delimiters[i])
            # Delimit characters that have dots only in the left column
if (distances[i] > 1.5*mode+min):
if i > 1 and distances[i-2] > limiar:
character_delimiters.append(delimiters[i] + min + mode)
character_delimiters.append(delimiters[i-1])
            # Delimit characters that have dots only in the right column
elif ((distances[i] > 1.5*mode+min) and (i < len(delimiters)-3) and
(distances[i+2] > limiar)):
# if (i < len(delimiters_x)-3) and distances[i+2] > min+1:
character_delimiters.append(delimiters[i+2])
character_delimiters.append(delimiters[i+1] - min - mode)
# elif i == len(delimiters)-2:
# character_delimiters.append(delimiters[i+2])
# character_delimiters.append(delimiters[i+1] - min - mode)
            # Delimit blank (space) characters
if (distances[i] >= 3*mode+min):
character_delimiters.append(delimiters[i] + mode)
character_delimiters.append(delimiters[i+1] - mode)
return character_delimiters
def get_line_subimages(self, img, line_delimiters):
        '''
        Uses the line delimiters to crop the image into subimages, each one
        containing a single line of braille characters.
        Input:
            img -> array of the image to be cropped
            line_delimiters --> array with the line delimiters
        Output:
            line_subimages --> array with the cropped line subimages
        '''
line_delimiters = sorted(line_delimiters)
line_subimages = list()
for i in range(len(line_delimiters)//2):
line_subimages.append(img[line_delimiters[2*i]:line_delimiters[2*i+1],:])
return line_subimages
def get_character_subimages(self, img, char_delimiters):
        '''
        Crops an image containing one line of braille characters into subimages
        of the individual characters, stored in an array in reading order.
        Input:
            img --> array of the image containing one line of characters
            char_delimiters --> array with the character delimiters
        Output:
            char_subimages --> array with the character subimages
        '''
char_delimiters = sorted(char_delimiters)
for i in range(len(char_delimiters)):
if char_delimiters[i] < 0:
char_delimiters[i] = 0
char_subimages = list()
for i in range(len(char_delimiters)//2):
char_subimages.append(img[:,char_delimiters[2*i]:char_delimiters[2*i+1]])
return char_subimages
def optical_braille_recognition(self, img):
        '''
        Receives a preprocessed image containing braille text, detects the
        position of the characters in the image, and from that builds a matrix
        of subimages in which each row holds the characters of one word of the
        text.
        Input:
            img --> array of the preprocessed image
        Output:
            subimages --> matrix of subimages, where each row holds the
                characters of one word
        '''
hist_y = self.make_histogram_y(img)
delimiters_y = self.get_delimiters(hist_y)
line_delimiters = self.get_line_delimiters(delimiters_y)
line_subimages = self.get_line_subimages(img, line_delimiters)
subimages = list()
for i in range(len(line_subimages)):
hist_x = self.make_histogram_x(line_subimages[i])
delimiters_x = self.get_delimiters(hist_x)
char_delimiters = self.get_character_delimiters(delimiters_x)
char_subimages = self.get_character_subimages(line_subimages[i], char_delimiters)
word_subimages = list()
for j in range(len(char_subimages)):
hist_x = self.make_histogram_x(char_subimages[j])
if np.max(hist_x) != 0:
word_subimages.append(char_subimages[j])
else:
subimages.append(word_subimages)
word_subimages = list()
if np.max(hist_x) != 0 and j == len(char_subimages)-1:
subimages.append(word_subimages)
word_subimages = list()
return subimages
def tilt_correction(self, img):
        max = 0
        dst_img = img  # fall back to the original image if no rotation yields any delimiters
rows, cols = img.shape
for theta in np.arange(-6, 6, 0.1):
Mr = cv2.getRotationMatrix2D( (cols/2, rows/2), theta , 1)
aux_img = cv2.warpAffine(img, Mr, (cols, rows))
hist_y = self.make_histogram_y(aux_img)
delimiters_y = self.get_delimiters(hist_y)
if len(delimiters_y) > max:
max = len(delimiters_y)
dst_img = aux_img
return dst_img
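# Example usage (illustrative sketch, not part of the original module): the file
# name and threshold below are placeholders; the class expects a binary image in
# which braille dots have the value 1.
if __name__ == "__main__":
    obr = optical_braille_recognition()
    raw = cv2.imread("braille_sample.png", cv2.IMREAD_GRAYSCALE)
    _, binary = cv2.threshold(raw, 127, 1, cv2.THRESH_BINARY_INV)
    binary = obr.tilt_correction(binary)
    words = obr.optical_braille_recognition(binary)
    print("detected words:", len(words))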
|
[
"numpy.zeros",
"cv2.warpAffine",
"numpy.max",
"numpy.array",
"numpy.arange",
"statistics.mode",
"cv2.getRotationMatrix2D"
] |
[((467, 483), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', (475, 483), True, 'import numpy as np\n'), ((1098, 1113), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1106, 1113), True, 'import numpy as np\n'), ((2554, 2573), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (2562, 2573), True, 'import numpy as np\n'), ((2704, 2724), 'statistics.mode', 'stat.mode', (['distances'], {}), '(distances)\n', (2713, 2724), True, 'import statistics as stat\n'), ((3992, 4011), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (4000, 4011), True, 'import numpy as np\n'), ((4055, 4075), 'statistics.mode', 'stat.mode', (['distances'], {}), '(distances)\n', (4064, 4075), True, 'import statistics as stat\n'), ((10651, 10672), 'numpy.arange', 'np.arange', (['(-6)', '(6)', '(0.1)'], {}), '(-6, 6, 0.1)\n', (10660, 10672), True, 'import numpy as np\n'), ((10691, 10746), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', 'theta', '(1)'], {}), '((cols / 2, rows / 2), theta, 1)\n', (10714, 10746), False, 'import cv2\n'), ((10768, 10805), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'Mr', '(cols, rows)'], {}), '(img, Mr, (cols, rows))\n', (10782, 10805), False, 'import cv2\n'), ((10107, 10121), 'numpy.max', 'np.max', (['hist_x'], {}), '(hist_x)\n', (10113, 10121), True, 'import numpy as np\n'), ((10348, 10362), 'numpy.max', 'np.max', (['hist_x'], {}), '(hist_x)\n', (10354, 10362), True, 'import numpy as np\n')]
|
import time
import pexpect
import re
import subprocess
from pexpect_strip_ansi import StripAnsiSpawn
class BluetoothctlError(Exception):
"""This exception is raised when bluetoothctl fails to start."""
pass
class Bluetoothctl:
"""A wrapper for bluetoothctl utility."""
def __init__(self, log=False):
out = subprocess.check_output("rfkill unblock bluetooth", shell = True)
logfile = open("bluetoothctl.log", "w") if log else None
self.child = StripAnsiSpawn("bluetoothctl", echo = False, encoding="utf-8", logfile=logfile)
def get_output(self, command, pause = 0):
"""Run a command in bluetoothctl prompt, return output as a list of lines."""
self.child.send(command + "\n")
time.sleep(pause)
start_failed = self.child.expect([r"\[[^\]]+\]#", pexpect.EOF])
if start_failed:
raise BluetoothctlError("Bluetoothctl failed after running " + command)
return self.child.before.split("\r\n")
def start_scan(self):
"""Start bluetooth scanning process."""
try:
out = self.get_output("scan on")
except BluetoothctlError as e:
print(e)
return None
def make_discoverable(self):
"""Make device discoverable."""
try:
out = self.get_output("discoverable on")
except BluetoothctlError as e:
print(e)
return None
def parse_device_info(self, info_string):
"""Parse a string corresponding to a device."""
device = {}
block_list = ["[\x1b[0;", "removed"]
string_valid = not any(keyword in info_string for keyword in block_list)
if string_valid:
try:
device_position = info_string.index("Device")
except ValueError:
pass
else:
if device_position > -1:
attribute_list = info_string[device_position:].split(" ", 2)
device = {
"mac_address": attribute_list[1],
"name": attribute_list[2]
}
return device
def get_available_devices(self):
"""Return a list of tuples of paired and discoverable devices."""
try:
out = self.get_output("devices")
except BluetoothctlError as e:
print(e)
return None
else:
available_devices = []
for line in out:
device = self.parse_device_info(line)
if device:
available_devices.append(device)
return available_devices
def get_paired_devices(self):
"""Return a list of tuples of paired devices."""
try:
out = self.get_output("paired-devices")
except BluetoothctlError as e:
print(e)
return None
else:
paired_devices = []
for line in out:
device = self.parse_device_info(line)
if device:
paired_devices.append(device)
return paired_devices
def get_discoverable_devices(self):
"""Filter paired devices out of available."""
available = self.get_available_devices()
paired = self.get_paired_devices()
return [d for d in available if d not in paired]
def get_device_info(self, mac_address):
"""Get device info by mac address."""
try:
out = self.get_output("info " + mac_address)
except BluetoothctlError as e:
print(e)
return None
else:
info_lines: list[str] = [line for line in out if not re.match(r"^\s*Device", line)]
info = {}
for line in info_lines:
try:
attr_name, attr_value = [part.strip() for part in line.split(":", maxsplit=1)]
info[attr_name] = attr_value
except:
pass
return info
def pair(self, mac_address):
"""Try to pair with a device by mac address."""
try:
out = self.get_output("pair " + mac_address, 4)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to pair", "Pairing successful", pexpect.EOF])
success = True if res == 1 else False
return success
def remove(self, mac_address):
"""Remove paired device by mac address, return success of the operation."""
try:
out = self.get_output("remove " + mac_address, 3)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", "Device has been removed", pexpect.EOF])
success = True if res == 1 else False
return success
def connect(self, mac_address):
"""Try to connect to a device by mac address."""
try:
out = self.get_output("connect " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to connect", r".*Connection successful", pexpect.EOF])
success = True if res == 1 else False
return success
def disconnect(self, mac_address):
"""Try to disconnect to a device by mac address."""
try:
out = self.get_output("disconnect " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["Failed to disconnect", "Successful disconnected", pexpect.EOF])
success = True if res == 1 else False
return success
def trust(self, mac_address):
"""Try to trust a device by mac address."""
try:
out = self.get_output("trust " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", r"Changing ([A-Z0-9:]+) trust succeeded", pexpect.EOF])
success = True if res == 1 else False
return success
def untrust(self, mac_address):
"""Try to untrust a device by mac address."""
try:
out = self.get_output("untrust " + mac_address, 2)
except BluetoothctlError as e:
print(e)
return None
else:
res = self.child.expect(["not available", r"Changing ([A-Z0-9:]+) untrust succeeded", pexpect.EOF])
success = True if res == 1 else False
return success
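# Example usage (illustrative sketch, not part of the original module): requires
# the bluetoothctl and rfkill utilities plus a working Bluetooth adapter.
if __name__ == "__main__":
    bl = Bluetoothctl()
    bl.start_scan()
    time.sleep(10)  # give the scan some time to discover devices
    for device in bl.get_available_devices() or []:
        print(device["mac_address"], device["name"])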
|
[
"re.match",
"subprocess.check_output",
"pexpect_strip_ansi.StripAnsiSpawn",
"time.sleep"
] |
[((335, 398), 'subprocess.check_output', 'subprocess.check_output', (['"""rfkill unblock bluetooth"""'], {'shell': '(True)'}), "('rfkill unblock bluetooth', shell=True)\n", (358, 398), False, 'import subprocess\n'), ((487, 564), 'pexpect_strip_ansi.StripAnsiSpawn', 'StripAnsiSpawn', (['"""bluetoothctl"""'], {'echo': '(False)', 'encoding': '"""utf-8"""', 'logfile': 'logfile'}), "('bluetoothctl', echo=False, encoding='utf-8', logfile=logfile)\n", (501, 564), False, 'from pexpect_strip_ansi import StripAnsiSpawn\n'), ((748, 765), 'time.sleep', 'time.sleep', (['pause'], {}), '(pause)\n', (758, 765), False, 'import time\n'), ((3722, 3751), 're.match', 're.match', (['"""^\\\\s*Device"""', 'line'], {}), "('^\\\\s*Device', line)\n", (3730, 3751), False, 'import re\n')]
|
import heapq as hq
import math
import numpy as np
from models.geometry_utils import *
# TODO: Generalize to 3D?
class Node:
def __init__(self, pos, parent=None, g_cost=math.inf, f_cost=math.inf):
self.pos = pos
self.parent = parent
self.g_cost = g_cost
self.f_cost = f_cost
def __eq__(self, other):
return all(self.pos == other.pos)
def __le__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] <= other.pos[1]
else:
return self.pos[0] <= other.pos[0]
def __lt__(self, other):
if self.pos[0] == other.pos[0]:
return self.pos[1] < other.pos[1]
else:
return self.pos[0] < other.pos[0]
# TODO: Generalize to 3D
class GridMap:
# cell_size > 0; don't make cell_size too small
def __init__(self, bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.1, quad=True):
self.bounds = bounds
self.cell_size = cell_size
self.quad = quad
self.Nx = math.ceil((bounds[1][0] - bounds[0][0]) / cell_size)
self.Ny = math.ceil((bounds[1][1] - bounds[0][1]) / cell_size)
pos = lambda i, j: np.array([bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) * cell_size])
self.grid = [[Node(pos(i, j)) for j in range(self.Ny)] for i in range(self.Nx)]
# pos should be within bounds
def set_node(self, pos, parent, g_cost, f_cost):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
self.grid[i_x][i_y].parent = parent
self.grid[i_x][i_y].g_cost = g_cost
self.grid[i_x][i_y].f_cost = f_cost
return self.grid[i_x][i_y]
# pos should be within bounds
def get_node(self, pos):
i_x = math.floor((pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((pos[1] - self.bounds[0][1]) / self.cell_size)
return self.grid[i_x][i_y]
def get_neighbours(self, node):
i_x = math.floor((node.pos[0] - self.bounds[0][0]) / self.cell_size)
i_y = math.floor((node.pos[1] - self.bounds[0][1]) / self.cell_size)
neighbours = []
for i in range(i_x - 1, i_x + 2):
for j in range(i_y - 1, i_y + 2):
if i == i_x and j == i_y:
continue
if self.quad:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1 and abs(i - i_x) + abs(j - i_y) <= 1:
neighbours.append(self.grid[i][j])
else:
if 0 <= i <= self.Nx - 1 and 0 <= j <= self.Ny - 1:
neighbours.append(self.grid[i][j])
return neighbours
class GraphSearch:
def __init__(self, graph, obstacles, margin):
self.graph = graph
self.obstacles = obstacles
self.margin = margin
def a_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
if not n in (x[1] for x in openSet):
hq.heappush(openSet, (n_.f_cost, n_))
return []
def theta_star(self, start_pos, goal_pos):
h_cost = lambda pos: np.linalg.norm(goal_pos - pos)
edge_cost = lambda n1, n2: np.linalg.norm(n1.pos - n2.pos)
openSet = []
start = self.graph.set_node(start_pos, None, 0.0, h_cost(start_pos))
goal = self.graph.get_node(goal_pos)
hq.heappush(openSet, (start.f_cost, start))
while len(openSet) > 0:
current = openSet[0][1]
if current == goal:
return self.reconstruct_path(current)
hq.heappop(openSet)
for n in self.graph.get_neighbours(current):
if self.check_collision(n.pos):
continue
if (not current.parent is None) and self.line_of_sight(current.parent, n):
g_score = current.parent.g_cost + edge_cost(current.parent, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current.parent, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
else:
g_score = current.g_cost + edge_cost(current, n)
if g_score < n.g_cost:
n_ = self.graph.set_node(n.pos, current, g_score, g_score + h_cost(n.pos))
# delete n from min-heap
for i in range(len(openSet)):
if openSet[i][1] == n:
openSet[i] = openSet[-1]
openSet.pop()
if i < len(openSet):
hq._siftup(openSet, i)
hq._siftdown(openSet, 0, i)
break
hq.heappush(openSet, (n_.f_cost, n_))
return []
# TODO: optimize
def line_of_sight(self, n1, n2):
e = self.graph.cell_size
div = np.linalg.norm(n2.pos - n1.pos) / e
for i in range(1, math.floor(div) + 1):
if self.check_collision((n2.pos * i + n1.pos * (div - i)) / div):
return False
return True
def check_collision(self, pos):
for o in self.obstacles:
A, b = o.get_convex_rep()
b = b.reshape((len(b),))
if all(A @ pos - b - self.margin * np.linalg.norm(A, axis=1) <= 0):
return True
return False
def reconstruct_path(self, node):
path = [node]
while not node.parent is None:
node = node.parent
path.append(node)
return [path[len(path) - i - 1] for i in range(len(path))]
def reduce_path(self, path):
red_path = []
if len(path) > 1:
for i in range(1, len(path)):
if (not path[i].parent.parent is None) and self.line_of_sight(path[i], path[i].parent.parent):
path[i].parent = path[i].parent.parent
else:
red_path.append(path[i].parent)
red_path.append(path[-1])
return red_path
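# Example usage (illustrative sketch, not part of the original module): run an
# A* query on an empty 10 x 10 map, so check_collision never rejects a node.
if __name__ == "__main__":
    grid = GridMap(bounds=((0.0, 0.0), (10.0, 10.0)), cell_size=0.5)
    search = GraphSearch(grid, obstacles=[], margin=0.0)
    path = search.a_star(np.array([0.5, 0.5]), np.array([9.5, 9.5]))
    print("waypoints:", [node.pos for node in path])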
|
[
"heapq.heappush",
"math.ceil",
"math.floor",
"heapq.heappop",
"heapq._siftdown",
"heapq._siftup",
"numpy.array",
"numpy.linalg.norm"
] |
[((1065, 1117), 'math.ceil', 'math.ceil', (['((bounds[1][0] - bounds[0][0]) / cell_size)'], {}), '((bounds[1][0] - bounds[0][0]) / cell_size)\n', (1074, 1117), False, 'import math\n'), ((1137, 1189), 'math.ceil', 'math.ceil', (['((bounds[1][1] - bounds[0][1]) / cell_size)'], {}), '((bounds[1][1] - bounds[0][1]) / cell_size)\n', (1146, 1189), False, 'import math\n'), ((1502, 1559), 'math.floor', 'math.floor', (['((pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((pos[0] - self.bounds[0][0]) / self.cell_size)\n', (1512, 1559), False, 'import math\n'), ((1575, 1632), 'math.floor', 'math.floor', (['((pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((pos[1] - self.bounds[0][1]) / self.cell_size)\n', (1585, 1632), False, 'import math\n'), ((1886, 1943), 'math.floor', 'math.floor', (['((pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((pos[0] - self.bounds[0][0]) / self.cell_size)\n', (1896, 1943), False, 'import math\n'), ((1959, 2016), 'math.floor', 'math.floor', (['((pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((pos[1] - self.bounds[0][1]) / self.cell_size)\n', (1969, 2016), False, 'import math\n'), ((2107, 2169), 'math.floor', 'math.floor', (['((node.pos[0] - self.bounds[0][0]) / self.cell_size)'], {}), '((node.pos[0] - self.bounds[0][0]) / self.cell_size)\n', (2117, 2169), False, 'import math\n'), ((2185, 2247), 'math.floor', 'math.floor', (['((node.pos[1] - self.bounds[0][1]) / self.cell_size)'], {}), '((node.pos[1] - self.bounds[0][1]) / self.cell_size)\n', (2195, 2247), False, 'import math\n'), ((3323, 3366), 'heapq.heappush', 'hq.heappush', (['openSet', '(start.f_cost, start)'], {}), '(openSet, (start.f_cost, start))\n', (3334, 3366), True, 'import heapq as hq\n'), ((4381, 4424), 'heapq.heappush', 'hq.heappush', (['openSet', '(start.f_cost, start)'], {}), '(openSet, (start.f_cost, start))\n', (4392, 4424), True, 'import heapq as hq\n'), ((1220, 1310), 'numpy.array', 'np.array', (['[bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) * cell_size]'], {}), '([bounds[0][0] + (i + 0.5) * cell_size, bounds[0][1] + (j + 0.5) *\n cell_size])\n', (1228, 1310), True, 'import numpy as np\n'), ((3065, 3095), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_pos - pos)'], {}), '(goal_pos - pos)\n', (3079, 3095), True, 'import numpy as np\n'), ((3132, 3163), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1.pos - n2.pos)'], {}), '(n1.pos - n2.pos)\n', (3146, 3163), True, 'import numpy as np\n'), ((3542, 3561), 'heapq.heappop', 'hq.heappop', (['openSet'], {}), '(openSet)\n', (3552, 3561), True, 'import heapq as hq\n'), ((4123, 4153), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_pos - pos)'], {}), '(goal_pos - pos)\n', (4137, 4153), True, 'import numpy as np\n'), ((4190, 4221), 'numpy.linalg.norm', 'np.linalg.norm', (['(n1.pos - n2.pos)'], {}), '(n1.pos - n2.pos)\n', (4204, 4221), True, 'import numpy as np\n'), ((4600, 4619), 'heapq.heappop', 'hq.heappop', (['openSet'], {}), '(openSet)\n', (4610, 4619), True, 'import heapq as hq\n'), ((6539, 6570), 'numpy.linalg.norm', 'np.linalg.norm', (['(n2.pos - n1.pos)'], {}), '(n2.pos - n1.pos)\n', (6553, 6570), True, 'import numpy as np\n'), ((6602, 6617), 'math.floor', 'math.floor', (['div'], {}), '(div)\n', (6612, 6617), False, 'import math\n'), ((3986, 4023), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', (3997, 4023), True, 'import heapq as hq\n'), ((5591, 5628), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', 
(5602, 5628), True, 'import heapq as hq\n'), ((6371, 6408), 'heapq.heappush', 'hq.heappush', (['openSet', '(n_.f_cost, n_)'], {}), '(openSet, (n_.f_cost, n_))\n', (6382, 6408), True, 'import heapq as hq\n'), ((6952, 6977), 'numpy.linalg.norm', 'np.linalg.norm', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (6966, 6977), True, 'import numpy as np\n'), ((5439, 5461), 'heapq._siftup', 'hq._siftup', (['openSet', 'i'], {}), '(openSet, i)\n', (5449, 5461), True, 'import heapq as hq\n'), ((5499, 5526), 'heapq._siftdown', 'hq._siftdown', (['openSet', '(0)', 'i'], {}), '(openSet, 0, i)\n', (5511, 5526), True, 'import heapq as hq\n'), ((6219, 6241), 'heapq._siftup', 'hq._siftup', (['openSet', 'i'], {}), '(openSet, i)\n', (6229, 6241), True, 'import heapq as hq\n'), ((6279, 6306), 'heapq._siftdown', 'hq._siftdown', (['openSet', '(0)', 'i'], {}), '(openSet, 0, i)\n', (6291, 6306), True, 'import heapq as hq\n')]
|
#!/usr/bin/env python
# import required libraries
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import sys
import numpy as np
from pyrf.devices.thinkrf import WSA
from pyrf.util import read_data_and_context
from pyrf.numpy_util import compute_fft
# plot constants
CENTER_FREQ = 2450 * 1e6
SAMPLE_SIZE = 1024
ATTENUATOR = 1
DECIMATION = 1
RFE_MODE = 'ZIF'
# connect to WSA device
dut = WSA()
ip = sys.argv[1]
dut.connect(ip)
class MainApplication(pg.GraphicsWindow):
def __init__(self, dut):
super(MainApplication, self).__init__()
self.dut = dut
def keyPressEvent(self, event):
if event.text() == ';':
cmd, ok = QtGui.QInputDialog.getText(win, 'Enter SCPI Command',
'Enter SCPI Command:')
if ok:
if '?' not in cmd:
dut.scpiset(cmd)
win = MainApplication(dut)
win.resize(1000,600)
win.setWindowTitle("PYRF FFT Plot Example")
# initialize WSA configurations
dut.reset()
dut.request_read_perm()
dut.freq(CENTER_FREQ)
dut.decimation(DECIMATION)
dut.attenuator(ATTENUATOR)
dut.rfe_mode(RFE_MODE)
BANDWIDTH = dut.properties.FULL_BW[RFE_MODE]
# initialize plot
fft_plot = win.addPlot(title="Power Vs. Frequency")
# initialize x-axes limits
plot_xmin = (CENTER_FREQ) - (BANDWIDTH / 2)
plot_xmax = (CENTER_FREQ) + (BANDWIDTH / 2)
# initialize the y-axis of the plot
plot_ymin = -130
plot_ymax = 20
fft_plot.setYRange(plot_ymin ,plot_ymax)
fft_plot.setLabel('left', text= 'Power', units = 'dBm', unitPrefix=None)
# disable auto size of the x-y axis
fft_plot.enableAutoRange('xy', False)
# initialize a curve for the plot
curve = fft_plot.plot(pen='g')
def update():
global dut, curve, fft_plot, plot_xmin, plot_xmax
# read data
data, context = read_data_and_context(dut, SAMPLE_SIZE)
# compute the fft and plot the data
pow_data = compute_fft(dut, data, context)
# update the frequency range (Hz)
freq_range = np.linspace(plot_xmin , plot_xmax, len(pow_data))
# initialize the x-axis of the plot
fft_plot.setXRange(plot_xmin,plot_xmax)
fft_plot.setLabel('bottom', text= 'Frequency', units = 'Hz', unitPrefix=None)
curve.setData(freq_range,pow_data, pen = 'g')
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
[
"pyrf.util.read_data_and_context",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.Qt.QtCore.QTimer",
"pyqtgraph.Qt.QtGui.QInputDialog.getText",
"pyrf.devices.thinkrf.WSA",
"pyrf.numpy_util.compute_fft"
] |
[((407, 412), 'pyrf.devices.thinkrf.WSA', 'WSA', ([], {}), '()\n', (410, 412), False, 'from pyrf.devices.thinkrf import WSA\n'), ((2340, 2355), 'pyqtgraph.Qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (2353, 2355), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1876, 1915), 'pyrf.util.read_data_and_context', 'read_data_and_context', (['dut', 'SAMPLE_SIZE'], {}), '(dut, SAMPLE_SIZE)\n', (1897, 1915), False, 'from pyrf.util import read_data_and_context\n'), ((1971, 2002), 'pyrf.numpy_util.compute_fft', 'compute_fft', (['dut', 'data', 'context'], {}), '(dut, data, context)\n', (1982, 2002), False, 'from pyrf.numpy_util import compute_fft\n'), ((681, 757), 'pyqtgraph.Qt.QtGui.QInputDialog.getText', 'QtGui.QInputDialog.getText', (['win', '"""Enter SCPI Command"""', '"""Enter SCPI Command:"""'], {}), "(win, 'Enter SCPI Command', 'Enter SCPI Command:')\n", (707, 757), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((2603, 2632), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (2630, 2632), False, 'from pyqtgraph.Qt import QtGui, QtCore\n')]
|
import picamera
from time import sleep
import os
# Xlib: extension "RANDR" missing on display ":10.0".
#(gpicview:2869):
# GLib-GObject-WARNING **:
# Attempt to add property GtkSettings:
# :gtk-scrolled-window-placement after class was initialised
camera = picamera.PiCamera()
camera.rotation = 180
print ('klick1.py: Take picture')
camera.capture('python-camera.jpg')
print ('klick1.py: Launch Viewer')
os.system('gpicview python-camera.jpg &')
print ('klick1.py: Wait 1')
sleep(2)
print ('klick1.py: Wait 2')
sleep(2)
print ('klick1.py: Wait 3')
sleep(2)
print ('klick1.py: Close')
os.system('killall gpicview')
#camera.start_preview()
#sleep(5)
#camera.stop_preview()
|
[
"time.sleep",
"os.system",
"picamera.PiCamera"
] |
[((259, 278), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (276, 278), False, 'import picamera\n'), ((406, 451), 'os.system', 'os.system', (['"""gpicview python-camera.jpg &"""'], {}), "('gpicview python-camera.jpg &')\n", (415, 451), False, 'import os\n'), ((480, 488), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (485, 488), False, 'from time import sleep\n'), ((517, 525), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (522, 525), False, 'from time import sleep\n'), ((554, 562), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (559, 562), False, 'from time import sleep\n'), ((590, 619), 'os.system', 'os.system', (['"""killall gpicview"""'], {}), "('killall gpicview')\n", (599, 619), False, 'import os\n')]
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import, unicode_literals
import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal
# utilities for compatibility.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
input = raw_input
def as_bytes(s, encoding='utf-8'):
if isinstance(s, str):
return s
else:
return s.encode(encoding)
def as_string(s, encoding='utf-8'):
if isinstance(s, unicode):
return s
else:
return s.decode(encoding)
else:
input = input
def as_bytes(s, encoding='utf8'):
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
def as_string(s, encoding='utf8'):
if isinstance(s, str):
return s
else:
return s.decode(encoding)
def shell_escape(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
def run(cmd):
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
print(e, file=sys.stderr)
def sig_handler(signum, frame):
sys.exit(0)
def start(args):
run_commands = args.run
pre_commands = args.pre
post_commands = args.post
# handing signal to execute finally code.
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
try:
# run pre command
for cmd in pre_commands:
run(cmd)
# start run commands
threads = []
for cmd in run_commands:
t = threading.Thread(target=run, args=(cmd,))
threads.append(t)
t.start()
# wait for all run command threads finish
for t in threads:
t.join()
finally:
# run post command
for cmd in post_commands:
run(cmd)
def main():
parser = argparse.ArgumentParser(
description="process-starter.py is a utility to start multiple processes",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
description:
A utility to start multiple processes
example:
process-starter.py --run "your-file-watcher-command" "your-dev-server-start-command"
process-starter.py --pre "your-build-command" --run "your-dev-server-start-command"
Copyright (c) <NAME> <<EMAIL>>
The MIT License (MIT)
'''))
parser.add_argument("--pre", dest="pre", metavar="COMMAND", nargs='*', help="Set commands that are executed before run commands", default=[])
parser.add_argument("--post", dest="post", metavar="COMMAND", nargs='*',help="Set commands that are executed after run commands", default=[])
parser.add_argument("--run", "-r", dest="run", metavar="COMMAND", nargs='*', help="Set commands to run concurrently", default=[])
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
start(args)
if __name__ == '__main__': main()
|
[
"textwrap.dedent",
"threading.Thread",
"sys.exit",
"signal.signal",
"subprocess.check_call"
] |
[((1177, 1188), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1185, 1188), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((1344, 1386), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'sig_handler'], {}), '(signal.SIGTERM, sig_handler)\n', (1357, 1386), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((1391, 1432), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'sig_handler'], {}), '(signal.SIGINT, sig_handler)\n', (1404, 1432), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((1020, 1058), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1041, 1058), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((3021, 3032), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3029, 3032), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((1623, 1664), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'args': '(cmd,)'}), '(target=run, args=(cmd,))\n', (1639, 1664), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n'), ((2121, 2536), 'textwrap.dedent', 'textwrap.dedent', (['""" description:\n A utility to start multiple processes\n\n example:\n process-starter.py --run "your-file-watcher-command" "your-dev-server-start-command"\n process-starter.py --pre "your-build-command" --run "your-dev-server-start-command"\n\n Copyright (c) <NAME> <<EMAIL>>\n The MIT License (MIT)\n """'], {}), '(\n """ description:\n A utility to start multiple processes\n\n example:\n process-starter.py --run "your-file-watcher-command" "your-dev-server-start-command"\n process-starter.py --pre "your-build-command" --run "your-dev-server-start-command"\n\n Copyright (c) <NAME> <<EMAIL>>\n The MIT License (MIT)\n """\n )\n', (2136, 2536), False, 'import argparse, os, sys, re, fcntl, time, subprocess, textwrap, threading, signal\n')]
|
import cv2
import os
import sys
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
if __name__ == "__main__":
for line in open("MOT17/train/ImageSets/Main/trainval.txt", "r"):
line = line.rstrip()
img_path = os.path.join("MOT17/train/JPEGImages", line + ".jpg")
anno_path = os.path.join("MOT17/train/Annotations", line + ".xml")
img = cv2.imread(img_path)
anno = ET.parse(anno_path).getroot()
file_name = anno.find('filename').text.lower().strip()
pts = ['xmin', 'ymin', 'xmax', 'ymax']
for obj in anno.iter('object'):
bbox = obj.find('bndbox')
box = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
box.append(cur_pt)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
cv2.imshow("MOT17", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
[
"xml.etree.ElementTree.parse",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"cv2.rectangle",
"cv2.imshow",
"os.path.join"
] |
[((305, 358), 'os.path.join', 'os.path.join', (['"""MOT17/train/JPEGImages"""', "(line + '.jpg')"], {}), "('MOT17/train/JPEGImages', line + '.jpg')\n", (317, 358), False, 'import os\n'), ((380, 434), 'os.path.join', 'os.path.join', (['"""MOT17/train/Annotations"""', "(line + '.xml')"], {}), "('MOT17/train/Annotations', line + '.xml')\n", (392, 434), False, 'import os\n'), ((450, 470), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (460, 470), False, 'import cv2\n'), ((956, 980), 'cv2.imshow', 'cv2.imshow', (['"""MOT17"""', 'img'], {}), "('MOT17', img)\n", (966, 980), False, 'import cv2\n'), ((876, 946), 'cv2.rectangle', 'cv2.rectangle', (['img', '(box[0], box[1])', '(box[2], box[3])', '(0, 0, 255)', '(2)'], {}), '(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)\n', (889, 946), False, 'import cv2\n'), ((1043, 1066), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1064, 1066), False, 'import cv2\n'), ((487, 506), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_path'], {}), '(anno_path)\n', (495, 506), True, 'import xml.etree.ElementTree as ET\n'), ((995, 1009), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1006, 1009), False, 'import cv2\n')]
|
# Generated by Django 3.0.3 on 2020-10-28 07:13
from django.db import migrations, models
import profiles.models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0034_auto_20201028_0358'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=profiles.models.default_image, upload_to=profiles.models.upload_images_path),
),
]
|
[
"django.db.models.ImageField"
] |
[((360, 467), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': 'profiles.models.default_image', 'upload_to': 'profiles.models.upload_images_path'}), '(default=profiles.models.default_image, upload_to=profiles\n .models.upload_images_path)\n', (377, 467), False, 'from django.db import migrations, models\n')]
|
from datetime import datetime, timedelta
def parse_line(line):
date = datetime.strptime(line[1:17], "%Y-%m-%d %H:%M")
message = line[19:]
return (date, message)
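# Example (illustrative): parse_line("[1518-11-01 00:00] Guard #10 begins shift")
# returns (datetime(1518, 11, 1, 0, 0), "Guard #10 begins shift").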
with open('data.txt') as data:
unordered_list = [parse_line(line) for line in data.readlines()]
ordered_list = sorted(unordered_list, key= lambda i : i[0])
guard_log = dict()  # guard_id -> list of (fell_asleep, woke_up, time_asleep) tuples
guard = None
fell_asleep = None
## Parse the ordered guard sleep log
for log_entry in ordered_list:
timestamp = log_entry[0]
message = log_entry[1]
if message.startswith("Guard"):
guard = message[7:message.index(" ", 7)]
elif message.startswith("falls asleep"):
fell_asleep = timestamp
elif message.startswith("wakes up"):
if fell_asleep == None:
            raise Exception("encountered 'wakes up' without a preceding 'falls asleep' entry")
time_asleep = timestamp - fell_asleep
sleep_entry = (fell_asleep, timestamp, time_asleep)
if guard in guard_log:
guard_log[guard].append(sleep_entry)
else:
guard_log[guard] = [ sleep_entry ]
fell_asleep = None
## Create a tuple for sorting to find the guard that sleeps the most
total_sleep_time = []
for g_id in guard_log.keys():
total_time = sum([log_entry[2] for log_entry in guard_log[g_id]], timedelta())
total_sleep_time.append((g_id, total_time))
total_sleep_time.sort(reverse=True, key=lambda elem : elem[1])
sleepy_guard_id = total_sleep_time[0][0]
print(sleepy_guard_id)
## Find the sleepy guards most slept minute
sleep_log = guard_log[sleepy_guard_id]
sleep_minute_count = dict()
for i in range(0, 60):
sleep_minute_count[i] = 0
for log_entry in sleep_log: # ( fell_asleep, woke_up, time_asleep )
fell_asleep = log_entry[0]
time_asleep = log_entry[2]
start_sleep_minute = int(fell_asleep.minute)
end_sleep_minute = start_sleep_minute + int(time_asleep.total_seconds() / 60)
for minute in range(start_sleep_minute, end_sleep_minute):
sleep_minute_count[minute] += 1
sleep_minute_count = [(k, sleep_minute_count[k]) for k in sleep_minute_count.keys()]
sleep_minute_count.sort(reverse=True, key=lambda elem : elem[1])
sleepy_minute = sleep_minute_count[0][0]
print(sleepy_minute)
checksum = int(sleepy_guard_id) * sleepy_minute
print(checksum)
|
[
"datetime.datetime.strptime",
"datetime.timedelta"
] |
[((73, 120), 'datetime.datetime.strptime', 'datetime.strptime', (['line[1:17]', '"""%Y-%m-%d %H:%M"""'], {}), "(line[1:17], '%Y-%m-%d %H:%M')\n", (90, 120), False, 'from datetime import datetime, timedelta\n'), ((1235, 1246), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (1244, 1246), False, 'from datetime import datetime, timedelta\n')]
|
"""Notifier package responsible for user notification
"""
import json
import logging
import re
import time
import traceback
# std
from abc import ABC, abstractmethod
from dataclasses import dataclass
from json_logic import jsonLogic
from typing import List
from enum import Enum
# Ignore Chiadog alerts about being offline due to entire container just launching in the first 30 minutes
MINIMUM_LAUNCH_SECONDS_BEFORE_ALERTING_ABOUT_BEING_OFFLINE = 30 * 60
class EventPriority(Enum):
"""Event priority dictates how urgently
the user needs to be notified about it
"""
LOW = -1
NORMAL = 0
HIGH = 1
class EventType(Enum):
"""Events can either be user events
that are propagated directly to the
user, or keep-alive events that are
processed to ensure the system runs
"""
KEEPALIVE = 0
USER = 1
DAILY_STATS = 2
PLOTDECREASE = 3
PLOTINCREASE = 4
class EventService(Enum):
"""Even service helps to distinguish
between similar events for different services
"""
HARVESTER = 0
FARMER = 1
FULL_NODE = 2
DAILY = 3
WALLET = 4
@dataclass
class Event:
type: EventType
priority: EventPriority
service: EventService
message: str
class Notifier(ABC):
"""This abstract class provides common interface for
any notifier implementation. It should be easy to add
extensions that integrate with variety of services such as
Pushover, E-mail, Slack, WhatsApp, etc
"""
def __init__(self, title_prefix: str, config: dict):
self._program_launch_time = time.time()
self._title_prefix = title_prefix
self._config = config
self._conn_timeout_seconds = 10
self._notification_types = [EventType.USER]
self._notification_services = [EventService.HARVESTER, EventService.FARMER, EventService.FULL_NODE]
daily_stats = config.get("daily_stats", False)
wallet_events = config.get("wallet_events", False)
decreasing_plot_events = config.get("decreasing_plot_events", False)
increasing_plot_events = config.get("increasing_plot_events", False)
if daily_stats:
self._notification_types.append(EventType.DAILY_STATS)
self._notification_services.append(EventService.DAILY)
if wallet_events:
self._notification_services.append(EventService.WALLET)
if decreasing_plot_events:
self._notification_types.append(EventType.PLOTDECREASE)
if increasing_plot_events:
self._notification_types.append(EventType.PLOTINCREASE)
def get_title_for_event(self, event):
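        # Prefix the notification title with an icon that reflects the event priority.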
icon = ""
if event.priority == EventPriority.HIGH:
icon = "🚨"
elif event.priority == EventPriority.NORMAL:
icon = "⚠️"
elif event.priority == EventPriority.LOW:
icon = "ℹ️"
return f"{icon} {self._title_prefix} {event.service.name}"
def should_ignore_event(self, event):
        # Automatically ignore Chiadog's spurious "Your harvester appears to be offline!" alerts immediately after a relaunch of the container
# Obviously if the Machinaris container (and thus all farming/harvesting) was just started, there will be a gap in the log...
if (self._program_launch_time + MINIMUM_LAUNCH_SECONDS_BEFORE_ALERTING_ABOUT_BEING_OFFLINE) >= time.time():
if (event.service.name == 'HARVESTER' and event.message.startswith("Your harvester appears to be offline!")) or \
(event.service.name == 'FULL_NODE' and event.message.startswith("Experiencing networking issues?")):
return True
# Next only ignore if user has set an "ignore" clause in config.xml for a particular Notifier
if not "ignore" in self._config:
return False
ignore = self._config["ignore"]
try:
# First check for one of type, priority, service, and message as a simple filter
if 'type' in ignore and ignore['type'] == event.type.name:
return True
if 'priority' in ignore and ignore['priority'] == event.priority.name:
return True
if 'service' in ignore and ignore['service'] == event.service.name:
return True
if 'message' in ignore and re.search(ignore['message'], event.message, re.M|re.I):
return True
# Then look for compound ignore clause to invoke json logic
if 'compound' in ignore:
rule = json.loads(ignore['compound'])
data = {
"type" : event.type.name.lower(),
"priority" : event.priority.name.lower(),
"service" : event.service.name.lower(),
"message" : event.message
}
logging.debug("Rule: {0}".format(json.loads(ignore['compound'])))
logging.debug("Data: {0}".format(data))
result = jsonLogic(rule, data)
logging.debug("Result: {0}".format(result))
return result
except Exception as ex:
logging.error("Ignore config '{0}' error {1}".format(ignore, str(ex)))
traceback.print_exc()
return False
@abstractmethod
def send_events_to_user(self, events: List[Event]) -> bool:
"""Implementation specific to the integration"""
pass
|
[
"traceback.print_exc",
"json.loads",
"time.time",
"json_logic.jsonLogic",
"re.search"
] |
[((1581, 1592), 'time.time', 'time.time', ([], {}), '()\n', (1590, 1592), False, 'import time\n'), ((3364, 3375), 'time.time', 'time.time', ([], {}), '()\n', (3373, 3375), False, 'import time\n'), ((4320, 4376), 're.search', 're.search', (["ignore['message']", 'event.message', '(re.M | re.I)'], {}), "(ignore['message'], event.message, re.M | re.I)\n", (4329, 4376), False, 'import re\n'), ((4536, 4566), 'json.loads', 'json.loads', (["ignore['compound']"], {}), "(ignore['compound'])\n", (4546, 4566), False, 'import json\n'), ((5000, 5021), 'json_logic.jsonLogic', 'jsonLogic', (['rule', 'data'], {}), '(rule, data)\n', (5009, 5021), False, 'from json_logic import jsonLogic\n'), ((5263, 5284), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5282, 5284), False, 'import traceback\n'), ((4886, 4916), 'json.loads', 'json.loads', (["ignore['compound']"], {}), "(ignore['compound'])\n", (4896, 4916), False, 'import json\n')]
|
import typing
from dataclasses import dataclass
import yaml
if typing.TYPE_CHECKING:
from app.web.app import Application
@dataclass
class Config:
username: str
password: str
def setup_config(app: "Application"):
with open("config/config.yaml", "r") as f:
raw_config = yaml.safe_load(f)
app.config = Config(
username=raw_config["credentials"]["username"],
password=raw_config["credentials"]["password"],
)
|
[
"yaml.safe_load"
] |
[((298, 315), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (312, 315), False, 'import yaml\n')]
|
# validated: 2018-03-01 DS b3d643236ddc libraries/driver/include/ctre/phoenix/Motion/TrajectoryPoint.h
from collections import namedtuple
__all__ = ["TrajectoryPoint"]
#: Motion Profile Trajectory Point for use with pushMotionProfileTrajectory
TrajectoryPoint = namedtuple(
"TrajectoryPoint",
[
"position",
"velocity",
"auxiliaryPos",
"profileSlotSelect0",
"profileSlotSelect1",
"isLastPoint",
"zeroPos",
"timeDur",
],
)
TrajectoryPoint.position.__doc__ = "The position to servo to."
TrajectoryPoint.velocity.__doc__ = "The velocity to feed-forward."
TrajectoryPoint.auxiliaryPos.__doc__ = "The position for auxiliary PID to target."
TrajectoryPoint.profileSlotSelect0.__doc__ = """
Which slot to get PIDF gains.
PID is used for position servo.
F is used as the Kv constant for velocity feed-forward.
Typically this is hardcoded to a particular slot, but you are free to
gain schedule if need be.
Choose from [0,3]
"""
TrajectoryPoint.profileSlotSelect1.__doc__ = """
Which slot to get PIDF gains for auxiliary PID.
This only has impact during MotionProfileArc Control mode.
Choose from [0,1]
"""
TrajectoryPoint.isLastPoint.__doc__ = """
Set to true to signal Talon that this is the final point, so do not
attempt to pop another trajectory point from out of the Talon buffer.
Instead continue processing this way point. Typically the velocity
member variable should be zero so that the motor doesn't spin indefinitely.
"""
TrajectoryPoint.zeroPos.__doc__ = """
Set to true to signal Talon to zero the selected sensor.
When generating MPs, one simple method is to make the first target position zero,
and the final target position the target distance from the current position.
Then when you fire the MP, the current position gets set to zero.
If this is the intent, you can set zeroPos on the first trajectory point.
Otherwise you can leave this false for all points, and offset the positions
of all trajectory points so they are correct.
"""
TrajectoryPoint.timeDur.__doc__ = """
Duration to apply this trajectory pt.
This time unit is ADDED to the existing base time set by
configMotionProfileTrajectoryPeriod().
"""
|
[
"collections.namedtuple"
] |
[((265, 425), 'collections.namedtuple', 'namedtuple', (['"""TrajectoryPoint"""', "['position', 'velocity', 'auxiliaryPos', 'profileSlotSelect0',\n 'profileSlotSelect1', 'isLastPoint', 'zeroPos', 'timeDur']"], {}), "('TrajectoryPoint', ['position', 'velocity', 'auxiliaryPos',\n 'profileSlotSelect0', 'profileSlotSelect1', 'isLastPoint', 'zeroPos',\n 'timeDur'])\n", (275, 425), False, 'from collections import namedtuple\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
#import difflib
import datetime
import logging
import os
from selenium import webdriver
# from selenium.webdriver.firefox.options import Options
# from selenium import selenium
# from selenium.common.exceptions import TimeoutException
# from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
import sys
# import subprocess
import time
if sys.version_info.major == 2:
from urllib2 import urlopen
reload(sys)
sys.setdefaultencoding('utf8')
elif sys.version_info.major == 3:
from urllib.request import urlopen
# 2014 Mar 10 - Reorganize output, astephens
# 2014 Mar 12 - Ignore URLs (lines with "http") when performing diff
# 2014 Mar 14 - Add CompareDirs
# 2015 Feb 2 - AWS, ignore page-break lines between multiple plots
# 2021 Feb 12 - BWM, code cleanup
sleep = 0.1 # May need to increase on fast computers in headless mode
#---------------------------------------------------------------------------------------------------
def Usage():
print ('')
print ('SYNOPSIS')
cmd = sys.argv[0]
print (' ', cmd[cmd.rfind('/')+1:], 'test/production')
print ('')
print ('DESCRIPTION')
print (' Blah.')
print ('')
print ('OPTIONS')
#print (' -d : debug mode')
print ('')
# raise SystemExit
#---------------------------------------------------------------------------------------------------
def GetURL(Instrument, Testing, site='web'):
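    # Build the ITC servlet URL for the requested instrument on either the test or production server.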
url = ''
if Testing:
url = 'http://itcdev.cl.gemini.edu:9080/itc/servlets/web/'
# url = 'http://sbfitcdev1.cl.gemini.edu:9080/itc/servlets/web/'
else:
if site in ['gn', 'gs']:
# Used by ODBs
url = 'http://' + site + 'odb.gemini.edu:8442/itc/servlets/web/'
elif site == 'web':
# Used by ITC web pages
url = 'https://www.gemini.edu/itc/servlets/web/'
else:
print('Site must be either "gn", "gs", or "web".')
return url
if Instrument == 'NIRI':
url += 'ITCniri.html'
elif Instrument == 'F2':
url += 'ITCflamingos2.html'
elif Instrument == 'GMOSN':
url += 'ITCgmos.html'
elif Instrument == 'GMOSS':
url += 'ITCgmosSouth.html'
elif Instrument == 'GNIRS':
url += 'ITCgnirs.html'
elif Instrument == 'NIFS':
url += 'ITCnifs.html'
elif Instrument == 'Michelle':
url += 'ITCmichelle.html'
elif Instrument == 'GSAOI':
url += 'ITCgsaoi.html'
elif Instrument == 'TReCS':
url += 'ITCtrecs.html'
return(url)
#---------------------------------------------------------------------------------------------------
# Get the output path
def GetPath(Instrument, Testing, outdir='/tmp/'):
# path = os.getenv('HOME') + '/tmp/' + Instrument + '/' + str(datetime.date.today())
path = os.getenv('HOME') + outdir
if Testing:
path += '/Test'
else:
path += '/Prod'
return(path)
#---------------------------------------------------------------------------------------------------
def SetLog(instrument, outdir='/tmp'):
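    # Build a timestamped log file path for this instrument and configure logging to it.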
path = os.environ['HOME'] + outdir + '/'
logfile = path + '/ITC_' + instrument + '.' + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + '.log'
logger = ConfigureLogging(logfile)
#logger = logging.getLogger()
logger.info('Log = %s', logfile)
return(logger)
#---------------------------------------------------------------------------------------------------
def ParseArgs(argv):
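    # Choose between the test and production servers based on the command-line argument.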
logger = logging.getLogger()
Testing = False
if len(argv) != 2:
Usage()
logger.info('Using default (Production URL...)')
else:
if 'test' in argv[1].lower():
Testing = True
logger.info('Using Test URL...')
elif 'prod' in argv[1].lower():
logger.info('Using Production URL...')
else:
Usage()
raise SystemExit
return(Testing)
#---------------------------------------------------------------------------------------------------
# Record the URL in the output directory for future reference
def RecordURL(URL, Instrument, Testing):
path = GetPath(Instrument, Testing)
if not os.path.exists(path):
os.mkdir(path)
URLFile = open(path + '/URL','w')
URLFile.write(URL + '\n')
URLFile.close()
#---------------------------------------------------------------------------------------------------
# Pass a URL that contains the ITC tests
def startWebpage(URL, headless=True):
# Create a new instance of the Firefox driver
# https://developer.mozilla.org/en-US/docs/Mozilla/Firefox/Headless_mode
options = webdriver.firefox.options.Options()
if headless:
options.add_argument('-headless')
driver = webdriver.Firefox(executable_path='geckodriver', options=options)
# go to the GMOS ITC Page
driver.get(URL)
return driver
#---------------------------------------------------------------------------------------------------
# Input: Brightness
# Units
def setPointSource(driver, Brightness, Units):
logger = logging.getLogger('setPointSource')
#Select Point Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='POINT']").click()
#Set Point Source Brightness
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting Point Source brightness to %s', Brightness)
driver.find_element_by_name("psSourceNorm").clear()
driver.find_element_by_name("psSourceNorm").send_keys(Brightness)
#Set Point Source Units
driver.find_element_by_xpath("//select[@name='psSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setGaussianSource(driver, FullWidth, Brightness, Units):
logger = logging.getLogger('setGaussianSource')
# Turn Fullwidth to str
if type(FullWidth) is float:
FullWidth = str(FullWidth)
# Turn Brightness to str
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting Gaussian source with FWHM = %s and brightness = %s %s', FullWidth, Brightness, Units)
# Select Gaussian Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='GAUSSIAN']").click()
# Set Full Width Half Max
driver.find_element_by_name("gaussFwhm").clear()
driver.find_element_by_name("gaussFwhm").send_keys(FullWidth)
# Set Brightness
driver.find_element_by_name("gaussSourceNorm").clear()
driver.find_element_by_name("gaussSourceNorm").send_keys(Brightness)
# Set Brightness Units
driver.find_element_by_xpath("//select[@name='gaussSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setUniformSource(driver, Brightness, Units):
logger = logging.getLogger('setUniformSource')
time.sleep(sleep)
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting uniform brightness to %s %s', Brightness, Units)
# Select Uniform Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='UNIFORM']").click()
# Set Brightness
driver.find_element_by_name("usbSourceNorm").clear()
driver.find_element_by_name("usbSourceNorm").send_keys(Brightness)
# Set Brightness Units
driver.find_element_by_xpath("//select[@name='usbSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setBrightnessNormalization(driver, Wavelength):
driver.find_element_by_xpath("""//select[@name='WavebandDefinition']/option[@value=""" + '"' + Wavelength + '"' + """]""").click()
#---------------------------------------------------------------------------------------------------
def setLibrarySpectrum(driver, Type):
#Set for Library Spectrum of a star with specific stellar type
driver.find_element_by_xpath("//input[@value='LIBRARY_STAR' and @name='Distribution']").click()
#Choose stellar type
driver.find_element_by_xpath("//select[@name='stSpectrumType']/option[@value='" + Type + "']").click()
#---------------------------------------------------------------------------------------------------
def setLibrarySpectrumNonStellar(driver, Type):
#Set for Library Spectrum of a non-stellar object
driver.find_element_by_xpath("//input[@value='LIBRARY_NON_STAR' and @name='Distribution']").click()
#Choose non-stellar object
driver.find_element_by_xpath("//select[@name='nsSpectrumType']/option[@value='" + Type + "']").click()
#---------------------------------------------------------------------------------------------------
def setPowerLawSpectrum(driver, Index):
logger = logging.getLogger('setPowerLawSpectrum')
time.sleep(sleep)
if type(Index) is int or type(Index) is float:
Index = str(Index)
logger.debug('Setting power law index to %s', Index)
# Set for Power Law Spectrum
driver.find_element_by_xpath("//input[@value='PLAW' and @name='Distribution']").click()
# Set Index
driver.find_element_by_name("powerIndex").clear()
driver.find_element_by_name("powerIndex").send_keys(Index)
#---------------------------------------------------------------------------------------------------
def setBlackBodySpectrum(driver, Temperature):
logger = logging.getLogger('setBlackBodySpectrum')
time.sleep(sleep)
if type(Temperature) is int or type(Temperature) is float:
Temperature = str(Temperature)
logger.debug('Setting blackbody temperature to %s deg', Temperature)
# Set for BlackBody
driver.find_element_by_xpath("//input[@value='BBODY' and @name='Distribution']").click()
# Set Temperature
driver.find_element_by_name("BBTemp").clear()
driver.find_element_by_name("BBTemp").send_keys(Temperature)
#---------------------------------------------------------------------------------------------------
def setEmissionLine(driver, Wavelength, LineFlux, LineFluxUnits, LineWidth, FluxDensity, FluxDensityUnits):
logger = logging.getLogger('setEmissionLine')
time.sleep(sleep)
# Choose Emission Line
driver.find_element_by_xpath("//input[@value='ELINE' and @name='Distribution']").click()
# Set Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting emission line wavelength to %s um', Wavelength)
driver.find_element_by_name("lineWavelength").clear()
driver.find_element_by_name("lineWavelength").send_keys(Wavelength)
# Set Line Flux
if type(LineFlux) is float:
LineFlux = str(LineFlux)
logger.debug('Setting emission line flux to %s %s', LineFlux, LineFluxUnits)
driver.find_element_by_name("lineFlux").clear()
driver.find_element_by_name("lineFlux").send_keys(LineFlux)
# Set Line Flux Units
driver.find_element_by_xpath("//select[@name='lineFluxUnits']/option[@value='" + LineFluxUnits + "']")
# Set Line Width
if type(LineWidth) is float:
LineWidth = str(LineWidth)
logger.debug('Setting emission line width to %s', LineWidth)
driver.find_element_by_name("lineWidth").clear()
driver.find_element_by_name("lineWidth").send_keys(LineWidth)
# Set Flux Density
if type(FluxDensity) is float:
FluxDensity = str(FluxDensity)
logger.debug('Setting emission line flux density to %s %s', FluxDensity, FluxDensityUnits)
driver.find_element_by_name("lineContinuum").clear()
driver.find_element_by_name("lineContinuum").send_keys(FluxDensity)
# Set Flux Density Units
driver.find_element_by_xpath("//select[@name='lineContinuumUnits']/option[@value='" + FluxDensityUnits + "']")
#---------------------------------------------------------------------------------------------------
# This is for the OLD GMOS ITC with EEV, Hamamatsu Red and Blue CCDS
def setDetectorPropertiesGMOS(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
#Set CCD
if "eev" in CCD.lower():
#Set to EEV array
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "red" in CCD.lower():
#Set to Hamamatsu Red
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
else:
#Set to Hamamatsu Blue
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
#Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
#Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# Set Detector Properties for GMOS-N
def setDetectorPropertiesGMOSN(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
# Set CCD
if "dd" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "leg" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "ham" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
# Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
# Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# Set Detector Properties for GMOS-S
def setDetectorPropertiesGMOSS(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
# Set CCD
if "eev" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "ham" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
# Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
# Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-05
def setDetectorPropertiesGSAOI(driver, Noise, Coating, Strehl, StrehlBand):
logger = logging.getLogger('setDetectorPropertiesGSAOI')
# Set Read Noise Level
if "veryfaint" in Noise.lower():
driver.find_element_by_xpath("//input[@value='VERY_FAINT' and @name='ReadMode']").click()
elif "faint" in Noise.lower():
driver.find_element_by_xpath("//input[@value='FAINT' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='BRIGHT' and @name='ReadMode']").click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Strehl
# if type(Strehl) is int or type(Strehl) is float:
# Strehl = str(Strehl)
#
# logger.debug('Setting Strehl to %s', Strehl)
# driver.find_element_by_name("avgStrehl").clear()
# driver.find_element_by_name("avgStrehl").send_keys(Strehl)
#
# # Set Strehl Band
# driver.find_element_by_xpath("//select[@name='strehlBand']/option[@value='" + StrehlBand + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-10
def setDetectorPropertiesF2(driver, Noise, Coating, Port, Wavefront):
#Set Read Noise Level
if "low" in Noise.lower():
driver.find_element_by_xpath("//input[@value='FAINT_OBJECT_SPEC' and @name='ReadMode']").click()
elif "med" in Noise.lower():
driver.find_element_by_xpath("//input[@value='MEDIUM_OBJECT_SPEC' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='BRIGHT_OBJECT_SPEC' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Port
if "side" in Port.lower():
driver.find_element_by_xpath("//input[@value='SIDE_LOOKING' and @name='IssPort']").click()
elif "up" in Port.lower():
driver.find_element_by_xpath("//input[@value='UP_LOOKING' and @name='IssPort']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesNIRI(driver, Bias, Noise, Coating, Wavefront):
#Set Detector Bias
if "low" in Bias.lower():
driver.find_element_by_xpath("//input[@value='SHALLOW' and @name='WellDepth']").click()
else:
driver.find_element_by_xpath("//input[@value='DEEP' and @name='WellDepth']").click()
#Set Read Noise Level
if "low" in Bias.lower():
driver.find_element_by_xpath("//input[@value='IMAG_SPEC_NB' and @name='ReadMode']").click()
elif "med" in Bias.lower():
driver.find_element_by_xpath("//input[@value='IMAG_1TO25' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='IMAG_SPEC_3TO5' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesNIFS(driver, Read, Coating, Wavefront):
#Set read Mode and Well Depth
if "bright" in Read.lower():
driver.find_element_by_xpath("//input[@value='BRIGHT_OBJECT_SPEC' and @name='ReadMode']").click()
elif "medium" in Read.lower():
driver.find_element_by_xpath("//input[@value='MEDIUM_OBJECT_SPEC' and @name='ReadMode']").click()
elif "faint" in Read.lower():
driver.find_element_by_xpath("//input[@value='FAINT_OBJECT_SPEC' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesGNIRS(driver, Read, Coating, Wavefront):
#Set read Mode and Well Depth
if "verybright" in Read.lower():
driver.find_element_by_xpath("//input[@value='VERY_BRIGHT' and @name='ReadMode']").click()
elif "bright" in Read.lower():
driver.find_element_by_xpath("//input[@value='BRIGHT' and @name='ReadMode']").click()
elif "faint" in Read.lower():
driver.find_element_by_xpath("//input[@value='FAINT' and @name='ReadMode']").click()
elif "veryfaint" in Read.lower():
driver.find_element_by_xpath("//input[@value='VERY_FAINT' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# For Michelle and TReCS
def setDetectorPropertiesMichelle(driver, Mirror, Port, Wavefront):
#Set Mirror Coating
if "silver" in Mirror.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Mirror.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Instrument Port
if "side" in Port.lower():
driver.find_element_by_xpath("//input[@value='SIDE_LOOKING' and @name='IssPort']").click()
else:
driver.find_element_by_xpath("//input[@value='UP_LOOKING' and @name='IssPort']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesTReCS(driver, Cryostat, Filter, FPM, Grating, Wavelength):
logger = logging.getLogger('setOpticalPropertiesTReCS')
time.sleep(sleep)
# Set Cryostat
driver.find_element_by_xpath("//select[@name='WindowWheel']/option[@value='" + Cryostat + "']").click()
# Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
# Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Spectrum Central Wavelength
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesMichelle(driver, Filter, FPM, Grating, Wavelength, Polarimetry):
logger = logging.getLogger('setOpticalPropertiesMichelle')
time.sleep(sleep)
# Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
# Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Spectrum Central Wavelength
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
# Set Polarimetry
if "dis" in Polarimetry.lower():
driver.find_element_by_xpath("//input[@value='NO' and @name='polarimetry']").click()
else:
driver.find_element_by_xpath("//input[@value='YES' and @name='polarimetry']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesNIFS(driver, Filter, Grating, Wavelength):
logger = logging.getLogger('setOpticalPropertiesNIFS')
time.sleep(sleep)
# Set Filter
if "zj" in Filter.lower() or "z-j" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='ZJ_FILTER']").click()
elif "jh" in Filter.lower() or "j-h" in Filter.lower() or "hj" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='JH_FILTER']").click()
elif "hk" in Filter.lower() or "h-k" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='HK_FILTER']").click()
# Set Grating
if "z" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='Z']").click()
elif "short" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K_SHORT']").click()
elif "long" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K_LONG']").click()
elif "j" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='J']").click()
elif "h" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='H']").click()
else:
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K']").click()
# Set Spectrum Central Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesGNIRS(driver, Camera, FPM, Grating, Wavelength, Cross):
logger = logging.getLogger('setOpticalPropertiesGNIRS')
time.sleep(sleep)
# Set Camera
driver.find_element_by_xpath("//select[@name='PixelScale']/option[@value='" + Camera + "']").click()
# Set Focal Plane Mask
driver.find_element_by_xpath("//select[@name='SlitWidth']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Central Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
# Set Cross Dispersed
if "no" in Cross.lower():
driver.find_element_by_xpath("//select[@name='CrossDispersed']/option[@value='NO']").click()
else:
driver.find_element_by_xpath("//select[@name='CrossDispersed']/option[@value='SXD']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesGMOS(driver, Grating, Filter, CentralWavelength, FPU):
logger = logging.getLogger('setOpticalPropertiesGMOS')
time.sleep(sleep)
# Set Grating
driver.find_element_by_xpath("//select[@name='instrumentDisperser']/option[@value='" + Grating + "']").click()
# Set Filter
driver.find_element_by_xpath("//select[@name='instrumentFilter']/option[@value='" + Filter + "']").click()
# Set Central Wavelength
logger.debug('Setting central wavelength to %s nm', CentralWavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(CentralWavelength)
# Alternatively:
#cwav = driver.find_element_by_name("instrumentCentralWavelength")
#cwav.clear()
#cwav.send_keys(CentralWavelength)
# or:
#cwav = driver.find_element_by_xpath("//input[@name='instrumentCentralWavelength']")
#cwav.clear()
#cwav.send_keys(CentralWavelength)
# Set Focal Plane Unit
driver.find_element_by_xpath("//select[@name='instrumentFPMask']/option[@value='" + FPU + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-06
def setOpticalPropertiesGSAOI(driver, Filter):
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-10
def setOpticalPropertiesF2(driver, Filter, Disperser, FPM):
time.sleep(sleep)
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#Set Disperser
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Disperser + "']").click()
#Set FPM
driver.find_element_by_xpath("//select[@name='FPUnit']/option[@value='" + FPM + "']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesNIRI(driver, Camera, Filter, Disperser, FPM):
time.sleep(sleep)
#Set Camera
driver.find_element_by_xpath("//select[@name='Camera']/option[@value='" + Camera + "']").click()
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#Set Disperser
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Disperser + "']").click()
#Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
#---------------------------------------------------------------------------------------------------
def setAltairProperties(driver, Seperation, Brightness, FieldLens, Mode):
#Set AO Guide Star Seperation
if type(Seperation) is float:
Seperation = str(Seperation)
driver.find_element_by_name("guideSep").clear()
driver.find_element_by_name("guideSep").send_keys(Seperation)
#Set Guide Star Brightness (R-Band)
if type(Brightness) is float:
Brightness = str(Brightness)
driver.find_element_by_name("guideMag").clear()
driver.find_element_by_name("guideMag").send_keys(Brightness)
#Set Field Lens
driver.find_element_by_xpath("//input[@value='" + FieldLens.upper() + "' and @name='FieldLens']").click()
#Set Altair Mode
if "ngs" in Mode.lower() or "natural" in Mode.lower():
driver.find_element_by_xpath("//input[@value='NGS' and @name='GuideStarType']").click()
else:
driver.find_element_by_xpath("//input[@value='LGS' and @name='GuideStarType']").click()
#---------------------------------------------------------------------------------------------------
def setObservingConditions(driver, ImageQuality, CloudCover, WaterVapour, SkyBackground, AirMass):
#set Image Quality
if ImageQuality == 20:
Value = "PERCENT_20"
elif ImageQuality == 70:
Value = "PERCENT_70"
elif ImageQuality == 85:
Value = "PERCENT_85"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='ImageQuality' and @value='" + Value + "']").click()
#Set Cloud Cover
if CloudCover == 50:
Value = "PERCENT_50"
elif CloudCover == 70:
Value = "PERCENT_70"
elif CloudCover == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='CloudCover' and @value='" + Value + "']").click()
#Set Water Vapour
if WaterVapour == 20:
Value = "PERCENT_20"
elif WaterVapour == 50:
Value = "PERCENT_50"
elif WaterVapour == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='WaterVapor' and @value='" + Value + "']").click()
#Set Sky Background
if SkyBackground == 20:
Value = "PERCENT_20"
elif SkyBackground == 50:
Value = "PERCENT_50"
elif SkyBackground == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
#If SkyBackground is set to 0, don't try to set it
if not SkyBackground == 0:
driver.find_element_by_xpath("//input[@name='SkyBackground' and @value='" + Value + "']").click()
#Set Air Mass
if type(AirMass) is int or type(AirMass) is float:
AirMass = str(AirMass)
driver.find_element_by_xpath("//input[@name='Airmass' and @value='" + AirMass + "']").click()
#---------------------------------------------------------------------------------------------------
# Calculation method for Michelle and TReCS
def setCalculationMethodMichelle(driver, ResultMethod, Value1, Fraction):
#Set Fraction to a string
Fraction = str(Fraction)
Value1 = str(Value1)
#Set Results Method, Total Integration or S/N Ratio
if "ratio" in ResultMethod.lower():
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Value1)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
#Choose Total Integration Time
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
#---------------------------------------------------------------------------------------------------
def setCalculationMethod(driver, ResultMethod, Value1, Time, Fraction, Choose=True):
# For instruments w/o coadd option
#Set Fraction to a string
Fraction = str(Fraction)
Value1 = str(Value1)
#Set the Results Method, Total Integration or S/N ratio
if "ratio" in ResultMethod.lower():
if Choose:
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("numExpA").clear()
driver.find_element_by_name("numExpA").send_keys(Value1)
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Time)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("expTimeC").clear()
driver.find_element_by_name("expTimeC").send_keys(Time)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
# ---------------------------------------------------------------------------------------------------
def setCalculationMethodCoadd(driver, ResultMethod, Value1, Ncoadd, Time, Fraction, Choose=True):
# For instruments with a coadd option
# Set Fraction to a string
Fraction = str(Fraction)
Ncoadd = str(Ncoadd)
Value1 = str(Value1)
# Set the Results Method, Total Integration or S/N ratio
if "ratio" in ResultMethod.lower():
if Choose:
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("numExpA").clear()
driver.find_element_by_name("numExpA").send_keys(Value1)
driver.find_element_by_name("numCoaddsA").clear()
driver.find_element_by_name("numCoaddsA").send_keys(Ncoadd)
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Time)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("numCoaddsC").clear()
driver.find_element_by_name("numCoaddsC").send_keys(Ncoadd)
driver.find_element_by_name("expTimeC").clear()
driver.find_element_by_name("expTimeC").send_keys(Time)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
#---------------------------------------------------------------------------------------------------
# Slit Length is only for user defined aperture
# If using optimum aperture, only pass 3 arguments (driver,Type,Times)
# Used for GMOS, Michelle and TReCS
def setAnalysisMethodGMOS(driver, Type, Times, SlitLength=0):
if type(SlitLength) is float:
SlitLength = str(SlitLength)
if type(Times) is float:
Times = str(Times)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
driver.find_element_by_xpath("//input[@value='autoAper' and @name='analysisMethod']").click()
driver.find_element_by_name("autoSkyAper").clear()
driver.find_element_by_name("autoSkyAper").send_keys(Times)
else:
driver.find_element_by_xpath("//input[@value='userAper' and @name='analysisMethod']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(SlitLength)
driver.find_element_by_name("userSkyAper").clear()
driver.find_element_by_name("userSkyAper").send_keys(Times)
#---------------------------------------------------------------------------------------------------
# Analysis Method procedure for most instruments other than GMOS
def setAnalysisMethod(driver, Type, Slitlength=0):
if type(Slitlength) is float:
Slitlength = str(Slitlength)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
#Set for Optimum S/N Ratio
driver.find_element_by_xpath("//input[@value='autoAper' and @name='aperType']").click()
else:
        #Set for Aperture of diameter (slit length) = X
driver.find_element_by_xpath("//input[@value='userAper' and @name='aperType']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(Slitlength)
# ---------------------------------------------------------------------------------------------------
# Analysis Method procedure for most instruments other than GMOS
def setAnalysisMethodGSAOI(driver, Type, Offset=5.0, largeSkyOffset=0, aperDiam=2.0):
if type(Offset) is float:
Offset = str(Offset)
if type(largeSkyOffset) is int:
largeSkyOffset = str(largeSkyOffset)
if type(aperDiam) is float:
aperDiam = str(aperDiam)
driver.find_element_by_name("offset").clear()
driver.find_element_by_name("offset").send_keys(Offset)
driver.find_element_by_name("largeSkyOffset").clear()
driver.find_element_by_name("largeSkyOffset").send_keys(largeSkyOffset)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
# Set for Optimum S/N Ratio
driver.find_element_by_xpath("//input[@value='autoAper']").click()
else:
        # Set for Aperture of diameter (slit length) = X
driver.find_element_by_xpath("//input[@value='userAper' and @name='aperType']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(aperDiam)
# ---------------------------------------------------------------------------------------------------
def setIFUSpectroscopy(driver, Type, Offset1, Offset2=0):
#Change Offsets to strings
if type(Offset1) is float:
Offset1 = str(Offset1)
if type(Offset2) is float:
Offset2 = str(Offset2)
#Choose the type
if "sum" in Type.lower():
driver.find_element_by_xpath("//input[@value='summedIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuNumX").clear()
driver.find_element_by_name("ifuNumX").send_keys(Offset1)
driver.find_element_by_name("ifuNumY").clear()
driver.find_element_by_name("ifuNumY").send_keys(Offset2)
elif "multi" in Type.lower():
#Choose Multiple IFU elements
driver.find_element_by_xpath("//input[@value='radialIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuMinOffset").clear()
driver.find_element_by_name("ifuMinOffset").send_keys(Offset1)
driver.find_element_by_name("ifuMaxOffset").clear()
driver.find_element_by_name("ifuMaxOffset").send_keys(Offset2)
else:
#Choose individual IFU element
driver.find_element_by_xpath("//input[@value='singleIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuOffset").clear()
driver.find_element_by_name("ifuOffset").send_keys(Offset1)
#---------------------------------------------------------------------------------------------------
def calculate(driver):
#Click Calculate button
driver.find_element_by_xpath("//input[@value='Calculate' and @type='submit']").click()
#---------------------------------------------------------------------------------------------------
def extractData(driver, Type, TestNumber, Instrument, Testing, Cross=False):
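    # Save the ITC results page and, for spectroscopy, the linked spectrum data files for later comparison.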
logger = logging.getLogger('extractData')
# Turn TestNumber into a str
if type(TestNumber) is int:
TestNumber = str(TestNumber)
# Check if Folders exist to save to, else create them
path = GetPath(Instrument, Testing)
if not os.path.exists(path):
os.mkdir(path)
FileLocation = path + '/Test' + TestNumber
# If using GNIRS Cross-Dispersed, no need to check Single Exposure S/N
if Cross:
FileList = ("signal spectrum", "background spectrum", "Final")
else:
FileList = ("signal spectrum", "background spectrum", "Single Exposure", "Final")
# Generate list of all open windows:
windowsList = driver.window_handles
# Switch to results window:
driver.switch_to.window(windowsList[1])
# Imaging
if "imag" in Type.lower():
pass
# Spectroscopy
else:
for fileToSave in FileList:
logger.debug('fileToSave = %s', fileToSave)
fileObject = driver.find_element_by_partial_link_text(fileToSave)
fileLink = fileObject.get_attribute("href")
logger.debug('fileLink = %s', fileLink)
# Open the file and write to output
u = urlopen(fileLink)
localFile = open(FileLocation + '-' + fileToSave.replace(' ','') + '.dat', 'wb')
localFile.write(u.read())
localFile.close()
pass
# Save the results page
pageData = driver.page_source
localFile = open(FileLocation + '-output.html', 'w')
localFile.write(pageData)
localFile.close()
#if not Archiving:
# compareData(driver,Type,TestNumber,Instrument,Cross)
#---------------------------------------------------------------------------------------------------
def ConfigureLogging(logfile=None, filelevel='INFO', screenlevel='INFO'):
logger = logging.getLogger()
    # DEBUG Detailed information, typically of interest only when diagnosing problems.
    # INFO Confirmation that things are working as expected.
    # WARNING An indication that something unexpected happened, or indicative of some problem in the near future.
    # ERROR Due to a more serious problem, the software has not been able to perform some function.
    # CRITICAL A serious error, indicating that the program itself may be unable to continue running.
# set minimum threshold level for logger:
logger.setLevel(logging.DEBUG)
# create formatter and add it to the handlers:
#formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
if logfile: # create file handler:
logfilehandler = logging.FileHandler(logfile)
if filelevel.upper() == 'DEBUG':
logfilehandler.setLevel(logging.DEBUG)
elif filelevel.upper() == 'INFO':
logfilehandler.setLevel(logging.INFO)
elif filelevel.upper() == 'WARNING':
logfilehandler.setLevel(logging.WARNING)
elif filelevel.upper() == 'ERROR':
logfilehandler.setLevel(logging.ERROR)
elif filelevel.upper() == 'CRITICAL':
logfilehandler.setLevel(logging.CRITICAL)
else:
print ('ERROR: Unknown log error level')
logfilehandler.setLevel(logging.INFO)
logfilehandler.setFormatter(formatter)
logger.addHandler(logfilehandler)
# create console screen log handler:
consoleloghandler = logging.StreamHandler()
if screenlevel.upper() == 'DEBUG':
consoleloghandler.setLevel(logging.DEBUG)
elif screenlevel.upper() == 'INFO':
consoleloghandler.setLevel(logging.INFO)
elif screenlevel.upper() == 'WARNING':
consoleloghandler.setLevel(logging.WARNING)
elif screenlevel.upper() == 'ERROR':
consoleloghandler.setLevel(logging.ERROR)
elif screenlevel.upper() == 'CRITICAL':
consoleloghandler.setLevel(logging.CRITICAL)
else:
print ('ERROR: Unknown log error level')
consoleloghandler.setLevel(logging.INFO)
consoleloghandler.setFormatter(formatter)
logger.addHandler(consoleloghandler)
return(logger)
#---------------------------------------------------------------------------------------------------
|
[
"os.mkdir",
"logging.FileHandler",
"selenium.webdriver.Firefox",
"logging.StreamHandler",
"os.path.exists",
"urllib.request.urlopen",
"time.sleep",
"logging.Formatter",
"sys.setdefaultencoding",
"selenium.webdriver.firefox.options.Options",
"datetime.datetime.now",
"os.getenv",
"logging.getLogger"
] |
[((529, 559), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf8"""'], {}), "('utf8')\n", (551, 559), False, 'import sys\n'), ((3623, 3642), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3640, 3642), False, 'import logging\n'), ((4783, 4818), 'selenium.webdriver.firefox.options.Options', 'webdriver.firefox.options.Options', ([], {}), '()\n', (4816, 4818), False, 'from selenium import webdriver\n'), ((4891, 4956), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'executable_path': '"""geckodriver"""', 'options': 'options'}), "(executable_path='geckodriver', options=options)\n", (4908, 4956), False, 'from selenium import webdriver\n'), ((5231, 5266), 'logging.getLogger', 'logging.getLogger', (['"""setPointSource"""'], {}), "('setPointSource')\n", (5248, 5266), False, 'import logging\n'), ((6004, 6042), 'logging.getLogger', 'logging.getLogger', (['"""setGaussianSource"""'], {}), "('setGaussianSource')\n", (6021, 6042), False, 'import logging\n'), ((7101, 7138), 'logging.getLogger', 'logging.getLogger', (['"""setUniformSource"""'], {}), "('setUniformSource')\n", (7118, 7138), False, 'import logging\n'), ((7144, 7161), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (7154, 7161), False, 'import time\n'), ((9089, 9129), 'logging.getLogger', 'logging.getLogger', (['"""setPowerLawSpectrum"""'], {}), "('setPowerLawSpectrum')\n", (9106, 9129), False, 'import logging\n'), ((9138, 9155), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (9148, 9155), False, 'import time\n'), ((9721, 9762), 'logging.getLogger', 'logging.getLogger', (['"""setBlackBodySpectrum"""'], {}), "('setBlackBodySpectrum')\n", (9738, 9762), False, 'import logging\n'), ((9771, 9788), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (9781, 9788), False, 'import time\n'), ((10455, 10491), 'logging.getLogger', 'logging.getLogger', (['"""setEmissionLine"""'], {}), "('setEmissionLine')\n", (10472, 10491), False, 'import logging\n'), ((10497, 10514), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (10507, 10514), False, 'import time\n'), ((17373, 17420), 'logging.getLogger', 'logging.getLogger', (['"""setDetectorPropertiesGSAOI"""'], {}), "('setDetectorPropertiesGSAOI')\n", (17390, 17420), False, 'import logging\n'), ((25840, 25886), 'logging.getLogger', 'logging.getLogger', (['"""setOpticalPropertiesTReCS"""'], {}), "('setOpticalPropertiesTReCS')\n", (25857, 25886), False, 'import logging\n'), ((25892, 25909), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (25902, 25909), False, 'import time\n'), ((26876, 26925), 'logging.getLogger', 'logging.getLogger', (['"""setOpticalPropertiesMichelle"""'], {}), "('setOpticalPropertiesMichelle')\n", (26893, 26925), False, 'import logging\n'), ((26931, 26948), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (26941, 26948), False, 'import time\n'), ((28019, 28064), 'logging.getLogger', 'logging.getLogger', (['"""setOpticalPropertiesNIFS"""'], {}), "('setOpticalPropertiesNIFS')\n", (28036, 28064), False, 'import logging\n'), ((28070, 28087), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (28080, 28087), False, 'import time\n'), ((29932, 29978), 'logging.getLogger', 'logging.getLogger', (['"""setOpticalPropertiesGNIRS"""'], {}), "('setOpticalPropertiesGNIRS')\n", (29949, 29978), False, 'import logging\n'), ((29984, 30001), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (29994, 30001), False, 'import time\n'), ((31190, 31235), 'logging.getLogger', 'logging.getLogger', 
(['"""setOpticalPropertiesGMOS"""'], {}), "('setOpticalPropertiesGMOS')\n", (31207, 31235), False, 'import logging\n'), ((31241, 31258), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (31251, 31258), False, 'import time\n'), ((32740, 32757), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (32750, 32757), False, 'import time\n'), ((33302, 33319), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (33312, 33319), False, 'import time\n'), ((46080, 46112), 'logging.getLogger', 'logging.getLogger', (['"""extractData"""'], {}), "('extractData')\n", (46097, 46112), False, 'import logging\n'), ((47934, 47953), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (47951, 47953), False, 'import logging\n'), ((48710, 48804), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-8s %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(asctime)s %(levelname)-8s %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (48727, 48804), False, 'import logging\n'), ((49661, 49684), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (49682, 49684), False, 'import logging\n'), ((2933, 2950), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (2942, 2950), False, 'import os\n'), ((4316, 4336), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4330, 4336), False, 'import os\n'), ((4346, 4360), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (4354, 4360), False, 'import os\n'), ((46326, 46346), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (46340, 46346), False, 'import os\n'), ((46356, 46370), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (46364, 46370), False, 'import os\n'), ((48867, 48895), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (48886, 48895), False, 'import logging\n'), ((47283, 47300), 'urllib.request.urlopen', 'urlopen', (['fileLink'], {}), '(fileLink)\n', (47290, 47300), False, 'from urllib.request import urlopen\n'), ((3292, 3315), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3313, 3315), False, 'import datetime\n')]
|
from injector import inject
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListMapping import GetDataOperationJobExecutionLogListMapping
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListQuery import GetDataOperationJobExecutionLogListQuery
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse import GetDataOperationJobExecutionLogListResponse
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListSpecifications import GetDataOperationJobExecutionLogListSpecifications
from infrastructure.cqrs.IQueryHandler import IQueryHandler
from infrastructure.data.RepositoryProvider import RepositoryProvider
from infrastructure.dependency.scopes import IScoped
class GetDataOperationJobExecutionLogListQueryHandler(IQueryHandler[GetDataOperationJobExecutionLogListQuery], IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider,
specifications: GetDataOperationJobExecutionLogListSpecifications):
self.repository_provider = repository_provider
self.specifications = specifications
def handle(self, query: GetDataOperationJobExecutionLogListQuery) -> GetDataOperationJobExecutionLogListResponse:
result = GetDataOperationJobExecutionLogListResponse()
data_query = self.specifications.specify(query=query)
result.Data = GetDataOperationJobExecutionLogListMapping.to_dtos(data_query)
return result
|
[
"domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse.GetDataOperationJobExecutionLogListResponse",
"domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListMapping.GetDataOperationJobExecutionLogListMapping.to_dtos"
] |
[((1364, 1409), 'domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse.GetDataOperationJobExecutionLogListResponse', 'GetDataOperationJobExecutionLogListResponse', ([], {}), '()\n', (1407, 1409), False, 'from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse import GetDataOperationJobExecutionLogListResponse\n'), ((1494, 1556), 'domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListMapping.GetDataOperationJobExecutionLogListMapping.to_dtos', 'GetDataOperationJobExecutionLogListMapping.to_dtos', (['data_query'], {}), '(data_query)\n', (1544, 1556), False, 'from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListMapping import GetDataOperationJobExecutionLogListMapping\n')]
|
"""
Copyright (c) 2017, 2019, Oracle Corporation and/or its affiliates. All rights reserved.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import unittest
from wlsdeploy.exception import exception_helper
from wlsdeploy.exception.expection_types import ExceptionType
class ExceptionHelperTestCase(unittest.TestCase):
def testCreateException(self):
ex = exception_helper.create_exception(ExceptionType.CREATE, 'WLSDPLY-12400',
'createDomain', '-oracle_home')
        self.assertNotEqual(ex, None)
return
|
[
"wlsdeploy.exception.exception_helper.create_exception"
] |
[((429, 537), 'wlsdeploy.exception.exception_helper.create_exception', 'exception_helper.create_exception', (['ExceptionType.CREATE', '"""WLSDPLY-12400"""', '"""createDomain"""', '"""-oracle_home"""'], {}), "(ExceptionType.CREATE, 'WLSDPLY-12400',\n 'createDomain', '-oracle_home')\n", (462, 537), False, 'from wlsdeploy.exception import exception_helper\n')]
|
#!/usr/bin/env python3
from subprocess import check_output
def sed(fn, in_reg_exp, out_reg_exp, inline=True):
""" wrapper around sed """
ret = check_output(["sed", "-i", "s/" + in_reg_exp + "/" + out_reg_exp + "/g", fn])
def clean_block_from_file(fn, block_starts, block_end, replace):
""" cleans everything from block_start to block_end and replace it """
with open(fn, "r") as f:
lines = f.readlines()
with open(fn, "w") as f:
skip = False
for line in lines:
is_start = [block_start in line for block_start in block_starts]
if any(is_start):
skip = True
if skip and block_end in line:
skip = False
f.write(replace)
if not skip:
f.write(line)
def read_block_from_file(fn, block_starts, block_end):
ret = []
with open(fn, "r") as f:
lines = f.readlines()
started = False
for line in lines:
is_start = [block_start in line for block_start in block_starts]
if started:
ret.append(line)
if any(is_start):
ret.append(line)
started = True
if started and block_end in line:
return ret
return []
def find_in_block(fn, field, keyword, default):
block = read_block_from_file(fn, ['"' + field + '.*"', field + "\n"], "}")
for line in block:
for token in line.split(";"):
if keyword in token:
return token.split()[-1]
return default
def get_executor(fn, field):
return find_in_block(fn, field, "executor", "Serial")
def get_matrix_solver(fn, field):
return find_in_block(fn, field, "solver", "unknown")
def get_preconditioner(fn, field):
return find_in_block(fn, field, "preconditioner", "unknown")
def set_cells(blockMeshDict, old_cells, new_cells):
    """ replace the cell-count string in blockMeshDict """
    sed(blockMeshDict, old_cells, new_cells)
def set_mesh_boundary_type_to_wall(blockMeshDict):
    """ turn cyclic boundaries into walls (deprecated) """
    print("DEPRECATED")
    sed(blockMeshDict, "type[ ]*cyclic", "type wall")
def set_p_init_value(p):
    """ replace cyclic boundary conditions on p with zeroGradient """
    sed(p, "type[ ]*cyclic;", "type zeroGradient;")
def set_U_init_value(U):
    """ replace cyclic boundary conditions on U with a fixed zero velocity """
    sed(U, "type[ ]*cyclic;", "type fixedValue;value uniform (0 0 0);")
def add_libOGL_so(controlDict):
with open(controlDict, "a") as ctrlDict_handle:
ctrlDict_handle.write('libs ("libOGL.so");')
def get_process(cmd):
try:
return check_output(cmd).decode("utf-8")
except Exception as e:
print(e)
def get_end_time(controlDict):
import re
ret = check_output(["grep", "endTime", controlDict])
ret = ret.decode("utf-8").replace(";", "").replace("\n", "")
ret = re.compile(r"[.0-9]+").findall(ret)
return ret[0]
def get_application_solver(controlDict):
ret = check_output(["grep", "application", controlDict])
return ret.decode("utf-8").split()[-1].replace(";", "")
def set_write_interval(controlDict, interval):
sed(
controlDict,
"^writeInterval[ ]*[0-9.]*;",
"writeInterval {};".format(interval),
)
def set_number_of_subdomains(decomposeParDict, subDomains):
print("setting number of subdomains", subDomains, decomposeParDict)
sed(
decomposeParDict,
"numberOfSubdomains[ ]*[0-9.]*;",
"numberOfSubdomains {};".format(subDomains),
)
def set_end_time(controlDict, endTime):
sed(controlDict, "^endTime[ ]*[0-9.]*;", "endTime {};".format(endTime))
def get_number_of_subDomains(case):
import os
_, folder, _ = next(os.walk(case))
return len([f for f in folder if "processor" in f])
def read_block(blockMeshDict):
import re
ret = check_output(["grep", "hex", blockMeshDict]).decode("utf-8")
num_cells = re.findall("[(][0-9 ]*[)]", ret)[1]
return list(map(int, re.findall("[0-9]+", num_cells)))
def read_deltaT(controlDict):
ret = (
check_output(["grep", "deltaT", controlDict])
.split()[-1]
.decode("utf-8")
.replace(";", "")
)
return float(ret)
def set_deltaT(controlDict, deltaT):
sed(controlDict, "deltaT[ ]*[0-9.]*", "deltaT {}".format(deltaT))
def set_writeInterval(controlDict, writeInterval):
sed(controlDict, "writeInterval[ ]*[0-9.]*", "writeInterval " + str(writeInterval))
def add_or_set_solver_settings(fvSolution, field, keyword, value):
# TODO check if keyword is already present
block = read_block_from_file(fvSolution, ['"' + field + '.*"{'], "}")
# clear_solver_settings(fvSolution, field)
block.insert(1, "{} {};\n".format(keyword["name"], value))
clean_block_from_file(fvSolution, [field + '.*"{'], "}\n", " ".join(block[:-1]))
def clear_solver_settings(fvSolution, field):
clean_block_from_file(
fvSolution,
[" {}\n".format(field), '"' + field + '.*"'],
" }\n",
field + "{}\n",
)
def ensure_path(path):
print("creating", path)
check_output(["mkdir", "-p", path])
|
[
"subprocess.check_output",
"os.walk",
"re.findall",
"re.compile"
] |
[((153, 230), 'subprocess.check_output', 'check_output', (["['sed', '-i', 's/' + in_reg_exp + '/' + out_reg_exp + '/g', fn]"], {}), "(['sed', '-i', 's/' + in_reg_exp + '/' + out_reg_exp + '/g', fn])\n", (165, 230), False, 'from subprocess import check_output\n'), ((2648, 2694), 'subprocess.check_output', 'check_output', (["['grep', 'endTime', controlDict]"], {}), "(['grep', 'endTime', controlDict])\n", (2660, 2694), False, 'from subprocess import check_output\n'), ((2877, 2927), 'subprocess.check_output', 'check_output', (["['grep', 'application', controlDict]"], {}), "(['grep', 'application', controlDict])\n", (2889, 2927), False, 'from subprocess import check_output\n'), ((5011, 5046), 'subprocess.check_output', 'check_output', (["['mkdir', '-p', path]"], {}), "(['mkdir', '-p', path])\n", (5023, 5046), False, 'from subprocess import check_output\n'), ((3622, 3635), 'os.walk', 'os.walk', (['case'], {}), '(case)\n', (3629, 3635), False, 'import os\n'), ((3828, 3860), 're.findall', 're.findall', (['"""[(][0-9 ]*[)]"""', 'ret'], {}), "('[(][0-9 ]*[)]', ret)\n", (3838, 3860), False, 'import re\n'), ((2770, 2791), 're.compile', 're.compile', (['"""[.0-9]+"""'], {}), "('[.0-9]+')\n", (2780, 2791), False, 'import re\n'), ((3751, 3795), 'subprocess.check_output', 'check_output', (["['grep', 'hex', blockMeshDict]"], {}), "(['grep', 'hex', blockMeshDict])\n", (3763, 3795), False, 'from subprocess import check_output\n'), ((3889, 3920), 're.findall', 're.findall', (['"""[0-9]+"""', 'num_cells'], {}), "('[0-9]+', num_cells)\n", (3899, 3920), False, 'import re\n'), ((2512, 2529), 'subprocess.check_output', 'check_output', (['cmd'], {}), '(cmd)\n', (2524, 2529), False, 'from subprocess import check_output\n'), ((3975, 4020), 'subprocess.check_output', 'check_output', (["['grep', 'deltaT', controlDict]"], {}), "(['grep', 'deltaT', controlDict])\n", (3987, 4020), False, 'from subprocess import check_output\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from synapse.federation.pdu_codec import (
PduCodec, encode_event_id, decode_event_id
)
from synapse.federation.units import Pdu
#from synapse.api.events.room import MessageEvent
from synapse.server import HomeServer
from mock import Mock
class PduCodecTestCase(unittest.TestCase):
def setUp(self):
self.hs = HomeServer("blargle.net")
self.event_factory = self.hs.get_event_factory()
self.codec = PduCodec(self.hs)
def test_decode_event_id(self):
self.assertEquals(
("foo", "bar.com"),
decode_event_id("<EMAIL>", "A")
)
self.assertEquals(
("foo", "bar.com"),
decode_event_id("foo", "bar.com")
)
def test_encode_event_id(self):
self.assertEquals("A@B", encode_event_id("A", "B"))
def test_codec_event_id(self):
event_id = "<EMAIL>"
self.assertEquals(
event_id,
encode_event_id(*decode_event_id(event_id, None))
)
pdu_id = ("aa", "bb.com")
self.assertEquals(
pdu_id,
decode_event_id(encode_event_id(*pdu_id), None)
)
def test_event_from_pdu(self):
pdu = Pdu(
pdu_id="foo",
context="rooooom",
pdu_type="m.room.message",
origin="bar.com",
ts=12345,
depth=5,
prev_pdus=[("alice", "bob.<EMAIL>")],
is_state=False,
content={"msgtype": u"test"},
)
event = self.codec.event_from_pdu(pdu)
self.assertEquals("<EMAIL>", event.event_id)
self.assertEquals(pdu.context, event.room_id)
self.assertEquals(pdu.is_state, event.is_state)
self.assertEquals(pdu.depth, event.depth)
self.assertEquals(["<EMAIL>"], event.prev_events)
self.assertEquals(pdu.content, event.content)
def test_pdu_from_event(self):
event = self.event_factory.create_event(
etype="m.room.message",
event_id="gargh_id",
room_id="rooom",
user_id="sender",
content={"msgtype": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals(event.event_id, pdu.pdu_id)
self.assertEquals(self.hs.hostname, pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
event = self.event_factory.create_event(
etype="m.room.message",
event_id="<EMAIL>",
room_id="rooom",
user_id="sender",
content={"msgtype": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals("gargh_id", pdu.pdu_id)
self.assertEquals("bob.com", pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
def test_event_from_state_pdu(self):
pdu = Pdu(
pdu_id="foo",
context="rooooom",
pdu_type="m.room.topic",
origin="bar.com",
ts=12345,
depth=5,
prev_pdus=[("alice", "bob.com")],
is_state=True,
content={"topic": u"test"},
state_key="",
)
event = self.codec.event_from_pdu(pdu)
self.assertEquals("<EMAIL>", event.event_id)
self.assertEquals(pdu.context, event.room_id)
self.assertEquals(pdu.is_state, event.is_state)
self.assertEquals(pdu.depth, event.depth)
self.assertEquals(["<EMAIL>"], event.prev_events)
self.assertEquals(pdu.content, event.content)
self.assertEquals(pdu.state_key, event.state_key)
def test_pdu_from_state_event(self):
event = self.event_factory.create_event(
etype="m.room.topic",
event_id="gargh_id",
room_id="rooom",
user_id="sender",
content={"topic": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals(event.event_id, pdu.pdu_id)
self.assertEquals(self.hs.hostname, pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
self.assertEquals(event.state_key, pdu.state_key)
|
[
"synapse.federation.pdu_codec.decode_event_id",
"synapse.federation.pdu_codec.PduCodec",
"synapse.server.HomeServer",
"synapse.federation.units.Pdu",
"synapse.federation.pdu_codec.encode_event_id"
] |
[((960, 985), 'synapse.server.HomeServer', 'HomeServer', (['"""blargle.net"""'], {}), "('blargle.net')\n", (970, 985), False, 'from synapse.server import HomeServer\n'), ((1065, 1082), 'synapse.federation.pdu_codec.PduCodec', 'PduCodec', (['self.hs'], {}), '(self.hs)\n', (1073, 1082), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n'), ((1836, 2029), 'synapse.federation.units.Pdu', 'Pdu', ([], {'pdu_id': '"""foo"""', 'context': '"""rooooom"""', 'pdu_type': '"""m.room.message"""', 'origin': '"""bar.com"""', 'ts': '(12345)', 'depth': '(5)', 'prev_pdus': "[('alice', 'bob.<EMAIL>')]", 'is_state': '(False)', 'content': "{'msgtype': u'test'}"}), "(pdu_id='foo', context='rooooom', pdu_type='m.room.message', origin=\n 'bar.com', ts=12345, depth=5, prev_pdus=[('alice', 'bob.<EMAIL>')],\n is_state=False, content={'msgtype': u'test'})\n", (1839, 2029), False, 'from synapse.federation.units import Pdu\n'), ((3691, 3889), 'synapse.federation.units.Pdu', 'Pdu', ([], {'pdu_id': '"""foo"""', 'context': '"""rooooom"""', 'pdu_type': '"""m.room.topic"""', 'origin': '"""bar.com"""', 'ts': '(12345)', 'depth': '(5)', 'prev_pdus': "[('alice', 'bob.com')]", 'is_state': '(True)', 'content': "{'topic': u'test'}", 'state_key': '""""""'}), "(pdu_id='foo', context='rooooom', pdu_type='m.room.topic', origin=\n 'bar.com', ts=12345, depth=5, prev_pdus=[('alice', 'bob.com')],\n is_state=True, content={'topic': u'test'}, state_key='')\n", (3694, 3889), False, 'from synapse.federation.units import Pdu\n'), ((1191, 1222), 'synapse.federation.pdu_codec.decode_event_id', 'decode_event_id', (['"""<EMAIL>"""', '"""A"""'], {}), "('<EMAIL>', 'A')\n", (1206, 1222), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n'), ((1305, 1338), 'synapse.federation.pdu_codec.decode_event_id', 'decode_event_id', (['"""foo"""', '"""bar.com"""'], {}), "('foo', 'bar.com')\n", (1320, 1338), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n'), ((1419, 1444), 'synapse.federation.pdu_codec.encode_event_id', 'encode_event_id', (['"""A"""', '"""B"""'], {}), "('A', 'B')\n", (1434, 1444), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n'), ((1744, 1768), 'synapse.federation.pdu_codec.encode_event_id', 'encode_event_id', (['*pdu_id'], {}), '(*pdu_id)\n', (1759, 1768), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n'), ((1590, 1621), 'synapse.federation.pdu_codec.decode_event_id', 'decode_event_id', (['event_id', 'None'], {}), '(event_id, None)\n', (1605, 1621), False, 'from synapse.federation.pdu_codec import PduCodec, encode_event_id, decode_event_id\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, dirname, exists
from os import makedirs, pardir
FOLDER_REAL_DATA = join(dirname(__file__), 'real_data')
FOLDER_SIMULATOR_INPUT = join(dirname(__file__), 'simulator_input')
FOLDER_REAL_DATA_ANALYSIS = join(FOLDER_REAL_DATA, 'analysis')
FOLDER_SIMULATOR_LOG = join(pardir, 'experiments/results')
# create the above folders if they don't exist yet
for folder in [FOLDER_REAL_DATA, FOLDER_SIMULATOR_INPUT, FOLDER_SIMULATOR_LOG, FOLDER_REAL_DATA_ANALYSIS]:
if not exists(folder):
makedirs(folder)
FILE_ANONYMIZED_DATASET = join(FOLDER_REAL_DATA, 'anonymized_dataset.csv')
FILE_REAL_LOG = join(FOLDER_REAL_DATA, 'transaction_log.csv')
FILE_SIMULATOR_LOG = join(FOLDER_SIMULATOR_LOG, 'transaction_log.csv')
def get_dataset(file):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
    :param file: path to a transaction log CSV file
    :return: tuple of (all, non-fraud, fraud) DataFrames
"""
# get dataset from file
dataset01 = pd.read_csv(file)
# cast "date" column datetime objects
dataset01["Global_Date"] = pd.to_datetime(dataset01["Global_Date"])
dataset01["Local_Date"] = pd.to_datetime(dataset01["Local_Date"])
# for convenience split the dataset into non-fraud(0)/fraud(1)
dataset0 = dataset01[dataset01["Target"] == 0]
dataset1 = dataset01[dataset01["Target"] == 1]
# give the datasets names
dataset01.name = 'all'
dataset0.name = 'non-fraud'
dataset1.name = 'fraud'
return dataset01, dataset0, dataset1
def get_real_dataset():
file = join(FOLDER_REAL_DATA, 'transaction_log.csv')
return get_dataset(file)
def get_simulated_dataset(result_idx):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
:param data_source: where data comes from, type: str, value: 'real' or 'simulator'
:return:
"""
file = join(FOLDER_SIMULATOR_LOG, '{}_transaction_log.csv'.format(result_idx))
return get_dataset(file)
def get_real_data_stats():
datasets = get_real_dataset()
return get_data_stats(datasets)
def get_simulated_data_stats(result_idx):
datasets = get_simulated_dataset(result_idx)
return get_data_stats(datasets)
def get_data_stats(datasets):
data_stats_cols = ['all', 'non-fraud', 'fraud']
data_stats = pd.DataFrame(columns=data_stats_cols)
data_stats.loc['transactions'] = [d.shape[0] for d in datasets]
data_stats.loc['transactions/hour'] = [round(d['Local_Date'].apply(lambda x: x.hour).value_counts().sum()/24/366, 2) for d in datasets]
data_stats.loc['transactions/day'] = [round(d['Local_Date'].apply(lambda x: x.day).value_counts().sum() / 366, 2) for d in datasets]
data_stats.loc['transactions/week'] = [round(d['Local_Date'].apply(lambda x: x.week).value_counts().sum() / 52, 2) for d in datasets]
data_stats.loc['transactions/month'] = [round(d['Local_Date'].apply(lambda x: x.month).value_counts().sum() / 12, 2) for d in datasets]
data_stats.loc['cards'] = [len(d["CardID"].unique()) for d in datasets]
data_stats.loc['cards, single use'] = [sum(d["CardID"].value_counts() == 1) for d in datasets]
data_stats.loc['cards, multi use'] = [sum(d["CardID"].value_counts() > 1) for d in datasets]
cards_genuine = datasets[1]['CardID'].unique()
cards_fraud = datasets[2]['CardID'].unique()
data_stats.loc['fraud cards in genuine'] = ['-', '-', len(np.intersect1d(cards_genuine, cards_fraud)) / len(cards_fraud)]
data_stats.loc['first transaction'] = [min(d["Global_Date"]).date() for d in datasets]
data_stats.loc['last transaction'] = [max(d["Global_Date"]).date() for d in datasets]
data_stats.loc['min amount'] = [min(d["Amount"]) for d in datasets]
data_stats.loc['max amount'] = [max(d["Amount"]) for d in datasets]
data_stats.loc['avg amount'] = [np.average(d["Amount"]) for d in datasets]
data_stats.loc['num merchants'] = [len(d["MerchantID"].unique()) for d in datasets]
data_stats.loc['countries'] = [len(d["Country"].unique()) for d in datasets]
data_stats.loc['currencies'] = [len(d["Currency"].unique()) for d in datasets]
data_stats.loc['min trans/card'] = [min(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['max trans/card'] = [max(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['avg trans/card'] = [np.average(d["CardID"].value_counts()) for d in datasets]
return data_stats
def get_grouped_prob(group_by, col_name):
    # get_dataset() requires a file path, so use the real-data wrapper
    grouped_prob = get_real_dataset()[0].groupby([group_by, col_name]).size()
grouped_prob = grouped_prob.groupby(level=0).apply(lambda x: x / sum(x))
return grouped_prob
def get_transaction_dist(col_name):
""" calculate fractions of transactions for given column """
    # get_dataset() requires a file path; load the real data once and reuse it
    datasets = get_real_dataset()
    possible_vals = datasets[0][col_name].value_counts().unique()
    trans_count = pd.DataFrame(0, index=possible_vals, columns=['all', 'non-fraud', 'fraud'])
    trans_count['all'] = datasets[0][col_name].value_counts().value_counts()
    trans_count['non-fraud'] = datasets[1][col_name].value_counts().value_counts()
    trans_count['fraud'] = datasets[2][col_name].value_counts().value_counts()
trans_count = trans_count.fillna(0)
trans_count /= np.sum(trans_count.values, axis=0)
# save
    trans_count.to_csv(join(FOLDER_SIMULATOR_INPUT, '{}_fract-dist.csv'.format(col_name)), index_label=False)
# print
print(col_name)
print(trans_count)
print("")
return trans_count
def plot_hist_num_transactions(trans_frac, col_name):
""" method to plot histogram of number of transactions for a column """
plt.figure(figsize=(10, 7))
for i in range(3):
plt.subplot(3, 1, i+1)
        plt.bar(range(trans_frac.shape[0]), trans_frac.values[:, i], label=trans_frac.columns[i])
plt.ylabel('num transactions')
if i == 2:
plt.xlabel(col_name)
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_hist'.format(col_name)))
plt.close()
def plot_bar_trans_prob(trans_frac, col_name, file_name=None):
""" method to plot bar plot of number of transactions for a column """
plt.figure()
bottoms = np.vstack((np.zeros(3), np.cumsum(trans_frac, axis=0)))
for i in range(trans_frac.shape[0]):
plt.bar((0, 1, 2), trans_frac.values[i], label=trans_frac.index[i], bottom=bottoms[i])
plt.xticks([0, 1, 2], ['all', 'non-fraud', 'fraud'])
h = plt.ylabel('%')
h.set_rotation(0)
plt.title("{} Distribution".format(col_name))
plt.legend()
if not file_name:
file_name = col_name
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_bar'.format(file_name)))
plt.close()
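# Illustrative usage sketch (not part of the original module): builds the
# summary-statistics table and one distribution plot from the real data.
# It assumes the real transaction log already exists under FOLDER_REAL_DATA.
if __name__ == "__main__":
    stats = get_real_data_stats()
    print(stats)
    currency_frac = get_transaction_dist('Currency')
    plot_bar_trans_prob(currency_frac, 'Currency')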
|
[
"numpy.sum",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"os.path.dirname",
"os.path.exists",
"numpy.cumsum",
"numpy.intersect1d",
"matplotlib.pyplot.xticks",
"numpy.average",
"matplotlib.pyplot.legend",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"os.makedirs",
"numpy.zeros",
"matplotlib.pyplot.xlabel"
] |
[((298, 332), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""analysis"""'], {}), "(FOLDER_REAL_DATA, 'analysis')\n", (302, 332), False, 'from os.path import join, dirname, exists\n'), ((357, 392), 'os.path.join', 'join', (['pardir', '"""experiments/results"""'], {}), "(pardir, 'experiments/results')\n", (361, 392), False, 'from os.path import join, dirname, exists\n'), ((631, 679), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""anonymized_dataset.csv"""'], {}), "(FOLDER_REAL_DATA, 'anonymized_dataset.csv')\n", (635, 679), False, 'from os.path import join, dirname, exists\n'), ((696, 741), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""transaction_log.csv"""'], {}), "(FOLDER_REAL_DATA, 'transaction_log.csv')\n", (700, 741), False, 'from os.path import join, dirname, exists\n'), ((763, 812), 'os.path.join', 'join', (['FOLDER_SIMULATOR_LOG', '"""transaction_log.csv"""'], {}), "(FOLDER_SIMULATOR_LOG, 'transaction_log.csv')\n", (767, 812), False, 'from os.path import join, dirname, exists\n'), ((170, 187), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (177, 187), False, 'from os.path import join, dirname, exists\n'), ((232, 249), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (239, 249), False, 'from os.path import join, dirname, exists\n'), ((1004, 1021), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1015, 1021), True, 'import pandas as pd\n'), ((1095, 1135), 'pandas.to_datetime', 'pd.to_datetime', (["dataset01['Global_Date']"], {}), "(dataset01['Global_Date'])\n", (1109, 1135), True, 'import pandas as pd\n'), ((1166, 1205), 'pandas.to_datetime', 'pd.to_datetime', (["dataset01['Local_Date']"], {}), "(dataset01['Local_Date'])\n", (1180, 1205), True, 'import pandas as pd\n'), ((1573, 1618), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""transaction_log.csv"""'], {}), "(FOLDER_REAL_DATA, 'transaction_log.csv')\n", (1577, 1618), False, 'from os.path import join, dirname, exists\n'), ((2327, 2364), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'data_stats_cols'}), '(columns=data_stats_cols)\n', (2339, 2364), True, 'import pandas as pd\n'), ((4864, 4939), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'possible_vals', 'columns': "['all', 'non-fraud', 'fraud']"}), "(0, index=possible_vals, columns=['all', 'non-fraud', 'fraud'])\n", (4876, 4939), True, 'import pandas as pd\n'), ((5253, 5287), 'numpy.sum', 'np.sum', (['trans_count.values'], {'axis': '(0)'}), '(trans_count.values, axis=0)\n', (5259, 5287), True, 'import numpy as np\n'), ((5637, 5664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (5647, 5664), True, 'import matplotlib.pyplot as plt\n'), ((5994, 6005), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6003, 6005), True, 'import matplotlib.pyplot as plt\n'), ((6150, 6162), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6160, 6162), True, 'import matplotlib.pyplot as plt\n'), ((6373, 6425), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 1, 2]', "['all', 'non-fraud', 'fraud']"], {}), "([0, 1, 2], ['all', 'non-fraud', 'fraud'])\n", (6383, 6425), True, 'import matplotlib.pyplot as plt\n'), ((6434, 6449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {}), "('%')\n", (6444, 6449), True, 'import matplotlib.pyplot as plt\n'), ((6526, 6538), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6536, 6538), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6689), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', 
(6687, 6689), True, 'import matplotlib.pyplot as plt\n'), ((563, 577), 'os.path.exists', 'exists', (['folder'], {}), '(folder)\n', (569, 577), False, 'from os.path import join, dirname, exists\n'), ((587, 603), 'os.makedirs', 'makedirs', (['folder'], {}), '(folder)\n', (595, 603), False, 'from os import makedirs, pardir\n'), ((3853, 3876), 'numpy.average', 'np.average', (["d['Amount']"], {}), "(d['Amount'])\n", (3863, 3876), True, 'import numpy as np\n'), ((5696, 5720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(i + 1)'], {}), '(3, 1, i + 1)\n', (5707, 5720), True, 'import matplotlib.pyplot as plt\n'), ((5823, 5853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num transactions"""'], {}), "('num transactions')\n", (5833, 5853), True, 'import matplotlib.pyplot as plt\n'), ((6282, 6373), 'matplotlib.pyplot.bar', 'plt.bar', (['(0, 1, 2)', 'trans_frac.values[i]'], {'label': 'trans_frac.index[i]', 'bottom': 'bottoms[i]'}), '((0, 1, 2), trans_frac.values[i], label=trans_frac.index[i], bottom=\n bottoms[i])\n', (6289, 6373), True, 'import matplotlib.pyplot as plt\n'), ((5885, 5905), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col_name'], {}), '(col_name)\n', (5895, 5905), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6199), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6196, 6199), True, 'import numpy as np\n'), ((6201, 6230), 'numpy.cumsum', 'np.cumsum', (['trans_frac'], {'axis': '(0)'}), '(trans_frac, axis=0)\n', (6210, 6230), True, 'import numpy as np\n'), ((3426, 3468), 'numpy.intersect1d', 'np.intersect1d', (['cards_genuine', 'cards_fraud'], {}), '(cards_genuine, cards_fraud)\n', (3440, 3468), True, 'import numpy as np\n')]
|
import numpy as np
from numpy import linalg as la
import invprob.sparse as sparse
def fb_lasso(A, y, reg_param, iter_nb, x_ini=None, inertia=False, verbose=False):
    ''' Use the Forward-Backward (proximal gradient) algorithm to find a minimizer of:
        reg_param*norm(x,1) + 0.5*norm(Ax-y,2)**2
        When verbose=True, also returns the functional values and the support of the
        iterates recorded while the method runs.
        reg_param is either a single number, used for every iteration,
        or a sequence of length iter_nb (one value per iteration).
'''
# Manage optional input/output
if verbose: # Optional output
regret = np.zeros(iter_nb)
sparsity = np.zeros(iter_nb)
support = []
path = np.zeros((A.shape[1], iter_nb))
if x_ini is not None: # Optional initialization
x = x_ini
else:
x = np.zeros((A.shape[1], 1))
if isinstance(reg_param, (int, float)): # Fixed or not parameter
param = reg_param * np.ones(iter_nb)
else:
param = reg_param
if inertia:
alpha = [k/(k+3) for k in np.arange(iter_nb)] # asymptotically equivalent to Nesterov
else:
alpha = np.zeros(iter_nb) # no inertia
# The core of the algorithm
    stepsize = 0.5 * 2 / (la.norm(A, 2)**2)  # 1/L, with L = ||A||_2^2 the Lipschitz constant of the smooth term
T = A.T@A
ATy = A.T@y
    gradient = lambda x: x - stepsize*(T@x - ATy)  # explicit gradient step on 0.5*||Ax - y||^2
forward_backward = lambda x, param: sparse.soft_thresholding(gradient(x), param*stepsize)
x_old = x
for k in range(iter_nb):
if verbose:
regret[k] = 0.5 * la.norm(A@x - y, 2)**2 + param[k] * la.norm(x, 1)
support.append( tuple(np.where(np.abs(x) > 1e-15)[0]) )
sparsity[k] = len(support[k])
path[:, k] = x.reshape((x.shape[0]))
x, x_old = forward_backward( (1+alpha[k])*x - alpha[k]*x_old, param[k] ), x
# Output
if verbose:
details = {
"function_value": regret,
"iterate_support": support,
"iterate_sparsity": sparsity,
"iterate_path": path
}
return x, details
else:
return x
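# Illustrative usage sketch (not part of the original module): runs the solver on a
# small synthetic LASSO problem. The problem sizes, the sparsity of the ground truth
# and the regularisation value are arbitrary assumptions chosen for demonstration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A_demo = rng.standard_normal((20, 50))
    x_true = np.zeros((50, 1))
    x_true[:3] = 1.0                       # sparse ground truth
    y_demo = A_demo @ x_true
    x_hat, info = fb_lasso(A_demo, y_demo, reg_param=0.1, iter_nb=200,
                            inertia=True, verbose=True)
    print("recovered support:", info["iterate_support"][-1])
    print("final objective:", info["function_value"][-1])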
|
[
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.linalg.norm"
] |
[((628, 645), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (636, 645), True, 'import numpy as np\n'), ((665, 682), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (673, 682), True, 'import numpy as np\n'), ((719, 750), 'numpy.zeros', 'np.zeros', (['(A.shape[1], iter_nb)'], {}), '((A.shape[1], iter_nb))\n', (727, 750), True, 'import numpy as np\n'), ((844, 869), 'numpy.zeros', 'np.zeros', (['(A.shape[1], 1)'], {}), '((A.shape[1], 1))\n', (852, 869), True, 'import numpy as np\n'), ((1157, 1174), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (1165, 1174), True, 'import numpy as np\n'), ((968, 984), 'numpy.ones', 'np.ones', (['iter_nb'], {}), '(iter_nb)\n', (975, 984), True, 'import numpy as np\n'), ((1247, 1260), 'numpy.linalg.norm', 'la.norm', (['A', '(2)'], {}), '(A, 2)\n', (1254, 1260), True, 'from numpy import linalg as la\n'), ((1071, 1089), 'numpy.arange', 'np.arange', (['iter_nb'], {}), '(iter_nb)\n', (1080, 1089), True, 'import numpy as np\n'), ((1568, 1581), 'numpy.linalg.norm', 'la.norm', (['x', '(1)'], {}), '(x, 1)\n', (1575, 1581), True, 'from numpy import linalg as la\n'), ((1532, 1553), 'numpy.linalg.norm', 'la.norm', (['(A @ x - y)', '(2)'], {}), '(A @ x - y, 2)\n', (1539, 1553), True, 'from numpy import linalg as la\n'), ((1625, 1634), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1631, 1634), True, 'import numpy as np\n')]
|