text string | size int64 | token_count int64 |
|---|---|---|
import os
from fastapi import HTTPException
from fastapi_cloudauth.auth0 import Auth0CurrentUser
from app.api.models.auth0 import Auth0User
# FastAPI dependency that validates Auth0-issued JWTs for incoming requests.
# Reads the tenant domain and frontend client id from the environment;
# os.environ[...] raises KeyError at import time if either is unset (fail fast).
current_user = Auth0CurrentUser(
    domain=os.environ["AUTH0_DOMAIN"],
    client_id=os.environ["AUTH0_CLIENTID_FRONTEND"]
)
# Parse the verified token's claims into our Auth0User model.
current_user.user_info = Auth0User
async def check_email_verified(user: Auth0User):
    """Dependency guard: reject users whose Auth0 e-mail is not verified.

    Raises HTTPException(401) when ``user.is_verified`` is falsy.
    """
    if user.is_verified:
        return
    raise HTTPException(status_code=401, detail="User e-mail not verified")
| 466 | 159 |
# Read N sample positions X and weights W from stdin, then print the
# weighted mean rounded to one decimal place.
N = int(input())
X = list(map(int, input().split()))
W = list(map(int, input().split()))
weighted_sum = sum(X[i] * W[i] for i in range(N))
print(round(weighted_sum / sum(W), 1))
| 167 | 74 |
import tcod as libtcod
import math
from components.item import Item
from render_functions import RenderOrder
class Entity:
    """A generic object in the game world (player, monster, item, stairs...).

    Behaviour is composed from optional component objects (fighter, ai,
    item, inventory, ...); each attached component receives an ``owner``
    back-reference to this entity.
    """

    def __init__(self, id, x, y, char, color, name, weight=0, blocks=False,
                 render_order=RenderOrder.CORPSE, fighter=None, ai=None,
                 item=None, inventory=None, stairs=None, level=None,
                 equipment=None, equippable=None, valuable=None, door=None,
                 animation=None, hunger=None, food=None, trap=None,
                 classification=None, sign=None, identity=None):
        self.id = id
        self.x = x
        self.y = y
        self.char = char
        self.color = color
        self.name = name
        self.weight = weight
        self.blocks = blocks
        self.render_order = render_order
        self.fighter = fighter
        self.ai = ai
        self.item = item
        self.inventory = inventory
        self.stairs = stairs
        self.level = level
        self.equipment = equipment
        self.equippable = equippable
        self.valuable = valuable
        self.door = door
        self.animation = animation
        self.hunger = hunger
        self.food = food
        self.trap = trap
        # BUG FIX: the original default was a mutable `[]`, shared by every
        # entity created without an explicit classification.
        self.classification = [] if classification is None else classification
        self.sign = sign
        self.identity = identity
        # Every entity carries an Item component (used for weight/pickup
        # bookkeeping); create a default one when none was supplied.
        if not self.item:
            self.item = Item(1)
        # Attach owner back-references. `food` and `identity` intentionally
        # do not receive one (preserved from the original implementation).
        for component in (self.fighter, self.ai, self.item, self.inventory,
                          self.stairs, self.door, self.level, self.equipment,
                          self.equippable, self.valuable, self.animation,
                          self.hunger, self.sign, self.trap):
            if component:
                component.owner = self

    def __str__(self):
        return "Entity '{0}' is represented by {1} at location ({2}, {3}).".format(
            self.name, self.char, self.x, self.y)

    @property
    def get_name(self):
        """Displayed name: the unidentified alias until identified."""
        if self.identity and not self.identity.identified:
            return self.identity.name
        return self.name

    @property
    def get_char(self):
        """Displayed glyph; an attached light source overrides the default."""
        if self.item and self.item.light_source:
            return self.item.light_source.get_char
        return self.char

    @property
    def get_color(self):
        """Displayed color: the unidentified color until identified."""
        if self.identity and not self.identity.identified:
            return self.identity.color
        return self.color

    def move(self, dx, dy):
        """Displace the entity by (dx, dy)."""
        self.x += dx
        self.y += dy

    def move_towards(self, target_x, target_y, game_map, entities):
        """Take one unit step towards (target_x, target_y) if unblocked."""
        dx = target_x - self.x
        dy = target_y - self.y
        distance = math.sqrt((dx ** 2) + (dy ** 2))
        if distance == 0:
            return
        dx = int(round(dx / distance))
        dy = int(round(dy / distance))
        blocked = (game_map.is_blocked(self.x + dx, self.y + dy) or
                   get_blocking_entities_at_location(entities, self.x + dx, self.y + dy))
        if not blocked:
            self.move(dx, dy)

    def distance(self, x, y):
        """Euclidean distance from this entity to point (x, y)."""
        return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)

    def _step_astar(self, goal, entities, game_map, max_path_size=25):
        """Shared A* stepping logic for move_astar/flee_astar.

        Builds a walkability map, computes an A* path to ``goal`` and takes
        one step along it.  Paths of length >= ``max_path_size`` are ignored
        so monsters don't chase distant detours; in that case (or when no
        path exists) fall back to a plain move_towards step.
        """
        nav = libtcod.map_new(game_map.width, game_map.height)
        # Scan the current map and mark walls as unwalkable.
        for y1 in range(game_map.height):
            for x1 in range(game_map.width):
                libtcod.map_set_properties(nav, x1, y1,
                                           not game_map.tiles[x1][y1].block_sight,
                                           not game_map.tiles[x1][y1].blocked)
        # Blocking entities (other than self and the goal, so start/end
        # stay free) are treated as walls to be navigated around.
        for entity in entities:
            if entity.blocks and entity != self and entity != goal:
                libtcod.map_set_properties(nav, entity.x, entity.y, True, False)
        # 1.41 is the diagonal move cost (0.0 would forbid diagonal moves).
        my_path = libtcod.path_new_using_map(nav, 1.41)
        libtcod.path_compute(my_path, self.x, self.y, goal.x, goal.y)
        if not libtcod.path_is_empty(my_path) and libtcod.path_size(my_path) < max_path_size:
            x, y = libtcod.path_walk(my_path, True)
            # path_walk returns (None, None) when exhausted.  BUG FIX: the
            # original truthiness test (`if x or y`) also rejected the
            # perfectly valid tile (0, 0).
            if x is not None:
                self.x = x
                self.y = y
        else:
            # No (short enough) path — e.g. another monster blocks a
            # corridor; at least step in the right general direction.
            self.move_towards(goal.x, goal.y, game_map, entities)
        # Free the path memory.
        libtcod.path_delete(my_path)

    def move_astar(self, target, entities, game_map):
        """Take one A* pathfinding step towards ``target``."""
        self._step_astar(target, entities, game_map)

    def flee_astar(self, predator, entities, game_map, safe_range):
        """React to a nearby ``predator``.

        BUG FIX: the original body referenced an undefined name ``target``
        (NameError on every call); it now paths relative to ``predator``.
        NOTE(review): this still moves *towards* the predator, exactly as
        the copied code did — a real fleeing destination (and use of
        ``safe_range``) remains TODO.
        """
        self._step_astar(predator, entities, game_map)

    def distance_to(self, other):
        """Euclidean distance to another entity."""
        dx = other.x - self.x
        dy = other.y - self.y
        return math.sqrt((dx ** 2) + (dy ** 2))
def get_blocking_entities_at_location(entities, destination_x, destination_y):
    """Return the first blocking entity occupying the destination tile.

    If that entity has a fighter under an "invisible" effect, None is
    returned instead (the tile appears free), matching the original
    short-circuit behaviour.
    """
    for entity in entities:
        occupies = (entity.blocks and entity.x == destination_x
                    and entity.y == destination_y)
        if not occupies:
            continue
        if entity.fighter and entity.fighter.is_effect("invisible"):
            return None
        return entity
    return None
def get_entities_at_location(entities, destination_x, destination_y):
    """Return every non-None entity occupying the destination tile."""
    return [entity for entity in entities
            if entity and entity.x == destination_x and entity.y == destination_y]
| 9,141 | 2,656 |
"""Tests RVK data source."""
from slub_docsa.data.load.rvk import read_rvk_subjects
def test_rvk_first_level_classes():
    """Check that there are 34 first level classes in RVK."""
    first_level = list(read_rvk_subjects(depth=1))
    assert len(first_level) == 34
| 240 | 91 |
from typing import Dict, List, TextIO, Optional, Set, Tuple
from overrides import overrides
import torch
from torch.nn.modules import Linear, Dropout
from torch.autograd import Variable
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp.nn.util import batched_index_select
from allennlp.training.metrics import SpanBasedF1Measure
from qasrl.modules.sentence_encoder import SentenceEncoder
from qasrl.modules.slot_sequence_generator import SlotSequenceGenerator
from qasrl.metrics.question_metric import QuestionMetric
@Model.register("qasrl_question")
class QuestionModel(Model):
    """Generate QA-SRL question slots for a predicate in a sentence.

    The sentence encoder produces contextual token representations; the
    representation of the predicate token is fed to a slot-sequence
    generator, which predicts one label per question slot.  Training uses
    teacher forcing (``forward``); inference uses ``beam_decode``.
    """

    def __init__(self, vocab: Vocabulary,
                 sentence_encoder: SentenceEncoder,
                 question_generator: SlotSequenceGenerator,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None):
        super().__init__(vocab, regularizer)
        self._sentence_encoder = sentence_encoder
        self._question_generator = question_generator
        # The generator consumes the predicate representation produced by
        # the encoder, so their dimensions must agree.
        if self._sentence_encoder.get_output_dim() != self._question_generator.get_input_dim():
            raise ConfigurationError(
                ("Input dimension of question generator (%s) must be " % self._question_generator.get_input_dim()) + \
                ("equal to the output dimension of the sentence encoder (%s)." % self._sentence_encoder.get_output_dim()))
        self.metric = QuestionMetric(vocab, self._question_generator.get_slot_names())

    def get_slot_names(self):
        """Names of the question slots, in generation order."""
        return self._question_generator.get_slot_names()

    @overrides
    def forward(self,
                text: Dict[str, torch.LongTensor],
                predicate_indicator: torch.LongTensor,
                predicate_index: torch.LongTensor,
                **kwargs):
        """Teacher-forced pass; ``kwargs`` must hold gold labels per slot.

        Returns a dict of per-slot logits plus a summed "loss" entry.
        Raises ConfigurationError if any slot's gold labels are missing
        (use ``beam_decode`` for label-free inference).
        """
        # slot_name -> Shape: batch_size, 1
        gold_slot_labels = self._get_gold_slot_labels(kwargs)
        if gold_slot_labels is None:
            raise ConfigurationError("QuestionModel requires gold labels for teacher forcing when running forward. "
                                     "You may wish to run beam_decode instead.")
        # Shape: batch_size, num_tokens, encoder output dim
        encoded_text, text_mask = self._sentence_encoder(text, predicate_indicator)
        # Shape: batch_size, encoder output dim
        pred_rep = batched_index_select(encoded_text, predicate_index).squeeze(1)
        # slot_name -> Shape: batch_size, slot_name_vocab_size
        slot_logits = self._question_generator(pred_rep, **gold_slot_labels)
        batch_size, _ = pred_rep.size()
        # Shape: <scalar>
        slot_nlls, neg_log_likelihood = self._get_cross_entropy(slot_logits, gold_slot_labels)
        self.metric(slot_logits, gold_slot_labels, torch.ones([batch_size]), slot_nlls, neg_log_likelihood)
        return {**slot_logits, "loss": neg_log_likelihood}

    def beam_decode(self,
                    text: Dict[str, torch.LongTensor],
                    predicate_indicator: torch.LongTensor,
                    predicate_index: torch.LongTensor,
                    max_beam_size: int,
                    min_beam_probability: float,
                    clause_mode: bool = False):
        """Beam-search question generation (no gold labels required)."""
        # Shape: batch_size, num_tokens, encoder output dim
        encoded_text, text_mask = self._sentence_encoder(text, predicate_indicator)
        # Shape: batch_size, encoder output dim
        pred_rep = batched_index_select(encoded_text, predicate_index).squeeze(1)
        return self._question_generator.beam_decode(pred_rep, max_beam_size, min_beam_probability, clause_mode)

    def get_metrics(self, reset: bool = False):
        """AllenNLP metrics hook; delegates to the question metric."""
        return self.metric.get_metric(reset=reset)

    def _get_cross_entropy(self, slot_logits, gold_slot_labels):
        """Summed cross-entropy per slot and the total over all slots."""
        slot_xes = {}
        total_xe = None
        for slot_name in self.get_slot_names():
            slot_xe = F.cross_entropy(slot_logits[slot_name],
                                      gold_slot_labels[slot_name].squeeze(-1),
                                      reduction="sum")
            slot_xes[slot_name] = slot_xe
            total_xe = slot_xe if total_xe is None else total_xe + slot_xe
        return slot_xes, total_xe

    def _get_gold_slot_labels(self, instance_slot_labels_dict):
        """Collect gold labels for every slot, or None if any is missing.

        Single pass with early exit (the original built the dict and then
        re-scanned all slots to invalidate it).  Each returned value has
        shape (batch_size, 1).
        """
        gold_slot_labels = {}
        for slot_name in self.get_slot_names():
            labels = instance_slot_labels_dict.get(slot_name)
            if labels is None:
                return None
            gold_slot_labels[slot_name] = labels.unsqueeze(-1)
        return gold_slot_labels
| 5,459 | 1,680 |
# Re-export the public percentile-family operators from their submodules.
from ._operator import PercentileOperator
from .maximum import Maximum
from .median import Median
from .minimum import Minimum
from .percentile import Percentile
from .quantile import Quantile

# Public API of this package (controls `from ... import *`).
__all__ = [
    "PercentileOperator",
    "Maximum",
    "Minimum",
    "Median",
    "Quantile",
    "Percentile",
]
| 312 | 98 |
from typing import Any, List
import random
import time
import datetime
import numpy as np
import pandas as pd
import libkloudtrader.stocks as stocks
from libkloudtrader.exceptions import InvalidAlgorithmMode, EmptySymbolBucket, InvalidDataFeedType
from libkloudtrader.enumerables import Data_Types
import libkloudtrader.processing as processing
from libkloudtrader.logs import start_logger
import libkloudtrader.backtest as bt
import libkloudtrader.crypto as crypto
import libkloudtrader.analysis as analysis
from tqdm import tqdm
# Optional pandas display tweaks for debugging full frames (left disabled).
#pd.set_option('display.max_columns', None)  # or 1000
#pd.set_option('display.max_rows', None)  # or 1000
#pd.set_option('display.max_colwidth', -1)  # or 199
# Module logger; suppress the chatty analysis module's log output.
logger = start_logger(__name__, ignore_module='libkloudtrader.analysis')
def run_backtest(strategy: Any,
                 symbol_bucket: List[str],
                 data: str,
                 start: Any,
                 end: Any,
                 data_interval: str = "1d",
                 preferred_price_point: str = 'close',
                 preferred_benchmark: str = 'SPY',
                 initial_capital: float = 100000,
                 commission: float = 0,
                 slippage=True):
    """Backtest ``strategy`` over each symbol in ``symbol_bucket``.

    Parameters
    ----------
    strategy : callable
        Strategy function invoked as ``strategy(backtest, data_batch)``.
        (The original annotation said ``str``; it is actually a callable.)
    symbol_bucket : List[str]
        Symbols to backtest, one run per symbol.
    data : str
        Name of a ``Data_Types`` member used to fetch historical data.
    start, end : Any
        Date range passed to the data fetcher.
    data_interval : str
        Bar interval for the historical data (default daily).
    preferred_price_point, preferred_benchmark : str
        Currently unused — kept for interface compatibility.
    initial_capital, commission, slippage
        Backtest account settings.

    Raises whatever the data fetcher or strategy raises, after logging.
    """
    try:
        logger.info(
            'Starting Backtest for {} from {} to {} with initial capital = {}'.
            format(strategy.__name__, start, end, initial_capital))
        data_to_backtest_on = Data_Types[data].value
        for symbol in symbol_bucket:
            data_batch = data_to_backtest_on(symbol=symbol,
                                             start=start,
                                             end=end,
                                             interval=data_interval)
            batch = processing.Buffer(len(data_batch), dtype=object)
            # BUG FIX: honour the caller's capital/commission/slippage;
            # these were previously hard-coded (100000, 1, True).
            backtest = bt.Backtest(capital=initial_capital,
                                   commission=commission,
                                   enable_slippage=slippage)
            # Renamed the loop variable: the original shadowed the
            # imported `datetime` module.
            for bar_time, bar in data_batch.iterrows():
                batch.append(bar)
                backtest.update_bar(bar_time, bar)
            data_batch = pd.DataFrame(batch)
            # Call the strategy directly; the locals()['strategy']
            # indirection was needless.
            strategy(backtest, data_batch)
            print(backtest.get_trade_log)
            del backtest
    except (KeyboardInterrupt, SystemExit):
        print('\n')
        logger.critical("User's keyboard prompt stopped {}".format(
            strategy.__name__))
    except Exception as exception:
        logger.critical('Exiting {}...‼️'.format(strategy.__name__))
        logger.error(
            'Oops! Something went wrong while your algorithm was being backtested. ⚠️'
        )
        # Re-raise for the caller; the original's exit() after raise was
        # unreachable and has been removed.
        raise exception
#print(return_data_from_enum(a,symbol,start, end))
#print(locals()[a](symbol, start, end))
def run_live(strategy: Any,
             symbol_bucket: list,
             data_feed_type: str,
             exempted_states: list = [''],
             exempted_days: list = [''],
             exempted_dates: list = [''],
             batch_size: int = 1000,
             data_feed_delay: float = 1.0,
             fake_feed: bool = False):
    """Run ``strategy`` against a live data feed while the market is open.

    Parameters
    ----------
    strategy : callable
        Invoked as ``strategy(data_batch)`` with a rolling DataFrame of bars.
    symbol_bucket : list
        Symbols to poll each cycle (converted to a numpy array).
    data_feed_type : str
        One of the supported live feed names; others raise
        InvalidDataFeedType.
    exempted_states, exempted_days, exempted_dates : list
        Market states/days/dates during which trading is skipped
        (``exempted_dates`` is currently unused — kept for compatibility).
    batch_size : int
        Rolling window length of the bar buffer.
    data_feed_delay : float
        Delay between polls in milliseconds (overridden by the exchange
        rate limit for crypto feeds).
    fake_feed : bool
        Forwarded to the data feed for simulated data.
    """
    try:
        logger.info("{} is now entering the live markets. 📈\n".format(
            strategy.__name__))
        # BUG FIX: the lowercased lists were previously built and
        # discarded, so the exemption checks never matched.
        exempted_states = [x.lower() for x in exempted_states]
        exempted_days = [x.lower() for x in exempted_days]
        if isinstance(symbol_bucket, list):
            symbol_bucket = np.array(symbol_bucket)
        elif type(symbol_bucket) not in (np.ndarray, list):
            # BUG FIX: referenced `numpy.ndarray` although the module is
            # imported as `np`, turning this branch into a NameError.
            raise TypeError('Symbol bucket must be a list or numpy array')
        if data_feed_type not in ('CRYPTO_live_feed', 'US_STOCKS_live_feed',
                                  'CRYPTO_live_feed_level2'):
            raise InvalidDataFeedType(
                'This Data Feed is not available for live trading.'
            )
        if data_feed_type in ("CRYPTO_live_feed", 'CRYPTO_live_feed_level2'):
            # Respect the exchange's rate limit (milliseconds).
            data_feed_delay = crypto.exchange_attribute('rateLimit')
        data_feed = Data_Types[data_feed_type].value
        # Poll while the market state is not exempted.
        while stocks.intraday_status()['state'] not in exempted_states:
            batch = processing.Buffer(batch_size, dtype=object)
            while len(batch) < batch_size:
                for symbol in symbol_bucket:
                    batch.append(data_feed(symbol, fake_feed=fake_feed))
                data_batch = pd.DataFrame(batch)
                # Direct call; the locals()['strategy'] indirection was
                # needless.
                strategy(data_batch)
                if len(batch) == batch_size:
                    # Keep the buffer rolling once it is full.
                    batch.popleft()
                time.sleep(data_feed_delay / 1000)
    except (KeyboardInterrupt, SystemExit):
        print('\n')
        logger.critical("User's keyboard prompt stopped {}".format(
            strategy.__name__))
    except Exception as exception:
        logger.critical('Exiting {}...‼️'.format(strategy.__name__))
        logger.error('Oops! Something went wrong ⚠️')
        # Re-raise for the caller; the unreachable exit() was removed.
        raise exception
'''
def generate_positions_and_handle_portfolio(symbol, signals, data, commission,
initial_capital, quantity):
try:
initial_capital = float(initial_capital)
positions = pd.DataFrame(index=signals.index).fillna(0.0)
positions['Positions in' + " " +
symbol] = (quantity * signals['signal']) + commission
portfolio = positions.multiply(data['close'], axis=0)
poss_diff = positions.diff()
portfolio['holdings'] = (positions.multiply(data['close'],
axis=0)).sum(axis=1)
return portfolio
except Exception as exception:
raise exception
'''
| 7,059 | 1,988 |
__all__ = ["NeoPixelColors"] | 28 | 12 |
#!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import numpy as np
import copy
import scipy.optimize
from skimage import filters
from skimage import morphology
from scipy import interpolate
from astropy.stats import biweight_location, mad_std
from collections import OrderedDict
import scipy.constants
import logging
import datetime
import astropy
import astropy.time
""" This is a library of functions that are called in the
wavelength calibration.
"""
def fgauss(x, center, sigma, amp):
    """Evaluate a standard Gaussian.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    center : float
        Mean of the Gaussian
    sigma : float
        Standard deviation of the Gaussian
    amp : float
        Amplitude of the Gaussian
    """
    center, sigma, amp = float(center), float(sigma), float(amp)
    z = (x - center) / sigma
    return amp * np.exp(-z ** 2.)
def fgauss_const(x, center, sigma, amp, offset):
    """Evaluate a Gaussian plus a constant offset.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    center : float
        Mean of the Gaussian
    sigma : float
        Standard deviation of the Gaussian
    amp : float
        Amplitude of the Gaussian
    offset : float
        Constant additive offset
    """
    center, sigma = float(center), float(sigma)
    amp, offset = float(amp), float(offset)
    z = (x - center) / sigma
    return amp * np.exp(-z ** 2.) + offset
def fgauss_line(x, center, sigma, amp, offset, slope):
    """Evaluate a Gaussian plus a linear background.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    center : float
        Mean of the Gaussian
    sigma : float
        Standard deviation of the Gaussian
    amp : float
        Amplitude of the Gaussian
    offset : float
        y-intercept of the linear background
    slope : float
        Slope of the linear background
    """
    center, sigma = float(center), float(sigma)
    amp, offset, slope = float(amp), float(offset), float(slope)
    z = (x - center) / sigma
    return amp * np.exp(-z ** 2.) + offset + x * slope
def fgauss_from_1(x, center, sigma, amp):
    """Evaluate a Gaussian on a fixed unit continuum (offset = 1).

    Convenience form for normalized spectra: a (typically negative-amp)
    Gaussian sitting on a continuum level of 1.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    center : float
        Mean of the Gaussian
    sigma : float
        Standard deviation of the Gaussian
    amp : float
        Amplitude of the Gaussian (negative for an absorption line)
    """
    center, sigma, amp = float(center), float(sigma), float(amp)
    z = (x - center) / sigma
    return amp * np.exp(-z ** 2.) + 1.
def discretize_oversample(func, x, *args, **kwargs):
    """Evaluate ``func`` on an oversampled grid and bin back down.

    The function is sampled ``factor`` times per original pixel and the
    samples are averaged per pixel, approximating the integral of the
    function over each pixel.

    Parameters
    ----------
    func : function
        Function to upsample; first argument is the independent variable.
    x : float or ndarray of float
        Original (coarse) independent-variable grid.
    *args : list
        Extra arguments forwarded to ``func``.
    **kwargs : dict
        ``factor`` — integer upsampling factor (default 10, must be > 1).
    """
    factor = kwargs.get('factor', 10)
    assert factor > 1
    lo = np.amin(x)
    hi = np.amax(x)
    # Fine grid with `factor` samples centered within each coarse pixel.
    fine_grid = np.arange(lo - 0.5 * (1 - 1 / factor),
                          hi + 0.5 * (1 + 1 / factor),
                          1. / factor) + 0.5 / factor
    fine_vals = func(fine_grid, *args)
    binned = np.reshape(fine_vals, (fine_grid.size // factor, factor))
    return binned.sum(axis=1) * 1. / factor
def dfgauss(x, *args, **kwargs):
    """Discretized (oversampled) Gaussian.

    Convenience wrapper for testing fit sensitivity to how the line
    function is discretized.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    *args : list
        Gaussian arguments, in order: mean, sigma, amplitude
    **kwargs : dict
        Discretization keywords, including ``factor``
    """
    return discretize_oversample(fgauss, x, *args, **kwargs)
def dfgauss_const(x, *args, **kwargs):
    """Discretized (oversampled) Gaussian + constant.

    Convenience wrapper for testing fit sensitivity to how the line
    function is discretized.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    *args : list
        Arguments, in order: mean, sigma, amplitude, offset
    **kwargs : dict
        Discretization keywords, including ``factor``
    """
    return discretize_oversample(fgauss_const, x, *args, **kwargs)
def dfgauss_line(x, *args, **kwargs):
    """Discretized (oversampled) Gaussian + line.

    Convenience wrapper for testing fit sensitivity to how the line
    function is discretized.

    Parameters
    ----------
    x : float or ndarray of float
        Independent variable for the Gaussian
    *args : list
        Arguments, in order: mean, sigma, amplitude, offset, slope
    **kwargs : dict
        Discretization keywords, including ``factor``
    """
    return discretize_oversample(fgauss_line, x, *args, **kwargs)
def make_legendre(legendre_order, legfix, legbound):
    """Build a Legendre function with some coefficients held fixed.

    Useful for measuring wavelength-calibration drift that affects only
    a subset of the Legendre coefficients.

    Parameters
    ----------
    legendre_order : int
        Order of the Legendre polynomial (order 2 = 3 coefficients).
    legfix : list of bool
        Per coefficient: fixed (True) or free/fitted (False).
    legbound : list
        Per coefficient: its fixed value (for fixed coefficients) or an
        arbitrary placeholder.

    Returns
    -------
    function
        ``f(x, *args)`` where ``args`` supplies the free coefficients in
        order; suitable for scipy.optimize.curve_fit.
    """
    assert len(legbound) == (legendre_order + 1)
    assert len(legfix) == (legendre_order + 1)

    def flexible_Legendre(x, *args):
        # curve_fit must supply exactly one value per free coefficient.
        assert len(args) == (legendre_order - np.count_nonzero(legfix) + 1)
        free_vals = iter(args)
        # Merge fixed values and fitted values into the coefficient list.
        coeffs = [bound if fixed else next(free_vals)
                  for fixed, bound in zip(legfix, legbound)]
        return np.polynomial.legendre.legval(x, coeffs)

    return flexible_Legendre
def rescale(x, oldmin, oldmax, newmin, newmax):
    """Linearly map a series from one domain onto another.

    Useful e.g. for mapping pixel coordinates onto the [-1, 1] domain of
    a Legendre polynomial.

    Parameters
    ----------
    x : ndarray
        Original series (float).
    oldmin, oldmax : float
        Bounds of the original domain.
    newmin, newmax : float
        Bounds of the target domain.

    Returns
    -------
    The rescaled series.
    """
    scale = (newmax - newmin) / (oldmax - oldmin)
    return scale * (x - oldmin) + newmin
def fitProfile(inp_x, inp_y, fit_center_in, fit_width=8, sigma=None,
               func='fgauss_const', return_residuals=False, p0=None,
               bounds=(-np.inf, np.inf)):
    """Perform a least-squares fit to a peak-like function.

    Parameters
    ----------
    inp_x : ndarray of float
        x-values of line to be fit (full array; subset is taken based on
        fit width)
    inp_y : ndarray of float
        y-values of line to be fit (full array; subset is taken based on
        fit width)
    fit_center_in : float
        Index value of estimated location of line center; used to select
        the region for fitting
    fit_width : int, optional
        Half-width of fitting window (default 8)
    sigma : ndarray of float, optional
        Standard error for each x/y value in the fit (default None, which
        implies an unweighted fit)
    func : {'fgauss','fgauss_const','fgauss_line','fgauss_from_1'}, optional
        Function used for the fit (default 'fgauss_const')
    return_residuals : bool, optional
        Include fit residuals in the output (default False)
    p0 : list, optional
        First-guess coefficients; the fit can be quite sensitive to them
    bounds : tuple
        Passed directly to scipy.optimize.curve_fit()

    Returns
    -------
    dict with keys: centroid, e_centroid, sigma, e_sigma, nanflag, pcov,
    popt, indices_used, function_used, tot_counts_in_line, fit_successful,
    scale_value, and (optionally) residuals.
    """
    # Select the region to fit; consistent only to +- integer pixels.
    fit_center = copy.copy(fit_center_in)
    xx_index = np.arange(len(inp_x))
    assert len(inp_x) == len(inp_y)
    j1 = int(np.round(np.amax([0, fit_center - fit_width])))
    j2 = int(round(np.amin([np.amax(xx_index), fit_center + fit_width])))
    sub_x1 = inp_x[j1:j2]
    sub_y1 = inp_y[j1:j2]
    tot_counts_in_line = float(np.nansum(sub_y1))
    # Normalize the sub-array to its peak.
    try:
        scale_value = np.nanmax(sub_y1)
    except ValueError as e:
        # Empty fit window.  Report context and propagate: the original
        # printed and then crashed on an undefined scale_value anyway.
        print(e, j1, j2, sub_x1, sub_y1)
        raise
    sub_y_norm1 = sub_y1 / scale_value
    # Keep only finite samples.
    ii_good = np.isfinite(sub_y_norm1)
    sub_x = sub_x1[ii_good]
    sub_y_norm = sub_y_norm1[ii_good]
    if sigma is not None:
        sub_sigma1 = sigma[j1:j2]
        ii_good = np.isfinite(sub_y_norm1) & (np.isfinite(sub_sigma1))
        # BUG FIX: sub_x must be re-selected with the combined mask too,
        # otherwise x and y can end up with different lengths when sigma
        # has NaNs at positions where y does not.
        sub_x = sub_x1[ii_good]
        sub_sigma = sub_sigma1[ii_good]
        sub_y_norm = sub_y_norm1[ii_good]
    else:
        sub_sigma = None
    # Record whether any samples were dropped as NaN.
    nanflag = len(sub_x) != len(sub_x1)
    # Initial guesses: Gaussian centered mid-window; for the functions
    # with a constant/line background, the continuum defaults near 1.
    if func == 'fgauss':
        if p0 is None:
            p0 = (np.mean(sub_x), 5., -0.5)
        use_function = fgauss
    elif func == 'fgauss_const':
        if p0 is None:
            p0 = (np.mean(sub_x), 1., -np.ptp(sub_y_norm), np.nanmedian(sub_y_norm))
        use_function = fgauss_const
    elif func == 'fgauss_line':
        if p0 is None:
            p0 = (np.mean(sub_x), 1., -0.5, 1., 0.)
        use_function = fgauss_line
    elif func == 'fgauss_from_1':
        if p0 is None:
            p0 = (np.mean(sub_x), 1., -np.ptp(sub_y_norm))
        use_function = fgauss_from_1
    else:
        raise ValueError
    # Perform the least-squares fit.
    try:
        popt, pcov = scipy.optimize.curve_fit(use_function,
                                              sub_x,
                                              sub_y_norm,
                                              p0=p0,
                                              sigma=sub_sigma,
                                              maxfev=10000,
                                              bounds=bounds)
        # popt[0] is the centroid, popt[1] the sigma; errors come from the
        # covariance diagonals.  Lists ease json recording downstream.
        errs = np.diag(pcov)
        centroid = popt[0]
        centroid_error = np.sqrt(errs[0])
        width = popt[1]
        width_error = np.sqrt(errs[1])
        fit_successful = True
        pcov_list = pcov.tolist()
        popt_list = popt.tolist()
    except Exception as e:
        # Consolidates the original's four near-identical failure branches
        # (including a bare `except:`), preserving its per-type messages.
        if isinstance(e, (ValueError, TypeError)):
            print('{}: {}'.format(type(e).__name__, e))
        elif not isinstance(e, RuntimeError):
            print('unknown error')
        errs = centroid = centroid_error = width = width_error = np.nan
        fit_successful = False
        pcov_list = []
        popt_list = []
    if np.isnan(centroid_error) or np.isnan(centroid):
        fit_successful = False
    # Build the returned dictionary.
    retval = {'centroid': centroid,
              'e_centroid': centroid_error,
              'sigma': width,
              'e_sigma': width_error,
              'nanflag': nanflag,
              'pcov': pcov_list,
              'popt': popt_list,
              'indices_used': (j1, j2),
              'function_used': func,
              'tot_counts_in_line': tot_counts_in_line,
              'fit_successful': fit_successful,
              'scale_value': float(scale_value)}
    # Residual array can be large, so include it only on request.
    if return_residuals:
        if fit_successful:
            predicted = use_function(sub_x, *popt)
            residuals = (predicted - sub_y_norm).tolist()
        else:
            residuals = np.nan
        retval['residuals'] = residuals
    return retval
def generate_comb_wavelengths(mode_numbers, comb_f0, comb_fr):
    """Use the comb equation to generate known wavelengths.

    Frequency comb equation f_n = f_0 + n * f_r used to define wavelengths
    for an arbitrary set of mode numbers.

    Parameters
    ----------
    mode_numbers : ndarray or list of int
        Mode indices used; positive integers
    comb_f0 : float
        Offset frequency of comb in Hz
    comb_fr : float
        Repetition rate of comb in Hz

    Returns
    -------
    ndarray
        Comb (vacuum) wavelengths in angstroms
    """
    # BUG FIX: accept lists via np.asarray.  The original called
    # np.ndarray(mode_numbers), which interprets the list as a *shape*
    # and returns uninitialized memory.
    mode_numbers = np.asarray(mode_numbers)
    freqs = comb_f0 + comb_fr * mode_numbers
    wavelengths_vac = scipy.constants.c / freqs
    wavelengths_vac_angstrom = wavelengths_vac / 1e-10
    return wavelengths_vac_angstrom
def bugfix_biweight_location(array,**kargs):
    """ Temporary workaround: biweight_location returns NaN for a
    zero-variance array, so fall back to the median in that case. """
    finite_values = array[~np.isnan(array)]  # drop NaNs up front
    zero_spread = np.any(mad_std(finite_values,**kargs) == 0)
    if zero_spread:
        return np.median(finite_values,**kargs)
    return biweight_location(finite_values,**kargs)
def subtract_Continuum_fromlines(inputspec,refspec=None,thresh_mask=None,thresh_window=21,mask_dilation=2,spline_kind='cubic'):
    """ Returns a smooth continuum subtracted `inputspec`. If `refspec` is provided, it is used to create the mask of the continuum region.

    Parameters
    ----------
    inputspec : ndarray
        Spectrum to continuum-subtract.
    refspec : ndarray, optional
        Spectrum used for line thresholding; defaults to `inputspec`.
    thresh_mask : ndarray of bool, optional
        Precomputed emission-line mask; if given, thresholding is skipped.
    thresh_window : int, optional
        Window size passed to the local threshold filter.
    mask_dilation : int, optional
        Half-width (in pixels) of the 1-D dilation applied to the mask.
    spline_kind : str, optional
        Interpolation kind for the continuum spline.

    Returns
    -------
    (outspec, Continuum, ThresholdMask) : tuple
        Continuum-subtracted spectrum, the interpolated continuum, and the
        boolean line mask that was used.
    """
    # Use inputspec for thresholding if refspec is not provided
    if refspec is None:
        refspec = inputspec
    Xaxis = np.arange(len(refspec))
    if thresh_mask is None:
        # Create a mask for the emission lines (local adaptive threshold on a 2-D view)
        ThresholdMask = np.atleast_2d(refspec) > filters.threshold_local(np.atleast_2d(refspec), thresh_window,offset=0)
        # Dilate the mask so line wings do not leak into the continuum estimate
        ThresholdMask = morphology.binary_dilation(ThresholdMask,selem=np.array([[1]*mask_dilation+[1]+[1]*mask_dilation]))[0]
    else:
        ThresholdMask = thresh_mask
    pix_pos_list = []
    continuum_list = []
    # Each unmasked clump is a continuum stretch; use its mean position and
    # a robust location estimate of its flux as one spline knot.
    for sli in np.ma.clump_unmasked(np.ma.array(refspec,mask=ThresholdMask)):
        pix_pos_list.append(np.mean(Xaxis[sli]))
        continuum_list.append(bugfix_biweight_location(inputspec[sli]))
    Continuum_Func = interpolate.interp1d(pix_pos_list,continuum_list,kind=spline_kind,fill_value='extrapolate')
    Continuum = Continuum_Func(Xaxis)
    outspec = inputspec - Continuum
    return outspec, Continuum, ThresholdMask
def measure_peaks_order(wl,fl,peak_locs,xx=None,pix_to_wvl=None,pix_to_wvl_per_pix=None,fitfunc='fgauss_const',continuum_subtract=False,
                        continuum_subtract_kw={}):
    """ Fit all peaks in an order
    Parameters
    ----------
    wl : float arr
        ndarray of wavelengths
    fl : flat arr
        ndarray of fluxes
    peak_locs : list
        List of peak locations to center fit windows on
    xx : None, optional
        Index values to use for fits - defaults to pixel number
    pix_to_wvl : None, optional
        Function for translating pixel to wavelength
    pix_to_wvl_per_pix : None, optional
        Function for translating pixel to dispersion
    fitfunc : str, optional
        Function to fit to lines
    continuum_subtract : bool, optional
        Subtract background continuum (only typically for comb)
    continuum_subtract_kw : dict, optional
        Keywords to pass to continuum subtraction method
    Returns
    -------
    measure_peaks_order OrderedDict of fit results
    .. deprecated:: 0.1
        This function is only retained for backwards-compatibility - use fit_lines_order instead
    """
    if xx is None:
        xx = np.arange(len(wl))
    # Normalize peak_locs to a dict with sequential integer labels if needed.
    if not isinstance(peak_locs,dict):
        peak_locs_dict = OrderedDict()
        mode_names = range(len(peak_locs))
        for mi in mode_names:
            peak_locs_dict[mi] = peak_locs[mi]
    else:
        peak_locs_dict = copy.deepcopy(peak_locs)
    out = OrderedDict()
    if pix_to_wvl is None:
        pix_to_wvl = scipy.interpolate.interp1d(xx,wl,kind='cubic',bounds_error=False)
    if pix_to_wvl_per_pix is None:
        dwl = np.diff(wl)
        dwl = np.append(dwl,dwl[-1])
        pix_to_wvl_per_pix = scipy.interpolate.interp1d(xx,dwl,kind='cubic',bounds_error=False)
    if continuum_subtract:
        # BUGFIX: previously *continuum_subtract_kw unpacked dict KEYS as
        # positional arguments; ** passes them as keyword arguments.
        fl_subtracted, _, _ = subtract_Continuum_fromlines(fl,**continuum_subtract_kw)
        fl = fl_subtracted
    for mi in peak_locs_dict.keys():
        loc_this = peak_locs_dict[mi]
        # Initial guesses: [centroid, sigma, scale, (const), (slope)]
        if fitfunc == 'fgauss_const':
            p0 = [loc_this,2.5,1.,0.]
        elif fitfunc == 'fgauss_line':
            p0 = [loc_this,2.5,1.,0.,0.]
        elif fitfunc == 'fgauss':
            p0 = [loc_this,2.1,1.]
        else:
            raise ValueError
        tmp = fitProfile(xx,fl,loc_this,fit_width=8,sigma=None,
                         func=fitfunc,p0=p0)
        #tmp['centroid_wl'] = interp(tmp['centroid'],xx_pix,xx_test)
        dwl_per_pix = pix_to_wvl_per_pix(tmp['centroid'])
        centroid_pix = tmp['centroid']
        centroid_wl = pix_to_wvl(centroid_pix)[()]
        fwhm_pix = 2.36 * tmp['sigma']  # ~2.355 = 2*sqrt(2*ln 2) Gaussian FWHM factor
        fwhm_wl = fwhm_pix * dwl_per_pix
        fwhm_vel = fwhm_wl / centroid_wl * 3e8
        peak_counts = tmp['scale_value']
        out1 = OrderedDict()
        out1['fit_output'] = tmp
        out1['centroid_pix'] = centroid_pix
        out1['centroid_wl'] = centroid_wl
        out1['fwhm_pix'] = fwhm_pix
        out1['fwhm_wl'] = fwhm_wl
        out1['snr_peak'] = np.sqrt(peak_counts)
        out1['prec_est'] = 0.4 * fwhm_vel / (np.sqrt(fwhm_pix) * np.sqrt(peak_counts))
        out[mi] = out1
    return(out)
def fit_lines_order(xx,fl,peak_locs,sigma=None,wl=None,pix_to_wvl=None,pix_to_wvl_per_pix=None,fitfunction='fgauss_const',
                    fit_width_pix=8,basic_window_check=True):
    """ Fit all peaks in an order
    This is a wrapper for the fitProfile function to do the (often used) task of repeated fitting of many lines
    in a single spectral order.
    Parameters
    ----------
    xx : ndarray of float
        ndarray of pixel values
    fl : ndarray of float
        ndarray of fluxes
    peak_locs : list or dict
        List of peak locations to center fit windows on.
        If dict, the peaks are labeled by their respective keys.
        If list, the peaks are given sequential labels.
    sigma : optional, ndarray of float
        ndarray of sigma values to send to fitter
    wl : optional, ndarray of float
        ndarray of wavelength values.
        If not provided, no wavelength values are output.
    pix_to_wvl : optional, function
        Function for translating pixel to wavelength
        If not provided, a cubic spline is used
    pix_to_wvl_per_pix : optional, function
        Function for translating pixel to dispersion
        If not provided, a cubic spline is used
    fitfunction : str, optional
        Name of function to fit to lines, name must be accepted by fitProfile
    fit_width_pix : int
        Half-width of fitting window in pixels
    basic_window_check : bool
        Check whether the fitted centroid falls in given peak_loc +- fit_width_pix
        Return NaNs if not
    Returns
    -------
    OrderedDict of fit results. Each entry has (key,value) where
        key = peak label as defined by input dictionary (or sequential labels if not provided)
        value = OrderedDict of fit parameters as given by fitProfile.
    .. note::
        The interpolation functions are planned to be upgraded to a more stable form
        (e.g. cumsum or PCHIP based)
    """
    # Normalize peak_locs to a dict: sequential integer labels for a list input.
    if not isinstance(peak_locs,dict):
        peak_locs_dict = OrderedDict()
        mode_names = range(len(peak_locs))
        for mi in mode_names:
            peak_locs_dict[mi] = peak_locs[mi]
    else:
        peak_locs_dict = copy.deepcopy(peak_locs)
    out = OrderedDict()
    # if we have a wavelength array, also translate the (d)pixels to (d)wavelengths
    if wl is not None:
        if pix_to_wvl is None:
            pix_to_wvl = scipy.interpolate.interp1d(xx,wl,kind='cubic',bounds_error=False)
        if pix_to_wvl_per_pix is None:
            # per-pixel dispersion; last diff is duplicated to match the array length
            dwl = np.diff(wl)
            dwl = np.append(dwl,dwl[-1])
            pix_to_wvl_per_pix = scipy.interpolate.interp1d(xx,dwl,kind='cubic',bounds_error=False)
    for mi in peak_locs_dict.keys():
        loc_this = peak_locs_dict[mi]
        # Initial guesses: [centroid, sigma, scale, (const), (slope)]
        if fitfunction == 'fgauss_const':
            p0 = [loc_this,2.5,1.,0.]
        elif fitfunction == 'fgauss_line':
            p0 = [loc_this,2.5,1.,0.,0.]
        elif fitfunction == 'fgauss':
            p0 = [loc_this,2.1,1.]
        else:
            raise ValueError
        try:
            tmp = fitProfile(xx,fl,loc_this,fit_width=fit_width_pix,sigma=sigma,
                             func=fitfunction,p0=p0)
        except (RuntimeError, ValueError, RuntimeWarning) as e:
            # A failed fit is recorded as NaNs rather than aborting the order.
            tmp = OrderedDict()
            tmp['fit_successful'] = False
            tmp['sigma'] = np.NaN
            tmp['scale_value'] = np.NaN
            tmp['centroid'] = np.NaN
            logging.warning(' ... ... Raised "{0}" on mode {1}'.format(e, mi))
        #tmp['centroid_wl'] = interp(tmp['centroid'],xx_pix,xx_test)
        centroid_pix = tmp['centroid']
        if basic_window_check:
            # NaN centroid compares False here, so an already-failed fit passes through unchanged.
            check_val = np.abs(loc_this - float(centroid_pix))
            if check_val > fit_width_pix:
                centroid_pix = np.nan
                tmp['fit_successful'] = False
        if wl is not None:
            # NOTE(review): dispersion is evaluated at the raw tmp['centroid'],
            # not the window-checked centroid_pix — confirm this is intended.
            dwl_per_pix = pix_to_wvl_per_pix(tmp['centroid'])
            centroid_wl = pix_to_wvl(centroid_pix)[()]
        else:
            dwl_per_pix = np.NaN
            centroid_wl = np.NaN
        fwhm_pix = 2.36 * tmp['sigma']  # ~2.355 = 2*sqrt(2*ln 2) Gaussian FWHM factor
        fwhm_wl = fwhm_pix * dwl_per_pix
        fwhm_vel = fwhm_wl / centroid_wl * 3e8
        peak_counts = tmp['scale_value']
        if not tmp['fit_successful']:
            # Blank out all derived quantities for failed fits.
            fwhm_pix = np.NaN
            fwhm_wl = np.NaN
            fwhm_vel = np.NaN
            peak_counts = np.NaN
        out1 = OrderedDict()
        out1['fit_output'] = tmp
        out1['centroid_pix'] = centroid_pix
        out1['centroid_wl'] = centroid_wl
        out1['fwhm_pix'] = fwhm_pix
        out1['fwhm_wl'] = fwhm_wl
        out1['snr_peak'] = np.sqrt(peak_counts)
        out1['prec_est'] = 0.4 * fwhm_vel / (np.sqrt(fwhm_pix) * np.sqrt(peak_counts))
        #print(mi)
        #if mi == 89:
        #    print(out1)
        out[mi] = out1
    return(out)
def redshift(x, vo=0., ve=0.,def_wlog=False):
    """
    Doppler shift a wavelength array.
    Parameters
    ----------
    x : float or ndarray of float
        The wavelengths to be shifted.
    vo : optional, float
        The velocity of the observer [m/s]. (the default is 0.)
    ve : optional, float
        The velocity of the emitter [m/s]. (the default is 0.)
    def_wlog : bool, optional
        Is the input in logarithmic wavelength? (the default is False)
    Returns
    -------
    float or ndarray of float
        The emitted wavelength(s).
    """
    # A NaN observer velocity is treated as zero rather than propagated.
    if np.isnan(vo):
        vo = 0
    c = scipy.constants.c
    shift_factor = (1.0 + vo / c) / (1.0 + ve / c)
    if def_wlog:
        # logarithmic wavelengths: multiplication becomes addition of the log
        return x + np.log(shift_factor)
    return x * shift_factor
def datetime_avg(dates):
    ''' Return the average time for a list of datetime objects
    Parameters
    ----------
    dates : list of datetimes
    Returns
    -------
    datetime or None
        Average datetime, or None for an empty list
    '''
    if not dates:
        return None
    # Average the offsets from a fixed epoch, then shift back.
    epoch = datetime.datetime(1900, 1, 1)
    total_offset = sum((d - epoch for d in dates), datetime.timedelta())
    return epoch + total_offset / len(dates)
def getData(dataObj,fiber,choice,justext=False):
    """ Helper function to retrieve level 1 data from neidData object
    Parameters
    ----------
    neidDataObj : neidData instance
        neidData object to be parsed
    fiber: str {SCI, CAL, or SKY}
        fiber to be returned
    choice: str {flux, wave, var}
        type of data array to be returned
    justext: optional, bool
        only return extension number for fiber. Default is False
    Return
    ------
    register name or data value, depending on justname
    """
    # Validate inputs before mapping them to an extension number.
    cf = fiber.upper()
    cc = choice.lower()
    if cf not in ('SCI', 'CAL', 'SKY'):
        raise ValueError('fiber must be SCI / CAL / SKY, not "{}"'.format(fiber))
    if cc not in ('flux', 'var', 'wave'):
        raise ValueError('choice must be flux / var / wave, not "{}"'.format(choice))
    # Fixed (fiber, choice) -> FITS extension layout.
    ext_table = {
        ('SCI', 'flux'): 1, ('SKY', 'flux'): 2, ('CAL', 'flux'): 3,
        ('SCI', 'var'): 4, ('SKY', 'var'): 5, ('CAL', 'var'): 6,
        ('SCI', 'wave'): 7, ('SKY', 'wave'): 8, ('CAL', 'wave'): 9,
    }
    ext = ext_table[(cf, cc)]
    if justext:
        return(ext)
    return(dataObj[ext].data)
def pool_measure_velshift(cmdset):
    """ Parallelization wrapper for measure_velshift
    This function translates a list of dicts into the appropriate kw/args for
    the measure_velshift function. This facilitates parallelization with the
    method adopted in the NEID-DRP.
    Parameters
    ----------
    cmdset : dict
        Dictionary with keys "Precal", "Postcal" and "Wcal" (capitalized,
        matching the lookups below), holding the two fit collections and
        the wavecal array respectively.
    Returns
    -------
    velocity_shift : list of float
        List of velocity shifts corresponding to input line fits,
        as returned by measure_velshift.
    """
    out = measure_velshift(cmdset['Precal'],cmdset['Postcal'],cmdset['Wcal'])
    return out
def measure_velshift(fits1,fits2,wave,pix_to_wavl_funcs=None):
    ''' Measure the velocity shift between two collections of mode fits
    Assuming a certain wavelength solution, translate the pixel position
    change of each mode into a velocity.
    Parameters
    ----------
    fits1 : Centroid dictionary
        indexed as dict[order_index][mode_index]; stored value is the pixel centroid
    fits2 : Centroid dictionary
        indexed as dict[order_index][mode_index]; stored value is the pixel centroid
    wave : ndarray
        Wavecal array (used only to build default interpolators)
    pix_to_wavl_funcs : optional wavelength solution function
        How to translate pixel to wavelength.
        If not provided, a cubic spline is used.
    Returns
    -------
    List of float
        List of velocity differences corresponding to each line fit in the "fits" inputs.
    Notes
    -----
    This function requires that the two fit dictionaries have the same indexing. If order or mode index
    keys are missing, the function will return None.
    '''
    if pix_to_wavl_funcs is None:
        # Default: one cubic spline per order, built from the wavecal array.
        xx = np.arange(9216)
        pix_to_wavl_funcs = OrderedDict()
        for oi in fits1.keys():
            pix_to_wavl_funcs[oi] = scipy.interpolate.interp1d(xx,wave[oi],kind='cubic',bounds_error=False)
    allvals = []
    for oi in fits1:
        if oi not in fits2.keys():
            logging.warning('Order index {} is not shared between the fit sets being compared'.format(oi))
            return(None)
        for mi in fits1[oi]:
            if mi not in fits2[oi].keys():
                logging.warning('Order index {}, mode {} is not shared between the fit sets being compared'.format(oi,mi))
                return(None)
            wavl_of = pix_to_wavl_funcs[oi]
            wavl_before = wavl_of(fits1[oi][mi])
            wavl_after = wavl_of(fits2[oi][mi])
            # fractional wavelength change times c (~3e8 m/s) -> velocity
            allvals.append((wavl_after - wavl_before) / wavl_before * 3e8)
    return(allvals)
def combine_peak_locations(fitlist):
    ''' Combine fit of pixel centroids for etalon or comb
    Parameters
    ----------
    fitlist : list of fit dictionaries
        Each fit dictionary is indexed as dict[order_index][mode_index]
        and the stored value (itself also a dict) must have the 'centroid_pix' key
    Returns
    -------
    OrderedDict
        dict[order_index][mode_index] -> robust (biweight) combined centroid
    '''
    combined = OrderedDict()
    # The first entry defines which orders/modes are combined.
    for oi, modes in fitlist[0].items():
        combined[oi] = OrderedDict()
        for mi in modes.keys():
            centroids = [fits[oi][mi]['centroid_pix'] for fits in fitlist]
            combined[oi][mi] = astropy.stats.biweight_location(centroids, ignore_nan=True)
    return(combined)
def getLFCf0offset(isot,offsetfile):
    ''' Return the manual f0 LFC offset
    This function reads a master file that has as columns:
    effective_date_1[isot] f0_offset_1[hz]
    effective_date_2[isot] f0_offset_2[hz]
    interpreted as time between effective_date_1 - effective_date_2 has f0_offset_1
    (i.e. f0 offset for that row applies beginning at the effective date)
    Assumption is that last row on the table applies currently
    and there is a 0th row that precedes all observations.
    Parameters
    ----------
    isot : str [isot]
        Date of current wavecal.
    offsetfile : str
        Path to file described above.
    Returns
    -------
    f0_offset : float
        LFC offset frequency correction corresponding to input date.
    Raises
    ------
    ValueError
        If no effective date in the table precedes `isot`.
    '''
    offsetTable = np.genfromtxt(offsetfile,
                                names=('isot','offset'),
                                dtype=[('isot','U19'),('offset',float)],
                                delimiter=None)
    this_dt = astropy.time.Time(isot).to_datetime()
    # Walk the (chronologically ordered) table and keep the last offset
    # whose effective date precedes the requested date.
    offsetUse = None
    for i in range(len(offsetTable)):
        compare_dt = astropy.time.Time(offsetTable['isot'][i]).to_datetime()
        if compare_dt < this_dt:
            offsetUse = offsetTable['offset'][i]
        else:
            break
    if offsetUse is None:
        # Previously this fell through to an opaque UnboundLocalError.
        raise ValueError('No offset effective date in {} precedes {}'.format(offsetfile, isot))
    return(offsetUse)
| 34,164 | 10,936 |
import json
import os
from itertools import chain, groupby
from operator import itemgetter, sub
from urllib.parse import urlparse
import git
import numpy as np
import validators
from mdutils.mdutils import MdUtils
# Name of the aggregate learning repository (not referenced below —
# presumably consumed by other modules; TODO confirm).
LCS_REPO_NAME = 'learning-computer-science'
def walk_max_depth(top, maxdepth):
    """os.walk-like generator limited to *maxdepth* directory levels.

    Yields (top, dir_paths, file_paths); note that entries are full paths
    (from os.scandir), not bare names as in os.walk.
    """
    subdirs = []
    plain_files = []
    for entry in os.scandir(top):
        if entry.is_dir():
            subdirs.append(entry.path)
        else:
            plain_files.append(entry.path)
    yield top, subdirs, plain_files
    if maxdepth > 1:
        for child in subdirs:
            yield from walk_max_depth(child, maxdepth - 1)
def find_files():
    """Return the list of files to process.

    Scans ./repositories (two levels deep) for metadata.json files and
    returns {normalized_repo_dir: (absolute_metadata_path, metadata_dict)}.
    Note walk_max_depth yields full relative paths, not bare filenames.
    """
    result = {}
    root_dir = "./repositories"
    cwd = os.getcwd()
    #print(os.listdir(root_dir))
    for root, dirs, files in walk_max_depth(root_dir, 2):
        dirs.sort()
        for file in files:
            if file.endswith("metadata.json"):
                metadatafile = os.path.normpath(os.path.join(cwd, file))
                # FIX: use a context manager so the file handle is closed
                # (the previous open() call leaked one handle per repo).
                with open(metadatafile) as metadata_file:
                    metadata = json.load(metadata_file)
                result[os.path.normpath(root)] = (metadatafile, metadata)
    return result
def get_submodules(files):
    """Extend *files* with one entry per git submodule of each repository.

    For every repo dir in *files*, reads each submodule's metadata.json and
    maps its path to (submodule_url, metadata). Returns the merged dict
    (submodule entries override on key collision).
    """
    submodules_result = {}
    cwd = os.getcwd()
    for key, value in files.items():
        repo = git.Repo(key)
        for submodule in repo.submodules:
            path_to_submodule_part = os.path.normpath(
                os.path.join(key, submodule.path))
            path_to_metadata_file = os.path.normpath(
                os.path.join(cwd, path_to_submodule_part, 'metadata.json'))
            # FIX: context manager closes the handle (previously leaked).
            with open(path_to_metadata_file) as metadata_file:
                metadata = json.load(metadata_file)
            submodules_result[path_to_submodule_part] = (submodule.url,
                                                         metadata)
    return dict(
        chain.from_iterable(d.items() for d in (files, submodules_result)))
def get_data(files):
    """Flatten the {path: (url_or_path, metadata)} mapping into a list of
    {'type', 'name', 'url', ['reading_sub_header']} dicts for rendering."""
    entries = []
    for path, (url_or_path, metadata) in files.items():
        entry = {}
        entry['type'] = metadata['type']
        entry['name'] = metadata['name']
        # Submodule entries carry a real URL; local entries carry a file path.
        if validators.url(url_or_path) == True:
            parsed = urlparse(url_or_path)
            repo_name = parsed.path.split('/')[-1]
        else:
            repo_name = url_or_path.split(os.path.sep)[-2]
        entry['url'] = f'https://github.com/computer-science-engineering/{repo_name}'
        if entry['type'] == 'Reading':
            entry['reading_sub_header'] = get_reading_sub_header(metadata)
        entries.append(entry)
    return entries
def create_file(files):
    """Render repositories.md: an H1, then one H2 section per entry type."""
    data = get_data(files)
    # FIX: itertools.groupby only groups *consecutive* equal keys, so the
    # data must be sorted by the grouping key first or a type appearing in
    # several runs would produce duplicate sections.
    data.sort(key=itemgetter('type'))
    md_file = MdUtils(file_name='repositories')
    md_file.new_header(level=1, title='Repositories')
    grouped_by_type = groupby(data, key=itemgetter('type'))
    for key, value in grouped_by_type:
        value_sorted = sorted(value, key=lambda x: x['name'])
        md_file.new_header(level=2, title=key)
        if key == 'Reading':
            write_reading_entries(value_sorted, md_file)
        else:
            for item in value_sorted:
                write_item(item, md_file)
        md_file.new_line()
    md_file.create_md_file()
def write_reading_entries(data, md_file):
    """Write Reading items grouped under H3 sub-headers."""
    # FIX: groupby requires consecutive equal keys; sort by sub-header first
    # (None sub-headers ordered last) or identical sub-headers arriving
    # non-adjacently would each start a new section.
    data = sorted(data, key=lambda d: (d['reading_sub_header'] is None,
                                       d['reading_sub_header'] or ''))
    grouped_by_sub_heading = groupby(data,
                                     key=itemgetter('reading_sub_header'))
    for key, value in grouped_by_sub_heading:
        md_file.new_header(level=3, title=key)
        for item in value:
            write_item(item, md_file)
def write_item(item, md_file):
    """Append one markdown bullet linking to the item's repository."""
    link = md_file.new_inline_link(link=item['url'], text=item['name'])
    md_file.new_line('- ' + link)
def get_reading_sub_header(file):
    """Return the origin name with its 'Notes - ' prefix stripped, or None.

    None is returned when there is no non-empty origin name or the name
    does not contain 'Notes - '.
    """
    has_origin_name = ('origin' in file and 'name' in file['origin']
                       and len(file['origin']['name']) > 0)
    if not has_origin_name:
        return None
    origin_name = file['origin']['name']
    if 'Notes - ' not in origin_name:
        return None
    return origin_name.replace('Notes - ', '')
def main():
    """Entry point: scan repos, merge in submodule metadata, and render
    the repositories.md index."""
    files = find_files()
    files_including_submodules = get_submodules(files)
    create_file(files_including_submodules)
if __name__ == '__main__':
main()
| 4,215 | 1,341 |
# Calculator for calculating x and y intercepts
# NOTE(review): int() restricts coefficients to integers, and a zero
# coefficient would cause a ZeroDivisionError below — consider float()
# and a guard.
xaxis = int(input("Enter the x axis(excluding the variable): "))
yaxis = int(input("Enter the y axis(excluding the variable): "))
equality = int(input("What is the equation equal to: "))
# example 3x + 4y = 6
# x - intercept can be found out by supposing y = 0
# 3x = 6
# x = 6/3
# x = 2
def x_intercept():
    """Print the x-intercept (set y = 0, so x = equality / xaxis)."""
    value = equality / xaxis
    print("x-intercept = " + str(value))
def y_intercept():
    """Print the y-intercept (set x = 0, so y = equality / yaxis)."""
    value = equality / yaxis
    print("y-intercept = " + str(value))
# Print both intercepts for the entered equation.
x_intercept()
y_intercept()
#!/usr/bin/python
import RPi.GPIO as GPIO
import pygame
# Pin setup: BCM numbering; pin 18 drives an output, pin 7 reads a sensor
# with an internal pull-down.
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
from time import sleep
pygame.init()
currState = False
prevState = False
# Poll pin 7 ~10x/s; on any edge, pulse pin 18 and play the scare sound.
# NOTE(review): the Python-2 print statement below means this script only
# runs under Python 2 — confirm the target interpreter.
while True:
    try:
        sleep(0.1)
        prevState = currState
        currState = GPIO.input(7)
        if currState != prevState:
            newState = "HIGH" if currState else "LOW"
            print "GPIO pin %s is %s" % (7, newState)
            GPIO.output(18, True)
            sleep(1)
            pygame.mixer.music.load("/home/pi/halloween_scarer/zombie_roar.ogg")
            pygame.mixer.music.play()
            sleep(5)
            GPIO.output(18, False)
            sleep(5)
    except (KeyboardInterrupt, SystemExit):
        # Clean exit on Ctrl-C: release GPIO pins before quitting.
        GPIO.cleanup()
        print ("Keyboard stop")
        exit()
    except:
        # report error and proceed
        # NOTE(review): despite the comment this exits; the bare except also
        # hides the actual error — consider logging the exception.
        GPIO.cleanup()
        print ("FUBAR")
        exit()
| 953 | 345 |
# Category name -> numeric control code (token id); TODO confirm the id
# space these map into (consumer not visible here).
CONTROL_CODES = {
    "Questions": 456,
    "Funny": 58917,
    "Gaming": 40358,
    "Movies": 46273,
    "Science": 2090,
    "Politics": 16360,
    "Technologies": 32516,
    "Books": 6665,
    "Links": 63674,
    "Reviews": 41671,
    "News": 4256
}
| 253 | 152 |
from flask import send_from_directory
from sqlalchemy.exc import ProgrammingError
from emonitor.utils import Module
from emonitor.extensions import babel
from emonitor.modules.messages.messages import Messages
from emonitor.modules.messages.message_weather import WeatherWidget
from emonitor.modules.messages.message_base import MessageWidget
from emonitor.modules.messages.content_admin import getAdminContent, getAdminData
from emonitor.modules.messages.content_frontend import getFrontendContent, getFrontendData
# Module-level registry of externally provided message types.
exttypes = []
def addMessageType(item):
    """Add an external messagetype to the module-level `exttypes` registry."""
    exttypes.append(item)
class MessagesModule(Module):
    """emonitor 'messages' module: admin pages, frontend content and widgets.

    __init__ registers template paths, admin subnavigation, widgets, a static
    file route and translation keys, then runs the message init trigger.
    """
    info = dict(area=['admin', 'frontend', 'widget'], name='messages', path='messages', icon='fa-newspaper-o', version='0.1')
    def __repr__(self):
        return "messages"
    def __init__(self, app):
        Module.__init__(self, app)
        # add template path
        app.jinja_loader.searchpath.append("%s/emonitor/modules/messages/templates" % app.config.get('PROJECT_ROOT'))
        # subnavigation
        self.adminsubnavigation = [('/admin/messages', 'messages.main'), ('/admin/messages/types', 'messages.types')]
        self.widgets = [MessageWidget('messages', size=(4, 2), template='widget.message.messages.html'), WeatherWidget('weather', size=(5, 4), template='widget.message.weather.html')]
        # static folders: message-specific files come from the data dir,
        # everything else from the module's bundled inc/ folder
        @app.route('/messages/inc/<path:filename>')
        def messages_static(filename):
            if filename.startswith('message/'):
                filename = filename.split('/')
                return send_from_directory("{}messages/{}/".format(app.config.get('PATH_DATA'), filename[-2]), filename[-1])
            else:
                return send_from_directory("%s/emonitor/modules/messages/inc/" % app.config.get('PROJECT_ROOT'), filename)
        # translations: calls register the strings with babel for extraction
        babel.gettext(u'module.messages')
        babel.gettext(u'messages.main')
        babel.gettext(u'messages.types')
        babel.gettext(u'weather')
        babel.gettext(u'messages')
        babel.gettext(u'messagestate.1')  # activated
        babel.gettext(u'messagestate.0')  # deactivated
        babel.gettext(u'message.state.1')  # active
        babel.gettext(u'message.state.0')  # in-active
        # init
        # Do init script for messages at start and add active messages;
        # ProgrammingError is expected when the tables don't exist yet.
        try:
            Messages.initMessageTrigger()
        except ProgrammingError:
            pass
    def frontendContent(self):
        """Flag: this module contributes frontend content."""
        return 1
    def getFrontendContent(self, **params):
        """Delegate to the module-level frontend content renderer."""
        return getFrontendContent(**params)
    def getFrontendData(self):
        """Delegate to the module-level frontend data provider."""
        return getFrontendData(self)
    def getAdminContent(self, **params):
        """Delegate to the module-level admin content renderer."""
        return getAdminContent(self, **params)
    def getAdminData(self, **params):
        """Delegate to the module-level admin data provider."""
        return getAdminData(self, **params)
| 2,872 | 837 |
import imagehash
def matrix_slice(l, dim, start, end):
    """Collect elements of a row-major flattened matrix inside the
    half-open rectangle [start, end); start/end are (x, y) tuples."""
    x0, y0 = start
    x1, y1 = end
    picked = []
    print(f'len l is {len(l)}')
    for row in range(y0, y1):
        for col in range(x0, x1):
            idx = col + (row * dim)
            print(f'{col}, {row} ({idx}) = {l[idx]}')
            picked.append(l[idx])
    return picked
class SymbolicHash:
    """Image hash that tolerates small translations.

    Stores the full hash, its center crop, and nine shifted crops
    ("corners"); the distance operator takes the minimum over center-vs-crop
    comparisons so slightly shifted images still match.
    """
    def __init__(self, binary_array, dim=8):
        # Accept either a raw boolean array or an ImageHash wrapper.
        if isinstance(binary_array, imagehash.ImageHash):
            binary_array = binary_array.hash
        self.full = imagehash.ImageHash(binary_array)
        # (dim-2)x(dim-2) center crop.
        self.center = self._imagehash_slice(binary_array, (1, 1), (dim-1, dim-1))
        # All nine (dim-2)x(dim-2) crops shifted by -1/0/+1 in each direction.
        corners = [
            [(0, 0), (dim-2, dim-2)],
            [(0, 1), (dim-2, dim-1)],
            [(0, 2), (dim-2, dim)],
            [(1, 0), (dim-1, dim-2)],
            [(1, 1), (dim-1, dim-1)],
            [(1, 2), (dim-1, dim)],
            [(2, 0), (dim, dim-2)],
            [(2, 1), (dim, dim-1)],
            [(2, 2), (dim, dim)],
        ]
        self.corners = [self._imagehash_slice(binary_array, *c) for c in corners]
    def _imagehash_slice(self, binary_array, start, end):
        # NOTE(review): the first array axis is indexed with the X
        # coordinate here; for a square hash the distance metric is
        # unaffected, but confirm the intended row/column convention.
        startX, startY = start
        endX, endY = end
        res = binary_array[startX:endX, startY:endY]
        return imagehash.ImageHash(res)
    def __hash__(self):
        return hash(self.full)
    def __eq__(self, other):
        # Equality means zero minimum distance under the shifted comparison.
        return self - other == 0  # for now...
    def __sub__(self, other):
        # Distance: minimum over full-vs-full, center-vs-center, and
        # each center against the other's shifted crops.
        # compare both centers to everything
        # compare both full
        mind = self.full - other.full
        mind = min(mind, self.center - other.center)
        for c in other.corners:
            mind = min(mind, self.center - c)
        for c in self.corners:
            mind = min(mind, other.center - c)
        return mind
def symhash(img, size=8):
    """Compute a translation-tolerant SymbolicHash from an image's
    average hash of the given size."""
    baseline = imagehash.average_hash(img, size)
    return SymbolicHash(baseline, size)
| 1,950 | 689 |
# A program that applies a raise to an employee's salary; the user supplies
# the current salary and the raise percentage (prompts remain in Portuguese).
x = float(input('Veja o aumento do seu funcionário!\nDigite o salário dele: R$'))
y = float(input('Digite agora a porcetagem: '))
# New salary = salary + salary * (percentage / 100)
p = x+(x*(y/100))
print(f'O salário do seu funcionário vai para: R${p:.2f}')
import random
import uuid
import factory
from django.contrib.auth import get_user_model
from applications.enums import ApplicationStatus
from applications.models import Application, SummerVoucher
from companies.tests.factories import CompanyFactory
class UserFactory(factory.django.DjangoModelFactory):
    """Factory for the project's user model with unique usernames."""
    class Meta:
        model = get_user_model()
    # uuid4 guarantees a unique username per created user
    username = factory.Sequence(lambda n: "user_%s" % uuid.uuid4())
    email = factory.Faker("email")
    first_name = factory.Faker("first_name")
    last_name = factory.Faker("last_name")
class SummerVoucherFactory(factory.django.DjangoModelFactory):
    """Factory producing SummerVoucher rows with faked contact/employee data."""
    summer_voucher_id = factory.Faker("md5")
    contact_name = factory.Faker("name")
    contact_email = factory.Faker("email")
    work_postcode = factory.Faker("postcode")
    employee_name = factory.Faker("name")
    employee_school = factory.Faker("lexify", text="????? School")
    # pattern mimics a Finnish SSN shape: DDMMYY-NNNX
    employee_ssn = factory.Faker("bothify", text="######-###?")
    employee_phone_number = factory.Faker("phone_number")
    is_unnumbered_summer_voucher = factory.Faker("boolean")
    unnumbered_summer_voucher_reason = factory.Faker("text")
    class Meta:
        model = SummerVoucher
class ApplicationFactory(factory.django.DjangoModelFactory):
    """Factory producing an Application with a company and 1-3 vouchers."""
    company = factory.SubFactory(CompanyFactory)
    # Attach a random number (1-3) of related SummerVoucher rows.
    summer_vouchers = factory.RelatedFactoryList(
        SummerVoucherFactory,
        factory_related_name="application",
        size=lambda: random.randint(1, 3),
    )
    status = factory.Faker("random_element", elements=ApplicationStatus.values)
    invoicer_name = factory.Faker("name")
    invoicer_email = factory.Faker("email")
    invoicer_phone_number = factory.Faker("phone_number")
    class Meta:
        model = Application
| 1,744 | 531 |
from django.contrib import admin
from .models import Census
class CensusAdmin(admin.ModelAdmin):
    """Django admin configuration for Census entries: list both ids,
    filter by voting and search by voter."""
    list_display = ('voting_id', 'voter_id')
    list_filter = ('voting_id', )
    search_fields = ('voter_id', )
admin.site.register(Census, CensusAdmin)
| 258 | 87 |
import frappe
from datetime import *
@frappe.whitelist()
def add_leave_encashment(doc, method):
    """Salary-slip hook: compute leave encashment for the slip period.

    Counts approved leave days between the slip's start/end dates, then
    walks quarter thresholds (90/60/30/0 remaining days) applying each
    quarter's percentage of the daily base salary, and writes the result
    into the slip's 'Basic' earning row.
    """
    from_date = (datetime.strptime(doc.start_date, "%Y-%m-%d")).date()
    to_date = (datetime.strptime(doc.end_date, "%Y-%m-%d")).date()
    salary_structure = frappe.db.sql(""" SELECT * FROM `tabSalary Structure Assignment` WHERE salary_structure=%s and employee=%s""",(doc.salary_structure,doc.employee),as_dict=1)
    amount = 0
    leave = 0
    # Count each day in the period covered by an approved leave application.
    while (from_date <= to_date):
        leave_application = get_leave_application(from_date, doc.employee)
        if len(leave_application) > 0:
            leave += 1
        from_date = (from_date + timedelta(days=1))
    # reg = regular (worked) days, assuming a 30-day month.
    reg = 30 - leave
    doc.total_leaves = leave
    remaining_leaves = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """,doc.employee,as_dict=1)[0].leave_balance)
    quarters = [{"quarter":"First Quarter", "days": 90}, {"quarter":"Second Quarter", "days": 60}, {"quarter":"Third Quarter", "days": 30}, {"quarter":"Fourth Quarter", "days": 0}]
    for i in quarters:
        if remaining_leaves > i.get("days") and leave > 0:
            leave_deduction = remaining_leaves - i.get("days") #90 - 60
            if leave_deduction >= leave:
                # All remaining leave days fit within this quarter's band.
                leave_type = get_leave_type("Sick Leave", i.get("quarter"))
                amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave
                remaining_leaves = remaining_leaves - leave
                leave = 0
            else:
                # Only part of the leave fits in this band; spill the rest
                # to the next quarter.
                # NOTE(review): remaining_leaves is reduced by `leave` here
                # rather than `leave_deduction` — confirm this is intended.
                leave_type = get_leave_type("Sick Leave", i.get("quarter"))
                amount += ((leave_type[0].percentage / 100) * (salary_structure[0].base / 30)) * leave_deduction
                remaining_leaves = remaining_leaves - leave
                leave -= leave_deduction
    # Update an existing 'Basic' earning row, or append one if absent.
    add = True
    for ii in doc.earnings:
        if ii.__dict__['salary_component'] == "Basic":
            add = False
            ii.__dict__['amount'] = amount + ((salary_structure[0].base / 30) * reg)
    if amount > 0 and add:
        doc.append("earnings", {
            "salary_component": "Basic",
            "amount": amount + ((salary_structure[0].base / 30) * reg)
        })
    doc.remaining_leaves = remaining_leaves - leave
def update_leave_employee(leave,employee):
    """Persist the employee's remaining leave balance and commit."""
    params = (str(leave), employee)
    frappe.db.sql(""" UPDATE tabEmployee SET leave_balance=%s WHERE name=%s""", params)
    frappe.db.commit()
def get_leave_application(from_date, employee):
    """Return approved leave applications covering *from_date* for *employee*.

    Uses a parameterized query: the previous str.format() interpolation was
    open to SQL injection and broke on quotes in values.
    """
    query = """ SELECT * FROM `tabLeave Application` WHERE %s BETWEEN from_date and to_date and employee=%s and status=%s """
    return frappe.db.sql(query, (str(from_date), employee, "Approved"), as_dict=1)
def get_leave_balances(name):
    """Return Leave Balances child rows for *name*, newest idx first.

    Parameterized query replaces the injectable str.format() version.
    """
    query = """ SELECT * FROM `tabLeave Balances` WHERE parent=%s ORDER BY idx DESC """
    return frappe.db.sql(query, (name,), as_dict=1)
def get_leave_type(leave_type, quarter):
    """Return the quarter-percentage rows for the given leave type/quarter."""
    params = (leave_type, quarter)
    return frappe.db.sql(""" SELECT * FROM `tabLeave Type Quarter Percentages` AS LTQP WHERE parent=%s and LTQP.type=%s""", params, as_dict=True)
def submit_salary_slip(doc, method):
    """On submit, persist the leave balance computed on the slip."""
    update_leave_employee(doc.remaining_leaves, doc.employee)
def cancel_salary_slip(doc, method):
    """On cancel, restore the leave days this slip had consumed."""
    current_balance = int(frappe.db.sql(""" SELECT * FROM `tabEmployee` WHERE name=%s """, doc.employee, as_dict=1)[0].leave_balance)
    restored = current_balance + doc.total_leaves
    update_leave_employee(restored, doc.employee)
| 3,417 | 1,167 |
import os
import shutil
from setuptools import setup, find_packages
from setuptools.command.install import install
from setuptools.command.test import test
version = '0.1'
class InstallScripts(install):
    """
    Custom install command; currently just delegates to the default
    install (script copying happens post-setup at module level below).
    """
    def run(self):
        install.run(self)
class TestCommand(test):
    """
    Custom test command: runs the 'tests' directory via the py.test-3
    binary in a subshell (exit status is not propagated — TODO confirm
    that is acceptable for CI).
    """
    def run_tests(self):
        os.system("py.test-%s %s" % (3, "tests"))
# Package definition for atune_collector.
s = setup(name='atune_collector',
          version=version,
          description="The tool for data collection and analysis",
          classifiers=[],
          keywords='collection analysis',
          url='',
          license='MulanPSL-2.0',
          packages=find_packages(".", exclude=['tests']),
          data_files=[('/etc/atune_collector', ['atune_collector/collect_data.json',
                                                'atune_collector/plugin/configurator/bootloader/grub2.json'])],
          include_package_data=True,
          zip_safe=False,
          install_requires=['dict2xml'],
          cmdclass={
              'install': InstallScripts,
              'test': TestCommand,
          },
          )
# After an 'install' run, copy the scripts directory into the installed
# package location and restrict its permissions.
if 'install' in s.command_obj:
    src_dir = "atune_collector/scripts"
    dst_dir = os.path.join(s.command_obj['install'].install_lib, src_dir)
    shutil.rmtree(dst_dir, ignore_errors=True)
    shutil.copytree(src_dir, dst_dir)
    os.system("chmod -R 750 %s" % dst_dir)
| 1,427 | 442 |
import os
import random
import cv2
import detectron2.utils.comm as comm
from detectron2 import model_zoo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, build_detection_train_loader, get_detection_dataset_dicts, \
MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch, DefaultPredictor
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
from detectron2.utils.logger import setup_logger
from detectron2.data.datasets import register_coco_instances
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
import logging
import glob
import data
def setup(args):
    """Build a frozen detectron2 config from the CLI args.

    Loads the config file, sets dataloader/threshold defaults, points the
    weights at the trained model in OUTPUT_DIR, then applies any
    command-line overrides.
    """
    logging.basicConfig(level=logging.DEBUG)
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.DATALOADER.NUM_WORKERS = 8
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    # CLI overrides (args.opts) are applied last, then the config is frozen.
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
def main(args):
    """Visualize predictions for one image, a directory, or random val samples.

    Pressing 'q' stops iteration in the multi-image modes.
    """
    cfg = setup(args)
    predictor = DefaultPredictor(cfg)
    if args.img != '':
        # Single explicit image.
        show_prediction(args.img, predictor, args.scale)
    elif args.dir != '':
        # All jpg/png files in the given directory.
        files = []
        for ext in ['/*.jpg', '/*.png']:
            files.extend(glob.glob(args.dir + ext))
        for file in files:
            key = show_prediction(file, predictor, args.scale)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
    else:
        # Fallback: 300 random samples from the bizcard_val dataset.
        dataset_dicts = get_detection_dataset_dicts(['bizcard_val'])
        for d in random.sample(dataset_dicts, 300):
            key = show_prediction(d['file_name'], predictor, args.scale)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
def show_prediction(img_file, predictor, scale):
    """Run the predictor on one image and display the result.

    Args:
        img_file (str): Path to the image on disk.
        predictor: detectron2 ``DefaultPredictor``.
        scale (float): Visualization scale factor.

    Returns:
        int or None: Key code from ``cv2.waitKey``, or None when the image
        could not be read.
    """
    im = cv2.imread(img_file)
    if im is None:
        # cv2.imread returns None instead of raising on a bad path; without
        # this guard the BGR->RGB slice below would crash with a TypeError.
        logging.warning('Could not read image: %s', img_file)
        return None
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1],
                   metadata=MetadataCatalog.get('bizcard_val'),
                   scale=scale,
                   instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
                   )
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.imshow('', v.get_image()[:, :, ::-1])
    key = cv2.waitKey(0)
    return key
if __name__ == "__main__":
    # Extend the stock detectron2 argument parser with viewer options.
    cli = default_argument_parser()
    cli.add_argument('--img', default='', type=str)
    cli.add_argument('--dir', default='', type=str)
    cli.add_argument('--scale', default=0.4, type=float)
    parsed = cli.parse_args()
    print("Command Line Args:", parsed)
    # Hand off to detectron2's (possibly distributed) launcher.
    launch(
        main,
        parsed.num_gpus,
        num_machines=parsed.num_machines,
        machine_rank=parsed.machine_rank,
        dist_url=parsed.dist_url,
        args=(parsed,),
    )
| 3,002 | 984 |
import os
import pickle
import logging
import numpy as np
import pandas as pd
from scipy.stats import qmc
from romshake.sample import voronoi
from romshake.core.reduced_order_model import ReducedOrderModel
FNAME = 'rom_builder.pkl'
class NumericalRomBuilder():
    def __init__(self, folder, simulator, n_seeds_initial,
                 n_seeds_refine,
                 n_seeds_stop, samp_method,
                 bounds,
                 ranks=None, update_basis=False,
                 ml_regressors=None, rbf_kernels=None,
                 k_val=5, vor_kval_refine=None, vor_interp_refine=None):
        """Class for building reduced-order models from numerical simulations.
        Args:
            folder (str): Path associated with ROM data.
            simulator (object): Simulator for generating
                new data from parameters. Can be either analytic or launch
                numerical simulation jobs.
            n_seeds_initial (int): Number of seeds for the first iteration.
            n_seeds_refine (int): Number of seeds to generate with each
                iteration.
            n_seeds_stop (int): Maximum number of seeds.
            samp_method (str): Sampling refinement strategy.
            bounds (dict): Min/max values for the parameter space.
            ranks (list, optional): Ranks of the basis. Defaults to [].
            update_basis (bool, optional): Whether to update the basis with
                each iteration. Defaults to False.
            ml_regressors (dict, optional): Scikit-learn ML regressors.
                The keys are strings identifying the regressors and
                the values are Scikit learn regressors. Defaults to {}.
            rbf_kernels (list, optional): List of scipy rbf kernels (strings).
                Defaults to [].
            k_val (int, optional): k value for k-fold errors.
                Defaults to 5.
            vor_kval_refine (int, optional): k-value for Voronoi refinement.
                Defaults to None.
            vor_interp_refine (str, optional): interpolator (string) for
                Voronoi refinement. Defaults to None.
        """
        # None sentinels instead of mutable default arguments, so instances
        # never share the same list/dict objects.
        if ranks is None:
            ranks = []
        if ml_regressors is None:
            ml_regressors = {}
        if rbf_kernels is None:
            rbf_kernels = []
        self.folder = folder
        self.simulator = simulator
        self.n_seeds_initial = n_seeds_initial
        self.n_seeds_refine = n_seeds_refine
        self.n_seeds_stop = n_seeds_stop
        self.samp_method = samp_method
        self.bounds = bounds
        self.ranks = ranks
        self.update_basis = update_basis
        self.ml_regressors = ml_regressors
        self.rbf_kernels = rbf_kernels
        self.k_val = k_val
        self.vor_kval_refine = vor_kval_refine
        self.vor_interp_refine = vor_interp_refine
        # Dimensionality of the parameter space; fixed seed makes the
        # Halton sequence reproducible across runs.
        self.dim = len(self.bounds.keys())
        self.halton_sampler = qmc.Halton(d=self.dim, seed=0)
        if not os.path.exists(folder):
            os.makedirs(folder)
        else:
            raise ValueError(
                'A ROM builder has already been started in the folder %s.'
                ' You should load that instead.' % folder)
        # Set up the logging file
        logfile = os.path.join(folder, 'output.log')
        logging.basicConfig(
            filename=logfile, level=logging.DEBUG,
            format='%(asctime)s %(message)s')
        initial_params, initial_indices = self.draw_samples(
            'halton', n_seeds_initial)
        initial_params, initial_data = self.run_forward_models(
            initial_params, initial_indices)
        # Create ROM from the initial data/parameters
        self.rom = ReducedOrderModel(
            initial_params, initial_data,
            ranks, ml_regressors, rbf_kernels)
        # Get k-fold errors and store
        _, kf_error_means = self.rom.get_kfold_errors(self.k_val)
        self.error_history = {rank: {
            interp_name: [kf_error_means[rank][interp_name]]
            for interp_name in kf_error_means[rank].keys()} for rank in ranks}
        self.nsamples_history = [self.rom.P.shape[0]]
        # Iteratively update the reduced order model
        self.train()

    @classmethod
    def from_folder(cls, folder):
        """Load a previously pickled builder from its working folder."""
        with open(os.path.join(folder, FNAME), 'rb') as f:
            return pickle.load(f)

    def draw_samples(self, sampling_method, n_samps=None):
        """Draws new samples to feed into the reduced order model.
        Args:
            sampling_method (str): Sampling method.
            n_samps (int, optional): Number of samples to draw (for sampling
                methods that use it). Defaults to None.
        Returns:
            tuple: Tuple of the samples and the indices.
        """
        logging.info('Drawing new samples..')
        min_vals = np.array([val[0] for val in self.bounds.values()])
        max_vals = np.array([val[1] for val in self.bounds.values()])
        if sampling_method == 'halton':
            samples = qmc.scale(self.halton_sampler.random(
                n=n_samps), min_vals, max_vals)
        else:
            # Error-driven Voronoi refinement needs the current ROM's
            # k-fold errors.
            kf_errors, _ = self.rom.get_kfold_errors(self.k_val)
            samples = voronoi.voronoi_sample(
                self.rom.P, min_vals, max_vals, kf_errors, sampling_method,
                n_samps, self.vor_kval_refine, self.vor_interp_refine)
        # Discard any samples that we already have run.
        if hasattr(self, 'rom'):
            new_samples_idxs = [
                sample.tolist() not in self.rom.P.tolist()
                for sample in samples]
            samples = samples[new_samples_idxs]
        logging.info('Drew %s new samples.' % len(samples))
        # Store samples in a dataframe
        newdf = pd.DataFrame(samples, columns=list(self.bounds.keys()))
        if hasattr(self, 'df'):
            start_idx = max(self.df.index) + 1
            self.df = pd.concat([self.df, newdf]).reset_index(drop=True)
        else:
            self.df = newdf
            start_idx = 0
        indices = list(range(start_idx, start_idx + samples.shape[0]))
        return samples, indices

    def run_forward_models(self, params, indices):
        """Execute the forward models.
        Args:
            params (array): Array of the parameter values. Each row is a
                forward model and each column is a parameter.
            indices (list): List of the indices.
        Returns:
            tuple: Tuple contain the array of parameters that were succesfully
                executed and the associated data.
        """
        logging.info(
            'Running forward models for simulation indices %s' % indices)
        labels = list(self.bounds.keys())
        params_dict = {label: param for label, param in zip(labels, params.T)}
        return self.simulator.evaluate(
            params_dict, indices=indices, folder=self.folder)

    def train(self):
        """Run the training loop to build the reduced order model.
        """
        while self.rom.P.shape[0] < self.n_seeds_stop:
            logging.info(
                'Current number of simulations: %s', self.rom.P.shape[0])
            new_params, new_indices = self.draw_samples(
                self.samp_method, self.n_seeds_refine)
            new_params, new_data = self.run_forward_models(
                new_params, new_indices)
            self.rom.update(new_params, new_data, self.update_basis)
            _, kf_error_means = self.rom.get_kfold_errors(self.k_val)
            for rank in kf_error_means.keys():
                for interp_name in kf_error_means[rank].keys():
                    self.error_history[rank][interp_name].append(
                        kf_error_means[rank][interp_name])
            self.nsamples_history.append(self.rom.P.shape[0])
            # Save the updated ROM builder
            with open(os.path.join(self.folder, FNAME), 'wb') as outp:
                pickle.dump(self, outp)
        logging.info(
            'Finished training the ROM. Ended with %s simulations.' %
            self.rom.P.shape[0])
| 7,953 | 2,317 |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from torch.autograd import Variable
import math
import numpy as np
import random
import pdb
import pickle
import misc.utils as utils
from misc.CaptionModelBU import CaptionModel
from misc.transformer import Transformer, TransformerDecoder
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.detect_size = opt.detect_size # number of object classes
self.input_encoding_size = opt.input_encoding_size
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.seg_info_size = 50
self.fc_feat_size = opt.fc_feat_size+self.seg_info_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.seq_per_img = opt.seq_per_img
self.itod = opt.itod
self.att_input_mode = opt.att_input_mode
self.transfer_mode = opt.transfer_mode
self.test_mode = opt.test_mode
self.enable_BUTD = opt.enable_BUTD
self.w_grd = opt.w_grd
self.w_cls = opt.w_cls
self.num_sampled_frm = opt.num_sampled_frm
self.num_prop_per_frm = opt.num_prop_per_frm
self.att_model = opt.att_model
self.unk_idx = int(opt.wtoi['UNK'])
if opt.region_attn_mode == 'add':
self.alpha_net = nn.Linear(self.att_hid_size, 1)
elif opt.region_attn_mode == 'cat':
self.alpha_net = nn.Linear(self.att_hid_size*2, 1)
self.stride = 32 # downsizing from input image to feature map
self.t_attn_size = opt.t_attn_size
self.tiny_value = 1e-8
if self.enable_BUTD:
assert(self.att_input_mode == 'region')
self.pool_feat_size = self.att_feat_size
else:
self.pool_feat_size = self.att_feat_size+300+self.detect_size+1
self.min_value = -1e8
opt.beta = 1
self.beta = opt.beta
self.loc_fc = nn.Sequential(nn.Linear(5, 300),
nn.ReLU(),
nn.Dropout(inplace=True))
self.embed = nn.Sequential(nn.Embedding(self.vocab_size,
self.input_encoding_size), # det is 1-indexed
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
if self.transfer_mode in ('none', 'cls'):
self.vis_encoding_size = 2048
elif self.transfer_mode == 'both':
self.vis_encoding_size = 2348
elif self.transfer_mode == 'glove':
self.vis_encoding_size = 300
else:
raise NotImplementedError
self.vis_embed = nn.Sequential(nn.Embedding(self.detect_size+1,
self.vis_encoding_size), # det is 1-indexed
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)
)
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.seg_info_embed = nn.Sequential(nn.Linear(4, self.seg_info_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.att_embed = nn.ModuleList([nn.Sequential(nn.Linear(2048, self.rnn_size//2), # for rgb feature
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)),
nn.Sequential(nn.Linear(1024, self.rnn_size//2), # for motion feature
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))])
self.att_embed_aux = nn.Sequential(nn.BatchNorm1d(self.rnn_size),
nn.ReLU())
self.pool_embed = nn.Sequential(nn.Linear(self.pool_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.ctx2pool = nn.Linear(self.rnn_size, self.att_hid_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size)
if opt.obj_interact:
n_layers = 2
n_heads = 6
attn_drop = 0.2
self.obj_interact = Transformer(self.rnn_size, 0, 0,
d_hidden=int(self.rnn_size/2),
n_layers=n_layers,
n_heads=n_heads,
drop_ratio=attn_drop,
pe=False)
if self.att_model == 'transformer':
n_layers = 2
n_heads = 6
attn_drop = 0.2
print('initiailze language decoder transformer...')
self.cap_model = TransformerDecoder(self.rnn_size, 0, self.vocab_size, \
d_hidden = self.rnn_size//2, n_layers=n_layers, n_heads=n_heads, drop_ratio=attn_drop)
if opt.t_attn_mode == 'bilstm': # frame-wise feature encoding
n_layers = 2
attn_drop = 0.2
self.context_enc = nn.LSTM(self.rnn_size, self.rnn_size//2, n_layers, dropout=attn_drop, \
bidirectional=True, batch_first=True)
elif opt.t_attn_mode == 'bigru':
n_layers = 2
attn_drop = 0.2
self.context_enc = nn.GRU(self.rnn_size, self.rnn_size//2, n_layers, dropout=attn_drop, \
bidirectional=True, batch_first=True)
else:
raise NotImplementedError
self.ctx2pool_grd = nn.Sequential(nn.Linear(self.att_feat_size, self.vis_encoding_size), # fc7 layer
nn.ReLU(),
nn.Dropout(self.drop_prob_lm, inplace=True)
)
self.critLM = utils.LMCriterion(opt)
# initialize the glove weight for the labels.
# self.det_fc[0].weight.data.copy_(opt.glove_vg_cls)
# for p in self.det_fc[0].parameters(): p.requires_grad=False
# self.embed[0].weight.data.copy_(torch.cat((opt.glove_w, opt.glove_clss)))
# for p in self.embed[0].parameters(): p.requires_grad=False
# weights transfer for fc7 layer
with open('data/detectron_weights/fc7_w.pkl') as f:
fc7_w = torch.from_numpy(pickle.load(f))
with open('data/detectron_weights/fc7_b.pkl') as f:
fc7_b = torch.from_numpy(pickle.load(f))
self.ctx2pool_grd[0].weight[:self.att_feat_size].data.copy_(fc7_w)
self.ctx2pool_grd[0].bias[:self.att_feat_size].data.copy_(fc7_b)
if self.transfer_mode in ('cls', 'both'):
# find nearest neighbour class for transfer
with open('data/detectron_weights/cls_score_w.pkl') as f:
cls_score_w = torch.from_numpy(pickle.load(f)) # 1601x2048
with open('data/detectron_weights/cls_score_b.pkl') as f:
cls_score_b = torch.from_numpy(pickle.load(f)) # 1601x2048
assert(len(opt.itod)+1 == opt.glove_clss.size(0)) # index 0 is background
assert(len(opt.vg_cls) == opt.glove_vg_cls.size(0)) # index 0 is background
sim_matrix = torch.matmul(opt.glove_vg_cls/torch.norm(opt.glove_vg_cls, dim=1).unsqueeze(1), \
(opt.glove_clss/torch.norm(opt.glove_clss, dim=1).unsqueeze(1)).transpose(1,0))
max_sim, matched_cls = torch.max(sim_matrix, dim=0)
self.max_sim = max_sim
self.matched_cls = matched_cls
vis_classifiers = opt.glove_clss.new(self.detect_size+1, cls_score_w.size(1)).fill_(0)
self.vis_classifiers_bias = nn.Parameter(opt.glove_clss.new(self.detect_size+1).fill_(0))
vis_classifiers[0] = cls_score_w[0] # background
self.vis_classifiers_bias[0].data.copy_(cls_score_b[0])
for i in range(1, self.detect_size+1):
vis_classifiers[i] = cls_score_w[matched_cls[i]]
self.vis_classifiers_bias[i].data.copy_(cls_score_b[matched_cls[i]])
if max_sim[i].item() < 0.9:
print('index: {}, similarity: {:.2}, {}, {}'.format(i, max_sim[i].item(), \
opt.itod[i], opt.vg_cls[matched_cls[i]]))
if self.transfer_mode == 'cls':
self.vis_embed[0].weight.data.copy_(vis_classifiers)
else:
self.vis_embed[0].weight.data.copy_(torch.cat((vis_classifiers, opt.glove_clss), dim=1))
elif self.transfer_mode == 'glove':
self.vis_embed[0].weight.data.copy_(opt.glove_clss)
elif self.transfer_mode == 'none':
print('No knowledge transfer...')
else:
raise NotImplementedError
# for p in self.ctx2pool_grd.parameters(): p.requires_grad=False
# for p in self.vis_embed[0].parameters(): p.requires_grad=False
if opt.enable_visdom:
import visdom
self.vis = visdom.Visdom(server=opt.visdom_server, env='vis-'+opt.id)
def forward(self, segs_feat, seq, gt_seq, num, ppls, gt_boxes, mask_boxes, ppls_feat, frm_mask, sample_idx, pnt_mask, opt, eval_opt = {}):
if opt == 'MLE':
return self._forward(segs_feat, seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask)
elif opt == 'GRD':
return self._forward(segs_feat, seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask, True)
elif opt == 'sample':
seq, seqLogprobs, att2, sim_mat = self._sample(segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, eval_opt)
return Variable(seq), Variable(att2), Variable(sim_mat)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
return (Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()),
Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()))
def _grounder(self, xt, att_feats, mask, bias=None):
# xt - B, seq_cnt, enc_size
# att_feats - B, rois_num, enc_size
# mask - B, rois_num
#
# dot - B, seq_cnt, rois_num
B, S, _ = xt.size()
_, R, _ = att_feats.size()
if hasattr(self, 'alpha_net'):
# Additive attention for grounding
if self.alpha_net.weight.size(1) == self.att_hid_size:
dot = xt.unsqueeze(2) + att_feats.unsqueeze(1)
else:
dot = torch.cat((xt.unsqueeze(2).expand(B, S, R, self.att_hid_size),
att_feats.unsqueeze(1).expand(B, S, R, self.att_hid_size)), 3)
dot = F.tanh(dot)
dot = self.alpha_net(dot).squeeze(-1)
else:
# Dot-product attention for grounding
assert(xt.size(-1) == att_feats.size(-1))
dot = torch.matmul(xt, att_feats.permute(0,2,1).contiguous()) # B, seq_cnt, rois_num
if bias is not None:
assert(bias.numel() == dot.numel())
dot += bias
if mask.dim() == 2:
expanded_mask = mask.unsqueeze(1).expand_as(dot)
elif mask.dim() == 3: # if expanded already
expanded_mask = mask
else:
raise NotImplementedError
dot.masked_fill_(expanded_mask, self.min_value)
return dot
    def _forward(self, segs_feat, input_seq, gt_seq, ppls, gt_boxes, mask_boxes, num, ppls_feat, frm_mask, sample_idx, pnt_mask, eval_obj_ground=False):
        """Training/evaluation forward pass of the grounded captioner.

        When eval_obj_ground is False, returns the language-model,
        attention, grounding, and region-classification losses; when True,
        returns region-classification predictions plus per-step argmax
        attention and grounding indices.

        NOTE(review): tensor shapes below follow the original author's
        inline comments — verify against the data loader before relying
        on them.
        """
        seq = gt_seq[:, :self.seq_per_img, :].clone().view(-1, gt_seq.size(2)) # choose the first seq_per_img
        seq = torch.cat((Variable(seq.data.new(seq.size(0), 1).fill_(0)), seq), 1)
        input_seq = input_seq.view(-1, input_seq.size(2), input_seq.size(3)) # B*self.seq_per_img, self.seq_length+1, 5
        input_seq_update = input_seq.data.clone()
        batch_size = segs_feat.size(0) # B
        seq_batch_size = seq.size(0) # B*self.seq_per_img
        rois_num = ppls.size(1) # max_num_proposal of the batch
        state = self.init_hidden(seq_batch_size) # self.num_layers, B*self.seq_per_img, self.rnn_size
        rnn_output = []
        roi_labels = [] # store which proposal match the gt box
        att2_weights = []
        h_att_output = []
        max_grd_output = []
        frm_mask_output = []
        conv_feats = segs_feat
        # Mask out frames outside the [start, end) window given by sample_idx.
        sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
        for i in range(batch_size):
            sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
        fc_feats = torch.mean(segs_feat, dim=1)
        fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
            F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
        # pooling the conv_feats
        pool_feats = ppls_feat
        pool_feats = self.ctx2pool_grd(pool_feats)
        g_pool_feats = pool_feats
        # calculate the overlaps between the rois/rois and rois/gt_bbox.
        # apply both frame mask and proposal mask
        overlaps = utils.bbox_overlaps(ppls.data, gt_boxes.data, \
            (frm_mask | pnt_mask[:, 1:].unsqueeze(-1)).data)
        # visual words embedding
        vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(input_seq.type()))
        vis_word_embed = self.vis_embed(vis_word)
        assert(vis_word_embed.size(0) == self.detect_size+1)
        p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
            .expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
        if hasattr(self, 'vis_classifiers_bias'):
            bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
                .view(1,-1,1).expand(p_vis_word_embed.size(0), \
                p_vis_word_embed.size(1), g_pool_feats.size(1))
        else:
            bias = None
        # region-class similarity matrix
        sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
        sim_mat_static_update = sim_mat_static.view(batch_size, 1, self.detect_size+1, rois_num) \
            .expand(batch_size, self.seq_per_img, self.detect_size+1, rois_num).contiguous() \
            .view(seq_batch_size, self.detect_size+1, rois_num)
        sim_mat_static = F.softmax(sim_mat_static, dim=1)
        if self.test_mode:
            cls_pred = 0
        else:
            sim_target = utils.sim_mat_target(overlaps, gt_boxes[:,:,5].data) # B, num_box, num_rois
            sim_mask = (sim_target > 0)
            if not eval_obj_ground:
                # Region-classification loss: push matched-region similarity
                # toward 1.
                masked_sim = torch.gather(sim_mat_static, 1, sim_target)
                masked_sim = torch.masked_select(masked_sim, sim_mask)
                cls_loss = F.binary_cross_entropy(masked_sim, masked_sim.new(masked_sim.size()).fill_(1))
            else:
                # region classification accuracy
                sim_target_masked = torch.masked_select(sim_target, sim_mask)
                sim_mat_masked = torch.masked_select(torch.max(sim_mat_static, dim=1)[1].unsqueeze(1).expand_as(sim_target), sim_mask)
                cls_pred = torch.stack((sim_target_masked, sim_mat_masked), dim=1).data
        if not self.enable_BUTD:
            # Augment region features with normalized location and class
            # similarity features.
            loc_input = ppls.data.new(batch_size, rois_num, 5)
            loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
            loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
            loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
            label_feat = sim_mat_static.permute(0,2,1).contiguous()
            pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), \
                F.layer_norm(loc_feats, [loc_feats.size(-1)]), F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
        # replicate the feature to map the seq size.
        fc_feats = fc_feats.view(batch_size, 1, self.fc_feat_size)\
            .expand(batch_size, self.seq_per_img, self.fc_feat_size)\
            .contiguous().view(-1, self.fc_feat_size)
        pool_feats = pool_feats.view(batch_size, 1, rois_num, self.pool_feat_size)\
            .expand(batch_size, self.seq_per_img, rois_num, self.pool_feat_size)\
            .contiguous().view(-1, rois_num, self.pool_feat_size)
        g_pool_feats = g_pool_feats.view(batch_size, 1, rois_num, self.vis_encoding_size) \
            .expand(batch_size, self.seq_per_img, rois_num, self.vis_encoding_size) \
            .contiguous().view(-1, rois_num, self.vis_encoding_size)
        pnt_mask = pnt_mask.view(batch_size, 1, rois_num+1).expand(batch_size, self.seq_per_img, rois_num+1)\
            .contiguous().view(-1, rois_num+1)
        overlaps = overlaps.view(batch_size, 1, rois_num, overlaps.size(2)) \
            .expand(batch_size, self.seq_per_img, rois_num, overlaps.size(2)) \
            .contiguous().view(-1, rois_num, overlaps.size(2))
        # embed fc and att feats
        fc_feats = self.fc_embed(fc_feats)
        pool_feats = self.pool_embed(pool_feats)
        # object region interactions
        if hasattr(self, 'obj_interact'):
            pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation comsumptions.
        p_pool_feats = self.ctx2pool(pool_feats) # same here
        if self.att_input_mode in ('both', 'featmap'):
            conv_feats_splits = torch.split(conv_feats, 2048, 2)
            conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.att_embed_aux(conv_feats)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.context_enc(conv_feats)[0]
            conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
            conv_feats = conv_feats.view(batch_size, 1, self.t_attn_size, self.rnn_size)\
                .expand(batch_size, self.seq_per_img, self.t_attn_size, self.rnn_size)\
                .contiguous().view(-1, self.t_attn_size, self.rnn_size)
            p_conv_feats = self.ctx2att(conv_feats) # self.rnn_size (1024) -> self.att_hid_size (512)
        else:
            # dummy
            conv_feats = pool_feats.new(1,1).fill_(0)
            p_conv_feats = pool_feats.new(1,1).fill_(0)
        if self.att_model == 'transformer': # Masked Transformer does not support box supervision yet
            if self.att_input_mode == 'both':
                lm_loss = self.cap_model([conv_feats, pool_feats], seq)
            elif self.att_input_mode == 'featmap':
                lm_loss = self.cap_model([conv_feats, conv_feats], seq)
            elif self.att_input_mode == 'region':
                lm_loss = self.cap_model([pool_feats, pool_feats], seq)
            return lm_loss.unsqueeze(0), lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0), \
                lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0), lm_loss.new(1).fill_(0)
        elif self.att_model == 'topdown':
            # Teacher-forced decoding: one core step per output word.
            for i in range(self.seq_length):
                it = seq[:, i].clone()
                # break if all the sequences end
                if i >= 1 and seq[:, i].data.sum() == 0:
                    break
                xt = self.embed(it)
                if not eval_obj_ground:
                    roi_label = utils.bbox_target(mask_boxes[:,:,:,i+1], overlaps, input_seq[:,i+1], \
                        input_seq_update[:,i+1], self.vocab_size) # roi_label if for the target seq
                    roi_labels.append(roi_label.view(seq_batch_size, -1))
                    # use frame mask during training
                    box_mask = mask_boxes[:,0,:,i+1].contiguous().unsqueeze(1).expand((
                        batch_size, rois_num, mask_boxes.size(2)))
                    frm_mask_on_prop = (torch.sum((~(box_mask | frm_mask)), dim=2)<=0)
                    frm_mask_on_prop = torch.cat((frm_mask_on_prop.new(batch_size, 1).fill_(0.), \
                        frm_mask_on_prop), dim=1) | pnt_mask
                    output, state, att2_weight, att_h, max_grd_val, grd_val = self.core(xt, fc_feats, \
                        conv_feats, p_conv_feats, pool_feats, p_pool_feats, pnt_mask, frm_mask_on_prop, \
                        state, sim_mat_static_update)
                    frm_mask_output.append(frm_mask_on_prop)
                else:
                    output, state, att2_weight, att_h, max_grd_val, grd_val = self.core(xt, fc_feats, \
                        conv_feats, p_conv_feats, pool_feats, p_pool_feats, pnt_mask, pnt_mask, \
                        state, sim_mat_static_update)
                att2_weights.append(att2_weight)
                h_att_output.append(att_h) # the hidden state of attention LSTM
                rnn_output.append(output)
                max_grd_output.append(max_grd_val)
            seq_cnt = len(rnn_output)
            rnn_output = torch.cat([_.unsqueeze(1) for _ in rnn_output], 1) # seq_batch_size, seq_cnt, vocab
            h_att_output = torch.cat([_.unsqueeze(1) for _ in h_att_output], 1)
            att2_weights = torch.cat([_.unsqueeze(1) for _ in att2_weights], 1) # seq_batch_size, seq_cnt, att_size
            max_grd_output = torch.cat([_.unsqueeze(1) for _ in max_grd_output], 1)
            if not eval_obj_ground:
                frm_mask_output = torch.cat([_.unsqueeze(1) for _ in frm_mask_output], 1)
                roi_labels = torch.cat([_.unsqueeze(1) for _ in roi_labels], 1)
            decoded = F.log_softmax(self.beta * self.logit(rnn_output), dim=2) # text word prob
            decoded = decoded.view((seq_cnt)*seq_batch_size, -1)
            # object grounding
            h_att_all = h_att_output # hidden states from the Attention LSTM
            xt_clamp = torch.clamp(input_seq[:, 1:seq_cnt+1, 0].clone()-self.vocab_size, min=0)
            xt_all = self.vis_embed(xt_clamp)
            if hasattr(self, 'vis_classifiers_bias'):
                bias = self.vis_classifiers_bias[xt_clamp].type(xt_all.type()) \
                    .unsqueeze(2).expand(seq_batch_size, seq_cnt, rois_num)
            else:
                bias = 0
            if not eval_obj_ground:
                # att2_weights/ground_weights with both proposal mask and frame mask
                ground_weights = self._grounder(xt_all, g_pool_feats, frm_mask_output[:,:,1:], bias+att2_weights)
                lm_loss, att2_loss, ground_loss = self.critLM(decoded, att2_weights, ground_weights, \
                    seq[:, 1:seq_cnt+1].clone(), roi_labels[:, :seq_cnt, :].clone(), input_seq[:, 1:seq_cnt+1, 0].clone())
                return lm_loss.unsqueeze(0), att2_loss.unsqueeze(0), ground_loss.unsqueeze(0), cls_loss.unsqueeze(0)
            else:
                # att2_weights/ground_weights with proposal mask only
                ground_weights = self._grounder(xt_all, g_pool_feats, pnt_mask[:,1:], bias+att2_weights)
                return cls_pred, torch.max(att2_weights.view(seq_batch_size, seq_cnt, self.num_sampled_frm, \
                    self.num_prop_per_frm), dim=-1)[1], torch.max(ground_weights.view(seq_batch_size, \
                    seq_cnt, self.num_sampled_frm, self.num_prop_per_frm), dim=-1)[1]
    def _sample(self, segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt={}):
        """Sample a caption (greedy or multinomial) for each segment.

        Delegates to _sample_beam when beam_size > 1. Greedy mode avoids
        emitting the UNK token by falling back to the second-best word.

        Returns:
            (seq, seqLogprobs, att2_weights, sim_mat_static) for the
            'topdown' model; the transformer branch returns (seq, zeros,
            zeros) instead.
        """
        sample_max = opt.get('sample_max', 1)
        beam_size = opt.get('beam_size', 1)
        temperature = opt.get('temperature', 1.0)
        inference_mode = opt.get('inference_mode', True)
        batch_size = segs_feat.size(0)
        rois_num = ppls.size(1)
        if beam_size > 1:
            return self._sample_beam(segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt)
        conv_feats = segs_feat
        # Mask out frames outside the [start, end) window given by sample_idx.
        sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
        for i in range(batch_size):
            sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
        fc_feats = torch.mean(segs_feat, dim=1)
        fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
            F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
        pool_feats = ppls_feat
        pool_feats = self.ctx2pool_grd(pool_feats)
        g_pool_feats = pool_feats
        att_mask = pnt_mask.clone()
        # visual words embedding
        vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(fc_feats.type())).long()
        vis_word_embed = self.vis_embed(vis_word)
        assert(vis_word_embed.size(0) == self.detect_size+1)
        p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
            .expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
        if hasattr(self, 'vis_classifiers_bias'):
            bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
                .view(1,-1,1).expand(p_vis_word_embed.size(0), \
                p_vis_word_embed.size(1), g_pool_feats.size(1))
        else:
            bias = None
        # Region-class similarity matrix (see _forward).
        sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
        sim_mat_static_update = sim_mat_static
        sim_mat_static = F.softmax(sim_mat_static, dim=1)
        if not self.enable_BUTD:
            # Augment region features with normalized location and class
            # similarity features.
            loc_input = ppls.data.new(batch_size, rois_num, 5)
            loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
            loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
            loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
            label_feat = sim_mat_static.permute(0,2,1).contiguous()
            pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), F.layer_norm(loc_feats, \
                [loc_feats.size(-1)]), F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
        # embed fc and att feats
        pool_feats = self.pool_embed(pool_feats)
        fc_feats = self.fc_embed(fc_feats)
        # object region interactions
        if hasattr(self, 'obj_interact'):
            pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation comsumptions.
        p_pool_feats = self.ctx2pool(pool_feats)
        if self.att_input_mode in ('both', 'featmap'):
            conv_feats_splits = torch.split(conv_feats, 2048, 2)
            conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.att_embed_aux(conv_feats)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.context_enc(conv_feats)[0]
            conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
            p_conv_feats = self.ctx2att(conv_feats)
        else:
            conv_feats = pool_feats.new(1,1).fill_(0)
            p_conv_feats = pool_feats.new(1,1).fill_(0)
        if self.att_model == 'transformer':
            if self.att_input_mode == 'both':
                seq = self.cap_model([conv_feats, pool_feats], [], infer=True, seq_length=self.seq_length)
            elif self.att_input_mode == 'featmap':
                seq = self.cap_model([conv_feats, conv_feats], [], infer=True, seq_length=self.seq_length)
            elif self.att_input_mode == 'region':
                seq = self.cap_model([pool_feats, pool_feats], [], infer=True, seq_length=self.seq_length)
            return seq, seq.new(batch_size, 1).fill_(0), seq.new(batch_size, 1).fill_(0).long()
        elif self.att_model == 'topdown':
            state = self.init_hidden(batch_size)
            seq = []
            seqLogprobs = []
            att2_weights = []
            # One extra step for the <bos> input at t == 0.
            for t in range(self.seq_length + 1):
                if t == 0: # input <bos>
                    it = fc_feats.data.new(batch_size).long().zero_()
                elif sample_max:
                    # Greedy pick, but take the runner-up when the top word
                    # is UNK.
                    sampleLogprobs_tmp, it_tmp = torch.topk(logprobs.data, 2, dim=1)
                    unk_mask = (it_tmp[:,0] != self.unk_idx) # mask on non-unk
                    sampleLogprobs = unk_mask.float()*sampleLogprobs_tmp[:,0] + (1-unk_mask.float())*sampleLogprobs_tmp[:,1]
                    it = unk_mask.long()*it_tmp[:,0] + (1-unk_mask.long())*it_tmp[:,1]
                    it = it.view(-1).long()
                else:
                    if temperature == 1.0:
                        prob_prev = torch.exp(logprobs.data) # fetch prev distribution: shape Nx(M+1)
                    else:
                        # scale logprobs by temperature
                        prob_prev = torch.exp(torch.div(logprobs.data, temperature))
                    it = torch.multinomial(prob_prev, 1)
                    sampleLogprobs = logprobs.gather(1, Variable(it)) # gather the logprobs at sampled positions
                    it = it.view(-1).long() # and flatten indices for downstream processing
                xt = self.embed(Variable(it))
                if t >= 1:
                    seq.append(it) #seq[t] the input of t+2 time step
                    seqLogprobs.append(sampleLogprobs.view(-1))
                if t < self.seq_length:
                    rnn_output, state, att2_weight, att_h, _, _ = self.core(xt, fc_feats, conv_feats, \
                        p_conv_feats, pool_feats, p_pool_feats, att_mask, pnt_mask, state, \
                        sim_mat_static_update)
                    decoded = F.log_softmax(self.beta * self.logit(rnn_output), dim=1)
                    logprobs = decoded
                    att2_weights.append(att2_weight)
            seq = torch.cat([_.unsqueeze(1) for _ in seq], 1)
            seqLogprobs = torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
            att2_weights = torch.cat([_.unsqueeze(1) for _ in att2_weights], 1) # batch_size, seq_cnt, att_size
            return seq, seqLogprobs, att2_weights, sim_mat_static
    def _sample_beam(self, segs_feat, ppls, num, ppls_feat, sample_idx, pnt_mask, opt={}):
        """Decode captions with beam search, also returning attention traces.

        NOTE(review): ``opt={}`` is a mutable default argument; it is only
        read here so it is harmless, but callers should pass their own dict.

        Returns:
            (seq, seqLogprobs, att2), each transposed to (batch, seq_length).
        """
        batch_size = ppls.size(0)
        rois_num = ppls.size(1)
        beam_size = opt.get('beam_size', 10)
        conv_feats = segs_feat
        # Mask is 1 outside the sampled segment window [start, end) per batch
        # element; those positions are zeroed out after context encoding below.
        sample_idx_mask = conv_feats.new(batch_size, conv_feats.size(1), 1).fill_(1).byte()
        for i in range(batch_size):
            sample_idx_mask[i, sample_idx[i,0]:sample_idx[i,1]] = 0
        # Global feature: mean-pooled segment features concatenated with the
        # embedded segment-position info (columns 3:7 of ``num``).
        fc_feats = torch.mean(segs_feat, dim=1)
        fc_feats = torch.cat((F.layer_norm(fc_feats, [self.fc_feat_size-self.seg_info_size]), \
            F.layer_norm(self.seg_info_embed(num[:, 3:7].float()), [self.seg_info_size])), dim=-1)
        pool_feats = ppls_feat
        pool_feats = self.ctx2pool_grd(pool_feats)
        g_pool_feats = pool_feats
        # visual words embedding
        vis_word = Variable(torch.Tensor(range(0, self.detect_size+1)).type(fc_feats.type())).long()
        vis_word_embed = self.vis_embed(vis_word)
        assert(vis_word_embed.size(0) == self.detect_size+1)
        p_vis_word_embed = vis_word_embed.view(1, self.detect_size+1, self.vis_encoding_size) \
            .expand(batch_size, self.detect_size+1, self.vis_encoding_size).contiguous()
        if hasattr(self, 'vis_classifiers_bias'):
            bias = self.vis_classifiers_bias.type(p_vis_word_embed.type()) \
                .view(1,-1,1).expand(p_vis_word_embed.size(0), \
                p_vis_word_embed.size(1), g_pool_feats.size(1))
        else:
            bias = None
        # Static similarity between every visual word and every region proposal.
        sim_mat_static = self._grounder(p_vis_word_embed, g_pool_feats, pnt_mask[:,1:], bias)
        sim_mat_static_update = sim_mat_static
        sim_mat_static = F.softmax(sim_mat_static, dim=1)
        if not self.enable_BUTD:
            # Location feature: normalized box coords (assumes 720px frames —
            # TODO confirm) plus normalized frame index.
            loc_input = ppls.data.new(batch_size, rois_num, 5)
            loc_input[:,:,:4] = ppls.data[:,:,:4] / 720.
            loc_input[:,:,4] = ppls.data[:,:,4]*1./self.num_sampled_frm
            loc_feats = self.loc_fc(Variable(loc_input)) # encode the locations
            label_feat = sim_mat_static.permute(0,2,1).contiguous()
            pool_feats = torch.cat((F.layer_norm(pool_feats, [pool_feats.size(-1)]), F.layer_norm(loc_feats, [loc_feats.size(-1)]), \
                F.layer_norm(label_feat, [label_feat.size(-1)])), 2)
        # embed fc and att feats
        pool_feats = self.pool_embed(pool_feats)
        fc_feats = self.fc_embed(fc_feats)
        # object region interactions
        if hasattr(self, 'obj_interact'):
            pool_feats = self.obj_interact(pool_feats)
        # Project the attention feats first to reduce memory and computation comsumptions.
        p_pool_feats = self.ctx2pool(pool_feats)
        if self.att_input_mode in ('both', 'featmap'):
            conv_feats_splits = torch.split(conv_feats, 2048, 2)
            conv_feats = torch.cat([m(c) for (m,c) in zip(self.att_embed, conv_feats_splits)], dim=2)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.att_embed_aux(conv_feats)
            conv_feats = conv_feats.permute(0,2,1).contiguous() # inconsistency between Torch TempConv and PyTorch Conv1d
            conv_feats = self.context_enc(conv_feats)[0]
            conv_feats = conv_feats.masked_fill(sample_idx_mask, 0)
            p_conv_feats = self.ctx2att(conv_feats)
        else:
            # Dummy placeholders when the feature map is not attended over.
            conv_feats = pool_feats.new(1,1).fill_(0)
            p_conv_feats = pool_feats.new(1,1).fill_(0)
        # Per-beam offsets, used by beam_search to index flattened proposals.
        vis_offset = (torch.arange(0, beam_size)*rois_num).view(beam_size).type_as(ppls.data).long()
        roi_offset = (torch.arange(0, beam_size)*(rois_num+1)).view(beam_size).type_as(ppls.data).long()
        # Output buffers, (seq_length, batch); att2 uses -1 for "no region".
        seq = ppls.data.new(self.seq_length, batch_size).zero_().long()
        seqLogprobs = ppls.data.new(self.seq_length, batch_size).float()
        att2 = ppls.data.new(self.seq_length, batch_size).fill_(-1).long()
        self.done_beams = [[] for _ in range(batch_size)]
        # Beam search runs one batch element at a time; all per-sample tensors
        # are expanded along dim 0 to beam_size.
        for k in range(batch_size):
            state = self.init_hidden(beam_size)
            beam_fc_feats = fc_feats[k:k+1].expand(beam_size, fc_feats.size(1))
            beam_pool_feats = pool_feats[k:k+1].expand(beam_size, rois_num, self.rnn_size).contiguous()
            if self.att_input_mode in ('both', 'featmap'):
                beam_conv_feats = conv_feats[k:k+1].expand(beam_size, conv_feats.size(1), self.rnn_size).contiguous()
                beam_p_conv_feats = p_conv_feats[k:k+1].expand(beam_size, conv_feats.size(1), self.att_hid_size).contiguous()
            else:
                beam_conv_feats = beam_pool_feats.new(1,1).fill_(0)
                beam_p_conv_feats = beam_pool_feats.new(1,1).fill_(0)
            beam_p_pool_feats = p_pool_feats[k:k+1].expand(beam_size, rois_num, self.att_hid_size).contiguous()
            beam_ppls = ppls[k:k+1].expand(beam_size, rois_num, 7).contiguous()
            beam_pnt_mask = pnt_mask[k:k+1].expand(beam_size, rois_num+1).contiguous()
            # Feed <bos> (index 0) for the first step.
            it = fc_feats.data.new(beam_size).long().zero_()
            xt = self.embed(Variable(it))
            beam_sim_mat_static_update = sim_mat_static_update[k:k+1].expand(beam_size, self.detect_size+1, rois_num)
            rnn_output, state, att2_weight, att_h, _, _ = self.core(xt, beam_fc_feats, beam_conv_feats,
                beam_p_conv_feats, beam_pool_feats, beam_p_pool_feats, beam_pnt_mask, beam_pnt_mask,
                state, beam_sim_mat_static_update)
            assert(att2_weight.size(0) == beam_size)
            # Attended region for the <bos> step, taken from the first beam.
            att2[0, k] = torch.max(att2_weight, 1)[1][0]
            self.done_beams[k] = self.beam_search(state, rnn_output, beam_fc_feats, beam_conv_feats, beam_p_conv_feats, \
                beam_pool_feats, beam_p_pool_feats, beam_sim_mat_static_update, beam_ppls, beam_pnt_mask, vis_offset, roi_offset, opt)
            seq[:, k] = self.done_beams[k][0]['seq'].cuda() # the first beam has highest cumulative score
            seqLogprobs[:, k] = self.done_beams[k][0]['logps'].cuda()
            att2[1:, k] = self.done_beams[k][0]['att2'][1:].cuda()
        return seq.t(), seqLogprobs.t(), att2.t()
| 37,610 | 13,559 |
# Front-office (public-facing) views.
from flask.views import MethodView
from apps.front.forms import SendSmsCodeForm,SignupFrom,FindpwdFrom,SendCodeForm,AddPostForm,SigninFrom
from flask import Blueprint,make_response
from flask import render_template,session
from flask import views,request,jsonify
import string,random
from apps.common.baseResp import *
import json
from dysms_python.demo_sms_send import send_sms
from apps.common.captcha.xtcaptcha import Captcha
from io import BytesIO
from apps.common.memcachedUtil import saveCache,delete,getCache
from apps.front.models import *
from apps.common.models import Banner,Board,Post
from functools import wraps
from config import FRONT_USER_ID
from flask import redirect
from flask import url_for
#
# Blueprint that groups all public ("front") routes; registered by the app factory.
bp = Blueprint('front',__name__)
def lonigDecotor(func):
    """View decorator that requires a logged-in front user.

    Redirects anonymous visitors to the sign-in page; otherwise runs the
    wrapped view unchanged.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        if session.get(FRONT_USER_ID, None):
            # Logged in: run the wrapped view.
            return func(*args, **kwargs)
        # Not logged in: send the visitor to the sign-in page.
        return redirect(location=url_for("front.signin"))
    return inner
@bp.route("/")
def loginView():
    """Front page: top-4 banners by priority plus all boards and posts."""
    context = {
        'banners': Banner.query.order_by(Banner.priority.desc()).limit(4),
        'boards': Board.query.all(),
        'posts': Post.query.all(),
    }
    return render_template("front/index.html", **context)
class Signup(MethodView):
    # NOTE(review): this class is DEAD CODE — it is shadowed by the second
    # ``class Signup`` defined immediately below, so this Referer-aware GET
    # handler is never registered. Either delete it or merge the Referer
    # handling into the later definition.
    def get(self):
        # 从那个页面点击的注册按钮 (Referer: http://127.0.0.1:9000/signin/)
        # Remember the page the user came from so we can return there.
        location = request.headers.get("Referer")
        if not location : # 如果直接输入的注册的连接,location为空
            location = '/'
        context = {
            'location':location
        }
        return render_template("front/signup.html",**context)
class Signup(MethodView):
    """Registration view (GET renders the form, POST creates the account)."""

    def get(self):
        return render_template("front/signup.html")

    def post(self):
        # BUG FIX: registration must be validated with the signup form, not
        # the signin form — ``SignupFrom`` was imported but never used, so
        # signup-specific checks (e.g. the SMS verification code) were skipped.
        fm = SignupFrom(formdata=request.form)
        if fm.validate():
            # Persist the new user.
            u = FrontUser(telephone=fm.telephone.data,
                          username=fm.username.data,
                          password=fm.password.data)
            db.session.add(u)
            db.session.commit()
            # Registration succeeded: drop the cached SMS code for this phone.
            delete(fm.telephone.data)
            return jsonify(respSuccess("注册成功,真不容易啊"))
        else:
            return jsonify(respParamErr(fm.err))
@bp.route("/send_sms_code/",methods=['post'])
def sendSMSCode():
    """Generate a 4-digit SMS code, send it via Aliyun, cache it for 30 min."""
    fm = SendSmsCodeForm(formdata=request.form)
    if not fm.validate():
        return jsonify(respParamErr(fm.err))
    # Draw 4 distinct digits as the verification code.
    code = ''.join(random.sample(string.digits, 4))
    # send_sms returns raw bytes, e.g.
    # b'{"Message":"OK","RequestId":"...","BizId":"...","Code":"OK"}'
    raw = send_sms(phone_numbers=fm.telephone.data, smscode=code)
    if json.loads(raw.decode("utf-8"))['Code'] == 'OK':
        # Cache the code under the phone number with a 30-minute TTL.
        saveCache(fm.telephone.data, code, 30 * 60)
        return jsonify(respSuccess("短信验证码发送成功,请查收"))
    # Delivery failed.
    return jsonify(respParamErr("请检查网络"))
@bp.route("/img_code/")
def ImgCode():
    """Render a captcha image and cache its text for 60 seconds."""
    # Captcha draws random text onto an image (special font, lines, noise).
    text, img = Captcha.gene_code()
    print(text)
    stream = BytesIO()
    img.save(stream, 'png')
    stream.seek(0)  # rewind so read() returns the whole image
    # NOTE(review): cached with the text itself as the key — confirm the
    # verifying side looks it up the same way.
    saveCache(text, text, 60)
    resp = make_response(stream.read())
    resp.content_type = "image/png"
    return resp
class Singnin(MethodView):
    """Sign-in view (class name typo preserved: it is referenced below)."""

    def get(self):
        return render_template("front/signin.html")

    def post(self):
        fm = SigninFrom(formdata=request.form)
        if fm.validate():
            # Look up the account by phone number.
            user = FrontUser.query.filter(FrontUser.telephone == fm.telephone.data).first()
            if not user:
                return jsonify(respParamErr("未注册"))
            # Check the password hash.
            if user.checkPwd(fm.password.data):
                # BUG FIX: record the login in the session. Without this,
                # lonigDecotor always redirects and Addpost.post raises a
                # KeyError, because nothing ever set FRONT_USER_ID.
                session[FRONT_USER_ID] = user.id
                return jsonify(respSuccess("登录成功"))
            else:
                return jsonify(respParamErr("密码错误"))
        else:
            return jsonify(respParamErr(fm.err))
class Addpost(views.MethodView):
    """Create a new post; restricted to logged-in users."""

    decorators = [lonigDecotor]

    def get(self):
        # All boards, for the board-selection widget of the form.
        return render_template("front/addpost.html", boards=Board.query.all())

    def post(self):
        fm = AddPostForm(formdata=request.form)
        if not fm.validate():
            print(respParamErr(fm.err))
            return jsonify(respParamErr(fm.err))
        # Persist the post, attributed to the logged-in user.
        post = Post(title=fm.title.data, content=fm.content.data,
                    board_id=fm.boarder_id.data,
                    user_id=session[FRONT_USER_ID])
        db.session.add(post)
        db.session.commit()
        return jsonify(respSuccess("发布成功"))
# Route registrations for the class-based views defined above.
bp.add_url_rule("/addpost/",endpoint='addpost',view_func=Addpost.as_view('addpost'))
bp.add_url_rule("/signin/",endpoint='signin',view_func=Singnin.as_view('signin'))
bp.add_url_rule("/signup/",endpoint='signup',view_func=Signup.as_view('signup'))
# SMS verification code workflow:
#   1. Register an Aliyun account
#   2. Apply for an AccessKey
#   3. Apply for an SMS signature and template
#   4. Download the Python demo
#   5. Adapt demo_sms_send.py from the demo
#   6. Call it from this project
# Image captcha workflow:
#   1. Generate the captcha image with PIL
#   2. Return it to the client
#   3. Swap the <img> src via JS to refresh the captcha
"""This module provides general helpers for logging purposes.

Basic operations are:
* info
* error

With these operations it is easier to track where a message was printed from.
"""
import os
import inspect
from datetime import datetime
import sublime
__LOG = None
'''
Called automatically by ST3 once the plugin is loaded.
Required because of the async call: sublime.* must not be used from the
main (import-time) routine.
'''
__SETTING_KEY = "rainmeter_enable_logging"
def plugin_loaded():
    """Called by Sublime Text when the plugin API is ready to use."""
    settings = __load_settings()
    # Re-read the flag whenever the user toggles it in the settings file.
    settings.add_on_change(__SETTING_KEY, __load_settings)
    # FIX: corrected misspelled log message ("succesfully").
    info("Logger successfully loaded.")
def __load_settings():
    """(Re)load the plugin settings and refresh the module-level logging flag.

    Returns the settings object so callers can attach change listeners.
    """
    settings = sublime.load_settings("Rainmeter.sublime-settings")
    # Cache the flag at module level so info() can check it cheaply.
    global __LOG
    __LOG = settings.get(__SETTING_KEY, False)
    return settings
def info(message):
    """Log *message* at info level, tagged with the caller's file and function.

    Suppressed entirely unless logging is enabled in the settings.
    """
    if not __LOG:
        return
    # Inspect the call stack to find out who called us.
    outer_frames = inspect.getouterframes(inspect.currentframe(), 2)
    caller = outer_frames[1]
    _log("info", caller[1], caller[3], message)
def error(message):
    """Log *message* at error level, tagged with the caller's file and function.

    Always emitted, since error states are not supposed to be reached.
    """
    # Inspect the call stack to find out who called us.
    outer_frames = inspect.getouterframes(inspect.currentframe(), 2)
    caller = outer_frames[1]
    _log("error", caller[1], caller[3], message)
def _log(error_type, file_path, function, string):
    """Print one log line: ``[HH:MM:SS.mmm] [level] module.function: message``."""
    # Millisecond-precision timestamp (strip microseconds down to 3 digits).
    stamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
    # Module name = file basename without its extension.
    module = os.path.splitext(os.path.basename(file_path))[0]
    print("[" + stamp + "]", "[" + error_type + "]", module + "." + function + ':', string)
| 1,911 | 587 |
from app.constants.gamemodes import GameMode
# Human-readable names for the four game modes, keyed by mode id.
mode_2_str = dict(enumerate(("standard", "taiko", "catch", "mania")))

# Long names for the three mod "flavours".
mods2str = {"vn": "vanilla", "rx": "relax", "ap": "autopilot"}

# "mode.flavour" -> gulag GameMode id. Mania has no relax and only
# standard has autopilot, hence only 8 combinations.
_GULAG_KEYS = ("0.vn", "1.vn", "2.vn", "3.vn", "0.rx", "1.rx", "2.rx", "0.ap")
mode2gulag = {key: gulag_id for gulag_id, key in enumerate(_GULAG_KEYS)}

# gulag GameMode id -> plain mode id (the flavour is dropped).
gulag2mode = {gulag_id: int(key.partition(".")[0])
              for key, gulag_id in mode2gulag.items()}
# "mode.flavour" key -> GameMode enum member. Mania has no relax mode and
# only standard has autopilot, hence the missing combinations.
modemods2object = {
    "0.vn": GameMode.VANILLA_OSU,
    "1.vn": GameMode.VANILLA_TAIKO,
    "2.vn": GameMode.VANILLA_CATCH,
    "3.vn": GameMode.VANILLA_MANIA,
    "0.rx": GameMode.RELAX_OSU,
    "1.rx": GameMode.RELAX_TAIKO,
    "2.rx": GameMode.RELAX_CATCH,
    "0.ap": GameMode.AUTOPILOT_OSU,
}
# Discord custom-emote markup for score grades and user online status.
emotes = {
    "F": "<:rankf:853753898954391572>",
    "D": "<:rankd:853753898682155009>",
    "C": "<:rankc:853753898912448553>",
    "B": "<:rankb:853753899089657866>",
    "A": "<:ranka:853753899000004618>",
    "S": "<:ranks:853753899135402044>",
    "SH": "<:ranksh:853753899072094208>",
    "X": "<:rankx:853753898817028147>",
    "XH": "<:rankxh:853753899206311976>",
    "offline": "<:status_offline:938167346873380874>",
    "online": "<:status_online:906478682967793705>",
}
class colors:
    """Embed colour palette (hex RGB integers)."""

    red = 0xe74c3c
    purple = 0x8e44ad
    blue = 0x2980b9
    teal = 0x16a085
    green = 0x27ae60
    yellow = 0xf1c40f
# Prefix text shown when reporting moderation actions.
ACTION_STRINGS = {
    "restrict": "Restricted for",
    "unrestrict": "Unrestricted for",
    "silence": "Silenced for",
    "unsilence": "Unsilenced for",
    "note": "Note added:",
}
from pathlib import Path
from py_md_doc import PyMdDoc
import re
if __name__ == "__main__":
    # Generate the project's markdown API docs, splicing selected sections of
    # the Magnebot controller API into the Multimodal doc.
    # API documentation.
    md = PyMdDoc(input_directory=Path("../multimodal_challenge"), files=["dataset/dataset_trial.py",
                                                                        "dataset/env_audio_materials.py",
                                                                        "multimodal_object_init_data.py",
                                                                        "multimodal_base.py",
                                                                        "trial.py"])
    md.get_docs(output_directory=Path("../doc/api"))
    # Multimodal API documentation.
    md = PyMdDoc(input_directory=Path("../multimodal_challenge"), files=["multimodal.py"],
                 metadata_path=Path("doc_metadata.json"))
    doc = md.get_doc(Path("../multimodal_challenge/multimodal.py"))
    # Get the Magnebot API. This assumes that it's located in the home directory.
    magnebot_api = Path.home().joinpath("magnebot/doc/api/magnebot_controller.md").read_text(encoding="utf-8")
    # Fix relative links.
    magnebot_api = re.sub(r"\[(.*?)\]\((?!https)(.*?)\.md\)",
                          r"[\1](https://github.com/alters-mit/magnebot/blob/main/doc/api/\2.md)", magnebot_api)
    # Remove code examples.
    magnebot_api = re.sub(r"(```python((.|\n)*?)```\n)", "", magnebot_api)
    # Remove this paragraph.
    magnebot_api = re.sub(r"(Images of occupancy maps can be found(.*)\.\n\n)", "", magnebot_api, flags=re.MULTILINE)
    # Remove this sentence.
    magnebot_api = magnebot_api.replace("This only works if you've loaded an occupancy map via "
                                        "`self.init_floorplan_scene()`.\n\n\n", "")
    # Get all of the movement actions from the Magnebot API.
    api_txt = re.search(r"(### Movement((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    for action in ["turn_by", "turn_to", "move_by", "move_to"]:
        api_txt += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    # Get all of the arm articulation actions from the Magnebot API.
    # BUG FIX: append (+=) instead of reassigning — the reassignment silently
    # discarded the whole Movement section built just above.
    api_txt += re.search(r"(### Arm Articulation((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    for action in ["reach_for", "grasp", "drop", "reset_arm"]:
        api_txt += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    # Append the movement and arm articulation actions before the Torso section.
    doc = re.sub(r"((.|\n)*?)(### Torso)", r"\1" + api_txt + "***\n\n" + r"\3", doc)
    # Append camera actions.
    doc += "### Camera\n\n_These commands rotate the Magnebot's camera or add additional camera to the scene." \
           " They advance the simulation by exactly 1 frame._\n\n"
    for action in ["rotate_camera", "reset_camera"]:
        doc += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    # Append misc.
    doc += "### Misc.\n\n_These are utility functions that won't advance the simulation by any frames._\n\n"
    for action in ["get_occupancy_position", "get_visible_objects", "end"]:
        doc += re.search(f"(#### {action}((.|\n)*?))#", magnebot_api, flags=re.MULTILINE).group(1)
    # Append class variables.
    magnebot_class_vars = re.search(f"## Class Variables" + r"\n((.|\n)*?)\*$", magnebot_api,
                                    flags=re.MULTILINE).group(1). \
        replace("| Variable | Type | Description |\n| --- | --- | --- |", "").strip()
    doc = re.sub(f"## Class Variables" + r"\n((.|\n)*?)\n\*", "## Class Variables\n" + r"\1" + magnebot_class_vars, doc)
    # Append fields.
    magnebot_fields = re.search(f"## Fields" + r"\n((.|\n)*?)\*$", magnebot_api, flags=re.MULTILINE).group(1)
    doc = re.sub(f"## Fields" + r"\n((.|\n)*?)\n\*", "## Fields" + r"\1" + magnebot_fields, doc)
    # Append other sections.
    sections = ""
    for s in ["Frames", "Parameter types"]:
        section = re.search(f"## {s}\n" + r"((.|\n)*?)\*\*\*", magnebot_api, flags=re.MULTILINE).group(0)
        sections += section + "\n\n"
    doc = re.sub(r"## Fields", sections + "\n## Fields\n", doc)
    # Insert the table of contents and normalize separators/blank lines.
    doc = doc.replace("[TOC-MM]", PyMdDoc.get_toc(doc)).replace("****", "***").replace("\n\n\n", "\n\n")
    Path("../doc/api/multimodal.md").write_text(doc, encoding="utf-8")
    # Dataset generation documentation.
    md = PyMdDoc(input_directory=Path("../dataset_generation"), files=["dataset.py", "rehearsal.py",
                                                                      "occupancy_mapper.py", "init_data.py"])
    md.get_docs(output_directory=Path("../doc/dataset"))
| 4,655 | 1,612 |
import logging
from dataclasses import dataclass
from typing import Dict, Generator, List, Optional, Set, Tuple, Union
import netsquid as ns
from netqasm.lang import operand
from netqasm.lang.encoding import RegisterName
from netqasm.sdk.shared_memory import Arrays, RegisterGroup, setup_registers
from netsquid.components.component import Component, Port
from netsquid.protocols import Protocol
from pydynaa import EventExpression
class SimTimeFilter(logging.Filter):
    """Logging filter that stamps every record with the netsquid sim time."""

    def filter(self, record):
        # Expose the current simulation time to formatters as %(simtime)s.
        record.simtime = ns.sim_time()
        return True
class LogManager:
    """Central access point for the "Stack" logger hierarchy.

    The root stack logger is configured lazily on first access with a
    stderr handler whose records carry the simulation time.
    """

    STACK_LOGGER = "Stack"
    _LOGGER_HAS_BEEN_SETUP = False

    @classmethod
    def _setup_stack_logger(cls) -> None:
        """Attach a sim-time-aware stderr handler to the root stack logger."""
        root = logging.getLogger(cls.STACK_LOGGER)
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(levelname)s:%(simtime)s ns:%(name)s:%(message)s")
        )
        handler.addFilter(SimTimeFilter())
        root.addHandler(handler)
        # Keep stack records out of the global root logger.
        root.propagate = False
        cls._LOGGER_HAS_BEEN_SETUP = True

    @classmethod
    def get_stack_logger(cls, sub_logger: Optional[str] = None) -> logging.Logger:
        """Return the root stack logger, or a named child of it."""
        if not cls._LOGGER_HAS_BEEN_SETUP:
            cls._setup_stack_logger()
        root = logging.getLogger(cls.STACK_LOGGER)
        return root if sub_logger is None else root.getChild(sub_logger)

    @classmethod
    def set_log_level(cls, level: Union[int, str]) -> None:
        cls.get_stack_logger().setLevel(level)

    @classmethod
    def get_log_level(cls) -> int:
        return cls.get_stack_logger().level

    @classmethod
    def log_to_file(cls, path: str) -> None:
        """Additionally write all stack records to *path* (truncating it)."""
        handler = logging.FileHandler(path, mode="w")
        handler.setFormatter(
            logging.Formatter("%(levelname)s:%(simtime)s ns:%(name)s:%(message)s")
        )
        handler.addFilter(SimTimeFilter())
        cls.get_stack_logger().addHandler(handler)
class PortListener(Protocol):
    """Protocol that drains a netsquid port into a buffer.

    Emits ``signal_label`` once per batch of received items so a reactor
    can process everything currently buffered in one go.
    """

    def __init__(self, port: Port, signal_label: str) -> None:
        # Items received on the port, in arrival order.
        self._buffer: List[bytes] = []
        self._port: Port = port
        self._signal_label = signal_label
        self.add_signal(signal_label)

    @property
    def buffer(self) -> List[bytes]:
        return self._buffer

    def run(self) -> Generator[EventExpression, None, None]:
        """Forever: buffer all pending port input, then signal once."""
        while True:
            # Wait for an event saying that there is new input.
            yield self.await_port_input(self._port)
            counter = 0
            # Read all inputs and count them.
            while True:
                input = self._port.rx_input()
                if input is None:
                    break
                self._buffer += input.items
                counter += 1
            # If there are n inputs, there have been n events, but we yielded only
            # on one of them so far. "Flush" these n-1 additional events:
            while counter > 1:
                yield self.await_port_input(self._port)
                counter -= 1
            # Only after having yielded on all current events, we can schedule a
            # notification event, so that its reactor can handle all inputs at once.
            self.send_signal(self._signal_label)
class RegisterMeta:
    """Helpers for parsing NetQASM register names such as ``R3`` or ``C15``."""

    @classmethod
    def prefixes(cls) -> List[str]:
        """The four valid register-group prefixes."""
        return list("RCQM")

    @classmethod
    def parse(cls, name: str) -> Tuple[RegisterName, int]:
        """Split a register name into its (group, index) pair.

        Asserts the name is well-formed and the index fits in 4 bits.
        """
        assert len(name) >= 2
        assert name[0] in cls.prefixes()
        group = RegisterName[name[0]]
        index = int(name[1:])
        assert index < 16  # 16 registers per group
        return group, index
class ComponentProtocol(Protocol):
    """Base protocol for a hardware component, with named port listeners."""

    def __init__(self, name: str, comp: Component) -> None:
        super().__init__(name)
        # Listeners by name; started/stopped together with this protocol.
        self._listeners: Dict[str, PortListener] = {}
        self._logger: logging.Logger = LogManager.get_stack_logger(
            f"{self.__class__.__name__}({comp.name})"
        )

    def add_listener(self, name, listener: PortListener) -> None:
        self._listeners[name] = listener

    def _receive_msg(
        self, listener_name: str, wake_up_signal: str
    ) -> Generator[EventExpression, None, str]:
        """Pop the next buffered message, waiting for the signal if empty."""
        listener = self._listeners[listener_name]
        if len(listener.buffer) == 0:
            yield self.await_signal(sender=listener, signal_label=wake_up_signal)
        return listener.buffer.pop(0)

    def start(self) -> None:
        super().start()
        for listener in self._listeners.values():
            listener.start()

    def stop(self) -> None:
        for listener in self._listeners.values():
            listener.stop()
        super().stop()
class AppMemory:
    """Per-application memory: registers, arrays, qubit map, program counter.

    Mirrors the NetQASM shared-memory model for a single app id.
    """

    def __init__(self, app_id: int, max_qubits: int) -> None:
        self._app_id: int = app_id
        self._registers: Dict[RegisterName, RegisterGroup] = setup_registers()
        self._arrays: Arrays = Arrays()
        # Virtual qubit id -> physical qubit id (None = unmapped).
        self._virt_qubits: Dict[int, Optional[int]] = {
            i: None for i in range(max_qubits)
        }
        self._prog_counter: int = 0

    @property
    def prog_counter(self) -> int:
        return self._prog_counter

    def increment_prog_counter(self) -> None:
        self._prog_counter += 1

    def set_prog_counter(self, value: int) -> None:
        self._prog_counter = value

    def map_virt_id(self, virt_id: int, phys_id: int) -> None:
        """Bind a virtual qubit id to a physical qubit id."""
        self._virt_qubits[virt_id] = phys_id

    def unmap_virt_id(self, virt_id: int) -> None:
        self._virt_qubits[virt_id] = None

    def unmap_all(self) -> None:
        for virt_id in self._virt_qubits:
            self._virt_qubits[virt_id] = None

    @property
    def qubit_mapping(self) -> Dict[int, Optional[int]]:
        return self._virt_qubits

    def phys_id_for(self, virt_id: int) -> Optional[int]:
        # NOTE: may be None for an unmapped virtual id (annotation fixed).
        return self._virt_qubits[virt_id]

    def virt_id_for(self, phys_id: int) -> Optional[int]:
        """Reverse lookup: virtual id mapped to *phys_id*, or None."""
        for virt, phys in self._virt_qubits.items():
            if phys == phys_id:
                return virt
        return None

    def set_reg_value(self, register: Union[str, operand.Register], value: int) -> None:
        """Write *value* to a register given by name string or operand."""
        if isinstance(register, str):
            name, index = RegisterMeta.parse(register)
        else:
            name, index = register.name, register.index
        self._registers[name][index] = value

    def get_reg_value(self, register: Union[str, operand.Register]) -> int:
        """Read a register given by name string or operand."""
        if isinstance(register, str):
            name, index = RegisterMeta.parse(register)
        else:
            name, index = register.name, register.index
        return self._registers[name][index]

    # for compatibility with netqasm Futures
    def get_register(self, register: Union[str, operand.Register]) -> Optional[int]:
        return self.get_reg_value(register)

    # for compatibility with netqasm Futures
    def get_array_part(
        self, address: int, index: Union[int, slice]
    ) -> Union[None, int, List[Optional[int]]]:
        if isinstance(index, int):
            return self.get_array_value(address, index)
        elif isinstance(index, slice):
            return self.get_array_values(address, index.start, index.stop)

    def init_new_array(self, address: int, length: int) -> None:
        self._arrays.init_new_array(address, length)

    def get_array(self, address: int) -> List[Optional[int]]:
        return self._arrays._get_array(address)

    def get_array_entry(self, array_entry: operand.ArrayEntry) -> Optional[int]:
        """Read one array element addressed by an ArrayEntry operand."""
        address, index = self.expand_array_part(array_part=array_entry)
        result = self._arrays[address, index]
        assert (result is None) or isinstance(result, int)
        return result

    def get_array_value(self, addr: int, offset: int) -> Optional[int]:
        address, index = self.expand_array_part(
            array_part=operand.ArrayEntry(operand.Address(addr), offset)
        )
        result = self._arrays[address, index]
        assert (result is None) or isinstance(result, int)
        return result

    def get_array_values(
        self, addr: int, start_offset: int, end_offset: int
    ) -> List[Optional[int]]:
        """Read the half-open slice [start_offset, end_offset) of an array."""
        values = self.get_array_slice(
            operand.ArraySlice(operand.Address(addr), start_offset, end_offset)
        )
        assert values is not None
        return values

    def set_array_entry(
        self, array_entry: operand.ArrayEntry, value: Optional[int]
    ) -> None:
        address, index = self.expand_array_part(array_part=array_entry)
        self._arrays[address, index] = value

    def set_array_value(self, addr: int, offset: int, value: Optional[int]) -> None:
        address, index = self.expand_array_part(
            array_part=operand.ArrayEntry(operand.Address(addr), offset)
        )
        self._arrays[address, index] = value

    def get_array_slice(
        self, array_slice: operand.ArraySlice
    ) -> Optional[List[Optional[int]]]:
        address, index = self.expand_array_part(array_part=array_slice)
        result = self._arrays[address, index]
        assert (result is None) or isinstance(result, list)
        return result

    def expand_array_part(
        self, array_part: Union[operand.ArrayEntry, operand.ArraySlice]
    ) -> Tuple[int, Union[int, slice]]:
        """Resolve an ArrayEntry/ArraySlice operand to (address, index).

        Register-valued indices are dereferenced through the current
        register file; a None register value is an error.

        Raises:
            RuntimeError: if an index register is unset or the operand type
                is unexpected.
        """
        address: int = array_part.address.address
        index: Union[int, slice]
        if isinstance(array_part, operand.ArrayEntry):
            if isinstance(array_part.index, int):
                index = array_part.index
            else:
                index_from_reg = self.get_reg_value(register=array_part.index)
                if index_from_reg is None:
                    raise RuntimeError(
                        f"Trying to use register {array_part.index} "
                        "to index an array but its value is None"
                    )
                index = index_from_reg
        elif isinstance(array_part, operand.ArraySlice):
            startstop: List[int] = []
            for raw_s in [array_part.start, array_part.stop]:
                if isinstance(raw_s, int):
                    startstop.append(raw_s)
                elif isinstance(raw_s, operand.Register):
                    s = self.get_reg_value(register=raw_s)
                    if s is None:
                        raise RuntimeError(
                            f"Trying to use register {raw_s} to "
                            "index an array but its value is None"
                        )
                    startstop.append(s)
                else:
                    raise RuntimeError(
                        f"Something went wrong: raw_s should be int "
                        f"or Register but is {type(raw_s)}"
                    )
            index = slice(*startstop)
        else:
            raise RuntimeError(
                f"Something went wrong: array_part is a {type(array_part)}"
            )
        return address, index
@dataclass
class NetstackCreateRequest:
    """Request to initiate EPR-pair generation with a remote node."""

    app_id: int
    remote_node_id: int
    epr_socket_id: int
    qubit_array_addr: int
    arg_array_addr: int
    result_array_addr: int
@dataclass
class NetstackReceiveRequest:
    """Request to receive EPR-pair generation initiated by a remote node."""

    app_id: int
    remote_node_id: int
    epr_socket_id: int
    qubit_array_addr: int
    result_array_addr: int
@dataclass
class NetstackBreakpointCreateRequest:
    """Breakpoint request from the initiating side, identified by app id."""

    app_id: int
@dataclass
class NetstackBreakpointReceiveRequest:
    """Breakpoint request from the receiving side, identified by app id."""

    app_id: int
class AllocError(Exception):
    """Raised when no suitable physical qubit is available for allocation."""

    pass
class PhysicalQuantumMemory:
    """Bookkeeping for a node's physical qubits.

    Tracks which qubit ids are currently allocated and which ids may serve
    as communication qubits (all of them in this base class; subclasses may
    restrict ``_comm_qubit_ids``, see ``NVPhysicalQuantumMemory``).
    """

    def __init__(self, qubit_count: int) -> None:
        self._qubit_count = qubit_count
        self._allocated_ids: Set[int] = set()
        # By default every qubit can be used as a communication qubit.
        self._comm_qubit_ids: Set[int] = {i for i in range(qubit_count)}

    @property
    def qubit_count(self) -> int:
        """Total number of physical qubits."""
        return self._qubit_count

    @property
    def comm_qubit_count(self) -> int:
        """Number of qubits usable for communication."""
        return len(self._comm_qubit_ids)

    def allocate(self) -> int:
        """Allocate any free qubit (communication or memory) and return its id.

        Raises:
            AllocError: if all qubits are allocated.
        """
        for i in range(self._qubit_count):
            if i not in self._allocated_ids:
                self._allocated_ids.add(i)
                return i
        raise AllocError("No more qubits available")

    def allocate_comm(self) -> int:
        """Allocate a free communication qubit and return its id.

        Raises:
            AllocError: if no communication qubit is free.
        """
        for i in range(self._qubit_count):
            if i not in self._allocated_ids and i in self._comm_qubit_ids:
                self._allocated_ids.add(i)
                return i
        raise AllocError("No more comm qubits available")

    def allocate_mem(self) -> int:
        """Allocate a free memory (non-communication) qubit and return its id.

        Raises:
            AllocError: if no memory qubit is free.
        """
        for i in range(self._qubit_count):
            if i not in self._allocated_ids and i not in self._comm_qubit_ids:
                self._allocated_ids.add(i)
                return i
        raise AllocError("No more mem qubits available")

    def free(self, id: int) -> None:
        """Release a previously allocated qubit id."""
        self._allocated_ids.remove(id)

    def is_allocated(self, id: int) -> bool:
        return id in self._allocated_ids

    def clear(self) -> None:
        """Release all qubit ids.

        BUG FIX: this used to assign ``{}`` — an empty *dict* — after which
        any subsequent ``allocate*()`` crashed with ``AttributeError`` on
        ``.add``. Reset to an empty set instead.
        """
        self._allocated_ids = set()
class NVPhysicalQuantumMemory(PhysicalQuantumMemory):
    """Physical memory of an NV-center device: only qubit 0 can communicate."""

    def __init__(self, qubit_count: int) -> None:
        super().__init__(qubit_count)
        # On NV hardware only the electron spin (id 0) is a communication qubit.
        self._comm_qubit_ids: Set[int] = {0}
| 13,288 | 3,981 |
from django.apps import AppConfig
class T05Config(AppConfig):
    """Django application configuration for the ``t05`` app."""

    name = 't05'
| 81 | 30 |
import unittest
from datasetio.datasetwriter import DatasetWriter
from datasetio.datasetgenerator import DatasetGenerator
import h5py
import os
import numpy as np
import string
import random
import math
class TestDatasetGenerator(unittest.TestCase):
def setUp(self):
self.feat_length = 5
self.seq_length = 5
self.buffer_size = 5
self.num_rows = 50
self.dataset_file_path = 'test.hdf'
self.dataset_name = 'test'
self.dtypes = [('feat_seq', 'float', (self.seq_length,
self.feat_length)),
('label', 'int'), ('file', h5py.string_dtype())]
self.dataset_writer = DatasetWriter('test', self.num_rows, self.dtypes,
self.dataset_file_path,
self.buffer_size)
self.taken_files = set()
def tearDown(self):
os.remove(self.dataset_file_path)
def initialize_expected_rows(self):
expected_rows = []
for i in range(0, self.num_rows):
zero_features = np.zeros((self.seq_length, self.feat_length))
row = self.generate_row(zero_features, 0, '')
expected_rows.append(row)
return expected_rows
def generate_row(self, features, label, file):
return {'feat_seq': features, 'label': label, 'file': file}
def generate_random_row(self):
features = np.random.rand(self.seq_length, self.feat_length)
label = np.random.randint(2)
letters = string.ascii_lowercase
# Generate a unique file name, i.e. one that hasn't been used in this test yet.
file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
while file in self.taken_files:
file = ''.join(random.choice(letters) for i in range(10)) + '.mp4'
self.taken_files.add(file)
return {'feat_seq': features, 'label': label, 'file': file}
def check_db(self, batch_size, expected_rows, shuffle):
gen = DatasetGenerator(self.dataset_file_path,
self.dataset_name,
batch_size,
'feat_seq',
shuffle=shuffle)
gen_features = []
gen_labels = []
for features, labels in gen.generator(1):
gen_features.extend(features.tolist())
gen_labels.extend(labels.tolist())
self.assertEqual(len(expected_rows), len(gen_labels))
for gen_label, gen_features in zip(gen_labels, gen_features):
result = [
row for row in expected_rows if row['label'] == gen_label
and np.array_equal(row['feat_seq'], gen_features)
]
self.assertTrue(result)
def test_full(self):
expected_rows = self.initialize_expected_rows()
for i in range(0, self.num_rows):
row = self.generate_random_row()
expected_rows[i] = row
self.dataset_writer.add(row)
self.dataset_writer.close()
batch_size = 3
self.check_db(batch_size, expected_rows, False)
def test_full_shuffle(self):
expected_rows = self.initialize_expected_rows()
for i in range(0, self.num_rows):
row = self.generate_random_row()
expected_rows[i] = row
self.dataset_writer.add(row)
self.dataset_writer.close()
batch_size = 3
self.check_db(batch_size, expected_rows, True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 3,599 | 1,063 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
class LoggerTests(unittest.TestCase):
    """Smoke test for the livebridge logger setup."""

    def test_logger(self):
        import livebridge.config
        # Point the logfile somewhere harmless before the logger module runs.
        livebridge.config.LOGFILE = "/dev/null"
        from livebridge.logger import logger
        self.assertIsInstance(logger, logging.RootLogger)
        self.assertIsInstance(logger.handlers[0], logging.StreamHandler)
        # Detach handlers so subsequent tests don't double-log.
        logger.handlers = []
| 990 | 290 |
from .site import Site
from .civicplus import CivicPlusSite
from .granicus import GranicusSite
# Registry mapping a platform slug to its site scraper class.
SUPPORTED_SITES = {
    'granicus': GranicusSite,
    'civicplus': CivicPlusSite,
}
"""Test parser functions that converts the incoming json from API into dataclass models."""
from dataclasses import dataclass
from typing import Optional
import pytest
from aiohue.util import dataclass_from_dict
@dataclass
class BasicModelChild:
    """Basic test model."""

    # Required fields of assorted primitive types; d may be None.
    a: int
    b: str
    c: str
    d: Optional[int]
@dataclass
class BasicModel:
    """Basic test model."""

    a: int
    b: float
    c: str
    d: Optional[int]
    # Nested model, to exercise recursive parsing.
    e: BasicModelChild
    # Optional field with a default, to exercise default handling.
    f: str = "default"
def test_dataclass_from_dict():
    """Test dataclass from dict parsing.

    Covers basic fields, recursion into nested dataclasses, default values,
    int->float coercion, type mismatches, missing keys and strict mode.
    Note: ``raw`` is mutated in place between cases, so order matters.
    """
    raw = {
        "a": 1,
        "b": 1.0,
        "c": "hello",
        "d": 1,
        "e": {"a": 2, "b": "test", "c": "test", "d": None},
    }
    res = dataclass_from_dict(BasicModel, raw)
    # test the basic values
    assert isinstance(res, BasicModel)
    assert res.a == 1
    assert res.b == 1.0
    assert res.d == 1
    # test recursive parsing
    assert isinstance(res.e, BasicModelChild)
    # test default value
    assert res.f == "default"
    # test int gets converted to float
    raw["b"] = 2
    res = dataclass_from_dict(BasicModel, raw)
    assert res.b == 2.0
    # test string doesn't match int
    with pytest.raises(TypeError):
        raw2 = {**raw}
        raw2["a"] = "blah"
        dataclass_from_dict(BasicModel, raw2)
    # test missing key result in keyerror
    with pytest.raises(KeyError):
        raw2 = {**raw}
        del raw2["a"]
        dataclass_from_dict(BasicModel, raw2)
    # test extra keys silently ignored in non-strict mode
    raw2 = {**raw}
    raw2["extrakey"] = "something"
    dataclass_from_dict(BasicModel, raw2, strict=False)
    # test extra keys not silently ignored in strict mode
    with pytest.raises(KeyError):
        dataclass_from_dict(BasicModel, raw2, strict=True)
| 1,814 | 602 |
from datetime import datetime

import pytz

import zope.component
import zope.dublincore.interfaces
import zope.interface
import zope.security.proxy

import zeit.cms.checkout.interfaces
import zeit.cms.content.dav
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.workflow.interfaces
# Earliest aware datetime; used below as a fallback for missing timestamps.
MIN_DATE = datetime.min.replace(tzinfo=pytz.UTC)
@zope.component.adapter(zeit.cms.interfaces.ICMSContent)
@zope.interface.implementer(zeit.cms.workflow.interfaces.IModified)
class Modified(zeit.cms.content.dav.DAVPropertiesAdapter):
    """IModified adapter that stores its data in DAV properties."""

    # last_modified_by / date_last_checkout are DAV-backed properties.
    zeit.cms.content.dav.mapProperties(
        zeit.cms.workflow.interfaces.IModified,
        zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
        ('last_modified_by', 'date_last_checkout'))

    @property
    def date_last_modified(self):
        # Delegates to Dublin Core 'modified'; None when no IDCTimes adapter.
        dc = zope.dublincore.interfaces.IDCTimes(self.context, None)
        if dc is not None:
            return dc.modified
@zope.component.adapter(
    zope.interface.Interface,
    zeit.cms.checkout.interfaces.IBeforeCheckinEvent)
def update_last_modified_by(context, event):
    """Record the checking-in principal on the content's IModified data."""
    modified = zeit.cms.workflow.interfaces.IModified(context, None)
    if modified is None:
        return
    unwrapped = zope.security.proxy.removeSecurityProxy(modified)
    unwrapped.last_modified_by = event.principal.id
@zope.component.adapter(
    zope.interface.Interface,
    zeit.cms.checkout.interfaces.IAfterCheckoutEvent)
def update_date_last_checkout(context, event):
    """Stamp the current UTC time as the content's last checkout date."""
    modified = zeit.cms.workflow.interfaces.IModified(context, None)
    if modified is None:
        return
    unwrapped = zope.security.proxy.removeSecurityProxy(modified)
    unwrapped.date_last_checkout = datetime.now(pytz.UTC)
@zope.component.adapter(
    zope.interface.Interface,
    zeit.cms.workflow.interfaces.IBeforePublishEvent)
def update_date_last_published_semantic(context, event):
    """Advance date_last_published_semantic when the content changed
    semantically since the previous publish."""
    publish_info = zeit.cms.workflow.interfaces.IPublishInfo(context)
    previous = publish_info.date_last_published_semantic or MIN_DATE
    semantic = zeit.cms.content.interfaces.ISemanticChange(context)
    last_change = semantic.last_semantic_change or MIN_DATE
    if last_change > previous:
        publish_info.date_last_published_semantic = (
            publish_info.date_last_published)
| 2,155 | 728 |
#!/usr/bin/env python
"""
This analyzes imitate observe behavioural data.It could be generalized
to analyze any rapid event-related design experiment fairly easily.
Usage:
dm_proc_imob.py [options] <study>
Arguments:
<study> Name of study in system-wide configuration file.
Options:
--subject SUBJID If given, run on a single subject
--debug Debug logging
DETAILS
1) Produces AFNI and FSL-compatible GLM timing files.
2) Runs an AFNI GLM analysis at the single-subject level.
Each subject is run through this pipeline if the outputs do not already exist.
Requires dm-proc-fmri.py to be complete for each subject.
DEPENDENCIES
+ afni
"""
import datman.utils as utils
import datman.config as cfg
from docopt import docopt
import glob
import logging
import os, sys
import tempfile
import time
import yaml
# Warn-level default; main() raises this logger to DEBUG when --debug is set.
logging.basicConfig(level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s")
logger = logging.getLogger(os.path.basename(__file__))
def check_complete(directory, subject):
    """Return True when both first-level GLM outputs exist for the subject."""
    templates = ('{}_glm_IM_1stlvl_MNI-nonlin.nii.gz',
                 '{}_glm_OB_1stlvl_MNI-nonlin.nii.gz')
    return all(
        os.path.isfile(os.path.join(directory, subject, template.format(subject)))
        for template in templates)
def generate_analysis_script(subject, inputs, input_type, config, study):
    """
    This writes the analysis script to replicate the methods in [insert paper
    here]. It expects timing files to exist (these are static, and are generated
    by 'imob-parse.py').

    Briefly, this is a standard rapid-event related design. We use 5 tent
    functions to explain each event over a 15 second window (this is the
    standard length of the HRF).

    Returns the path to the script that was generated.
    """
    assets = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets')
    study_base = config.get_study_base(study)
    subject_dir = os.path.join(study_base, config.get_path('fmri'), 'imob', subject)
    script = '{subject_dir}/{subject}_glm_1stlevel_{input_type}.sh'.format(
        subject_dir=subject_dir, subject=subject, input_type=input_type)

    # BUGFIX: filter() returns a one-shot iterator on python 3 and cannot be
    # indexed; materialize the matches before taking the first one.
    IM_data = [x for x in inputs[input_type] if '_IMI_' in x][0]
    OB_data = [x for x in inputs[input_type] if '_OBS_' in x][0]

    # BUGFIX: write text ('w'), not bytes ('wb'); a context manager closes the
    # handle even if formatting raises.
    with open(script, 'w') as f:
        f.write("""#!/bin/bash
#
# Contrasts: emotional faces vs. fixation, emotional faces vs. neutral faces.
# use the 'bucket' dataset (*_1stlevel.nii.gz) for group level analysis.
#

# Imitate GLM for {subject}.
3dDeconvolve \\
    -input {IM_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/PARAMS/motion.*.01.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 6 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{subject}_glm_IM_1stlevel_design_{input_type}.mat \\
    -stim_label 1 IM_AN -stim_times 1 {assets}/IM_event-times_AN.1D \'BLOCK(1,1)\' \\
    -stim_label 2 IM_FE -stim_times 2 {assets}/IM_event-times_FE.1D \'BLOCK(1,1)\' \\
    -stim_label 3 IM_FX -stim_times 3 {assets}/IM_event-times_FX.1D \'BLOCK(1,1)\' \\
    -stim_label 4 IM_HA -stim_times 4 {assets}/IM_event-times_HA.1D \'BLOCK(1,1)\' \\
    -stim_label 5 IM_NE -stim_times 5 {assets}/IM_event-times_NE.1D \'BLOCK(1,1)\' \\
    -stim_label 6 IM_SA -stim_times 6 {assets}/IM_event-times_SA.1D \'BLOCK(1,1)\' \\
    -gltsym 'SYM: -1*IM_FX +0*IM_NE +0.25*IM_AN +0.25*IM_FE +0.25*IM_HA +0.25*IM_SA' \\
    -glt_label 1 emot-fix \\
    -gltsym 'SYM: +0*IM_FX -1*IM_NE +0.25*IM_AN +0.25*IM_FE +0.25*IM_HA +0.25*IM_SA' \\
    -glt_label 2 emot-neut \\
    -fitts {subject_dir}/{subject}_glm_IM_1stlvl_explained_{input_type}.nii.gz \\
    -errts {subject_dir}/{subject}_glm_IM_1stlvl_residuals_{input_type}.nii.gz \\
    -bucket {subject_dir}/{subject}_glm_IM_1stlvl_{input_type}.nii.gz \\
    -cbucket {subject_dir}/{subject}_glm_IM_1stlvl_allcoeffs_{input_type}.nii.gz \\
    -fout -tout -xjpeg {subject_dir}/{subject}_glm_IM_1stlevel_design_{input_type}.jpg

# Obserse GLM for {subject}.
3dDeconvolve \\
    -input {OB_data} \\
    -mask {subject_dir}/anat_EPI_mask_MNI-nonlin.nii.gz \\
    -ortvec {subject_dir}/PARAMS/motion.*.02.1D motion_paramaters \\
    -polort 4 \\
    -num_stimts 6 \\
    -local_times \\
    -jobs 4 \\
    -x1D {subject_dir}/{subject}_glm_OB_1stlevel_design_{input_type}.mat \\
    -stim_label 1 OB_AN -stim_times 1 {assets}/OB_event-times_AN.1D \'BLOCK(1,1)\' \\
    -stim_label 2 OB_FE -stim_times 2 {assets}/OB_event-times_FE.1D \'BLOCK(1,1)\' \\
    -stim_label 3 OB_FX -stim_times 3 {assets}/OB_event-times_FX.1D \'BLOCK(1,1)\' \\
    -stim_label 4 OB_HA -stim_times 4 {assets}/OB_event-times_HA.1D \'BLOCK(1,1)\' \\
    -stim_label 5 OB_NE -stim_times 5 {assets}/OB_event-times_NE.1D \'BLOCK(1,1)\' \\
    -stim_label 6 OB_SA -stim_times 6 {assets}/OB_event-times_SA.1D \'BLOCK(1,1)\' \\
    -gltsym 'SYM: -1*OB_FX +0*OB_NE +0.25*OB_AN +0.25*OB_FE +0.25*OB_HA +0.25*OB_SA' \\
    -glt_label 1 emot-fix \\
    -gltsym 'SYM: +0*OB_FX -1*OB_NE +0.25*OB_AN +0.25*OB_FE +0.25*OB_HA +0.25*OB_SA' \\
    -glt_label 2 emot-neut \\
    -fitts {subject_dir}/{subject}_glm_OB_1stlvl_explained_{input_type}.nii.gz \\
    -errts {subject_dir}/{subject}_glm_OB_1stlvl_residuals_{input_type}.nii.gz \\
    -bucket {subject_dir}/{subject}_glm_OB_1stlvl_{input_type}.nii.gz \\
    -cbucket {subject_dir}/{subject}_glm_OB_1stlvl_allcoeffs_{input_type}.nii.gz \\
    -fout -tout -xjpeg {subject_dir}/{subject}_glm_OB_1stlevel_design_{input_type}.jpg
""".format(IM_data=IM_data, OB_data=OB_data, subject_dir=subject_dir, assets=assets,
           subject=subject, input_type=input_type))
    return script
def get_inputs(files, config):
    """
    Finds the inputs for the imob experiment (one IMI and one OBS file,
    respectively) for each epitome stage seperately.

    Raises an Exception carrying the candidate list when a stage does not
    yield exactly two tagged files.
    """
    inputs = {}
    for exported in config.study_config['fmri']['imob']['glm']:
        # BUGFIX: materialize as a list — the original py2-style filter()
        # iterator was exhausted after the first tag's scan on python 3,
        # so the second tag could never match.
        candidates = [x for x in files if '{}.nii.gz'.format(exported) in x]
        tagged_candidates = []
        for tag in config.study_config['fmri']['imob']['tags']:
            tagged_candidates.extend(
                x for x in candidates if '_{}_'.format(tag) in x)
        if len(tagged_candidates) == 2:
            inputs[exported] = tagged_candidates
        else:
            raise Exception(candidates)
    return inputs
def main():
    """
    Loops through subjects, preprocessing using supplied script, and runs a
    first-level GLM using AFNI (tent functions, 15 s window) on all subjects.
    """
    arguments = docopt(__doc__)
    study = arguments['<study>']
    subject = arguments['--subject']
    debug = arguments['--debug']

    logging.info('Starting')
    if debug:
        logger.setLevel(logging.DEBUG)

    # load config for study
    try:
        config = cfg.config(study=study)
    except ValueError:
        logger.error('study {} not defined'.format(study))
        sys.exit(1)

    study_base = config.get_study_base(study)
    imob_dir = os.path.join(study_base, config.get_path('fmri'), 'imob')

    # process a single subject
    if subject:
        # get required inputs from each
        files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
        inputs = get_inputs(files, config)

        # check if subject has already been processed
        if check_complete(imob_dir, subject):
            logger.info('{} already analysed'.format(subject))
            sys.exit(0)

        # first level GLM for inputs
        for input_type in inputs.keys():
            script = generate_analysis_script(subject, inputs, input_type, config, study)
            rtn, out = utils.run('chmod 754 {}'.format(script))
            rtn, out = utils.run(script)
            if rtn:
                logger.error('Script {} failed to run on subject {} with error:\n{}'.format(
                    script, subject, out))
                sys.exit(1)

    # process all subjects
    else:
        commands = []
        for path in glob.glob('{}/*'.format(imob_dir)):
            subject = os.path.basename(path)

            # add subject if any of the expected outputs do not exist
            files = glob.glob(os.path.join(imob_dir, subject) + '/*.nii.gz')
            try:
                inputs = get_inputs(files, config)
            except Exception:
                # narrowed from a bare except so SystemExit/KeyboardInterrupt
                # still propagate
                logger.debug('Invalid inputs for {}'.format(subject))
                continue

            for exp in inputs.keys():
                # BUGFIX: the original `if not filter(...)` was always False on
                # python 3 (filter objects are truthy), so no job was ever
                # queued; use any() to test for a matching output file.
                if not any('{}_glm_IM_1stlvl_{}'.format(subject, exp) in x for x in files):
                    commands.append(" ".join([__file__, study, '--subject {}'.format(subject)]))
                    break

        if commands:
            logger.debug("queueing up the following commands:\n" + '\n'.join(commands))
            for i, cmd in enumerate(commands):
                jobname = "dm_imob_{}_{}".format(i, time.strftime("%Y%m%d-%H%M%S"))
                jobfile = '/tmp/{}'.format(jobname)
                logfile = '/tmp/{}.log'.format(jobname)
                errfile = '/tmp/{}.err'.format(jobname)

                # BUGFIX: text mode — we write str, not bytes ('wb' fails on
                # python 3)
                with open(jobfile, 'w') as fid:
                    fid.write('#!/bin/bash\n')
                    fid.write(cmd)

                rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(
                    logfile, errfile, jobname, jobfile))
                if rtn:
                    logger.error("Job submission failed. Output follows.")
                    logger.error("stdout: {}".format(out))
                    sys.exit(1)
| 10,141 | 3,670 |
from pathlib import Path
from release_helper.commands.directory_check_empty import cmd
def test_with_does_not_exist(runner):
    """A missing path is accepted: exit 0, no output."""
    result = runner.invoke(cmd, ["folder"])
    assert result.exit_code == 0
    assert result.stdout == ""
    assert result.stderr == ""

def test_with_file(runner):
    """A plain file is rejected: exit 1 with an error on stderr."""
    Path("file").write_text("I exist.\n")
    result = runner.invoke(cmd, ["file"])
    assert result.exit_code == 1
    assert result.stdout == ""
    assert result.stderr == "file is not a directory.\n"
def test_with_empty(runner):
    """An empty directory passes silently."""
    Path("folder").mkdir()
    result = runner.invoke(cmd, ["folder"])
    assert result.exit_code == 0
    assert result.stdout == ""
    assert result.stderr == ""

def test_with_non_empty_with_one_file(runner):
    """A directory containing one file fails with a message on stderr."""
    Path("folder").mkdir()
    Path("folder/file.txt").write_text("I exist.\n")
    result = runner.invoke(cmd, ["folder"])
    assert result.exit_code == 1
    assert result.stdout == ""
    assert result.stderr == "folder is not empty.\n"
def test_with_non_empty_with_file_and_subfolder_with_delete(runner):
    """--delete empties the directory (files and subfolders) and exits 0."""
    Path("folder").mkdir()
    Path("folder/file.txt").write_text("I exist.\n")
    Path("folder/subfolder").mkdir()
    Path("folder/subfolder/file.txt").write_text("I exist.\n")
    result = runner.invoke(cmd, ["folder", "--delete"])
    assert result.exit_code == 0
    assert result.stdout == ""
    assert result.stderr == ""
    # both the file and the whole subtree must be gone
    assert not Path("folder/file.txt").exists()
    assert not Path("folder/subfolder").exists()

def test_with_non_empty_with_many_files(runner):
    """Multiple files still yield a single 'not empty' error."""
    Path("folder").mkdir()
    Path("folder/file.txt").write_text("I exist.\n")
    Path("folder/file2.txt").write_text("And me too.\n")
    result = runner.invoke(cmd, ["folder"])
    assert result.exit_code == 1
    assert result.stdout == ""
    assert result.stderr == "folder is not empty.\n"
def test_with_two_empty_directories(runner):
    """Several empty directories pass in one invocation."""
    Path("folder-1").mkdir()
    Path("folder-2").mkdir()
    result = runner.invoke(cmd, ["folder-1", "folder-2"])
    assert result.exit_code == 0
    assert result.stdout == ""
    assert result.stderr == ""

def test_with_four_directories_with_two_non_empty(runner):
    """Every non-empty directory is reported; missing ones are skipped."""
    Path("folder-1").mkdir()
    Path("folder-2").mkdir()
    Path("folder-3").mkdir()
    Path("folder-1/file.txt").write_text("I exist.\n")
    Path("folder-3/file.txt").write_text("I exist.\n")
    result = runner.invoke(cmd, ["folder-1", "folder-2", "folder-3", "folder-4"])
    assert result.exit_code == 1
    assert result.stdout == ""
    assert result.stderr == ("folder-1 is not empty.\n" "folder-3 is not empty.\n")
| 2,598 | 869 |
# -*- coding: utf-8 -*-
import os
import sys
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# Import the common client and the profile models for this product.
from tencentcloud.common import common_client
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import pb
try:
    # Instantiate a credential object; pass in the Tencent Cloud account's
    # secretId and secretKey.
    cred = credential.Credential(
        os.environ.get("TENCENTCLOUD_SECRET_ID"),
        os.environ.get("TENCENTCLOUD_SECRET_KEY"))
    # Optional HTTP profile; can be skipped when no special needs.
    httpProfile = HttpProfile()
    httpProfile.reqMethod = "POST"  # POST request (default is POST)
    httpProfile.reqTimeout = 30  # request timeout in seconds (default 60)
    httpProfile.endpoint = "cls.tencentcloudapi.com"  # regional endpoint (defaults to nearest region)
    httpProfile.keepAlive = True
    # Optional client profile; can be skipped when no special needs.
    clientProfile = ClientProfile()
    clientProfile.signMethod = "TC3-HMAC-SHA256"  # signature algorithm (default HmacSHA256)
    clientProfile.httpProfile = httpProfile
    client = common_client.CommonClient("cls", '2020-10-16', cred, "ap-beijing", clientProfile)
    headers = {
        # Use a log topic ID that really exists in this region.
        "X-CLS-TopicId": "xxxxf2e2-166c-4174-9473-b6a6dfca6f6e",
        # Topic partition hash key, see
        # https://cloud.tencent.com/document/product/614/39259
        # range: 00000000000000000000000000000000 .. ffffffffffffffffffffffffffffffff
        "X-CLS-HashKey": "0fffffffffffffffffffffffffffffff",
        # Compression type (empty = none).
        "X-CLS-CompressType": "",
    }
    resp = client.call_octet_stream("UploadLog", headers, pb.pb_gen(1,1))
    # Print the response as a JSON-formatted string.
    print("%s" % resp)
except TencentCloudSDKException as err:
    print("%s" % err)
| 1,713 | 776 |
import re
import pandas as pd
from scrapy import Spider, Request
from bs4 import BeautifulSoup
from collectors.loaders import CommitteeSpeechLoader
from collectors.utils.constants import (
COMMITTEES_SCHEDULE_PATH,
SPEECH_SPEAKER_PATTERN,
COMMITTEE_SPEECH_URL
)
class CommitteeSpeechSpider(Spider):
    """Scrapes committee speech transcripts for the events listed in the
    committee schedule CSV, optionally filtered by year."""

    name = 'coletor-discursos-comissoes'
    custom_settings = {
        'FEED_EXPORT_FIELDS': ['id_evento', 'ordem_discurso', 'orador',
                               'transcricao']
    }

    def __init__(self, year=None, *args, **kwargs):
        # BUGFIX: the override must chain to Spider.__init__ so Scrapy can
        # finish its own spider initialisation; extra args are forwarded.
        super().__init__(*args, **kwargs)
        events = pd.read_csv(COMMITTEES_SCHEDULE_PATH)
        if year:
            # keep only events whose date string mentions the requested year
            events = events[events['data'].str.contains(year)]
        self.event_ids = events['id_evento'].drop_duplicates().values.tolist()

    def start_requests(self):
        """Yield one request per scheduled event id."""
        for event_id in self.event_ids:
            query = {'event_id': event_id}
            url = COMMITTEE_SPEECH_URL.format_map(query)
            yield Request(url=url, callback=self.parse, meta=query)

    def parse(self, response):
        """Split the page text into (speaker, transcript) pairs and emit one
        item per speech, numbered from 1."""
        body = response.css('body').get()
        speeches = BeautifulSoup(body, 'html.parser').get_text()
        # re.split on the speaker pattern yields alternating speaker/speech
        # chunks; the leading preamble (index 0) is discarded
        sections = re.split(SPEECH_SPEAKER_PATTERN, speeches)[1:]
        if sections:
            event_id = response.meta['event_id']
            for order in range(0, len(sections), 2):
                loader = CommitteeSpeechLoader()
                loader.add_value('id_evento', event_id)
                loader.add_value('ordem_discurso', (order // 2) + 1)
                loader.add_value('orador', sections[order])
                loader.add_value('transcricao', sections[order + 1])
                yield loader.load_item()
| 1,746 | 570 |
# Copyright (c) 2009-2022 fem2ufo
#
# Python stdlib imports
from dataclasses import dataclass
from array import array
from collections import Counter
from collections.abc import Mapping
from math import dist
from typing import NamedTuple, Tuple, List, Iterator, Iterable, Union, Dict
from itertools import chain
# package imports
from steelpy.f2uModel.mesh.sqlite.nodes import get_node
from steelpy.material.matsql import get_materialSQL
from steelpy.sections.main import get_sectionSQL
from steelpy.f2uModel.results.sqlite.operation.process_sql import create_connection, create_table
from steelpy.trave3D.preprocessor.assemble import (beam_stiffness, beam_Ks,
trans_3d_beam, Rmatrix)
#
#
@dataclass
class BeamElement:
    """Read/write view of one beam element stored in the model database.

    No element data is cached: every accessor opens ``db_file`` and reads or
    updates the row identified by ``name``.
    """
    __slots__ = ['name', 'db_file', 'type']

    def __init__(self, element_name: int, db_file: str) -> None:
        """Bind this view to element ``element_name`` within ``db_file``."""
        self.name = element_name
        self.db_file = db_file
        self.type: str = "beam"

    @property
    def number(self) -> int:
        """Database number of the element."""
        conn = create_connection(self.db_file)
        with conn:
            data = get_element_data(conn, self.name)
        return data[1]

    @number.setter
    def number(self, value: int) -> None:
        """Renumbering is not supported; the 1/0 is the original deliberate trap."""
        1/0
        conn = create_connection(self.db_file)
        item = "number"
        with conn:
            update_element_item(conn, self.name, item, value)

    @property
    def connectivity(self) -> List:
        """Node names at ends 1 and 2."""
        conn = create_connection(self.db_file)
        with conn:
            connodes = get_connectivity(conn, self.name)
        return connodes

    @connectivity.setter
    def connectivity(self, nodes: List[int]) -> List:
        """Repoint the element's connectivity rows at ``nodes``."""
        conn = create_connection(self.db_file)
        with conn:
            update_connectivity(conn, self.name, nodes)

    @property
    def material(self) -> List:
        """Material name of the element."""
        conn = create_connection(self.db_file)
        with conn:
            data = get_element_data(conn, self.name)
        return data[4]

    @material.setter
    def material(self, material_name: str) -> None:
        """Assign a new material by name."""
        conn = create_connection(self.db_file)
        item = "material"
        with conn:
            update_element_item(conn, self.name, item, material_name)

    @property
    def section(self) -> List:
        """Section name of the element."""
        conn = create_connection(self.db_file)
        with conn:
            data = get_element_data(conn, self.name)
        return data[5]

    @section.setter
    def section(self, section_name: str) -> None:
        """Assign a new section by name."""
        conn = create_connection(self.db_file)
        item = "section"
        with conn:
            # BUGFIX: previously wrote self.name into the section column
            update_element_item(conn, self.name, item, section_name)

    @property
    def beta(self):
        """beta angle roll"""
        conn = create_connection(self.db_file)
        with conn:
            data = get_element_data(conn, self.name)
        return data[3]

    @beta.setter
    def beta(self, value):
        """beta angle roll"""
        conn = create_connection(self.db_file)
        item = "roll_angle"
        with conn:
            # BUGFIX: previously wrote self.name instead of the new angle
            update_element_item(conn, self.name, item, value)

    def __str__(self) -> str:
        """One-line fixed-width summary of the element."""
        conn = create_connection(self.db_file)
        with conn:
            data = get_element_data(conn, self.name)
        if (title := data[-1]) == "NULL":
            title = ""
        return "{:8d} {:8d} {:8d} {:>12s} {:>12s} {: 6.4f} {:>6.3f} {:>12s}\n"\
            .format(self.name, *self.connectivity,
                    self.material, self.section, self.beta,
                    self.length, title)

    @property
    def DoF(self) -> List[int]:
        """Zero-based node numbers of both end nodes."""
        conn = create_connection(self.db_file)
        dof = []
        for node_name in self.connectivity:
            node = get_node(conn, node_name=node_name)
            dof.append(node[0] - 1)
        return dof

    @property
    def length(self) -> float:
        """Euclidean distance between the two end nodes."""
        conn = create_connection(self.db_file)
        nodes = self.connectivity
        node1 = get_node(conn, node_name=nodes[0])
        node2 = get_node(conn, node_name=nodes[1])
        return dist(node1[3:6], node2[3:6])

    @property
    def unit_vector(self) -> List[float]:
        """Direction cosines [l, m, n] from node 1 towards node 2."""
        # TODO: get_node should be aligned with inmemmory
        conn = create_connection(self.db_file)
        node1 = get_node(conn, node_name=self.connectivity[0])
        node2 = get_node(conn, node_name=self.connectivity[1])
        dx = node2[3] - node1[3]
        dy = node2[4] - node1[4]
        dz = node2[5] - node1[5]
        L = dist(node1[3:6], node2[3:6])
        return [dx / L, dy / L, dz / L]

    @property
    def Kmatrix(self):
        """Global-axis stiffness matrix: local K rotated by R."""
        material, section, beta = self._K_data()
        R = Rmatrix(*self.unit_vector, beta)
        K = beam_Ks(self.length,
                    section.area, section.J,
                    section.Iy, section.Iz,
                    material.E, material.G,
                    section.area, section.area)
        return trans_3d_beam(K, R)

    def _K_data(self):
        """Fetch (material, section, beta) needed for the stiffness matrix."""
        conn = create_connection(self.db_file)
        cur = conn.cursor()
        # parameterized query instead of interpolating the name into SQL text
        cur.execute("SELECT * FROM tb_Elements WHERE tb_Elements.name = ?;",
                    (self.name,))
        row = cur.fetchone()
        material = get_materialSQL(conn, row[4])
        section = get_sectionSQL(conn, row[5])
        beta = row[6]
        conn.close()
        return material, section, beta

    @property
    def R(self):
        """
        Rotation matrix
        """
        if self.type in ['beam', 'truss']:
            return Rmatrix(*self.unit_vector, self.beta)
        else:
            raise IOError("no yet included")
#
#
#
#
class ElementSQL(Mapping):
    """Mapping of element name -> BeamElement view, backed by SQLite.

    Element names are cached in ``_labels``; all element data lives in the
    database file.
    """
    __slots__ = ['db_file', '_labels']

    def __init__(self, db_file: str,
                 db_system: str = "sqlite") -> None:
        """Create the element tables in ``db_file`` when missing.

        ``db_system`` is kept for interface compatibility (sqlite only).
        """
        self.db_file = db_file
        self._labels: array = array('I', [])
        # create element tables up front
        self._create_table()

    def __setitem__(self, element_number: int, parameters: List) -> None:
        """Insert a new element.

        parameters = ['beam', node1, node2, material, section, roll_angle, title]
        """
        try:
            self._labels.index(element_number)
            raise Exception('element {:} already exist'.format(element_number))
        except ValueError:
            # push to SQL first; register the label only after the insert
            # succeeded so a failed push cannot leave a phantom label behind
            conn = create_connection(self.db_file)
            with conn:
                self.push_element(conn, element_number, parameters)
            conn.commit()
            self._labels.append(element_number)

    def __getitem__(self, element_number: int):
        """Return a BeamElement view or raise IndexError when unknown."""
        try:
            self._labels.index(element_number)
        except ValueError:
            raise IndexError(' ** element {:} does not exist'.format(element_number))
        return BeamElement(element_number, self.db_file)

    def __len__(self) -> float:
        return len(self._labels)

    def __iter__(self) -> Iterator:
        """Iterate over the element names."""
        return iter(self._labels)

    def __contains__(self, value) -> bool:
        return value in self._labels

    def push_element(self, conn, element_number, parameters):
        """Insert the tb_Elements row and its connectivity rows."""
        cur = conn.cursor()
        # resolve material/section names to their database numbers
        cur.execute("SELECT tb_Materials.name, tb_Materials.number FROM tb_Materials;")
        materials = {name: number for name, number in cur.fetchall()}
        cur.execute("SELECT tb_Sections.name, tb_Sections.number FROM tb_Sections;")
        sections = {name: number for name, number in cur.fetchall()}
        # connectivity
        push_connectivity(conn, element_number, parameters[1:3])
        roll_angle = parameters[5]
        if (title := parameters[6]) == "NULL":
            title = None
        project = (element_number, title,
                   parameters[0],
                   materials[parameters[3]],
                   sections[parameters[4]],
                   roll_angle)
        sql = ('INSERT INTO tb_Elements(name, title, type, material, section, '
               'roll_angle) VALUES(?,?,?,?,?,?)')
        cur.execute(sql, project)

    def _create_table(self) -> None:
        """Create the element-related tables when they do not exist."""
        _table_elements = "CREATE TABLE IF NOT EXISTS tb_Elements(\
                            number INTEGER PRIMARY KEY NOT NULL,\
                            name INTEGER NOT NULL,\
                            title TEXT,\
                            type TEXT NOT NULL,\
                            material INTEGER NOT NULL REFERENCES tb_Materials(number),\
                            section INTEGER NOT NULL REFERENCES tb_Sections(number),\
                            roll_angle DECIMAL);"
        _table_connectivity = "CREATE TABLE IF NOT EXISTS tb_Connectivity(\
                            number INTEGER PRIMARY KEY NOT NULL,\
                            element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
                            node_name INTEGER REFERENCES tb_Nodes(name),\
                            node_end INTEGER NOT NULL);"
        _table_univectors = "CREATE TABLE IF NOT EXISTS tb_DirectionCosines(\
                            number INTEGER PRIMARY KEY NOT NULL,\
                            element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
                            type TEXT NOT NULL);"
        _table_offset = "CREATE TABLE IF NOT EXISTS tb_Eccentricities(\
                            number INTEGER PRIMARY KEY NOT NULL,\
                            element_name INTEGER NOT NULL REFERENCES tb_Elements(name),\
                            node_name INTEGER REFERENCES tb_Nodes(name),\
                            node_end INTEGER NOT NULL,\
                            system TEXT NOT NULL,\
                            x DECIMAL,\
                            y DECIMAL,\
                            z DECIMAL);"
        conn = create_connection(self.db_file)
        create_table(conn, _table_elements)
        create_table(conn, _table_connectivity)
        create_table(conn, _table_offset)
        create_table(conn, _table_univectors)

    @property
    def get_connectivities(self):
        """List of [node1, node2] for every element, in table order."""
        conn = create_connection(self.db_file)
        cur = conn.cursor()
        cur.execute("SELECT tb_Elements.name FROM tb_Elements;")
        elements = cur.fetchall()
        connodes = [get_connectivity(conn, element[0]) for element in elements]
        conn.close()
        return connodes

    def get_number(self, start: int = 0) -> Iterable[int]:
        """Yield successive free element numbers above the current maximum."""
        try:
            n = max(self._labels)
        except ValueError:
            n = start
        while True:
            n += 1
            yield n

    def update_item(self, element_number: int, item: str, value: Union[float, int]):
        """Update a single column of an element row."""
        conn = create_connection(self.db_file)
        with conn:
            update_element_item(conn, element_number, item, value)

    @property
    def get_free_nodes(self):
        """
        find nodes not sharing elements
        """
        connectivities = self.get_connectivities
        flat = list(chain.from_iterable(connectivities))
        return [k for k, v in Counter(flat).items() if v == 1]
#
#
def get_connectivity(conn, element_name):
    """Return the element's node names ordered by node end (1, 2, ...)."""
    cur = conn.cursor()
    # parameterized query instead of interpolating element_name into SQL text
    cur.execute("SELECT tb_Connectivity.node_end, tb_Connectivity.node_name\
                FROM tb_Connectivity\
                WHERE tb_Connectivity.element_name = ?;", (element_name,))
    connodes = cur.fetchall()
    return [x for _, x in sorted(connodes)]
#
def push_connectivity(conn, element_name, connectivity):
    """Insert one tb_Connectivity row per node, numbering ends from 1."""
    statement = ('INSERT INTO tb_Connectivity(element_name, '
                 'node_name, node_end) VALUES(?,?,?)')
    cursor = conn.cursor()
    for end, node_name in enumerate(connectivity, start=1):
        cursor.execute(statement, (element_name, node_name, end))
#
def update_connectivity(conn, element_name, connectivity):
    """Point the element's existing connectivity rows at new node names."""
    statement = ('UPDATE tb_Connectivity SET node_name = ? '
                 'WHERE element_name = ? AND node_end = ?')
    cursor = conn.cursor()
    for end, node_name in enumerate(connectivity, start=1):
        cursor.execute(statement, (node_name, element_name, end))
#
#
def update_element_item(conn, name, item, value):
    """Set a single column (``item``) of element ``name`` in tb_Elements.

    ``item`` is interpolated as an SQL identifier — callers must pass a
    fixed, trusted column name; the value itself is bound as a parameter.
    """
    statement = 'UPDATE tb_Elements SET {:} = ? WHERE name = ?'.format(item)
    cursor = conn.cursor()
    cursor.execute(statement, (value, name))
#
#
def get_element_data(conn, element_name):
    """Return [name, number, type, roll_angle, material name, section name,
    connectivity, title] for one element."""
    cur = conn.cursor()
    # parameterized query instead of interpolating element_name into SQL text
    cur.execute("SELECT tb_Elements.name, tb_Elements.number, tb_Elements.type,\
                tb_Elements.roll_angle, tb_Materials.name, tb_Sections.name, tb_Elements.title\
                FROM tb_Elements, tb_Materials, tb_Sections\
                WHERE tb_Elements.name = ? \
                AND tb_Elements.material = tb_Materials.number \
                AND tb_Elements.section = tb_Sections.number;", (element_name,))
    row = cur.fetchone()
    connodes = get_connectivity(conn, element_name)
    return [*row[:6], connodes, row[-1]]
#
# | 16,823 | 4,995 |
# Batch convert LF to '\n' and vice versa
# Tool of csv2po.py
# By Tom CHEN <tomchen.org@gmail.com> (tomchen.org)
import re
from pathlib import Path
from getfilepaths import getFilePaths
def convertLf(inputPath, outputPath, encoding = None, SlashNTolf = True): # check LF (non CRLF)
    """Convert between literal backslash-n escape sequences and real LF
    newlines in one file.

    Reads with newline='\r\n' so only CRLF terminates lines and bare LFs
    pass through untouched; writes with newline='' so no translation occurs.
    SlashNTolf=True turns backslash-n escapes into LF; False does the reverse
    for LFs not preceded by CR.
    """
    # context managers guarantee the handles are closed even on error
    # (the original left files open when read/write raised)
    with inputPath.open(mode='r', newline='\r\n', encoding=encoding) as f:
        content = f.read()
    if SlashNTolf:
        content = re.sub(r'\\n', '\n', content)
    else:
        content = re.sub('(?<!\r)\n', r'\\n', content)
    outputPath.parent.mkdir(parents=True, exist_ok=True)
    with outputPath.open(mode='w', newline='', encoding=encoding) as fo:
        fo.write(content)
def batchConvertLf(inputPath, outputPath, SlashNTolf = True, extension = 'txt', encoding = 'UTF-8'):
    """Run convertLf over every matching file under inputPath, mirroring the
    directory tree below outputPath."""
    for sourceFile in getFilePaths(inputPath, extension=extension):
        targetFile = outputPath.joinpath(sourceFile.relative_to(inputPath))
        convertLf(sourceFile, targetFile, encoding, SlashNTolf)
# batchConvertLf(inputPath = Path('0_source/zh_CN/customlist/t'), outputPath = Path('0_source/zh_CN/customlist/t2'), SlashNTolf = False, extension = 'list', encoding = 'UTF-8')
# Convert the backslash-n escapes in t2 back to real LF newlines, into t3.
batchConvertLf(inputPath = Path('0_source/zh_CN/customlist/t2'), outputPath = Path('0_source/zh_CN/customlist/t3'), SlashNTolf = True, extension = 'list', encoding = 'UTF-8')
| 1,256 | 458 |
import logging
import os
import time
from glob import glob
import cv2 as cv
import numpy as np
import pandas as pd
import torch
from src.semantic_segmentation.loaders import GTDataset, RGBIncrementalDataset
from src.semantic_segmentation.trainer import Trainer
from src.semantic_segmentation.utils.losses import CrossEntropy2d
from src.semantic_segmentation.utils.metrics import IoU, accuracy, f1_score
from tqdm import tqdm
class ClassicTrainer(Trainer):
    def __init__(self, cfg, train=True, dataset=None):
        """Set up datasets and the per-image metrics table.

        cfg: experiment configuration (batch size, paths, classes, ...).
        train: when True, also build the training and ground-truth datasets.
        dataset: dataset path/identifier; its basename tags output files.
        """
        super(ClassicTrainer, self).__init__(cfg)
        if train:
            self.train_dataset = RGBIncrementalDataset(dataset, self.cfg, finetune=False)
            self.gt_dataset = GTDataset(dataset, self.cfg, self.train_dataset.train_ids)
            logging.info(f"Train ids (len {len(self.train_dataset.imgs)}): {[os.path.basename(i) for i in self.train_dataset.imgs]}"
            )
        self.dataset = dataset
        # the test split is always built (used by self.test())
        test_dataset = RGBIncrementalDataset(dataset, self.cfg, train=False, finetune=False)
        logging.info(
            f"Test ids (len {len(test_dataset.imgs)}): {[os.path.basename(i) for i in test_dataset.imgs]}"
        )
        # one metrics row per test image, keyed by file basename
        self.metrics = pd.DataFrame(data={i:[] for i in [os.path.basename(i) for i in test_dataset.imgs]}).T
def train(self, epochs):
"""Train the network"""
# Initialization
logging.info(
"%s INFO: Begin training",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
)
iter_ = 0
start_epoch, accu, iou, f1, train_loss, test_loss, losses = self._load_init()
loss_weights = torch.ones(
self.cfg.N_CLASSES, dtype=torch.float32, device=self.device
)
if self.cfg.WEIGHTED_LOSS:
weights = self.gt_dataset.compute_frequency()
loss_weights = (
torch.from_numpy(weights).type(torch.FloatTensor).to(self.device)
)
train_loader = self.train_dataset.get_loader(
self.cfg.BATCH_SIZE, self.cfg.WORKERS
)
for e in tqdm(range(start_epoch, epochs + 1), total=epochs):
logging.info(
"\n%s Epoch %s",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
e,
)
self.scheduler.step()
self.net.train()
steps_pbar = tqdm(
train_loader, total=self.cfg.EPOCH_SIZE // self.cfg.BATCH_SIZE
)
for data in steps_pbar:
features, labels = data
self.optimizer.zero_grad()
features = features.float().to(self.device)
labels = labels.float().to(self.device)
output = self.net(features)
if isinstance(output, tuple):
output, _, _ = output
loss = CrossEntropy2d(output, labels, weight=loss_weights)
loss.backward()
self.optimizer.step()
losses.append(loss.item())
iter_ += 1
steps_pbar.set_postfix({"loss": loss.item()})
train_loss.append(np.mean(losses[-1 * self.cfg.EPOCH_SIZE :]))
logging.info(f"Train loss: {train_loss}")
loss, iou_, acc_, f1_ = self.test()
test_loss.append(loss)
accu.append(acc_)
iou.append(iou_ * 100)
f1.append(f1_ * 100)
# Save final state
name = "_".join([os.path.join(self.cfg.PATH_MODELS, self.net_name), os.path.basename(self.dataset), f"{self.cfg.ext}.pt"])
self.save_to_jit(name)
def test(self):
logging.info(
"%s INFO: Begin testing",
time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime()),
)
csv_name = "{}_{}{}.csv".format(os.path.join(self.cfg.SAVE_FOLDER, self.cfg.NET_NAME), os.path.basename(self.dataset), self.cfg.ext)
self.net.eval()
loss, acc, iou, f1 = (
[],
[],
[],
[],
) # will contain the metric and loss calculated for each image
test_dataset = RGBIncrementalDataset(self.dataset, self.cfg, train=False, finetune=False)
test_images = test_dataset.get_loader(1, self.cfg.TEST_WORKERS)
stride = self.cfg.STRIDE
for iteration, (idx, data) in enumerate(tqdm(zip(test_dataset.test_ids, test_images), total=len(test_dataset.test_ids))):
file_name = os.path.basename(sorted(glob(os.path.join(self.dataset, "gts", '*')))[idx])
logging.info("Filename: %s", file_name)
data = [i.squeeze(0) for i in data]
img = data[:-1][0]
gt = data[-1].cpu().numpy()
pred_ = self._infer_image(stride, img, self.net, self.cfg.N_CLASSES)
# Computes the class with the highest probability
pred = np.argmax(pred_, axis=-1)
# Compute the metrics
ignore_indx = None
metric_acc = accuracy(pred, gt, ignore_indx=ignore_indx)
metric_iou = IoU(pred, gt, self.cfg.N_CLASSES, all_iou=True, ignore_indx=ignore_indx)
metric_f1 = f1_score(pred, gt, self.cfg.N_CLASSES, all=True, ignore_indx=ignore_indx)
metric_iou, all_iou = metric_iou
metric_f1, all_f1, weighted_f1 = metric_f1
acc.append(metric_acc)
iou.append(metric_iou)
f1.append(metric_f1)
logging.info("Mean IoU : " + str(np.nanmean(iou)))
logging.info("Mean accu : " + str(np.nanmean(acc)))
logging.info("Mean F1 : " + str(np.nanmean(f1)))
return np.mean(loss), np.nanmean(iou), np.mean(acc), np.mean(f1)
def _load_init(self):
start_epoch = 1
train_loss = []
test_loss = []
losses = []
accu = []
iou = []
f1 = []
return start_epoch, accu, iou, f1, train_loss, test_loss, losses
| 5,939 | 1,959 |
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
class User(AbstractUser):
    """Custom user model with a user-to-user relation and rating helpers."""

    # Self-referential M2M; presumably a follow/friend graph (reverse accessor
    # is "to_user") — verify against app usage.
    from_user = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="to_user")

    def get_score(self):
        """Return the user's average rating score rounded to 2 decimals.

        Returns None when the user has no ratings (aggregate yields None for
        an empty queryset; previously round(None, 2) raised TypeError).
        """
        avg = self.ratings.aggregate(models.Avg('score'))['score__avg']
        return None if avg is None else round(avg, 2)

    def get_recommend(self):
        """Return the highest-scored rating, or None if there are none."""
        return self.ratings.order_by('-score').first()

    def __str__(self):
        return self.username
| 478 | 150 |
from django.contrib import admin
from statistics_tickets.models import ResultOfTreatment, VisitPurpose, StatisticsTicket, Outcomes, ExcludePurposes, ConditionsCare, Place
class StatisticsTicketAdmin(admin.ModelAdmin):
    """Admin for statistics tickets; the card link is shown read-only."""

    list_display = ('card', 'date', 'invalid_ticket', 'purpose__title', 'result__title', 'first_time', 'primary_visit', 'dispensary_registration', 'doctor')
    # Fixed typo: was ("card ",) with a trailing space, which does not match
    # the actual "card" field and therefore excluded nothing.
    exclude = ("card",)
    readonly_fields = ('card',)

    def purpose__title(self, obj: StatisticsTicket):
        # List column: title of the linked visit purpose (blank when unset).
        return obj.purpose.title if obj.purpose else ""

    def result__title(self, obj: StatisticsTicket):
        # List column: title of the linked treatment result (blank when unset).
        return obj.result.title if obj.result else ""
# Reference models use the default ModelAdmin.
admin.site.register(VisitPurpose)
admin.site.register(ResultOfTreatment)
admin.site.register(Outcomes)
admin.site.register(ExcludePurposes)
admin.site.register(ConditionsCare)
admin.site.register(Place)
# StatisticsTicket gets the customized admin defined in this module.
admin.site.register(StatisticsTicket, StatisticsTicketAdmin)
| 919 | 288 |
"""
@file
@brief Starts an app locally to test it.
"""
from OpenSSL import crypto
def create_self_signed_cert(keyfile="key.pem", certfile="cert.pem",
                            country='FR', state='Paris', location='Paris',
                            organization='mathenjeu', cn='mathenjeu',
                            organizational_unit_name=None,
                            email=None, size=4096, days=365, algo="sha256",
                            fLOG=print):
    """
    Creates a signed certificate.

    :param keyfile: key file
    :param certfile: certificate file
    :param country: country
    :param state: state
    :param location: location
    :param cn: common name
    :param organization: organization
    :param organizational_unit_name: organizational unit name (can be empty)
    :param email: email (can be empty)
    :param size: key size
    :param days: days it is valid
    :param algo: algorithm
    :param fLOG: logging function

    See also `How to generate a certificate using pyOpenSSL to make it secure connection?
    <https://stackoverflow.com/questions/44055029/how-to-generate-a-certificate-using-pyopenssl-to-make-it-secure-connection>`_,
    `How to serve HTTP/2 using Python
    <https://medium.com/python-pandemonium/how-to-serve-http-2-using-python-5e5bbd1e7ff1>`_.

    .. cmdref::
        :title: Creates a signed certificate
        :cmd: -m mathenjeu create_self_signed_cert --help

        The command line creates a certificate used later by
        a service such as :epkg:`hypercorn` or :epkg:`waitress`.

        Example::

            python -m mathenjeu create_self_signed_cert --keyfile=key.pem --certfile=cert.pem
    """
    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, size)
    cert = crypto.X509()
    cert.get_subject().C = country
    cert.get_subject().ST = state
    cert.get_subject().L = location
    cert.get_subject().O = organization
    if organizational_unit_name:
        cert.get_subject().OU = organizational_unit_name
    cert.get_subject().CN = cn
    if email:
        cert.get_subject().emailAddress = email
    cert.set_serial_number(1000)
    cert.gmtime_adj_notBefore(0)
    # Validity window is exactly *days* days from now. (The previous code
    # multiplied by 5, contradicting the documented meaning of *days*.)
    cert.gmtime_adj_notAfter(days * 24 * 60 * 60)
    # Self-signed: issuer == subject, signed with its own key.
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(k)
    # Use the caller-supplied digest; 'sha256' was hard-coded before, which
    # silently ignored the *algo* parameter.
    cert.sign(k, algo)
    with open(certfile, 'wb') as f:
        if fLOG:
            fLOG("[create_self_signed_cert] create '{0}'".format(certfile))
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(keyfile, 'wb') as f:
        if fLOG:
            fLOG("[create_self_signed_cert] create '{0}'".format(keyfile))
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
# from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView
from django.views.generic import DetailView
from django.shortcuts import render
from .models import Ad
# def index(request):
# return HttpResponse("Hello, world. You're at the polls index.")
class HomePage(ListView):
    # ListView renders 'index.html' with the list of all Ad objects.
    model = Ad
    template_name = 'index.html'
    context_object_name = 'all_ads_list'  # friendlier name for the template context
class AdDetail(DetailView):
    # DetailView renders 'detail.html' for a single Ad object.
    model = Ad
    template_name = 'detail.html'
import os
import shutil
import argparse
def rename(source_path, recursive):
    """Rename source_path (file or directory) to lowercase.

    With recursive=True and a directory, every file and directory inside is
    lowercased as well, then source_path itself.

    Previously the walk recursed with recursive=True, so every subdirectory
    re-walked its own subtree (duplicated work) and the outer walk then tried
    to descend into already-renamed directory names. Walking bottom-up and
    renaming each entry exactly once fixes both problems.
    """
    if recursive and os.path.isdir(source_path):
        # Bottom-up: children are renamed before their parent directory is,
        # so every path handed to the inner rename is still valid.
        for dir_path, dir_names, filenames in os.walk(source_path, topdown=False):
            for name in filenames + dir_names:
                rename(os.path.join(dir_path, name), False)
    dirname, basename = os.path.split(source_path)
    destination_path = os.path.join(dirname, basename.lower())
    # NOTE: on case-insensitive filesystems exists() sees the same file, so
    # a pure case change is skipped — pre-existing behavior, kept as-is.
    if not os.path.exists(destination_path):
        shutil.move(source_path, destination_path)
        print('Moved {0} to {1}'.format(source_path, destination_path))
def main():
    """Parse command-line arguments and lowercase each given path."""
    parser = argparse.ArgumentParser(
        description='Rename files and directories to lowercase')
    parser.add_argument('paths', nargs='+', help='Path to files or directories')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='Recursively search directory')
    args = parser.parse_args()
    for target in args.paths:
        rename(target, args.recursive)
if __name__ == '__main__':
    main()
| 1,099 | 329 |
#!/usr/bin/env python2
import numpy as np
import rospy
from rospy.numpy_msg import numpy_msg
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
def follow_wall(scan):
    """Return a (speed, steering_angle) pair computed from laser scan ranges.

    Placeholder implementation: students replace the constant values below
    using the data in ``scan``.
    """
    ####### STUDENT CODE START #######
    speed = 1
    steering_angle = 1
    ####### STUDENT CODE END #######
    return speed, steering_angle
########################### Ignore Code Below ###########################
class WallFollower:
    """ROS glue: subscribes to laser scans, delegates to follow_wall, and
    publishes the resulting Ackermann drive command."""

    # import ROS parameters from the "params.yaml" file.
    # access these variables in class functions with self:
    # i.e. self.CONSTANT
    # NOTE: these are fetched at class-creation (import) time, so the ROS
    # parameter server must already hold them when this module is imported.
    SCAN_TOPIC = rospy.get_param("wall_follower/scan_topic")
    DRIVE_TOPIC = rospy.get_param("wall_follower/drive_topic")
    SIDE = rospy.get_param("wall_follower/side")
    VELOCITY = rospy.get_param("wall_follower/velocity")
    DESIRED_DISTANCE = rospy.get_param("wall_follower/desired_distance")

    def __init__(self):
        # setup laser scan subscriber
        self.sub_scan = rospy.Subscriber(self.SCAN_TOPIC,
                                         LaserScan,
                                         callback=self.scan_callback)
        # setup drive publisher
        self.pub_drive = rospy.Publisher(self.DRIVE_TOPIC,
                                         AckermannDriveStamped,
                                         queue_size=1)

    def scan_callback(self, scan_msg):
        """Lidar callback: compute and publish one drive command per scan."""
        # get list of range measurements
        scan_data = scan_msg.ranges
        # call student's code for speed and angle, given scan
        drive_command = follow_wall(scan_data)
        print(drive_command)
        # create, populate and publish drive command
        drive_msg = AckermannDriveStamped()
        drive_msg.drive.speed = drive_command[0]
        drive_msg.drive.steering_angle = drive_command[1]
        self.pub_drive.publish(drive_msg)
if __name__ == "__main__":
    # Start the node and block processing callbacks until shutdown.
    rospy.init_node('wall_follower')
    wall_follower = WallFollower()
    rospy.spin()
# File: volatility_connector.py
#
# Copyright (c) 2014-2016 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from phantom.vault import Vault
# THIS Connector imports
from volatility_consts import *
import uuid
import os
import glob
import re
import shutil
import sys
import fnmatch
# Volatility imports
# pylint: disable=E0611
import volatility.conf as vol_conf
import volatility.registry as registry
import volatility.commands as vol_commands
import volatility.addrspace as addrspace
# import volatility.plugins.filescan as filescan
import volatility.plugins.vadinfo as vadinfo
import volatility.utils as vol_utils
import volatility.plugins.malware.malfind as malfind
import volatility.protos as protos
# Code to execute in order to use volatility as a library
# TODO: Move these to initialize()
registry.PluginImporter()
vol_config = vol_conf.ConfObject()
registry.register_global_options(vol_config, vol_commands.Command)
registry.register_global_options(vol_config, addrspace.BaseAddressSpace)
# Dict of plugin name -> command class, shared by all actions below.
cmds = registry.get_plugin_classes(vol_commands.Command, lower=True)
# the following argv 'work around' is to keep volatility happy
# and _also_ debug the connector as a script via pudb
try:
    argv_temp = list(sys.argv)
except Exception:
    # Best-effort copy of argv; narrowed from a bare except so that
    # SystemExit / KeyboardInterrupt are no longer swallowed.
    pass
sys.argv = ['']
vol_config.parse_options()
class VolatilityConnector(BaseConnector):
    """Phantom app connector that runs Volatility plugins against a memory
    image stored in the vault."""

    # Supported action identifiers, matched against the action requested by
    # the platform in handle_action.
    ACTION_ID_GET_PSINFO = "get_psinfo"
    ACTION_ID_EXTRACT_PROCESS = "get_process_image"
    ACTION_ID_RUN_EXHAUSTIVE_CMDS = "run_exhaustive_commands"
    ACTION_ID_RUN_DRIVERSCAN = "run_driverscan"
    ACTION_ID_RUN_MUTANTSCAN = "run_mutantscan"
    ACTION_ID_RUN_FILESCAN = "run_filescan"
    ACTION_ID_RUN_HIVELIST = "run_hivelist"
    ACTION_ID_RUN_MALFIND = "run_malfind"
    ACTION_ID_RUN_SHELLBAGS = "run_shellbags"
    ACTION_ID_RUN_TIMELINER = "run_timeliner"
    ACTION_ID_RUN_CMDSCAN = "run_cmdscan"
    ACTION_ID_RUN_PRINTKEY = "run_printkey"
    ACTION_ID_RUN_MFTPARSER = "run_mftparser"
    ACTION_ID_RUN_SOCKSCAN = "run_sockscan"
    ACTION_ID_RUN_IEHISTORY = "run_iehistory"
    ACTION_ID_LIST_CONNECTIONS = "list_connections"
    def __init__(self):
        # Call the BaseConnector's init first
        super(VolatilityConnector, self).__init__()
    def initialize(self):
        """Phantom lifecycle hook: locate the bundled vol.py script.

        ``self`` is passed as the status receiver — _get_vol_py_path calls
        set_status on it if vol.py cannot be found.
        """
        return self._get_vol_py_path(self)
    def _get_vol_py_path(self, result):
        """Find vol.py under the app's dependencies folder and cache its path.

        :param result: object exposing set_status (ActionResult or the connector)
        :return: APP_SUCCESS, or the error status set on ``result``
        """
        app_dir = os.path.dirname(os.path.abspath(__file__))
        matches = []
        for root, dirnames, filenames in os.walk("{0}/dependencies".format(app_dir)):
            for filename in fnmatch.filter(filenames, 'vol.py'):
                matches.append(os.path.join(root, filename))
        if (not matches):
            return result.set_status(phantom.APP_ERROR, "Unable to find vol.py in app directory")
        # The first instance that matches is good
        self._vol_py_path = matches[0]
        return (phantom.APP_SUCCESS)
    def _get_profile(self, vol_config, cmds, action_result):
        """Run the imageinfo plugin and parse the first suggested profile.

        :return: tuple (status, profile); profile is None on failure
        """
        imageinfo = cmds['imageinfo'](vol_config)
        # Preset to failure; overwritten with success when a profile is found.
        action_result.set_status(phantom.APP_ERROR, VOL_ERR_UNABLE_TO_CHOOSE_A_PROFILE)
        try:
            for label, type, value in imageinfo.calculate():
                # self.debug_print('label', label)
                if (re.search('.*Suggested.*Profile.*', label)):
                    # self.debug_print('value', value)
                    # Keep only the first entry of the comma-separated list.
                    m = re.search('(.*?),.*', value)
                    if m:
                        profile = m.group(1)
                        # self.debug_print('profile', profile)
                        return (action_result.set_status(phantom.APP_SUCCESS), profile)
        except Exception as e:
            action_result.set_status(phantom.APP_ERROR, VOL_ERR_GET_PROFILE, e)
        return (action_result.get_status(), None)
def _handle_psinfo(self, vault_id, vol_config, cmds, action_result):
# First execute the dlllist plugin
dlllist = cmds['dlllist'](vol_config)
# the dlllist dictionary where the pid is the key
dll_list = {}
for obj in dlllist.calculate():
pid = "{}".format(obj.UniqueProcessId)
if (obj.Peb):
curr_dict = {}
curr_dict['command_line'] = "{}".format(str(obj.Peb.ProcessParameters.CommandLine or ''))
dll_list[pid] = curr_dict
modules = obj.get_load_modules()
try:
path = next(modules)
except StopIteration:
continue
curr_dict['path'] = str(path.FullDllName)
# Now run the psscan plugin
psscan = cmds['psscan'](vol_config)
num_of_processes = 0
for obj in psscan.calculate():
num_of_processes += 1
pid = "{}".format(obj.UniqueProcessId)
curr_dict = {
"offset": "{}".format(hex(int(obj.obj_offset))),
"name": "{}".format(obj.ImageFileName),
"pid": "{}".format(obj.UniqueProcessId),
"ppid": "{}".format(obj.InheritedFromUniqueProcessId),
"pdb": "{}".format(hex(int(obj.Pcb.DirectoryTableBase))),
"time_created": "{}".format(obj.CreateTime or ''),
"time_exited": "{}".format(obj.ExitTime or ''),
"command_line": "",
"path": ""}
# get info from dll list if present
if (pid in dll_list):
if ('command_line' in dll_list[pid]):
curr_dict['command_line'] = dll_list[pid]['command_line']
if ('path' in dll_list[pid]):
curr_dict['path'] = dll_list[pid]['path']
action_result.add_data(curr_dict)
data_size = action_result.get_data_size()
if (not data_size):
# psscan did not complete successfully, try pslist
self.debug_print("psscan did not yield any results, trying pslist")
pslist = cmds['pslist'](vol_config)
num_of_processes = 0
for obj in pslist.calculate():
num_of_processes += 1
pid = "{}".format(obj.UniqueProcessId)
curr_dict = {
"offset": "{}".format(hex(int(obj.obj_offset))),
"name": "{}".format(obj.ImageFileName),
"pid": "{}".format(obj.UniqueProcessId),
"ppid": "{}".format(obj.InheritedFromUniqueProcessId),
"pdb": "{}".format(hex(int(obj.Pcb.DirectoryTableBase))),
"time_created": "{}".format(obj.CreateTime or ''),
"time_exited": "{}".format(obj.ExitTime or ''),
"command_line": "",
"path": ""}
# get info from dll list if present
if (pid in dll_list):
if ('command_line' in dll_list[pid]):
curr_dict['command_line'] = dll_list[pid]['command_line']
if ('path' in dll_list[pid]):
curr_dict['path'] = dll_list[pid]['path']
action_result.add_data(curr_dict)
action_result.update_summary({VOL_JSON_NUM_PROCESSES: num_of_processes})
return action_result.set_status(phantom.APP_SUCCESS)
    def _move_file_to_vault(self, container_id, file_size, type_str, contains, local_file_path, action_result):
        """Add a local file to the Phantom vault and record its details.

        :param container_id: container to attach the file to
        :param file_size: size in bytes; computed from disk when falsy
        :param type_str: vault file type label
        :param contains: list of 'contains' labels for the vault entry
        :param local_file_path: path of the file to attach
        :param action_result: receives the vault details and final status
        :return: the vault-details dict (also appended to action_result data)
        """
        self.save_progress(phantom.APP_PROG_ADDING_TO_VAULT)
        # lets move the data into the vault
        vault_details = action_result.add_data({})
        if (not file_size):
            file_size = os.path.getsize(local_file_path)
        vault_details[phantom.APP_JSON_SIZE] = file_size
        vault_details[phantom.APP_JSON_TYPE] = type_str
        vault_details[phantom.APP_JSON_CONTAINS] = contains
        vault_details[phantom.APP_JSON_ACTION_NAME] = self.get_action_name()
        vault_details[phantom.APP_JSON_APP_RUN_ID] = self.get_app_run_id()
        file_name = os.path.basename(local_file_path)
        vault_ret_dict = Vault.add_attachment(local_file_path, container_id, file_name, vault_details)
        if (vault_ret_dict.get('succeeded')):
            vault_details[phantom.APP_JSON_VAULT_ID] = vault_ret_dict[phantom.APP_JSON_HASH]
            vault_details[phantom.APP_JSON_NAME] = file_name
            action_result.set_status(phantom.APP_SUCCESS, VOL_SUCC_FILE_ADD_TO_VAULT,
                vault_id=vault_ret_dict[phantom.APP_JSON_HASH])
        else:
            # print vault_ret_dict['message']
            action_result.set_status(phantom.APP_ERROR, phantom.APP_ERR_FILE_ADD_TO_VAULT)
            action_result.append_to_message('. ' + vault_ret_dict['message'])
        return vault_details
    def _handle_process_extraction(self, vault_id, vault_file, profile, param):
        """Dump process images for the requested pids via the dumpfiles plugin
        and add each extracted image to the vault.

        One ActionResult is created per pid; dumpfiles failures are detected by
        the absence of the expected output file, since the command itself
        fails silently.
        """
        # Create and make the temp directory for this vault_file
        temp_dir = "/vault/tmp/{}".format(str(uuid.uuid4()))
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if not os.path.exists(temp_dir):
            return self.set_status(phantom.APP_ERROR, VOL_ERR_CANNOT_MAKE_TEMP_FOLDER)
        # Get the comma separated pid list
        pid_comma_separated = phantom.get_req_value(param, phantom.APP_JSON_PID)
        # Create an array of pid, get_list_from_string will remove blanks, empty elements and duplicates
        pid_list = phantom.get_list_from_string(pid_comma_separated)
        # Create the comma separated list again without the spaces, dumpfiles spits an error
        # if the pids are anything but comma separated
        pid_comma_separated = ','.join(pid_list)
        # The volatility command
        vol_command = "python2.7 {0} --filename={1} --profile={2} dumpfiles -n ".format(self._vol_py_path, vault_file, profile)
        vol_command += " --dump-dir {} -p {}".format(temp_dir, pid_comma_separated)
        # self.debug_print('vol_command', vol_command)
        # Execute it
        try:
            sout, serr, cmd_ret_code = phantom.run_ext_command(vol_command)
        except Exception as e:
            self.debug_print("Failed to execute '{0}'".format(vol_command), e)
            action_result = self.add_action_result(ActionResult(dict(param)))
            return action_result.set_status(phantom.APP_ERROR, "Failed to execute volatility command")
        # We ignore the return values of this command because it silently fails, the only
        # way to find out if the pid was extracted is to check for it's presence on disk
        # and fail if not found
        for pid in pid_list:
            # Create a action result to store this pid's status
            action_result = self.add_action_result(ActionResult(dict(param)))
            # Set the parameter
            action_result.update_param({phantom.APP_JSON_VAULT_ID: vault_id, phantom.APP_JSON_PID: pid})
            # Update the summary with the profile used
            action_result.update_summary({VOL_JSON_PROFILE_USED: profile})
            # Create a path to the image file
            image_filename = '{}/file.{}.*.exe.img'.format(temp_dir, pid)
            # Check if it exists
            files_matched = glob.glob(image_filename)
            # Only one should match since we are giving a pid
            if (len(files_matched) == 1):
                out_file_name = files_matched[0]
                self.debug_print('File Name', out_file_name)
                self._move_file_to_vault(self.get_container_id(), os.path.getsize(out_file_name),
                    VOL_CONST_EXTRACTED_PROCESS_FILE_TYPE, [VOL_CONST_EXTRACTED_PROCESS_FILE_TYPE, 'hash'],
                    out_file_name, action_result)
            else:
                action_result.set_status(phantom.APP_ERROR, VOL_ERR_EXTRACTED_PROCESS,
                    files_matched=len(files_matched),
                    should_match='1')
        # TODO: Write a util function to delete a non-empty directory.
        # os.rmdir or shutil.rmtree will not work
        # os.rmdir(temp_dir)
        return action_result.get_status()
    def _run_vol_cmd_shell(self, vol_plugin_cmd, vault_id, vault_file, profile, action_result, additional_switch=[]):
        """Run a volatility plugin as an external command, capture its stdout
        into a text file and add that file to the vault.

        NOTE(review): ``additional_switch`` is a mutable default argument; it
        is only read here, so behavior is unaffected, but callers must never
        mutate the passed list in place.

        :param vol_plugin_cmd: plugin name to run (e.g. 'mftparser')
        :param additional_switch: extra command-line switches for the plugin
        :return: the status set on action_result
        """
        temp_dir = "/vault/tmp/{}".format(str(uuid.uuid4()))
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        if not os.path.exists(temp_dir):
            return action_result.set_status(phantom.APP_ERROR, VOL_ERR_CANNOT_MAKE_TEMP_FOLDER)
        out_file_name = "{0}/{1}.txt".format(temp_dir,
            vol_plugin_cmd.replace(' ', '_'))
        vol_command = []
        vol_command.append('python2.7')
        vol_command.append(self._vol_py_path)
        vol_command.append("--filename={}".format(vault_file))
        vol_command.append("--profile={}".format(profile))
        vol_command.append(vol_plugin_cmd)
        vol_command.extend(additional_switch)
        self.debug_print('vol_command', vol_command)
        try:
            sout, serr, cmd_ret_code = phantom.run_ext_command(vol_command)
        except Exception as e:
            self.debug_print("Failed to execute '{0}'".format(vol_command), e)
            return action_result.set_status(phantom.APP_ERROR, "Failed to execute volatility command")
        if (cmd_ret_code != 0):
            action_result.set_status(phantom.APP_ERROR, VOL_ERR_COMMAND, command=vol_plugin_cmd)
            action_result.append_to_message('. ' + serr.strip('\r\n '))
            return action_result.get_status()
        # write the stdout to the file
        with open(out_file_name, "w") as out_fp:
            out_fp.write(sout)
        # Add the name of the input vault_id file to the output file, it looks better, shows some relationship
        vault_file_info = Vault.get_file_info(container_id=self.get_container_id(), vault_id=vault_id)
        self.debug_print('vault_file_info: {0}'.format(vault_file_info))
        if (len(vault_file_info) > 0):
            generate_name = "{0}/{1}-{2}.txt".format(temp_dir,
                vault_file_info[0][phantom.APP_JSON_NAME],
                vol_plugin_cmd.replace(' ', '_'))
            shutil.move(out_file_name, generate_name)
            out_file_name = generate_name
        type_str = VOL_CONST_FORENSIC_FILE_TYPE.format(vol_plugin_cmd)
        self._move_file_to_vault(self.get_container_id(), os.path.getsize(out_file_name),
            type_str, [type_str], out_file_name, action_result)
        # TODO: Write a util function to delete a non-empty directory.
        # os.rmdir or shutil.rmtree will not work
        # os.rmdir(temp_dir)
        return action_result.get_status()
    def _run_mftparser_cmd(self, vault_id, vault_file, profile, action_result):
        # Thin wrapper: run the 'mftparser' plugin via the shell helper.
        return self._run_vol_cmd_shell('mftparser', vault_id, vault_file, profile, action_result)
    def _run_timeliner_cmd(self, vault_id, vault_file, profile, action_result):
        # Thin wrapper: run the 'timeliner' plugin via the shell helper.
        return self._run_vol_cmd_shell('timeliner', vault_id, vault_file, profile, action_result)
    def _run_cmdscan_cmd(self, vault_id, vault_file, profile, action_result):
        # Thin wrapper: run the 'cmdscan' plugin via the shell helper.
        return self._run_vol_cmd_shell('cmdscan', vault_id, vault_file, profile, action_result)
    def _run_printkey_cmd(self, vault_id, vault_file, profile, action_result, param):
        """Run the 'printkey' plugin for the registry key in ``param``,
        optionally at a specific hive virtual address."""
        additional_switch = []
        additional_switch.append('-K')
        additional_switch.append(str(param[VOL_JSON_KEY]))
        if (VOL_JSON_HIVE_ADDRESS in param):
            # Restrict the plugin to a single hive.
            additional_switch.append('-o')
            additional_switch.append(str(param[VOL_JSON_HIVE_ADDRESS]))
        return self._run_vol_cmd_shell('printkey', vault_id, vault_file, profile, action_result, additional_switch)
    def _run_shellbags_cmd(self, vault_id, vault_file, profile, action_result):
        # Thin wrapper: run the 'shellbags' plugin via the shell helper.
        return self._run_vol_cmd_shell('shellbags', vault_id, vault_file, profile, action_result)
    def _run_iehistory_cmd(self, vol_config, cmds, action_result):
        """Run the iehistory plugin and add one data entry per cached record."""
        iehistory = cmds['iehistory'](vol_config)
        for process, hist_record in iehistory.calculate():
            location = "{}".format(hist_record.Url)
            # strip all the data before http if present
            url_location = location.find("http")
            url = location[url_location:] if (url_location != -1) else location
            curr_data = {
                "offset": "{}".format(hex(int(hist_record.obj_offset))),
                "pid": "{}".format(process.UniqueProcessId),
                "image_filename": "{}".format(process.ImageFileName),
                "cache_type": "{}".format(hist_record.Signature),
                "record_length": "{}".format(hist_record.Length),
                "location": "{}".format(location),
                "url": "{}".format(url),
            }
            # URL records carry extra timing / payload metadata.
            if (hist_record.obj_name == '_URL_RECORD'):
                curr_data['last_modified'] = "{}".format(hist_record.LastModified)
                curr_data['last_accessed'] = "{}".format(hist_record.LastAccessed)
                curr_data['file_offset'] = "{}".format(hist_record.FileOffset)
                curr_data['data_offset'] = "{}".format(hist_record.DataOffset)
                curr_data['data_length'] = "{}".format(hist_record.DataSize)
                if (hist_record.FileOffset > 0):
                    curr_data['file'] = "{}".format(hist_record.File)
                if (hist_record.has_data()):
                    curr_data['data'] = "{}".format(hist_record.Data)
            action_result.add_data(curr_data)
        return action_result.set_status(phantom.APP_SUCCESS)
    def _list_connections(self, vol_config, cmds, action_result):
        """Dispatch: connscan on WinXP profiles (no netscan support there),
        netscan everywhere else."""
        if (vol_config.PROFILE.find('WinXP') != -1):
            return self._run_connscan_cmd(vol_config, cmds, action_result)
        return self._run_netscan_cmd(vol_config, cmds, action_result)
    def _run_netscan_cmd(self, vol_config, cmds, action_result):
        """Run the netscan plugin and add one data entry per socket/connection."""
        netscan = cmds['netscan'](vol_config)
        addr_space = vol_utils.load_as(netscan._config, astype='physical')
        if (not netscan.is_valid_profile(addr_space.profile)):
            return action_result.set_status(phantom.APP_ERROR, VOL_ERR_NOT_SUPPORTED_FOR_PROFILE,
                vol_command='netscan', profile=vol_config.PROFILE)
        for obj, proto, local_addr, local_port, remote_addr, remote_port, state in netscan.calculate():
            curr_data = {
                "offset": "{}".format(hex(int(obj.obj_offset))),
                'proto': "{}".format(proto),
                'local_ip': "{}".format(local_addr),
                'local_port': "{}".format(local_port),
                'remote_ip': "{}".format(remote_addr),
                'remote_port': "{}".format(remote_port),
                'state': "{}".format(state),
                'pid': "{}".format(obj.Owner.UniqueProcessId),
                'owner': "{}".format(obj.Owner.ImageFileName),
                'create_time': "{}".format(obj.CreateTime or '')
            }
            action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_TOTAL_SOCKETS: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
    def _run_sockscan_cmd(self, vol_config, cmds, action_result):
        """Run the sockscan plugin and add one data entry per socket."""
        sockscan = cmds['sockscan'](vol_config)
        addr_space = vol_utils.load_as(sockscan._config, astype='physical')
        if (not sockscan.is_valid_profile(addr_space.profile)):
            return action_result.set_status(phantom.APP_ERROR, VOL_ERR_NOT_SUPPORTED_FOR_PROFILE,
                vol_command='sockscan', profile=vol_config.PROFILE)
        for obj in sockscan.calculate():
            curr_data = {
                "offset": "{}".format(hex(int(obj.obj_offset))),
                'pid': "{}".format(obj.Pid),
                'local_port': "{}".format(obj.LocalPort),
                'proto': "{}".format(obj.Protocol),
                # Human-readable protocol name looked up from the number.
                'protocol': "{}".format(protos.protos.get(obj.Protocol.v(), "-")),
                'local_ip': "{}".format(obj.LocalIpAddress),
                'create_time': "{}".format(obj.CreateTime)
            }
            action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_TOTAL_SOCKETS: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
    def _run_connscan_cmd(self, vol_config, cmds, action_result):
        """Run the connscan plugin (WinXP-era) and add one entry per connection."""
        connscan = cmds['connscan'](vol_config)
        addr_space = vol_utils.load_as(connscan._config, astype='physical')
        if (not connscan.is_valid_profile(addr_space.profile)):
            return action_result.set_status(phantom.APP_ERROR, VOL_ERR_NOT_SUPPORTED_FOR_PROFILE,
                vol_command='connscan', profile=vol_config.PROFILE)
        for obj in connscan.calculate():
            curr_data = {
                "offset": "{}".format(hex(int(obj.obj_offset))),
                'local_ip': "{}".format(obj.LocalIpAddress),
                'local_port': "{}".format(obj.LocalPort),
                'remote_ip': "{}".format(obj.RemoteIpAddress),
                'remote_port': "{}".format(obj.RemotePort),
                'pid': "{}".format(obj.Pid)
            }
            action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_TOTAL_CONNECTIONS: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
    def _run_malfind_cmd(self, vol_config, cmds, action_result):
        """Run the malfind plugin and add one entry per suspicious VAD region,
        including a hexdump and disassembly of its first 64 bytes."""
        mal = cmds['malfind'](vol_config)
        for task in mal.calculate():
            for vad, address_space in task.get_vads(vad_filter=task._injection_filter):
                if (mal._is_vad_empty(vad, address_space)):
                    continue
                # First 64 bytes of the region, zero-padded where unreadable.
                content = address_space.zread(vad.Start, 64)
                curr_data = {
                    'process': "{}".format(task.ImageFileName),
                    'pid': "{}".format(task.UniqueProcessId),
                    'address': "{}".format(hex(int(vad.Start))),
                    'vad_tag': "{}".format(vad.Tag),
                    'protection': "{}".format(vadinfo.PROTECT_FLAGS.get(vad.u.VadFlags.Protection.v(), "")),
                    'flags': "{}".format(str(vad.u.VadFlags))
                }
                curr_data['buffer'] = "\r"
                for o, h, c in vol_utils.Hexdump(content):
                    curr_data['buffer'] += "{0:#010x}  {1:<48}  {2}".format(vad.Start + o, h, ''.join(c))
                    curr_data['buffer'] += "\r\n"
                curr_data['disassembly'] = "\r"
                for o, i, h in malfind.Disassemble(content, vad.Start):
                    curr_data['disassembly'] += "{0:#x} {1:<16} {2}".format(o, i, h)
                    curr_data['disassembly'] += "\r\n"
                action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_POSSIBLE_MAL_INSTANCES_FOUND: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
    def _run_hivelist_cmd(self, vol_config, cmds, action_result):
        """Run the hivelist plugin and add one entry per unique registry hive."""
        command = cmds['hivelist'](vol_config)
        # store the offsets here, need to keep track of them to ignore them properly in the loop below
        hive_offsets = []
        for hive in command.calculate():
            # 0xbee0bee0 is the CMHIVE signature; skip anything else.
            if hive.Hive.Signature != 0xbee0bee0:
                continue
            if hive.obj_offset in hive_offsets:
                continue
            try:
                # Best available name, in order of preference.
                name = str(hive.FileFullPath or '') or str(hive.FileUserName or '') or str(hive.HiveRootPath or '') or '[no name]'
            except AttributeError:
                name = '[no name]'
            curr_data = {
                'virtual': "{}".format(hex(int(hive.obj_offset))),
                'physical': "{}".format(hex(int(hive.obj_vm.vtop(hive.obj_offset)))),
                'name': "{}".format(name)}
            hive_offsets.append(hive.obj_offset)
            action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_TOTAL_HIVES: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
    def _run_filescan_cmd(self, vol_config, cmds, action_result):
        """Run the filescan plugin and add one entry per file object found."""
        command = cmds['filescan'](vol_config)
        for file in command.calculate():
            header = file.get_object_header()
            curr_data = {
                'offset': "{}".format(hex(int(file.obj_offset))),
                'ptr': "{}".format(header.PointerCount),
                'hnd': "{}".format(header.HandleCount),
                'access': str(file.access_string()),
                'name': str(file.file_name_with_device() or '')}
            action_result.add_data(curr_data)
        action_result.update_summary({VOL_JSON_TOTAL_FILES: action_result.get_data_size()})
        return action_result.set_status(phantom.APP_SUCCESS)
def _run_mutantscan_cmd(self, vol_config, cmds, action_result):
command = cmds['mutantscan'](vol_config)
for mutant in command.calculate():
obj = mutant.get_object_header()
if (mutant.OwnerThread > 0x80000000):
thread = mutant.OwnerThread.dereference_as('_ETHREAD')
cid = "{0}:{1}".format(thread.Cid.UniqueProcess, thread.Cid.UniqueThread)
pid = "{0}".format(thread.Cid.UniqueProcess)
else:
cid = ""
pid = ""
curr_data = {
'offset': "{}".format(mutant.obj_offset),
'ptr': "{}".format(obj.PointerCount),
'hnd': "{}".format(obj.HandleCount),
'signal': "{}".format(mutant.Header.SignalState),
'thread': "{}".format(mutant.OwnerThread),
'cid': cid,
'pid': pid,
'name': str(obj.NameInfo.Name or '')}
action_result.add_data(curr_data)
action_result.update_summary({VOL_JSON_TOTAL_MUTEXES: action_result.get_data_size()})
return action_result.set_status(phantom.APP_SUCCESS)
def _run_driverscan_cmd(self, vol_config, cmds, action_result):
    """Run the volatility 'driverscan' plugin and record one datum per driver.

    The driver total is stored in the action summary under
    VOL_JSON_TOTAL_DRIVERS.
    """
    scanner = cmds['driverscan'](vol_config)
    for driver in scanner.calculate():
        extension = driver.DriverExtension
        header = driver.get_object_header()
        action_result.add_data({
            'offset': "{}".format(hex(int(driver.obj_offset))),
            'pointer_count': "{}".format(header.PointerCount),
            'handle_count': "{}".format(header.HandleCount),
            'start': "{}".format(hex(int(driver.DriverStart))),
            'size': "{}".format(driver.DriverSize),
            'service_key': str(extension.ServiceKeyName or ''),
            'name': str(header.NameInfo.Name or ''),
            'driver_name': str(driver.DriverName or '')})
    action_result.update_summary({VOL_JSON_TOTAL_DRIVERS: action_result.get_data_size()})
    return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
    """Dispatch the configured action against the memory image in the vault.

    Resolves the vault file for param's vault_id, determines (or
    auto-detects) the volatility profile, then routes to the per-action
    handler. Returns a phantom status code.
    """
    # Get params
    vault_id = param[phantom.APP_JSON_VAULT_ID]
    # Create an action_result to hold the status for the profile creation
    action_result = self.add_action_result(ActionResult(dict(param)))
    try:
        vault_file = Vault.get_file_path(vault_id)
    except Exception as e:
        status = action_result.set_status(phantom.APP_ERROR, "Error accessing vault file", e)
        return action_result.get_status()
    # Create the vol config, commands require it
    # NOTE(review): 'vol_config' and 'cmds' are referenced below but never
    # created inside this method — presumably module/class state defined
    # elsewhere in the file; verify before refactoring.
    vol_config.LOCATION = "{}{}".format(VOL_CONST_FILE_URL_PROTO, vault_file)
    # self.debug_print('vol_config.LOCATION', vol_config.LOCATION)
    profile = param.get(VOL_JSON_PROFILE)
    if (not profile):
        self.save_progress("Trying to detect the volatility profile of the input file")
        status, profile = self._get_profile(vol_config, cmds, action_result)
        if (phantom.is_fail(status)):
            # failure, will need to return from here
            return action_result.get_status()
    # We have a profile, first set the status to failure, else it will show up as success if an exception occurs
    action_result.set_status(phantom.APP_ERROR)
    # Set it in the vol config
    vol_config.PROFILE = profile
    # Add this info to the summary
    action_result.update_summary({VOL_JSON_PROFILE_USED: profile})
    # Send the progress
    self.save_progress(VOL_PROG_USING_PROFILE, prof_name=profile)
    # Get the action
    action = self.get_action_identifier()
    status = phantom.APP_ERROR
    if (action == self.ACTION_ID_GET_PSINFO):
        try:
            status = self._handle_psinfo(vault_id, vol_config, cmds, action_result)
        except Exception as e:
            status = action_result.set_status(phantom.APP_ERROR, "", e)
    elif (action == self.ACTION_ID_EXTRACT_PROCESS):
        # Process extraction is a bit different, it supports multiple processes
        # and therefore possible to add more than one action results,
        # Therefore it's necessary to remove the action_result that was just added
        # Also it runs volatility as a separate process using popen, so it takes
        # the config as params
        self.remove_action_result(action_result)
        status = self._handle_process_extraction(vault_id, vault_file, profile, param)
    elif (action == self.ACTION_ID_RUN_DRIVERSCAN):
        status = self._run_driverscan_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_MUTANTSCAN):
        status = self._run_mutantscan_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_FILESCAN):
        status = self._run_filescan_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_HIVELIST):
        status = self._run_hivelist_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_MALFIND):
        status = self._run_malfind_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_SOCKSCAN):
        status = self._run_sockscan_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_LIST_CONNECTIONS):
        status = self._list_connections(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_IEHISTORY):
        status = self._run_iehistory_cmd(vol_config, cmds, action_result)
    elif (action == self.ACTION_ID_RUN_SHELLBAGS):
        # The shellbags/timeliner/cmdscan/printkey/mftparser handlers drive
        # volatility as an external process, so they take the raw file/profile.
        status = self._run_shellbags_cmd(vault_id, vault_file, profile, action_result)
    elif (action == self.ACTION_ID_RUN_TIMELINER):
        status = self._run_timeliner_cmd(vault_id, vault_file, profile, action_result)
    elif (action == self.ACTION_ID_RUN_CMDSCAN):
        status = self._run_cmdscan_cmd(vault_id, vault_file, profile, action_result)
    elif (action == self.ACTION_ID_RUN_PRINTKEY):
        status = self._run_printkey_cmd(vault_id, vault_file, profile, action_result, param)
    elif (action == self.ACTION_ID_RUN_MFTPARSER):
        status = self._run_mftparser_cmd(vault_id, vault_file, profile, action_result)
    else:
        # Unrecognized action: drop the pre-created result and report it.
        self.remove_action_result(action_result)
        return self.unknown_action()
    return status
if __name__ == '__main__':
    # Standalone debug entry point: load an input JSON from the command line,
    # pretty-print it, and run it through the connector under pudb.
    import sys
    import pudb
    import json
    pudb.set_trace()
    # BUG FIX: the path comes from the command line; 'argv_temp' was never
    # defined and raised a NameError.
    with open(sys.argv[1]) as f:
        in_json = f.read()
    in_json = json.loads(in_json)
    print(json.dumps(in_json, indent=4))
    connector = VolatilityConnector()
    connector.print_progress_message = True
    result = connector._handle_action(json.dumps(in_json), None)
    # BUG FIX: 'print result' is a SyntaxError under Python 3.
    print(result)
    exit(0)
| 32,556 | 10,277 |
# Moved Django integration to a separate module to serve as Django's app name.
| 80 | 21 |
from django.apps import AppConfig
class RentingConfig(AppConfig):
    """Django application configuration for the 'renting' app."""
    # Dotted module path Django uses to locate this application.
    name = 'renting'
import collections
class Token(collections.namedtuple('Token', 'type string start end line index startpos endpos')):
    """Immutable token record.

    A tokenize-style tuple (type, string, start, end, line) extended with
    'index', 'startpos' and 'endpos' fields. The subclass exists so
    behavior can be added later without changing the tuple layout.
    """
    ...
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import argparse
import json
import time
import traceback
import v2.lib.resource_op as s3lib
import v2.utils.log as log
import v2.utils.utils as utils
import yaml
from v2.lib.exceptions import TestExecError
from v2.lib.read_io_info import ReadIOInfo
from v2.lib.resource_op import Config
from v2.lib.rgw_config_opts import CephConfOp, ConfigOpts
from v2.lib.s3.auth import Auth
from v2.lib.s3.write_io_info import BasicIOInfoStructure, IOInfoInitialize
from v2.tests.multisite import resuables
from v2.utils.test_desc import AddTestInfo
from v2.utils.utils import HttpResponseParser, RGWService
TEST_DATA_PATH = None
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    """Create a bucket and make sure versioning is enabled on it.

    Returns the created bucket; raises TestExecError if enabling
    versioning does not come back with HTTP 200.
    """
    new_bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    versioning = s3lib.resource_op(
        {"obj": rgw_conn, "resource": "BucketVersioning", "args": [new_bucket.name]}
    )
    # A None status means versioning has never been enabled on this bucket.
    current_status = s3lib.resource_op(
        {"obj": versioning, "resource": "status", "args": None}
    )
    if current_status is None:
        log.info("bucket versioning still not enabled")
        enable_result = s3lib.resource_op(
            {"obj": versioning, "resource": "enable", "args": None}
        )
        parsed = HttpResponseParser(enable_result)
        if parsed.status_code == 200:
            log.info("version enabled")
        else:
            raise TestExecError("version enable failed")
    return new_bucket
def upload_objects(user_info, bucket, config):
    """Upload config.objects_count generated objects into *bucket*."""
    log.info("s3 objects to create: %s" % config.objects_count)
    for idx in range(config.objects_count):
        obj_name = utils.gen_s3_object_name(bucket.name, idx)
        resuables.upload_object(obj_name, bucket, TEST_DATA_PATH, config, user_info)
def test_exec(config):
    """Run the RGW resharding test (dynamic 'online' or manual 'offline').

    Creates one user and one versioned bucket, uploads objects, triggers
    resharding (via ceph.conf for 'online', radosgw-admin for 'offline'),
    then verifies the resulting shard count from the bucket instance
    metadata. Exits the process with 0 on success, 1 on failure.
    """
    test_info = AddTestInfo("RGW Dynamic Resharding test")
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    try:
        test_info.started_info()
        log.info("starting IO")
        config.max_objects_per_shard = 10
        config.no_of_shards = 10
        config.user_count = 1
        user_info = s3lib.create_users(config.user_count)
        user_info = user_info[0]
        auth = Auth(user_info)
        rgw_conn = auth.do_auth()
        config.bucket_count = 1
        log.info("no of buckets to create: %s" % config.bucket_count)
        bucket_name = utils.gen_bucket_name_from_userid(user_info["user_id"], rand_no=1)
        bucket = create_bucket_with_versioning(rgw_conn, user_info, bucket_name)
        upload_objects(user_info, bucket, config)
        log.info("sharding configuration will be added now.")
        # BUG FIX: num_shards_expected was assigned only on the "online"
        # path but is logged and compared on both paths below, which raised
        # NameError for offline sharding. Compute it for both paths.
        if config.sharding_type == "online":
            # online: shards should be >= objects / max_objects_per_shard
            num_shards_expected = config.objects_count / config.max_objects_per_shard
        else:
            # offline: the value passed to radosgw-admin is authoritative
            num_shards_expected = config.no_of_shards
        if config.sharding_type == "online":
            log.info("sharding type is online")
            # for online,
            # the number of shards should be greater than [ (no of objects)/(max objects per shard) ]
            # example: objects = 500 ; max object per shard = 10
            # then no of shards should be at least 50 or more
            time.sleep(15)
            log.info("making changes to ceph.conf")
            ceph_conf.set_to_ceph_conf(
                "global",
                ConfigOpts.rgw_max_objs_per_shard,
                config.max_objects_per_shard,
            )
            ceph_conf.set_to_ceph_conf(
                "global", ConfigOpts.rgw_dynamic_resharding, True
            )
            log.info("num_shards_expected: %s" % num_shards_expected)
            log.info("trying to restart services ")
            srv_restarted = rgw_service.restart()
            time.sleep(30)
            if srv_restarted is False:
                raise TestExecError("RGW service restart failed")
            else:
                log.info("RGW service restarted")
        if config.sharding_type == "offline":
            log.info("sharding type is offline")
            # for offline.
            # the number of shards will be the value set in the command.
            time.sleep(15)
            log.info("in offline sharding")
            cmd_exec = utils.exec_shell_cmd(
                "radosgw-admin bucket reshard --bucket=%s --num-shards=%s"
                % (bucket.name, config.no_of_shards)
            )
            if cmd_exec is False:
                raise TestExecError("offline resharding command execution failed")
        # Upload a second batch of objects so dynamic resharding can trigger.
        log.info("s3 objects to create: %s" % config.objects_count)
        for oc in range(config.objects_count):
            s3_object_name = utils.gen_s3_object_name(
                bucket.name, config.objects_count + oc
            )
            resuables.upload_object(
                s3_object_name, bucket, TEST_DATA_PATH, config, user_info
            )
        # Give the cluster time to complete the reshard before verifying.
        time.sleep(300)
        log.info("verification starts")
        op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
        json_doc = json.loads(op)
        bucket_id = json_doc["data"]["bucket"]["bucket_id"]
        op2 = utils.exec_shell_cmd(
            "radosgw-admin metadata get bucket.instance:%s:%s"
            % (bucket.name, bucket_id)
        )
        json_doc2 = json.loads(op2)
        num_shards_created = json_doc2["data"]["bucket_info"]["num_shards"]
        log.info("no_of_shards_created: %s" % num_shards_created)
        log.info("no_of_shards_expected: %s" % num_shards_expected)
        if config.sharding_type == "offline":
            if num_shards_expected != num_shards_created:
                raise TestExecError("expected number of shards not created")
            log.info("Expected number of shards created")
        if config.sharding_type == "online":
            log.info(
                "for online, "
                "number of shards created should be greater than or equal to number of expected shards"
            )
            if int(num_shards_created) >= int(num_shards_expected):
                log.info("Expected number of shards created")
            else:
                raise TestExecError("Expected number of shards not created")
        read_io = ReadIOInfo()
        read_io.yaml_fname = "io_info.yaml"
        read_io.verify_io()
        test_info.success_status("test passed")
        sys.exit(0)
    # BUG FIX: TestExecError is a subclass of Exception, so it must be caught
    # first — the original order made this handler unreachable.
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status("test failed")
        sys.exit(1)
if __name__ == "__main__":
    # Test entry point: prepare the test-data directory, parse the yaml
    # config, and run the resharding test.
    project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
    test_data_dir = "test_data"
    TEST_DATA_PATH = os.path.join(project_dir, test_data_dir)
    log.info("TEST_DATA_PATH: %s" % TEST_DATA_PATH)
    if not os.path.exists(TEST_DATA_PATH):
        log.info("test data dir not exists, creating.. ")
        os.makedirs(TEST_DATA_PATH)
    parser = argparse.ArgumentParser(description="RGW S3 Automation")
    parser.add_argument("-c", dest="config", help="RGW Test yaml configuration")
    args = parser.parse_args()
    yaml_file = args.config
    config = Config()
    with open(yaml_file, "r") as f:
        # BUG FIX: yaml.load() without an explicit Loader is deprecated and
        # can construct arbitrary Python objects; safe_load is sufficient
        # for a plain configuration mapping.
        doc = yaml.safe_load(f)
    config.objects_count = doc["config"]["objects_count"]
    config.objects_size_range = {
        "min": doc["config"]["objects_size_range"]["min"],
        "max": doc["config"]["objects_size_range"]["max"],
    }
    config.sharding_type = doc["config"]["sharding_type"]
    log.info(
        "objects_count: %s\n"
        "objects_size_range: %s\n"
        "sharding_type: %s\n"
        % (config.objects_count, config.objects_size_range, config.sharding_type)
    )
    test_exec(config)
| 8,117 | 2,561 |
import os
import sys
import json
from json.decoder import JSONDecodeError
# Program which manages a simple database (a Python dictionary stored in a file)
class Database():
    """File-backed key/value database: a Python dict persisted as JSON.

    Loads the backing file on construction and rewrites it after every
    mutating operation.
    """

    def __init__(self, nombre_fichero):
        """Initialize the DB object and load its data into memory."""
        self.nombre_fichero = nombre_fichero
        self.diccionario = {}
        self.__cargar_archivo()

    def __comprobar_archivo(self):
        """Return True if the backing file exists; otherwise create it and return False.

        BUG FIX: the original read the module-level global 'nombre_archivo'
        instead of the instance's own file name, so every instance silently
        operated on the same file.
        """
        if not os.path.isfile(self.nombre_fichero):
            # BUG FIX: seed the new file with an empty JSON object so a later
            # load does not die on invalid (empty) JSON.
            with open(self.nombre_fichero, 'w') as archivo:
                archivo.write('{}')
            return False
        return True

    def __cargar_archivo(self):
        """Load the DB from disk into memory; exit with status 1 on read errors."""
        if self.__comprobar_archivo():
            try:
                # BUG FIX: 'with' guarantees the handle is closed; the original
                # 'archivo.close' (missing parentheses) never closed it.
                with open(self.nombre_fichero, 'r') as archivo:
                    self.diccionario.update(json.load(archivo))
            except JSONDecodeError:
                print('Error reading the JSON file: wrong format')
                sys.exit(1)  # terminate the program with an error
            except Exception:
                print('Error reading the database')
                sys.exit(1)  # terminate the program with an error

    # Public CRUD methods.

    def actualizar_archivo(self):
        """Rewrite the backing text file from the in-memory dict."""
        with open(self.nombre_fichero, 'w') as archivo:
            archivo.write(json.dumps(self.diccionario))

    def crear_entrada(self, clave, valor):
        """Add an entry with the given key and value (keys are unique)."""
        if clave in self.diccionario:
            print("There is already an entry with the key " + clave)
        else:
            self.diccionario[clave] = valor
            print("Entry successfully created")
            self.actualizar_archivo()

    def ver_entradas(self):
        """Print every entry of the dictionary."""
        print("Number of entries: " + str(len(self.diccionario)) + '\n')
        for clave, valor in self.diccionario.items():
            print(('\t %s --> %s') % (clave, valor))

    def eliminar_entrada(self, clave):
        """Delete the entry with the given key, if present."""
        if clave in self.diccionario:
            del self.diccionario[clave]
            print(('Entry with key "%s" successfully deleted' % (clave,)))
            self.actualizar_archivo()
        else:
            print("No entry in the database with key " + clave)

    def modificar_entrada(self, clave):
        """Prompt for a new value and update an existing entry."""
        if clave in self.diccionario:
            nuevo_valor = input("Insert a new value for " + clave + ": ")
            self.diccionario[clave] = nuevo_valor
            print('Entry updated')
            self.actualizar_archivo()
        else:
            print("No entry in the database with key " + clave)
# Load the database from the backing text file.
nombre_archivo = 'file.json'  # default file name
if(len(sys.argv)==2):
    nombre_archivo = sys.argv[1]  # file name given as a command-line argument
base_datos = Database(nombre_archivo)
def mostrar_menu():
    """Interactive console menu driving the global 'base_datos' instance.

    Loops until the user picks option 0 (exit).
    """
    print("Database: " + nombre_archivo)
    while True:
        print("\nChoose an option: ")
        print("1) Read entries in the DB")
        print("2) Create a new entry in the DB")
        print("3) Delete entry from the DB")
        print("4) Modify entry from the DB")
        print("0) Exit")
        seleccion = input("Option: ")
        if not seleccion.isdigit():
            print("Wrong option. Try again.")
            continue
        opcion = int(seleccion)
        if opcion == 1:
            # List current entries.
            print('\n')
            base_datos.ver_entradas()
        elif opcion == 2:
            # Create a new entry.
            clave = input('\nType the key: ')
            valor = input('Type the value: ')
            base_datos.crear_entrada(clave, valor)
        elif opcion == 3:
            # Delete an entry.
            print('\n')
            clave = input("Key of the entry you want to delete: ")
            base_datos.eliminar_entrada(clave)
        elif opcion == 4:
            # Modify an entry.
            print('\n')
            clave = input("Key of the entry you want to modify: ")
            base_datos.modificar_entrada(clave)
        elif opcion == 0:
            # Leave the program.
            sys.exit(0)
        else:
            print("Wrong selection. Try again.")
# Show the user menu (entry point of the interactive loop).
mostrar_menu()
| 5,410 | 1,573 |
#!/usr/bin/env python
""" generated source for module GdlCleaner """
# package: org.ggp.base.util.gdl.transforms
import java.util.ArrayList
import java.util.List
import org.ggp.base.util.gdl.grammar.Gdl
import org.ggp.base.util.gdl.grammar.GdlConstant
import org.ggp.base.util.gdl.grammar.GdlDistinct
import org.ggp.base.util.gdl.grammar.GdlFunction
import org.ggp.base.util.gdl.grammar.GdlLiteral
import org.ggp.base.util.gdl.grammar.GdlNot
import org.ggp.base.util.gdl.grammar.GdlOr
import org.ggp.base.util.gdl.grammar.GdlPool
import org.ggp.base.util.gdl.grammar.GdlProposition
import org.ggp.base.util.gdl.grammar.GdlRelation
import org.ggp.base.util.gdl.grammar.GdlRule
import org.ggp.base.util.gdl.grammar.GdlSentence
import org.ggp.base.util.gdl.grammar.GdlTerm
import org.ggp.base.util.gdl.grammar.GdlVariable
# Cleans up various issues with games to make them more standardized.
# NOTE(review): this class is machine-translated Java ("generated source for
# module GdlCleaner") and is NOT valid Python as written — several lines
# reference names that are never bound (newDescription, rule, cleaned,
# relation, newBody, term1/term2, body, i/or_/disjuncts, function_,
# cleanedBody, not_) and a few lines are outright SyntaxErrors (an empty
# if-branch, isinstance calls missing their first argument). Kept verbatim;
# each breakage is flagged inline.
class GdlCleaner(object):
    """ generated source for class GdlCleaner """
    # Cap on cleanup passes in run(); iteration stops early on a fixed point.
    MAX_ITERATIONS = 100
    BASE = GdlPool.getConstant("base")

    @classmethod
    def run(cls, description):
        """ generated source for method run """
        i = 0
        while i < cls.MAX_ITERATIONS:
            # NOTE(review): 'newDescription' is never assigned before this
            # comparison — the Java original computed it via runOnce() here.
            if newDescription == description:
                break
            description = newDescription
            i += 1
        return description

    @classmethod
    def runOnce(cls, description):
        """ generated source for method runOnce """
        newDescription = ArrayList()
        # First: Clean up all rules with zero-element bodies
        for gdl in description:
            if isinstance(gdl, (GdlRule, )):
                # NOTE(review): 'rule' is unbound — the Java cast of 'gdl'
                # to GdlRule was lost in translation.
                if rule.getBody().size() == 0:
                    newDescription.add(rule.getHead())
                else:
                    newDescription.add(gdl)
            else:
                newDescription.add(gdl)
        # TODO: Add (role ?player) where appropriate, i.e. in rules for
        # "legal" or "input" where the first argument is an undefined
        # variable
        # Get rid of "extra parentheses", i.e. zero-arity functions
        description = newDescription
        newDescription = ArrayList()
        for gdl in description:
            if isinstance(gdl, (GdlRelation, )):
                newDescription.add(cleanParentheses(gdl))
            elif isinstance(gdl, (GdlRule, )):
                newDescription.add(cleanParentheses(gdl))
            else:
                newDescription.add(gdl)
        # TODO: Get rid of GdlPropositions in the description
        # Get rid of (not (distinct _ _)) literals in rules
        # TODO: Expand to functions
        description = newDescription
        newDescription = ArrayList()
        for gdl in description:
            if isinstance(gdl, (GdlRule, )):
                # NOTE(review): 'cleaned' is unbound — the call to
                # removeNotDistinctLiterals(gdl) was dropped in translation.
                if cleaned != None:
                    newDescription.add(cleaned)
            else:
                newDescription.add(gdl)
        # Get rid of the old style of "base" sentences (with arity more than 1, not in rules)
        # See e.g. current version of Qyshinsu on the Dresden server
        description = newDescription
        newDescription = ArrayList()
        removeBaseSentences = False
        for gdl in description:
            if isinstance(gdl, (GdlRelation, )):
                # NOTE(review): 'relation' is unbound (lost cast of 'gdl').
                if relation.__name__ == cls.BASE and relation.arity() != 1:
                    removeBaseSentences = True
                    break
        # Note that in this case, we have to remove ALL of them or we might
        # misinterpret this as being the new kind of "base" relation
        for gdl in description:
            if isinstance(gdl, (GdlRelation, )):
                if removeBaseSentences and relation.__name__ == cls.BASE:
                    # Leave out the relation
                    # NOTE(review): SyntaxError — this branch contains no
                    # statement (comments are not statements in Python).
                else:
                    newDescription.add(gdl)
            else:
                newDescription.add(gdl)
        return newDescription

    @classmethod
    def removeNotDistinctLiterals(cls, rule):
        """ generated source for method removeNotDistinctLiterals """
        # Repeatedly strip (not (distinct ...)) literals until none remain or
        # the rule is proven useless (None).
        while rule != None and getNotDistinctLiteral(rule) != None:
            rule = removeNotDistinctLiteral(rule, getNotDistinctLiteral(rule))
        return rule

    @classmethod
    def getNotDistinctLiteral(cls, rule):
        """ generated source for method getNotDistinctLiteral """
        for literal in rule.getBody():
            if isinstance(literal, (GdlNot, )):
                # NOTE(review): SyntaxError — isinstance is missing its first
                # argument; the Java expression (the NOT's body) was lost.
                if isinstance(, (GdlDistinct, )):
                    # For now, we can only deal with this if not both are functions.
                    # That means we have to skip that case at this point.
                    if not (isinstance(, (GdlFunction, ))) or not (isinstance(, (GdlFunction, ))):
                        return not_
        return None

    # Returns null if the rule is useless.
    @classmethod
    def removeNotDistinctLiteral(cls, rule, notDistinctLiteral):
        """ generated source for method removeNotDistinctLiteral """
        # Figure out the substitution we want...
        # If we have two constants: Either remove one or
        # maybe get rid of the ___?
        # One is a variable: Replace the variable with the other thing
        # throughout the rule
        distinct = notDistinctLiteral.getBody()
        arg1 = distinct.getArg1()
        arg2 = distinct.getArg2()
        if arg1 == arg2:
            # Just remove that literal
            # NOTE(review): 'newBody' is unbound — its construction was lost.
            newBody.addAll(rule.getBody())
            newBody.remove(notDistinctLiteral)
            return GdlPool.getRule(rule.getHead(), newBody)
        if isinstance(arg1, (GdlVariable, )):
            # What we return will still have the not-distinct literal,
            # but it will get replaced in the next pass.
            # (Even if we have two variables, they will be equal next time through.)
            return CommonTransforms.replaceVariable(rule, arg1, arg2)
        if isinstance(arg2, (GdlVariable, )):
            return CommonTransforms.replaceVariable(rule, arg2, arg1)
        if isinstance(arg1, (GdlConstant, )) or isinstance(arg2, (GdlConstant, )):
            # We have two non-equal constants, or a constant and a function.
            # The rule should have no effect.
            return None
        # We have two functions. Complicated! (Have to replace them with unified version.)
        # We pass on this case for now.
        # TODO: Implement correctly.
        raise UnsupportedOperationException("We can't currently handle (not (distinct <function> <function>)).")

    @classmethod
    @overloaded
    def cleanParentheses(cls, rule):
        """ generated source for method cleanParentheses """
        cleanedHead = cls.cleanParentheses(rule.getHead())
        cleanedBody = ArrayList()
        for literal in rule.getBody():
            cleanedBody.add(cls.cleanParentheses(literal))
        return GdlPool.getRule(cleanedHead, cleanedBody)

    @classmethod
    @cleanParentheses.register(object, GdlLiteral)
    def cleanParentheses_0(cls, literal):
        """ generated source for method cleanParentheses_0 """
        if isinstance(literal, (GdlSentence, )):
            return cls.cleanParentheses(literal)
        elif isinstance(literal, (GdlDistinct, )):
            # NOTE(review): 'term1'/'term2' are unbound (lost extraction of
            # the distinct's cleaned arguments).
            return GdlPool.getDistinct(term1, term2)
        elif isinstance(literal, (GdlNot, )):
            # NOTE(review): 'body' is unbound.
            return GdlPool.getNot(cls.cleanParentheses(body))
        elif isinstance(literal, (GdlOr, )):
            # NOTE(review): 'i', 'or_' and 'disjuncts' are unbound; the loop
            # body was reduced to 'pass' by the translator.
            while i < or_.arity():
                pass
                i += 1
            return GdlPool.getOr(disjuncts)
        raise RuntimeException("Unexpected literal type in GdlCleaner")

    @classmethod
    @cleanParentheses.register(object, GdlSentence)
    def cleanParentheses_1(cls, sentence):
        """ generated source for method cleanParentheses_1 """
        if isinstance(sentence, (GdlProposition, )):
            return sentence
        cleanedBody = ArrayList()
        for term in sentence.getBody():
            cleanedBody.add(cls.cleanParentheses(term))
        if len(cleanedBody) == 0:
            return GdlPool.getProposition(sentence.__name__)
        else:
            return GdlPool.getRelation(sentence.__name__, cleanedBody)

    @classmethod
    @cleanParentheses.register(object, GdlTerm)
    def cleanParentheses_2(cls, term):
        """ generated source for method cleanParentheses_2 """
        if isinstance(term, (GdlConstant, )) or isinstance(term, (GdlVariable, )):
            return term
        if isinstance(term, (GdlFunction, )):
            # The whole point of the function
            # NOTE(review): 'function_' and 'cleanedBody' are unbound here.
            if function_.arity() == 0:
                return function_.__name__
            for functionTerm in function_.getBody():
                cleanedBody.add(cls.cleanParentheses(functionTerm))
            return GdlPool.getFunction(function_.__name__, cleanedBody)
        raise RuntimeException("Unexpected term type in GdlCleaner")
| 8,929 | 2,536 |
"""
Convert dataset to TFRecord for TF object detection training.
Example usage:
python3 create_tf_record.py \
--root_dir ./ \
--image_dir images \
--annotation_dir annotations \
--output_dir tf-record \
--dataset_name radar-ml
Only dataset_name is required.
Based on:
https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_pascal_tf_record.py
Copyright (c) 2019~2020 Lindo St. Angel
"""
import hashlib
import io
import logging
import os
import random
import re
import contextlib2
import numpy as np
import PIL.Image
import tensorflow as tf
import argparse
from lxml import etree
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
logger = logging.getLogger(__name__)
# Number of shards per output TFRecord file.
NUM_TFRECORD_SHARDS = 1
# Fraction of examples assigned to the training split (rest -> validation).
TRAIN_VAL_SPLIT = 0.8
TFRECORD_TRAIN_NAME = 'train'
TFRECORD_VAL_NAME = 'val'
# Maps per-individual labels to generic class names (used with --use_alt_names).
ALT_NAME_MAP = {
    'lindo': 'person',
    'nikki': 'person',
    'eva': 'person',
    'nico': 'person',
    'unknown': 'person',
    'polly': 'dog',
    'rebel': 'cat',
    'jack': 'cat'
}
def dict_to_tf_example(data,
                       label_map_dict,
                       image_subdirectory,
                       use_alt_names=False,
                       ignore_difficult_instances=False):
    """Convert XML derived dict to tf.Example proto.

    Notice that this function normalizes the bounding box coordinates provided
    by the raw data.

    Args:
      data: dict holding PASCAL XML fields for a single image (obtained by
        running dataset_util.recursive_parse_xml_to_dict)
      label_map_dict: A map from string label names to integers ids.
      image_subdirectory: String specifying subdirectory within the
        Pascal dataset directory holding the actual image data.
      ignore_difficult_instances: Whether to skip difficult instances in the
        dataset (default: False).
      use_alt_names: Use class names that may be different than labels in images.
        A translation map must be provided (default: False).

    Returns:
      example: The converted tf.Example.

    Raises:
      ValueError: if the image pointed to by data['filename'] is not a valid JPEG
    """
    img_path = os.path.join(image_subdirectory, data['filename'])
    with tf.io.gfile.GFile(img_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = PIL.Image.open(encoded_jpg_io)
    if image.format != 'JPEG':
        raise ValueError('Image format must be JPEG.')
    # SHA-256 of the raw bytes, stored so downstream tooling can dedupe.
    key = hashlib.sha256(encoded_jpg).hexdigest()
    width = int(data['size']['width'])
    height = int(data['size']['height'])
    xmins = []
    ymins = []
    xmaxs = []
    ymaxs = []
    classes = []
    classes_text = []
    truncated = []
    poses = []
    difficult_obj = []
    if 'object' in data:
        for obj in data['object']:
            difficult = bool(int(obj['difficult']))
            if ignore_difficult_instances and difficult:
                continue
            difficult_obj.append(int(difficult))
            xmin = float(obj['bndbox']['xmin'])
            xmax = float(obj['bndbox']['xmax'])
            ymin = float(obj['bndbox']['ymin'])
            ymax = float(obj['bndbox']['ymax'])
            # Normalize box coordinates to [0, 1].
            xmins.append(xmin / width)
            ymins.append(ymin / height)
            xmaxs.append(xmax / width)
            ymaxs.append(ymax / height)
            if use_alt_names:
                # Fall back to the original label if it has no alt mapping.
                class_name = ALT_NAME_MAP.get(obj['name'], obj['name'])
            else:
                class_name = obj['name']
            # BUG FIX: stray debug print replaced with logger.debug so normal
            # runs don't write one stdout line per bounding box.
            logger.debug('%s -> %s', class_name, label_map_dict[class_name])
            classes_text.append(class_name.encode('utf8'))
            classes.append(label_map_dict[class_name])
            truncated.append(int(obj['truncated']))
            poses.append(obj['pose'].encode('utf8'))
    feature_dict = {
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(
            data['filename'].encode('utf8')),
        'image/source_id': dataset_util.bytes_feature(
            data['filename'].encode('utf8')),
        'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
        'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
        'image/object/truncated': dataset_util.int64_list_feature(truncated),
        'image/object/view': dataset_util.bytes_list_feature(poses),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_dict))
def create_tf_record(output_filename,
                     num_shards,
                     label_map_dict,
                     annotations_dir,
                     image_dir,
                     examples,
                     use_alt_names):
    """Write the given examples to a (possibly sharded) TFRecord file.

    Args:
      output_filename: Path to where output file is saved.
      num_shards: Number of shards for output file.
      label_map_dict: The label map dictionary.
      annotations_dir: Directory where annotation files are stored.
      image_dir: Directory where image files are stored.
      examples: Examples to parse and save to tf record.
      use_alt_names: use alternative class name mapping.
    """
    with contextlib2.ExitStack() as close_stack:
        writers = tf_record_creation_util.open_sharded_output_tfrecords(
            close_stack, output_filename, num_shards)
        total = len(examples)
        for idx, example in enumerate(examples):
            if idx % 10 == 0:
                logger.info('On image %d of %d', idx, total)
            xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
            # Skip images without a matching annotation file.
            if not os.path.exists(xml_path):
                logger.warning('Could not find %s, ignoring example.', xml_path)
                continue
            with tf.io.gfile.GFile(xml_path, 'r') as fid:
                xml_str = fid.read()
            annotation = dataset_util.recursive_parse_xml_to_dict(
                etree.fromstring(xml_str))['annotation']
            try:
                tf_example = dict_to_tf_example(
                    data=annotation,
                    label_map_dict=label_map_dict,
                    image_subdirectory=image_dir,
                    use_alt_names=use_alt_names)
                if tf_example:
                    # Round-robin examples across the output shards.
                    writers[idx % num_shards].write(tf_example.SerializeToString())
            except ValueError:
                logger.warning('Invalid example: %s, ignoring.', xml_path)
def gen_trainval_list(images_path):
    """Return image base names (no extension) for all .jpg/.jpeg files.

    The list ordering is not guaranteed to match the on-disk ordering.

    Args:
      images_path: Path to where images are located.
    """
    # BUG FIX: the original tested file.endswith('.jpg' or '.jpeg'), which
    # evaluates to '.jpg' only (so .jpeg files were missed), and its helper
    # returned None for non-matching files, filling the list with Nones.
    return [
        os.path.basename(file).split('.')[0]
        for file in os.listdir(images_path)
        if file.endswith(('.jpg', '.jpeg'))
    ]
def main(args):
    """Build the train/val TFRecords for args.dataset_name under args.root_dir.

    Resolves the image/annotation/label-map paths, splits the examples into
    train/validation sets, and writes one TFRecord per split.
    """
    logger.info('Reading dataset info.')
    image_dir = os.path.join(args.root_dir, args.image_dir,
                             args.dataset_name)
    logger.info(f'Image directory: {image_dir}')
    annotations_dir = os.path.join(args.root_dir, args.annotation_dir,
                                   args.dataset_name)
    logger.info(f'Annotation directory: {annotations_dir}')
    label_map = os.path.join(args.root_dir, args.annotation_dir,
                             args.dataset_name, args.label_map_name)
    logger.info(f'Label map: {label_map}')
    use_alt_names = args.use_alt_names
    logger.info(f'use alt names: {use_alt_names}')
    # Split data into training and validation sets.
    # Fixed seed keeps the shuffle (and therefore the split) reproducible.
    random.seed(42)
    examples_list = gen_trainval_list(image_dir)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(TRAIN_VAL_SPLIT * num_examples)
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logger.info('Found %d training and %d validation examples.',
                len(train_examples), len(val_examples))
    train_output_path = os.path.join(args.root_dir, args.output_dir,
                                     args.dataset_name, TFRECORD_TRAIN_NAME)
    val_output_path = os.path.join(args.root_dir, args.output_dir,
                                   args.dataset_name, TFRECORD_VAL_NAME)
    label_map_dict = label_map_util.get_label_map_dict(label_map)
    # Create training TFRecord.
    logger.info('Creating training TFRecord.')
    create_tf_record(
        train_output_path,
        NUM_TFRECORD_SHARDS,
        label_map_dict,
        annotations_dir,
        image_dir,
        train_examples,
        use_alt_names)
    logger.info(f'Created training TFRecord: {train_output_path}')
    # Create validation TFRecord.
    logger.info('Creating validation TFRecord.')
    create_tf_record(
        val_output_path,
        NUM_TFRECORD_SHARDS,
        label_map_dict,
        annotations_dir,
        image_dir,
        val_examples,
        use_alt_names)
    logger.info(f'Created validation TFRecord: {val_output_path}')
if __name__ == '__main__':
    # CLI entry point: only --dataset_name is required.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', type=str,
                        help='Root directory.',
                        default='./')
    parser.add_argument('--output_dir', type=str,
                        help='TFRecord directory.',
                        default='tf-record')
    parser.add_argument('--annotation_dir', type=str,
                        help='Annotation directory.',
                        default='annotations')
    parser.add_argument('--label_map_name', type=str,
                        help='Label map name.',
                        default='label_map.pbtxt')
    parser.add_argument('--image_dir', type=str,
                        help='Image directory.',
                        default='images')
    parser.add_argument('--dataset_name', type=str,
                        help='Name of dataset',
                        required=True)
    # NOTE: action='store_true' already defaults to False, so the original
    # parser.set_defaults(use_alt_names=False) call was redundant and removed.
    parser.add_argument('--use_alt_names', action='store_true',
                        help='Use alternative class names. Must match label_map_name.pbtxt')
    args = parser.parse_args()
    logging.basicConfig(
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        level=logging.DEBUG)
    main(args)
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import startup2
import page_sets
from telemetry import benchmark
# Disable accessing protected member for startup2._StartupPerfBenchmark. It
# needs to be protected to not be listed in the list of benchmarks to run, even
# though its purpose is only to factorise common code between startup
# benchmarks.
# pylint: disable=protected-access
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlColdTBM(startup2._StartupPerfBenchmark):
  """Measures time to start Chrome cold with startup URLs."""
  page_set = page_sets.StartupPagesPageSet
  # Repeat each page set to average out run-to-run variance.
  options = {'pageset_repeat': 5}

  def SetExtraBrowserOptions(self, options):
    # Clear system/profile caches before launch so every run is truly cold.
    # NOTE(review): 'sytem' is misspelled, but this presumably matches the
    # attribute name telemetry's browser options actually expose -- confirm
    # against telemetry before "fixing" the spelling here.
    options.clear_sytem_cache_for_browser_and_profile_on_start = True
    super(StartWithUrlColdTBM, self).SetExtraBrowserOptions(options)

  @classmethod
  def Name(cls):
    return 'start_with_url.cold.startup_pages'
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlWarmTBM(startup2._StartupPerfBenchmark):
  """Measures time to start Chrome warm with startup URLs."""
  page_set = page_sets.StartupPagesPageSet
  # 11 repeats; the first result is dropped by ValueCanBeAddedPredicate below.
  options = {'pageset_repeat': 11}

  @classmethod
  def Name(cls):
    return 'start_with_url.warm.startup_pages'

  @classmethod
  def ValueCanBeAddedPredicate(cls, value, is_first_result):
    del value  # unused
    # Ignores first results because the first invocation is actually cold since
    # we are loading the profile for the first time.
    return not is_first_result
| 1,774 | 552 |
import logging
import discord
import datetime
import json
from operator import itemgetter
from discord.ext import commands
from os import linesep
from .base_cog import BaseCog
from conf import config
log = logging.getLogger(__name__)
class Core(BaseCog):
    """A minimal cog for testing."""

    def __init__(self, bot):
        # Load the persisted nickname shortcuts once at cog construction.
        BaseCog.__init__(self, bot)
        self.bot = bot
        with open(config.cogs_data_path + '/user_shortcuts.json', 'r') as shortcuts_file:
            self.shortcuts = json.load(shortcuts_file)

    @commands.command()
    async def info(self, context):
        """General information on the bot instance."""
        BaseCog.check_main_server(self, context)
        BaseCog.check_bot_channel(self, context)
        BaseCog.check_forbidden_characters(self, context)
        await self.bot.post_message(self.bot.bot_channel, '```' + self.bot.info_text + '```')

    @commands.command(pass_context=True)
    async def time(self, context):
        """Displays current local time and date for the bot."""
        BaseCog.check_forbidden_characters(self, context)
        await self.bot.post_message(context.message.channel, 'Current time is ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + ' (' + config.timezone + ').')

    @commands.command()
    async def shortcuts(self, context):
        """Displays registered shortcuts for user nicknames."""
        BaseCog.check_main_server(self, context)
        BaseCog.check_bot_channel(self, context)
        BaseCog.check_forbidden_characters(self, context)
        # default=0 prevents a ValueError crash when no shortcuts exist yet;
        # iterating the dict directly yields the keys, so .items() is not needed.
        indent = max((len(shortcut) for shortcut in self.shortcuts), default=0)
        sorted_shortcuts = sorted(self.shortcuts.items(), key=itemgetter(0), reverse=False)
        result = '```Shortcut Nickname' + linesep + linesep
        for shortcut, name in sorted_shortcuts:
            result += shortcut.ljust(indent) + ' ' + name + linesep
        result += '```'
        await self.bot.post_message(self.bot.bot_channel, result)

    @commands.command()
    async def addshortcut(self, context, shortcut, user):
        """[ADMINS ONLY] Creates a new shortcut for a specified username."""
        BaseCog.check_main_server(self, context)
        BaseCog.check_bot_channel(self, context)
        BaseCog.check_admin(self, context)
        BaseCog.check_forbidden_characters(self, context)
        # Persist immediately so the shortcut survives a bot restart.
        self.shortcuts[shortcut] = user
        with open(config.cogs_data_path + '/user_shortcuts.json', 'w') as shortcuts_file:
            json.dump(self.shortcuts, shortcuts_file)
        await self.bot.post_message(self.bot.bot_channel, context.message.author.name + ' has created a new shortcut \"' + shortcut + '\".')
def setup(bot):
    """Core cog load."""
    core_cog = Core(bot)
    bot.add_cog(core_cog)
    log.info("Core cog loaded")
| 2,791 | 884 |
from dataclasses import dataclass
from typing import List
from cu_pass.dpa_calculator.constants import REGION_TYPE_DENSE_URBAN, REGION_TYPE_RURAL, REGION_TYPE_SUBURBAN, REGION_TYPE_URBAN
from cu_pass.dpa_calculator.helpers.list_distributor.fractional_distribution.fractional_distribution import \
FractionalDistribution
from cu_pass.dpa_calculator.helpers.list_distributor.fractional_distribution.fractional_distribution_uniform import \
FractionalDistributionUniform
@dataclass
class HeightDistribution:
    """A height range in meters covering a given fraction of CBSDs."""
    maximum_height_in_meters: float
    minimum_height_in_meters: float
    fraction_of_cbsds: float

    def to_fractional_distribution(self) -> FractionalDistribution:
        """Express this height range as a uniform fractional distribution."""
        uniform_distribution = FractionalDistributionUniform(
            range_maximum=self.maximum_height_in_meters,
            range_minimum=self.minimum_height_in_meters,
            fraction=self.fraction_of_cbsds,
        )
        return uniform_distribution
def fractional_distribution_to_height_distribution(distribution: FractionalDistribution) -> HeightDistribution:
    """Convert a fractional distribution back into a height distribution."""
    top = distribution.range_maximum
    bottom = distribution.range_minimum
    return HeightDistribution(
        maximum_height_in_meters=top,
        minimum_height_in_meters=bottom,
        fraction_of_cbsds=distribution.fraction,
    )
# Fixed antenna height used for every outdoor UE (meters).
OUTDOOR_UE_HEIGHT_IN_METERS = 1.5
# Indoor UE heights are derived by subtracting this offset (meters) from the
# associated AP's height range (see _get_indoor_ue_height_distribution below).
INDOOR_UE_HEIGHT_DIFFERENCE_FROM_AP = 1.5
# Indoor Category A AP height distributions, keyed by region type.
# Each region's fraction_of_cbsds values sum to 1.0.
INDOOR_AP_HEIGHT_DISTRIBUTION_CATEGORY_A = {
    REGION_TYPE_DENSE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=15,
            minimum_height_in_meters=3,
            fraction_of_cbsds=0.5
        ),
        HeightDistribution(
            maximum_height_in_meters=30,
            minimum_height_in_meters=18,
            fraction_of_cbsds=0.25
        ),
        HeightDistribution(
            maximum_height_in_meters=60,
            minimum_height_in_meters=33,
            fraction_of_cbsds=0.25
        ),
    ],
    REGION_TYPE_RURAL: [
        # min == max here: a fixed height rather than a range.
        HeightDistribution(
            maximum_height_in_meters=3,
            minimum_height_in_meters=3,
            fraction_of_cbsds=0.8
        ),
        HeightDistribution(
            maximum_height_in_meters=6,
            minimum_height_in_meters=6,
            fraction_of_cbsds=0.2
        ),
    ],
    REGION_TYPE_SUBURBAN: [
        HeightDistribution(
            maximum_height_in_meters=3,
            minimum_height_in_meters=3,
            fraction_of_cbsds=0.7
        ),
        HeightDistribution(
            maximum_height_in_meters=12,
            minimum_height_in_meters=6,
            fraction_of_cbsds=0.3
        )
    ],
    REGION_TYPE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=3,
            minimum_height_in_meters=3,
            fraction_of_cbsds=0.5
        ),
        HeightDistribution(
            maximum_height_in_meters=18,
            minimum_height_in_meters=6,
            fraction_of_cbsds=0.5
        )
    ]
}
# Outdoor UE heights: every region type uses the single fixed
# OUTDOOR_UE_HEIGHT_IN_METERS value for 100% of CBSDs.
OUTDOOR_UE_HEIGHT_DISTRIBUTION = {
    REGION_TYPE_DENSE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            minimum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_RURAL: [
        HeightDistribution(
            maximum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            minimum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_SUBURBAN: [
        HeightDistribution(
            maximum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            minimum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            minimum_height_in_meters=OUTDOOR_UE_HEIGHT_IN_METERS,
            fraction_of_cbsds=1
        ),
    ]
}
# Outdoor Category B AP heights: one uniform range per region type covering
# all CBSDs (fraction 1). Urban regions cap at 30 m, rural/suburban at 100 m.
OUTDOOR_AP_HEIGHT_DISTRIBUTION_CATEGORY_B = {
    REGION_TYPE_DENSE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=30,
            minimum_height_in_meters=6,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_RURAL: [
        HeightDistribution(
            maximum_height_in_meters=100,
            minimum_height_in_meters=6,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_SUBURBAN: [
        HeightDistribution(
            maximum_height_in_meters=100,
            minimum_height_in_meters=6,
            fraction_of_cbsds=1
        ),
    ],
    REGION_TYPE_URBAN: [
        HeightDistribution(
            maximum_height_in_meters=30,
            minimum_height_in_meters=6,
            fraction_of_cbsds=1
        ),
    ]
}
def _get_indoor_ue_height_distribution(associated_ap_distribution: HeightDistribution) -> HeightDistribution:
    """Shift an AP height range down by the fixed indoor UE offset."""
    offset = INDOOR_UE_HEIGHT_DIFFERENCE_FROM_AP
    ap = associated_ap_distribution
    return HeightDistribution(
        maximum_height_in_meters=ap.maximum_height_in_meters - offset,
        minimum_height_in_meters=ap.minimum_height_in_meters - offset,
        fraction_of_cbsds=ap.fraction_of_cbsds,
    )
def _get_indoor_ue_height_distributions(associated_ap_distributions: List[HeightDistribution]) -> List[HeightDistribution]:
    """Apply the indoor UE height offset to each AP distribution in the list."""
    shifted: List[HeightDistribution] = []
    for ap_distribution in associated_ap_distributions:
        shifted.append(_get_indoor_ue_height_distribution(associated_ap_distribution=ap_distribution))
    return shifted
# Indoor UE heights: the Category A indoor AP distributions shifted down by
# INDOOR_UE_HEIGHT_DIFFERENCE_FROM_AP, per region type.
INDOOR_UE_HEIGHT_DISTRIBUTION = {region_type: _get_indoor_ue_height_distributions(associated_ap_distributions=distributions)
                                 for region_type, distributions in INDOOR_AP_HEIGHT_DISTRIBUTION_CATEGORY_A.items()}
| 5,627 | 1,947 |
# icons used for filtering
ICONS = ['10k', '10mp', '11mp', '12mp', '13mp', '14mp', '15mp', '16mp', '17mp', '18mp', '19mp', '1k', '1k_plus', '1x_mobiledata', '20mp', '21mp', '22mp', '23mp', '24mp', '2k', '2k_plus', '2mp', '30fps', '30fps_select', '360', '3d_rotation', '3g_mobiledata', '3k', '3k_plus', '3mp', '3p', '4g_mobiledata', '4g_plus_mobiledata', '4k', '4k_plus', '4mp', '5g', '5k', '5k_plus', '5mp', '6_ft_apart', '60fps', '60fps_select', '6k', '6k_plus', '6mp', '7k', '7k_plus', '7mp', '8k', '8k_plus', '8mp', '9k', '9k_plus', '9mp', 'ac_unit', 'access_alarm', 'access_alarms', 'access_time', 'access_time_filled', 'accessibility', 'accessibility_new', 'accessible', 'accessible_forward', 'account_balance', 'account_balance_wallet', 'account_box', 'account_circle', 'account_tree', 'ad_units', 'adb', 'add', 'add_a_photo', 'add_alarm', 'add_alert', 'add_box', 'add_business', 'add_chart', 'add_circle', 'add_circle_outline', 'add_comment', 'add_ic_call', 'add_link', 'add_location', 'add_location_alt', 'add_moderator', 'add_photo_alternate', 'add_reaction', 'add_road', 'add_shopping_cart', 'add_task', 'add_to_drive', 'add_to_home_screen', 'add_to_photos', 'add_to_queue', 'addchart', 'adjust', 'admin_panel_settings', 'agriculture', 'air', 'airline_seat_flat', 'airline_seat_flat_angled', 'airline_seat_individual_suite', 'airline_seat_legroom_extra', 'airline_seat_legroom_normal', 'airline_seat_legroom_reduced', 'airline_seat_recline_extra', 'airline_seat_recline_normal', 'airplane_ticket', 'airplanemode_active', 'airplanemode_inactive', 'airplay', 'airport_shuttle', 'alarm', 'alarm_add', 'alarm_off', 'alarm_on', 'album', 'align_horizontal_center', 'align_horizontal_left', 'align_horizontal_right', 'align_vertical_bottom', 'align_vertical_center', 'align_vertical_top', 'all_inbox', 'all_inclusive', 'all_out', 'alt_route', 'alternate_email', 'amp_stories', 'analytics', 'anchor', 'android', 'animation', 'announcement', 'aod', 'apartment', 'api', 'app_blocking', 'app_registration', 'app_settings_alt', 
'approval', 'apps', 'architecture', 'archive', 'arrow_back', 'arrow_back_ios', 'arrow_back_ios_new', 'arrow_circle_down', 'arrow_circle_up', 'arrow_downward', 'arrow_drop_down', 'arrow_drop_down_circle', 'arrow_drop_up', 'arrow_forward', 'arrow_forward_ios', 'arrow_left', 'arrow_right', 'arrow_right_alt', 'arrow_upward', 'art_track', 'article', 'aspect_ratio', 'assessment', 'assignment', 'assignment_ind', 'assignment_late', 'assignment_return', 'assignment_returned', 'assignment_turned_in', 'assistant', 'assistant_direction', 'assistant_photo', 'atm', 'attach_email', 'attach_file', 'attach_money', 'attachment', 'attractions', 'attribution', 'audiotrack', 'auto_awesome', 'auto_awesome_mosaic', 'auto_awesome_motion', 'auto_delete', 'auto_fix_high', 'auto_fix_normal', 'auto_fix_off', 'auto_graph', 'auto_stories', 'autofps_select', 'autorenew', 'av_timer', 'baby_changing_station', 'backpack', 'backspace', 'backup', 'backup_table', 'badge', 'bakery_dining', 'balcony', 'ballot', 'bar_chart', 'barcode', 'batch_prediction', 'bathroom', 'bathtub', 'battery_20', 'battery_30', 'battery_50', 'battery_60', 'battery_80', 'battery_90', 'battery_alert', 'battery_charging_20', 'battery_charging_30', 'battery_charging_50', 'battery_charging_60', 'battery_charging_80', 'battery_charging_90', 'battery_charging_full', 'battery_full', 'battery_saver', 'battery_std', 'battery_unknown', 'beach_access', 'bed', 'bedroom_baby', 'bedroom_child', 'bedroom_parent', 'bedtime', 'beenhere', 'bento', 'bike_scooter', 'biotech', 'blender', 'block', 'bloodtype', 'bluetooth', 'bluetooth_audio', 'bluetooth_connected', 'bluetooth_disabled', 'bluetooth_drive', 'bluetooth_searching', 'blur_circular', 'blur_linear', 'blur_off', 'blur_on', 'bolt', 'book', 'book_online', 'bookmark', 'bookmark_add', 'bookmark_added', 'bookmark_border', 'bookmark_remove', 'bookmarks', 'border_all', 'border_bottom', 'border_clear', 'border_color', 'border_horizontal', 'border_inner', 'border_left', 'border_outer', 
'border_right', 'border_style', 'border_top', 'border_vertical', 'branding_watermark', 'breakfast_dining', 'brightness_1', 'brightness_2', 'brightness_3', 'brightness_4', 'brightness_5', 'brightness_6', 'brightness_7', 'brightness_auto', 'brightness_high', 'brightness_low', 'brightness_medium', 'broken_image', 'browser_not_supported', 'brunch_dining', 'brush', 'bubble_chart', 'bug_report', 'build', 'build_circle', 'bungalow', 'burst_mode', 'bus_alert', 'business', 'business_center', 'cabin', 'cable', 'cached', 'cake', 'calculate', 'calendar_today', 'calendar_view_day', 'calendar_view_month', 'calendar_view_week', 'call', 'call_end', 'call_made', 'call_merge', 'call_missed', 'call_missed_outgoing', 'call_received', 'call_split', 'call_to_action', 'camera', 'camera_alt', 'camera_enhance', 'camera_front', 'camera_indoor', 'camera_outdoor', 'camera_rear', 'camera_roll', 'cameraswitch', 'campaign', 'cancel', 'cancel_presentation', 'cancel_schedule_send', 'car_rental', 'car_repair', 'card_giftcard', 'card_membership', 'card_travel', 'carpenter', 'cases', 'casino', 'cast', 'cast_connected', 'cast_for_education', 'catching_pokemon', 'category', 'celebration', 'cell_wifi', 'center_focus_strong', 'center_focus_weak', 'chair', 'chair_alt', 'chalet', 'change_circle', 'change_history', 'charging_station', 'chat', 'chat_bubble', 'chat_bubble_outline', 'check', 'check_box', 'check_box_outline_blank', 'check_circle', 'check_circle_outline', 'checkroom', 'chevron_left', 'chevron_right', 'child_care', 'child_friendly', 'chrome_reader_mode', 'circle', 'circle_notifications', 'class', 'clean_hands', 'cleaning_services', 'clear', 'clear_all', 'close', 'close_fullscreen', 'closed_caption', 'closed_caption_disabled', 'closed_caption_off', 'cloud', 'cloud_circle', 'cloud_done', 'cloud_download', 'cloud_off', 'cloud_queue', 'cloud_upload', 'code', 'code_off', 'coffee', 'coffee_maker', 'collections', 'collections_bookmark', 'color_lens', 'colorize', 'comment', 'comment_bank', 'commute', 
'compare', 'compare_arrows', 'compass_calibration', 'compress', 'computer', 'confirmation_number', 'connect_without_contact', 'connected_tv', 'construction', 'contact_mail', 'contact_page', 'contact_phone', 'contact_support', 'contactless', 'contacts', 'content_copy', 'content_cut', 'content_paste', 'content_paste_off', 'control_camera', 'control_point', 'control_point_duplicate', 'copy_all', 'copyright', 'coronavirus', 'corporate_fare', 'cottage', 'countertops', 'create', 'create_new_folder', 'credit_card', 'credit_card_off', 'credit_score', 'crib', 'crop', 'crop_16_9', 'crop_3_2', 'crop_5_4', 'crop_7_5', 'crop_din', 'crop_free', 'crop_landscape', 'crop_original', 'crop_portrait', 'crop_rotate', 'crop_square', 'dangerous', 'dark_mode', 'dashboard', 'dashboard_customize', 'data_saver_off', 'data_saver_on', 'data_usage', 'date_range', 'deck', 'dehaze', 'delete', 'delete_forever', 'delete_outline', 'delete_sweep', 'delivery_dining', 'departure_board', 'description', 'design_services', 'desktop_access_disabled', 'desktop_mac', 'desktop_windows', 'details', 'developer_board', 'developer_board_off', 'developer_mode', 'device_hub', 'device_thermostat', 'device_unknown', 'devices', 'devices_other', 'dialer_sip', 'dialpad', 'dining', 'dinner_dining', 'directions', 'directions_bike', 'directions_boat', 'directions_boat_filled', 'directions_bus', 'directions_bus_filled', 'directions_car', 'directions_car_filled', 'directions_off', 'directions_railway', 'directions_railway_filled', 'directions_run', 'directions_subway', 'directions_subway_filled', 'directions_transit', 'directions_transit_filled', 'directions_walk', 'dirty_lens', 'disabled_by_default', 'disc_full', 'divide', 'dns', 'do_disturb', 'do_disturb_alt', 'do_disturb_off', 'do_disturb_on', 'do_not_disturb', 'do_not_disturb_alt', 'do_not_disturb_off', 'do_not_disturb_on', 'do_not_disturb_on_total_silence', 'do_not_step', 'do_not_touch', 'dock', 'domain', 'domain_disabled', 'domain_verification', 'done', 'done_all', 
'done_outline', 'donut_large', 'donut_small', 'door_back', 'door_front', 'door_sliding', 'doorbell', 'double_arrow', 'downhill_skiing', 'download', 'download_done', 'download_for_offline', 'downloading', 'drafts', 'drag_handle', 'drag_indicator', 'drive_eta', 'drive_file_move', 'drive_file_rename_outline', 'drive_folder_upload', 'dry', 'dry_cleaning', 'duo', 'dvr', 'dynamic_feed', 'dynamic_form', 'e_mobiledata', 'earbuds', 'earbuds_battery', 'east', 'eco', 'edgesensor_high', 'edgesensor_low', 'edit', 'edit_attributes', 'edit_location', 'edit_location_alt', 'edit_notifications', 'edit_off', 'edit_road', 'eject', 'elderly', 'electric_bike', 'electric_car', 'electric_moped', 'electric_rickshaw', 'electric_scooter', 'electrical_services', 'elevator', 'email', 'emoji_emotions', 'emoji_events', 'emoji_flags', 'emoji_food_beverage', 'emoji_nature', 'emoji_objects', 'emoji_people', 'emoji_symbols', 'emoji_transportation', 'engineering', 'enhanced_encryption', 'equalizer', 'equals', 'error', 'error_outline', 'escalator', 'escalator_warning', 'euro', 'euro_symbol', 'ev_station', 'event', 'event_available', 'event_busy', 'event_note', 'event_seat', 'exit_to_app', 'expand', 'expand_less', 'expand_more', 'explicit', 'explore', 'explore_off', 'exposure', 'exposure_neg_1', 'exposure_neg_2', 'exposure_plus_1', 'exposure_plus_2', 'exposure_zero', 'extension', 'extension_off', 'face', 'face_retouching_natural', 'face_retouching_off', 'facebook', 'fact_check', 'family_restroom', 'fast_forward', 'fast_rewind', 'fastfood', 'favorite', 'favorite_border', 'featured_play_list', 'featured_video', 'feed', 'feedback', 'female', 'fence', 'festival', 'fiber_dvr', 'fiber_manual_record', 'fiber_new', 'fiber_pin', 'fiber_smart_record', 'file_copy', 'file_download', 'file_download_done', 'file_download_off', 'file_present', 'file_upload', 'filter', 'filter_1', 'filter_2', 'filter_3', 'filter_4', 'filter_5', 'filter_6', 'filter_7', 'filter_8', 'filter_9', 'filter_9_plus', 'filter_alt', 
'filter_b_and_w', 'filter_center_focus', 'filter_drama', 'filter_frames', 'filter_hdr', 'filter_list', 'filter_none', 'filter_tilt_shift', 'filter_vintage', 'find_in_page', 'find_replace', 'fingerprint', 'fire_extinguisher', 'fireplace', 'first_page', 'fit_screen', 'fitness_center', 'flag', 'flaky', 'flare', 'flash_auto', 'flash_off', 'flash_on', 'flashlight_off', 'flashlight_on', 'flatware', 'flight', 'flight_land', 'flight_takeoff', 'flip', 'flip_camera_android', 'flip_camera_ios', 'flip_to_back', 'flip_to_front', 'flourescent', 'flutter_dash', 'fmd_bad', 'fmd_good', 'folder', 'folder_open', 'folder_shared', 'folder_special', 'follow_the_signs', 'font_download', 'font_download_off', 'food_bank', 'format_align_center', 'format_align_justify', 'format_align_left', 'format_align_right', 'format_bold', 'format_clear', 'format_color_fill', 'format_color_reset', 'format_color_text', 'format_indent_decrease', 'format_indent_increase', 'format_italic', 'format_line_spacing', 'format_list_bulleted', 'format_list_numbered', 'format_list_numbered_rtl', 'format_paint', 'format_quote', 'format_shapes', 'format_size', 'format_strikethrough', 'format_textdirection_l_to_r', 'format_textdirection_r_to_l', 'format_underlined', 'forum', 'forward', 'forward_10', 'forward_30', 'forward_5', 'forward_to_inbox', 'foundation', 'free_breakfast', 'fullscreen', 'fullscreen_exit', 'functions', 'g_mobiledata', 'g_translate', 'gamepad', 'games', 'garage', 'gavel', 'gesture', 'get_app', 'gif', 'gite', 'golf_course', 'gpp_bad', 'gpp_good', 'gpp_maybe', 'gps_fixed', 'gps_not_fixed', 'gps_off', 'grade', 'gradient', 'grading', 'grain', 'graphic_eq', 'grass', 'greater_than', 'greater_than_equal', 'grid_3x3', 'grid_4x4', 'grid_goldenratio', 'grid_off', 'grid_on', 'grid_view', 'group', 'group_add', 'group_work', 'groups', 'h_mobiledata', 'h_plus_mobiledata', 'hail', 'handyman', 'hardware', 'hd', 'hdr_auto', 'hdr_auto_select', 'hdr_enhanced_select', 'hdr_off', 'hdr_off_select', 'hdr_on', 
'hdr_on_select', 'hdr_plus', 'hdr_strong', 'hdr_weak', 'headphones', 'headphones_battery', 'headset', 'headset_mic', 'headset_off', 'healing', 'health_and_safety', 'hearing', 'hearing_disabled', 'height', 'help', 'help_center', 'help_outline', 'hevc', 'hide_image', 'hide_source', 'high_quality', 'highlight', 'highlight_alt', 'highlight_off', 'hiking', 'history', 'history_edu', 'history_toggle_off', 'holiday_village', 'home', 'home_max', 'home_mini', 'home_repair_service', 'home_work', 'horizontal_distribute', 'horizontal_rule', 'horizontal_split', 'hot_tub', 'hotel', 'hourglass_bottom', 'hourglass_disabled', 'hourglass_empty', 'hourglass_full', 'hourglass_top', 'house', 'house_siding', 'houseboat', 'how_to_reg', 'how_to_vote', 'http', 'https', 'hvac', 'ice_skating', 'icecream', 'image', 'image_aspect_ratio', 'image_not_supported', 'image_search', 'imagesearch_roller', 'import_contacts', 'import_export', 'important_devices', 'inbox', 'indeterminate_check_box', 'info', 'input', 'insert_chart', 'insert_chart_outlined', 'insert_comment', 'insert_drive_file', 'insert_emoticon', 'insert_invitation', 'insert_link', 'insert_photo', 'insights', 'integration_instructions', 'inventory', 'inventory_2', 'invert_colors', 'invert_colors_off', 'ios_share', 'iron', 'iso', 'kayaking', 'keyboard', 'keyboard_alt', 'keyboard_arrow_down', 'keyboard_arrow_left', 'keyboard_arrow_right', 'keyboard_arrow_up', 'keyboard_backspace', 'keyboard_capslock', 'keyboard_hide', 'keyboard_return', 'keyboard_tab', 'keyboard_voice', 'king_bed', 'kitchen', 'kitesurfing', 'label', 'label_important', 'label_off', 'landscape', 'language', 'laptop', 'laptop_chromebook', 'laptop_mac', 'laptop_windows', 'last_page', 'launch', 'layers', 'layers_clear', 'leaderboard', 'leak_add', 'leak_remove', 'leave_bags_at_home', 'legend_toggle', 'lens', 'lens_blur', 'less_than', 'less_than_equal', 'library_add', 'library_add_check', 'library_books', 'library_music', 'light', 'light_mode', 'lightbulb', 'line_style', 
'line_weight', 'linear_scale', 'link', 'link_off', 'linked_camera', 'liquor', 'list', 'list_alt', 'live_help', 'live_tv', 'living', 'local_activity', 'local_airport', 'local_atm', 'local_bar', 'local_cafe', 'local_car_wash', 'local_convenience_store', 'local_dining', 'local_drink',
'local_fire_department', 'local_florist', 'local_gas_station', 'local_grocery_store', 'local_hospital', 'local_hotel', 'local_laundry_service', 'local_library', 'local_mall', 'local_movies', 'local_offer', 'local_parking', 'local_pharmacy', 'local_phone', 'local_pizza', 'local_play', 'local_police', 'local_post_office', 'local_printshop', 'local_see', 'local_shipping', 'local_taxi', 'location_city', 'location_disabled', 'location_off', 'location_on', 'location_searching', 'lock', 'lock_clock', 'lock_open', 'log_in', 'log_out', 'login', 'logout', 'looks', 'looks_3', 'looks_4', 'looks_5', 'looks_6', 'looks_one', 'looks_two', 'loop', 'loupe', 'low_priority', 'loyalty', 'lte_mobiledata', 'lte_plus_mobiledata', 'luggage', 'lunch_dining', 'mail', 'mail_outline', 'male', 'manage_accounts', 'manage_search', 'map', 'maps_home_work', 'maps_ugc', 'margin', 'mark_as_unread', 'mark_chat_read', 'mark_chat_unread', 'mark_email_read', 'mark_email_unread', 'markunread', 'markunread_mailbox', 'masks', 'maximize', 'media_bluetooth_off', 'media_bluetooth_on', 'mediation', 'medical_services', 'medication', 'meeting_room', 'memory', 'menu', 'menu_book', 'menu_open', 'merge_type', 'message', 'mic', 'mic_external_off', 'mic_external_on', 'mic_none', 'mic_off', 'microwave', 'military_tech', 'minimize', 'minus', 'miscellaneous_services', 'missed_video_call', 'mms', 'mobile_friendly', 'mobile_off', 'mobile_screen_share', 'mobiledata_off', 'mode', 'mode_comment', 'mode_edit', 'mode_edit_outline', 'mode_night', 'mode_standby', 'model_training', 'monetization_on', 'money', 'money_off', 'money_off_csred', 'monitor', 'monitor_weight', 'monochrome_photos', 'mood', 'mood_bad', 'moped', 'more', 'more_horiz', 'more_time', 'more_vert', 'motion_photos_auto', 'motion_photos_off', 'motion_photos_on', 'motion_photos_pause', 'motion_photos_paused', 'motorcycle', 'mouse', 'move_to_inbox', 'movie', 'movie_creation', 'movie_filter', 'moving', 'mp', 'multiline_chart', 'multiple_stop', 'museum', 'music_note', 
'music_off', 'music_video', 'my_location', 'nat', 'nature', 'nature_people', 'navigate_before', 'navigate_next', 'navigation', 'near_me', 'near_me_disabled', 'nearby_error', 'nearby_off', 'network_cell', 'network_check', 'network_locked', 'network_wifi', 'new_releases', 'next_plan', 'next_week', 'nfc', 'night_shelter', 'nightlife', 'nightlight', 'nightlight_round', 'nights_stay', 'no_accounts', 'no_backpack', 'no_cell', 'no_drinks', 'no_encryption', 'no_encryption_gmailerrorred', 'no_flash', 'no_food', 'no_luggage', 'no_meals', 'no_meeting_room', 'no_photography', 'no_sim', 'no_stroller', 'no_transfer', 'nordic_walking', 'north', 'north_east', 'north_west', 'not_accessible', 'not_equal', 'not_interested', 'not_listed_location', 'not_started', 'note', 'note_add', 'note_alt', 'notes', 'notification_add', 'notification_important', 'notifications', 'notifications_active', 'notifications_none', 'notifications_off', 'notifications_paused', 'offline_bolt', 'offline_pin', 'offline_share', 'ondemand_video', 'online_prediction', 'opacity', 'open_in_browser', 'open_in_full', 'open_in_new', 'open_in_new_off', 'open_with', 'other_houses', 'outbond', 'outbound', 'outbox', 'outdoor_grill', 'outlet', 'outlined_flag', 'padding', 'pages', 'pageview', 'paid', 'palette', 'pan_tool', 'panorama', 'panorama_fish_eye', 'panorama_horizontal', 'panorama_horizontal_select', 'panorama_photosphere', 'panorama_photosphere_select', 'panorama_vertical', 'panorama_vertical_select', 'panorama_wide_angle', 'panorama_wide_angle_select', 'paragliding', 'park', 'party_mode', 'password', 'pattern', 'pause', 'pause_circle', 'pause_circle_filled', 'pause_circle_outline', 'pause_presentation', 'payment', 'payments', 'pedal_bike', 'pending', 'pending_actions', 'people', 'people_alt', 'people_outline', 'percentage', 'perm_camera_mic', 'perm_contact_calendar', 'perm_data_setting', 'perm_device_information', 'perm_identity', 'perm_media', 'perm_phone_msg', 'perm_scan_wifi', 'person', 'person_add', 
'person_add_alt', 'person_add_alt_1', 'person_add_disabled', 'person_off', 'person_outline', 'person_pin', 'person_pin_circle', 'person_remove', 'person_remove_alt_1', 'person_search', 'personal_video', 'pest_control', 'pest_control_rodent', 'pets', 'phone', 'phone_android', 'phone_bluetooth_speaker', 'phone_callback', 'phone_disabled', 'phone_enabled', 'phone_forwarded', 'phone_in_talk', 'phone_iphone', 'phone_locked', 'phone_missed', 'phone_paused', 'phonelink', 'phonelink_erase', 'phonelink_lock', 'phonelink_off', 'phonelink_ring', 'phonelink_setup', 'photo', 'photo_album', 'photo_camera', 'photo_camera_back', 'photo_camera_front', 'photo_filter', 'photo_library', 'photo_size_select_actual', 'photo_size_select_large', 'photo_size_select_small', 'piano', 'piano_off', 'picture_as_pdf', 'picture_in_picture', 'picture_in_picture_alt', 'pie_chart', 'pie_chart_outline', 'pin', 'pin_drop', 'pin_off', 'pivot_table_chart', 'place', 'plagiarism', 'play_arrow', 'play_circle', 'play_circle_filled', 'play_circle_filled_white', 'play_circle_outline', 'play_disabled', 'play_for_work', 'play_lesson', 'playlist_add', 'playlist_add_check', 'playlist_play', 'plumbing', 'plus', 'plus_minus', 'plus_minus_alt', 'plus_one', 'podcasts', 'point_of_sale', 'policy', 'poll', 'polymer', 'pool', 'portable_wifi_off', 'portrait', 'post_add', 'power', 'power_input', 'power_off', 'power_settings_new', 'precision_manufacturing', 'pregnant_woman', 'present_to_all', 'preview', 'price_change', 'price_check', 'print', 'print_disabled', 'priority_high', 'privacy_tip', 'production_quantity_limits', 'psychology', 'public', 'public_off', 'publish', 'published_with_changes', 'push_pin', 'qr_code', 'qr_code_2', 'qr_code_scanner', 'qrcode', 'query_builder', 'query_stats', 'question_answer', 'queue', 'queue_music', 'queue_play_next', 'quickreply', 'quiz', 'r_mobiledata', 'radar', 'radio', 'radio_button_checked', 'radio_button_unchecked', 'railway_alert', 'ramen_dining', 'rate_review', 'raw_off', 'raw_on', 
'read_more', 'receipt', 'receipt_long', 'recent_actors', 'recommend', 'record_voice_over', 'redeem', 'redo', 'reduce_capacity', 'refresh', 'remember_me', 'remove', 'remove_circle', 'remove_circle_outline', 'remove_done', 'remove_from_queue', 'remove_moderator', 'remove_red_eye', 'remove_shopping_cart', 'reorder', 'repeat', 'repeat_on', 'repeat_one', 'repeat_one_on', 'replay', 'replay_10', 'replay_30', 'replay_5', 'replay_circle_filled', 'reply', 'reply_all', 'report', 'report_gmailerrorred', 'report_off', 'report_problem', 'request_page', 'request_quote', 'reset_tv', 'restart_alt', 'restaurant', 'restaurant_menu', 'restore', 'restore_from_trash', 'restore_page', 'reviews', 'rice_bowl', 'ring_volume', 'rocket', 'roofing', 'room', 'room_preferences', 'room_service', 'rotate_90_degrees_ccw', 'rotate_left', 'rotate_right', 'rounded_corner', 'router', 'rowing', 'rss_feed', 'rsvp', 'rtt', 'rule', 'rule_folder', 'run_circle', 'running_with_errors', 'rv_hookup', 'safety_divider', 'sailing', 'sanitizer', 'satellite', 'save', 'save_alt', 'saved_search', 'savings', 'scanner', 'scatter_plot', 'schedule', 'schedule_send', 'schema', 'school', 'science', 'score', 'screen_lock_landscape', 'screen_lock_portrait', 'screen_lock_rotation', 'screen_rotation', 'screen_search_desktop', 'screen_share', 'screenshot', 'sd', 'sd_card', 'sd_card_alert', 'sd_storage', 'search', 'search_off', 'security', 'security_update', 'security_update_good', 'security_update_warning', 'segment', 'select_all', 'self_improvement', 'sell', 'send', 'send_and_archive', 'send_to_mobile', 'sensor_door', 'sensor_window', 'sensors', 'sensors_off', 'sentiment_dissatisfied', 'sentiment_neutral', 'sentiment_satisfied', 'sentiment_satisfied_alt', 'sentiment_slightly_dissatisfied', 'sentiment_very_dissatisfied', 'sentiment_very_satisfied', 'set_meal', 'settings', 'settings_accessibility', 'settings_applications', 'settings_backup_restore', 'settings_bluetooth', 'settings_brightness', 'settings_cell', 
'settings_ethernet', 'settings_input_antenna', 'settings_input_component', 'settings_input_composite', 'settings_input_hdmi', 'settings_input_svideo', 'settings_overscan', 'settings_phone', 'settings_power', 'settings_remote', 'settings_suggest', 'settings_system_daydream', 'settings_voice', 'share', 'share_arrival_time', 'share_location', 'shield', 'shop', 'shop_2', 'shop_two', 'shopping_bag', 'shopping_basket', 'shopping_cart', 'short_text', 'shortcut', 'show_chart', 'shower', 'shuffle', 'shuffle_on', 'shutter_speed', 'sick', 'signal_cellular_0_bar', 'signal_cellular_1_bar', 'signal_cellular_2_bar', 'signal_cellular_3_bar', 'signal_cellular_4_bar', 'signal_cellular_alt', 'signal_cellular_connected_no_internet_0_bar', 'signal_cellular_connected_no_internet_1_bar', 'signal_cellular_connected_no_internet_2_bar', 'signal_cellular_connected_no_internet_3_bar', 'signal_cellular_connected_no_internet_4_bar', 'signal_cellular_no_sim', 'signal_cellular_nodata', 'signal_cellular_null', 'signal_cellular_off', 'signal_wifi_0_bar', 'signal_wifi_1_bar', 'signal_wifi_1_bar_lock', 'signal_wifi_2_bar', 'signal_wifi_2_bar_lock', 'signal_wifi_3_bar', 'signal_wifi_3_bar_lock', 'signal_wifi_4_bar', 'signal_wifi_4_bar_lock', 'signal_wifi_bad', 'signal_wifi_connected_no_internet_4', 'signal_wifi_off', 'signal_wifi_statusbar_4_bar', 'signal_wifi_statusbar_connected_no_internet_4', 'signal_wifi_statusbar_null', 'sim_card', 'sim_card_alert', 'sim_card_download', 'single_bed', 'sip', 'skateboarding', 'skip_next', 'skip_previous', 'sledding', 'slideshow', 'slow_motion_video', 'smart_button', 'smart_display', 'smart_screen', 'smart_toy', 'smartphone', 'smoke_free', 'smoking_rooms', 'sms', 'sms_failed', 'snippet_folder', 'snooze', 'snowboarding', 'snowmobile', 'snowshoeing', 'soap', 'social_distance', 'sort', 'sort_by_alpha', 'source', 'south', 'south_east', 'south_west', 'spa', 'space_bar', 'speaker', 'speaker_group', 'speaker_notes', 'speaker_notes_off', 'speaker_phone', 'speed', 
'spellcheck', 'splitscreen', 'sports', 'sports_bar', 'sports_baseball', 'sports_basketball', 'sports_cricket', 'sports_esports', 'sports_football', 'sports_golf', 'sports_handball', 'sports_hockey', 'sports_kabaddi', 'sports_mma', 'sports_motorsports', 'sports_rugby', 'sports_score', 'sports_soccer', 'sports_tennis', 'sports_volleyball', 'square_foot', 'stacked_bar_chart', 'stacked_line_chart', 'stairs', 'star', 'star_border', 'star_border_purple500', 'star_half', 'star_outline', 'star_purple500', 'star_rate', 'stars', 'stay_current_landscape', 'stay_current_portrait', 'stay_primary_landscape', 'stay_primary_portrait', 'sticky_note_2', 'stop', 'stop_circle', 'stop_screen_share', 'storage', 'store', 'store_mall_directory', 'storefront', 'storm', 'straighten', 'stream', 'streetview', 'strikethrough_s', 'stroller', 'style', 'subdirectory_arrow_left', 'subdirectory_arrow_right', 'subject', 'subscript', 'subscriptions', 'subtitles', 'subtitles_off', 'subway', 'summarize', 'superscript', 'supervised_user_circle', 'supervisor_account', 'support', 'support_agent', 'surfing', 'surround_sound', 'swap_calls', 'swap_horiz', 'swap_horizontal_circle', 'swap_vert', 'swap_vertical_circle', 'swipe', 'switch_account', 'switch_camera', 'switch_left', 'switch_right', 'switch_video', 'sync', 'sync_alt', 'sync_disabled', 'sync_problem', 'system_security_update', 'system_security_update_good', 'system_security_update_warning', 'system_update', 'system_update_alt', 'tab', 'tab_unselected', 'table_chart', 'table_rows', 'table_view', 'tablet', 'tablet_android', 'tablet_mac', 'tag', 'tag_faces', 'takeout_dining', 'tap_and_play', 'tapas', 'task', 'task_alt', 'taxi_alert', 'terrain', 'text_fields', 'text_format', 'text_rotate_up', 'text_rotate_vertical', 'text_rotation_angledown', 'text_rotation_angleup', 'text_rotation_down', 'text_rotation_none', 'text_snippet', 'textsms', 'texture', 'theater_comedy', 'theaters', 'thermostat', 'thermostat_auto', 'thumb_down', 'thumb_down_alt', 
'thumb_down_off_alt', 'thumb_up', 'thumb_up_alt', 'thumb_up_off_alt', 'thumbs_up_down', 'time_to_leave', 'timelapse', 'timeline', 'timer', 'timer_10', 'timer_10_select', 'timer_3', 'timer_3_select', 'timer_off', 'title', 'toc', 'today', 'toggle_off', 'toggle_on', 'toll', 'tonality', 'topic', 'touch_app', 'tour', 'toys', 'track_changes', 'traffic', 'train', 'tram', 'transfer_within_a_station', 'transform', 'transgender', 'transit_enterexit', 'translate', 'travel_explore', 'trending_down', 'trending_flat', 'trending_up', 'trip_origin', 'try', 'tty', 'tune', 'tungsten', 'turned_in', 'turned_in_not', 'tv', 'tv_off', 'two_wheeler', 'umbrella', 'unarchive', 'undo', 'unfold_less', 'unfold_more', 'unpublished', 'unsubscribe', 'upcoming', 'update', 'update_disabled', 'upgrade', 'upload', 'upload_file', 'usb', 'usb_off', 'verified', 'verified_user', 'vertical_align_bottom', 'vertical_align_center', 'vertical_align_top', 'vertical_distribute', 'vertical_split', 'vibration', 'video_call', 'video_camera_back', 'video_camera_front', 'video_label', 'video_library', 'video_settings', 'video_stable', 'videocam', 'videocam_off', 'videogame_asset', 'videogame_asset_off', 'view_agenda', 'view_array', 'view_carousel', 'view_column', 'view_comfy', 'view_compact', 'view_day', 'view_headline', 'view_in_ar', 'view_list', 'view_module', 'view_quilt', 'view_sidebar', 'view_stream', 'view_week', 'vignette', 'villa', 'visibility', 'visibility_off', 'voice_chat', 'voice_over_off', 'voicemail', 'volume_down', 'volume_mute', 'volume_off', 'volume_up', 'volunteer_activism', 'vpn_key', 'vpn_lock', 'vrpano', 'wallpaper', 'warning', 'warning_amber', 'wash', 'watch', 'watch_later', 'water', 'water_damage', 'waterfall_chart', 'waves', 'wb_auto', 'wb_cloudy', 'wb_incandescent', 'wb_iridescent', 'wb_shade', 'wb_sunny', 'wb_twilight', 'wc', 'web', 'web_asset', 'web_asset_off', 'weekend', 'west', 'whatshot', 'wheelchair_pickup', 'where_to_vote', 'widgets', 'wifi', 'wifi_calling', 'wifi_calling_3', 
'wifi_lock', 'wifi_off', 'wifi_protected_setup', 'wifi_tethering', 'wifi_tethering_error_rounded', 'wifi_tethering_off', 'window', 'wine_bar', 'work', 'work_off', 'work_outline', 'workspaces', 'wrap_text', 'wrong_location', 'wysiwyg', 'yard', 'youtube_searched_for', 'zoom_in', 'zoom_out', 'zoom_out_map']
| 28,491 | 10,602 |
"""Built in actions for Jaseci"""
from .module.standard_actions import * # noqa
| 83 | 29 |
import numpy.testing as npt
import pytest
import numpy as np
from pyHalo.Rendering.MassFunctions.delta import DeltaFunction
class TestBackgroundDensityDelta(object):
    """Unit tests for DeltaFunction mass-function sampling."""

    def setup_method(self):
        # pytest-native fixture hook; the nose-style ``setup`` name this
        # class previously used is deprecated and removed in pytest 8.
        self.mass = 0.01
        self.volume = 10
        self.rho = 10
        self.mfunc = DeltaFunction(self.mass, self.volume, self.rho, False)
        self.mfunc_poisson = DeltaFunction(self.mass, self.volume, self.rho, True)
        # Mass so large that the expected number of draws rounds to zero.
        self.mfunc_empty = DeltaFunction(100000 * self.volume * self.rho, self.volume, self.rho, False)

    def test_density_delta(self):
        """draw() should return n = rho * V / m objects, each of mass ``self.mass``."""
        n_expected = self.rho * self.volume / self.mass
        m = self.mfunc.draw()
        n_drawn = len(m)
        npt.assert_equal(n_drawn, n_expected)
        for mi in m:
            npt.assert_equal(mi, self.mass)
        # Poisson sampling fluctuates the count but never the per-object mass.
        m = self.mfunc_poisson.draw()
        for mi in m:
            npt.assert_equal(mi, self.mass)
        # Over-massive delta function must produce no draws at all.
        m = self.mfunc_empty.draw()
        npt.assert_equal(len(m), 0.)
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main()
| 1,008 | 358 |
#!/usr/bin/env python
'''
VisualizeGraph.py
This file contains helpful sub-routines for generating images from a Random Walk run.
'''
import math
import os
import struct
from LayeredGraph import LayeredGraph
def saveGraphImage(mg, outFN, rankings=None, minWeight=0.0, drawEdgeWeights=False, nodeTypes=None):
    '''
    This function generates a dot file for graphviz to visualize the graph
    @param mg - the LayeredGraph to generate an image from
    @param outFN - the location to save the output (.dot is expected)
    @param rankings - the full rankings of all nodes in the graph as (weight, type, value) tuples
        (default: None, do not color the graph and visualize the whole graph)
    @param minWeight - the minimum weight from the ranking required to show up in the image (default: 0.0)
    @param drawEdgeWeights - if True, weight values will be included on the edges (default: False)
    @param nodeTypes - ordered node types (layers) to draw; None draws all types in sorted order (default: None)
    '''
    #if we have ranks, create a dictionary of the weights for lookup later
    rDict = {}
    if rankings is not None:
        for w, t, v in rankings:
            rDict[(t, v)] = w
    #context manager guarantees the file is closed even if a write raises
    with open(outFN, 'w+') as fp:
        fp.write('digraph food {\n')
        n = mg.nodes
        if nodeTypes is None:
            nodeTypes = sorted(n.keys())
        #iterate through all nodes in the graph
        for k in nodeTypes:
            for v in sorted(n[k]):
                #':' is not a legal character in a dot identifier
                vw = v.replace(':', '_')
                if rankings is None:
                    #if there are no rankings, then always write the node
                    fp.write(k+'_'+vw+';\n')
                else:
                    #we have rankings, so only write the node if it has sufficient weight
                    r = rDict[(k, v)]
                    if r < minWeight:
                        continue
                    #all weights are in the range [0, 1], so scale that up to RGB 255 scale
                    fc = int(math.floor(r*255))
                    rgb = (255, 255-fc, 255-fc)
                    fcHash = '#'+bytes.hex(struct.pack('BBB', *rgb))
                    #write the node and include the weight
                    fp.write('{}_{} [label="{}_{} ({:.4f})" style=filled fillcolor="{}"];\n'.format(k, vw, k, vw, r, fcHash))
                #for each visible source node, emit its outgoing edges
                for k2 in nodeTypes:
                    for v2 in sorted(n[k2]):
                        #make sure the target node has enough weight to show up
                        if rankings is not None:
                            r2 = rDict[(k2, v2)]
                            if r2 < minWeight:
                                continue
                        #if an edge exists, it has a weight > 0
                        w = mg.getEdge(k, v, k2, v2)
                        if w > 0.0:
                            wn = mg.getEdge(k, v, k2, v2, True)
                            vw2 = v2.replace(':', '_')
                            if drawEdgeWeights:
                                #include the raw weight and the normalized weight
                                #TODO: option for one or both?
                                fp.write('{}_{} -> {}_{} [label="{}({:.2f})"];\n'.format(k, vw, k2, vw2, w, wn))
                            else:
                                #only include the edge itself
                                fp.write('{}_{} -> {}_{};\n'.format(k, vw, k2, vw2))
        fp.write('}\n')
def visualize_RWR(dotPrefix, imagePrefix, mg, startProbs, restartProb, bg=None, cycleLimit=1000, minWeight=0.0):
    '''
    Run RWR and generate a dot file for each iteration. Requires graphviz to be installed to run "dot".
    @param dotPrefix - dot files will be saved to <dotPrefix>.<iteration>.dot
    @param imagePrefix - image files will be saved to <imagePrefix>.<iteration>.png
    @param mg - an instance of LayeredGraph
    @param startProbs - same as LayeredGraph.RWR_rank(..)
    @param restartProb - same as LayeredGraph.RWR_rank(..)
    @param bg - same as LayeredGraph.RWR_rank(..)
    @param cycleLimit - same as LayeredGraph.RWR_rank(..)
    @param minWeight - the minimum weight on a node to visualize it (default: 0.0)
    '''
    #local import keeps the module-level dependency surface unchanged
    import subprocess
    #first, generate the iterator
    rankTypes = set(mg.nodes.keys())
    rwr_iter = mg.RWR_iter(startProbs, restartProb, rankTypes, bg, cycleLimit)
    for x, rankings in enumerate(rwr_iter):
        dotFN = '.'.join([dotPrefix, str(x), 'dot'])
        pngFN = '.'.join([imagePrefix, str(x), 'png'])
        #create the dot file, then run dot to generate the image file
        saveGraphImage(mg, dotFN, rankings, minWeight=minWeight, drawEdgeWeights=False)
        #an argument list (instead of a shell string) is robust to spaces and
        #shell metacharacters in the file names
        subprocess.run(['dot', '-Tpng', '-o', pngFN, dotFN])
| 4,716 | 1,439 |
import sys
import math
import heapq
from collections import namedtuple
from itertools import chain, zip_longest
def grouper(iterable, n, fillvalue=None):
    """Yield successive n-length tuples from *iterable*, padding the last with *fillvalue*.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # One shared iterator repeated n times makes zip_longest consume n items per tuple.
    shared_iterator = iter(iterable)
    return zip_longest(*([shared_iterator] * n), fillvalue=fillvalue)
# Direction encoding used throughout: NORTH=0, EAST=1, SOUTH=2, WEST=3, STILL=4.
NORTH, EAST, SOUTH, WEST, STILL = range(5)
# The four movement directions (excludes STILL).
DIRECTIONS = [NORTH, EAST, SOUTH, WEST]
# All five options, including staying in place.
ALL_DIRECTIONS = [NORTH, EAST, SOUTH, WEST, STILL]
class PriorityQueue:
    """Minimal min-heap priority queue: the lowest priority value is popped first."""

    def __init__(self):
        self.elements = []

    def empty(self):
        """Return True when no items remain."""
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert *item* with the given *priority*."""
        heapq.heappush(self.elements, (priority, item))

    def get(self):
        """Remove and return the item with the smallest priority."""
        priority, item = heapq.heappop(self.elements)
        return item
def opposite_cardinal(direction):
    "Returns the opposing cardinal direction."
    # STILL has no opposite; the four cardinals are 0..3 and opposites differ by 2.
    if direction == STILL:
        return STILL
    return (direction + 2) % 4
# Immutable board cell: coordinates, owning player id, strength and production values.
Square = namedtuple('Square', 'x y owner strength production')
# A queued order: move the piece on ``square`` in ``direction``.
Move = namedtuple('Move', 'square direction')
class GameMap:
    """Toroidal Halite game board parsed from the environment's text protocol."""

    def __init__(self, size_string, production_string, map_string=None):
        """Parse dimensions and the static production grid, then load the first frame.

        @param size_string - "width height" as emitted by the game environment
        @param production_string - whitespace-separated production values, row-major
        @param map_string - optional first frame; when None it is read from stdin
        """
        self.width, self.height = tuple(map(int, size_string.split()))
        self.production = tuple(tuple(map(int, substring)) for substring in grouper(production_string.split(), self.width))
        self.contents = None
        self.get_frame(map_string)
        # Owner 0 is the neutral player, hence the -1.
        self.starting_player_count = len(set(square.owner for square in self)) - 1

    def get_frame(self, map_string=None):
        "Updates the map information from the latest frame provided by the Halite game environment."
        if map_string is None:
            map_string = get_string()
        split_string = map_string.split()
        # Owners arrive run-length encoded as (count, owner) pairs.
        owners = list()
        while len(owners) < self.width * self.height:
            counter = int(split_string.pop(0))
            owner = int(split_string.pop(0))
            owners.extend([owner] * counter)
        assert len(owners) == self.width * self.height
        # The remaining tokens are the per-square strength values.
        assert len(split_string) == self.width * self.height
        self.contents = [[Square(x, y, owner, strength, production)
                          for x, (owner, strength, production)
                          in enumerate(zip(owner_row, strength_row, production_row))]
                         for y, (owner_row, strength_row, production_row)
                         in enumerate(zip(grouper(owners, self.width),
                                          grouper(map(int, split_string), self.width),
                                          self.production))]

    def __iter__(self):
        "Allows direct iteration over all squares in the GameMap instance."
        return chain.from_iterable(self.contents)

    def neighbors(self, square, n=1, include_self=False):
        "Iterable over the n-distance neighbors of a given square. For single-step neighbors, the enumeration index provides the direction associated with the neighbor."
        assert isinstance(include_self, bool)
        assert isinstance(n, int) and n > 0
        if n == 1:
            combos = ((0, -1), (1, 0), (0, 1), (-1, 0), (0, 0))  # NORTH, EAST, SOUTH, WEST, STILL ... matches indices provided by enumerate(game_map.neighbors(square))
        else:
            combos = ((dx, dy) for dy in range(-n, n+1) for dx in range(-n, n+1) if abs(dx) + abs(dy) <= n)
        # Modulo arithmetic implements the torus wrap-around at the map edges.
        return (self.contents[(square.y + dy) % self.height][(square.x + dx) % self.width] for dx, dy in combos if include_self or dx or dy)

    def get_target(self, square, direction):
        "Returns a single, one-step neighbor in a given direction."
        dx, dy = ((0, -1), (1, 0), (0, 1), (-1, 0), (0, 0))[direction]
        return self.contents[(square.y + dy) % self.height][(square.x + dx) % self.width]

    def get_distance(self, sq1, sq2):
        "Returns Manhattan distance between two squares."
        # Each axis takes the shorter of the direct and wrap-around paths.
        dx = min(abs(sq1.x - sq2.x), sq1.x + self.width - sq2.x, sq2.x + self.width - sq1.x)
        dy = min(abs(sq1.y - sq2.y), sq1.y + self.height - sq2.y, sq2.y + self.height - sq1.y)
        return dx + dy

    def get_direction_toward(self, sq1, sq2):
        """Greedy single step: the direction whose target square is nearest to sq2."""
        best_cost = math.inf
        best_direction = None
        for direction in ALL_DIRECTIONS:
            cur_cost = self.get_distance(self.get_target(sq1, direction), sq2)
            if cur_cost < best_cost:
                best_direction = direction
                best_cost = cur_cost
        return best_direction

    def get_direction_toward_with_A_star(self, sq1, sq2):
        """A* search from sq1 toward sq2; returns the direction of the first step (or STILL)."""
        frontier = PriorityQueue()
        frontier.put(sq1, 0)
        came_from = {}
        cost_so_far = {}
        came_from[sq1] = sq1
        cost_so_far[sq1] = 0
        while not frontier.empty():
            current = frontier.get()
            if current == sq2:
                break
            for next in self.neighbors(current):
                new_cost = cost_so_far[current] + 1  # The value 1 is the cost to step from one square to the next.
                if next not in cost_so_far or new_cost < cost_so_far[next]:
                    cost_so_far[next] = new_cost
                    # Manhattan distance is an admissible heuristic on this grid.
                    priority = new_cost + self.get_distance(sq2, next)
                    frontier.put(next, priority)
                    came_from[next] = current
        # Walk the came_from chain back from sq2 to find the first step out of sq1.
        next_tile = sq2
        while next_tile in came_from.keys() and came_from[next_tile] != sq1:
            next_tile = came_from[next_tile]
        deltaX = sq1.x - next_tile.x
        deltaY = sq1.y - next_tile.y
        # ((0, -1), (1, 0), (0, 1), (-1, 0), (0, 0)) # NORTH, EAST, SOUTH, WEST, STILL
        # The "or" alternatives account for wrap-around across the map edge.
        if (deltaX == 1 or deltaX == -(self.width - 1)) and deltaY == 0:
            return WEST
        elif (deltaX == -1 or deltaX == self.width-1) and deltaY == 0:
            return EAST
        elif deltaX == 0 and (deltaY == -1 or deltaY == self.height-1):
            return SOUTH
        elif deltaX == 0 and (deltaY == 1 or deltaY == -(self.height - 1)):
            return NORTH
        else:
            return STILL
#################################################################
# Functions for communicating with the Halite game environment #
#################################################################
def send_string(s):
    """Write *s* plus a newline to stdout and flush so the engine sees it immediately."""
    out = sys.stdout
    out.write(s)
    out.write('\n')
    out.flush()
def get_string():
    """Read one line from stdin with its trailing newline stripped."""
    line = sys.stdin.readline()
    return line.rstrip('\n')
def get_init():
    """Read the initial handshake: our player id followed by the map description."""
    playerID = int(get_string())
    # The environment sends the size line, then the production line; the first
    # frame is read from stdin inside the GameMap constructor.
    m = GameMap(get_string(), get_string())
    return playerID, m
def send_init(name):
    """Complete the handshake by sending the bot's *name* to the game environment."""
    send_string(name)
def translate_cardinal(direction):
    "Translate direction constants used by this Python-based bot framework to that used by the official Halite game environment."
    # Framework order is NORTH..STILL == 0..4; the engine expects STILL first,
    # so shift by one and wrap STILL (4) around to 0.
    shifted = direction + 1
    return shifted % 5
def send_frame(moves):
    """Serialize *moves* as space-separated "x y direction" triples and send them."""
    fields = []
    for move in moves:
        fields.append(str(move.square.x) + ' ' + str(move.square.y) + ' ' + str(translate_cardinal(move.direction)))
    send_string(' '.join(fields))
| 6,881 | 2,194 |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode} head
    # @return {void} Do not return anything, modify head in-place instead.
    def reorderList(self, head):
        """Reorder L0->L1->...->Ln into L0->Ln->L1->Ln-1->... in place."""
        # Lists of length 0 or 1 are already in order.
        if not head or not head.next:
            return
        # Collect every node so we can index freely.
        nodes = []
        cursor = head
        while cursor:
            nodes.append(cursor)
            cursor = cursor.next
        count = len(nodes)
        half = count // 2
        # Interleave the first half with the reversed second half.
        ordered = [None] * count
        if count % 2:
            ordered[::2] = nodes[:half + 1]
            ordered[1::2] = nodes[half + 1:][::-1]
        else:
            ordered[::2] = nodes[:half]
            ordered[1::2] = nodes[half:][::-1]
        # Rewire the next pointers to match the new order.
        for idx in range(count - 1):
            ordered[idx].next = ordered[idx + 1]
        ordered[-1].next = None
| 934 | 288 |
__author__ = 'itamar'
import sys
from engine.facebook_logic import fb_logic
import logging
import webapp2
from engine.DAL import DAL
sys.path.insert(0, 'lib') #we need this line in order to make libraries imported from lib folder work properly
import requests
FACEBOOK_APP_ID = "683953828381840"
class APILoginHandler(webapp2.RequestHandler):
    """Validates a Facebook login (id + access token) and stores the user's details.

    The ``received`` variable doubles as a status sentinel:
    False -> missing parameters, 2 -> unknown Facebook id,
    -1 -> token validation failed, anything else -> result of DAL.set_user_details.
    """
    def get(self):
        received = False
        _id = self.request.get("id").encode('ascii', 'ignore')
        token = self.request.get("token").encode('ascii', 'ignore')
        if _id == "" or token == "":
            received = False
        else:
            fb = fb_logic()
            if fb.test_id(_id) is False:
                # Facebook does not recognise this id.
                received = 2
            else:
                fb = fb_logic()
                if fb.validate_fb_login(_id, access_token=token) is not False:
                    mydb = DAL()
                    # NOTE(review): validate_fb_login is invoked twice (above and
                    # here) - presumably a remote call each time; confirm and cache.
                    user = fb.validate_fb_login(_id, access_token=token)
                    logging.info(user)
                    try:
                        # The Facebook profile may not expose an email address.
                        email = user["email"].encode('ascii', 'ignore')
                    except:
                        email = None
                    received = mydb.set_user_details(fb_id=int(_id), name=user['first_name'].encode('ascii', 'ignore'),
                                                     last_name=user['last_name'].encode('ascii', 'ignore'),
                                                     email=email)
                    logging.info("received is "+ str(received))
                else:
                    # Token did not validate against the supplied id.
                    received = -1
        logging.info(received)
        # Delegate the HTTP response handling to post().
        self.post(received)
    def post(self, received):
        """Translate the ``received`` sentinel into an HTTP status and body."""
        if received is False:
            self.response.set_status(400)
            self.response.write("ERROR: Missing parameters")
            return
        elif received == -1:
            # NOTE(review): body text is part of the API surface (including the
            # "Aouth" spelling), so it is left untouched here.
            self.response.set_status(401)
            self.response.write("Session Aouth Failed")
        elif received == 2:
            # NOTE(review): 402 is "Payment Required"; an invalid id would
            # conventionally be 400/404 - confirm with API consumers before changing.
            self.response.set_status(402)
            self.response.write("Invalid ID")
        else:
            self.response.set_status(200)
            self.response.write(received)
        return
def get_results(request_url, params):
    """GET *request_url* with *params* (TLS verified) and return (json_body, status_code)."""
    response = requests.get(request_url, params=params, verify=True)
    payload = response.json()
    return payload, response.status_code
login = webapp2.WSGIApplication([
('/login', APILoginHandler)
], debug=True) | 2,389 | 681 |
class _Node:
    """Singly-linked list cell; __slots__ avoids a per-instance __dict__."""
    __slots__ = '_element', '_next'

    def __init__(self, element, next):
        self._element = element  # payload stored in this node
        self._next = next        # following node, or None


class LinkedList:
    """Singly-linked list with head/tail pointers and 1-based positional helpers.

    Fixes over the previous version: removelast no longer crashes on a
    single-element list; addany/removeany handle position 1, empty lists and
    out-of-range positions; insertsorted keeps the tail pointer up to date.
    """

    def __init__(self):
        self._head = None   # first node, or None when empty
        self._tail = None   # last node, or None when empty
        self._size = 0      # number of stored elements

    def __len__(self):
        """Return the number of elements."""
        return self._size

    def isempty(self):
        """Return True when the list holds no elements."""
        return self._size == 0

    def addlast(self, e):
        """Append *e* at the tail in O(1)."""
        newest = _Node(e, None)
        if self.isempty():
            self._head = newest
        else:
            self._tail._next = newest
        self._tail = newest
        self._size += 1

    def addfirst(self, e):
        """Prepend *e* at the head in O(1)."""
        newest = _Node(e, None)
        if self.isempty():
            self._head = newest
            self._tail = newest
        else:
            newest._next = self._head
            self._head = newest
        self._size += 1

    def addany(self, e, position):
        """Insert *e* so that it becomes the *position*-th element (1-based).

        position <= 1 inserts at the head (previously it landed after the
        head); positions past the end append; the tail pointer is maintained.
        """
        if position <= 1 or self.isempty():
            self.addfirst(e)
            return
        if position > self._size:
            self.addlast(e)
            return
        p = self._head
        i = 1
        while i < position - 1:
            p = p._next
            i = i + 1
        newest = _Node(e, p._next)
        p._next = newest
        self._size += 1

    def removefirst(self):
        """Remove and return the head element; prints a message and returns None when empty."""
        if self.isempty():
            print('List is empty')
            return
        e = self._head._element
        self._head = self._head._next
        self._size -= 1
        if self.isempty():
            self._tail = None
        return e

    def removelast(self):
        """Remove and return the tail element in O(n).

        A single-element list previously raised AttributeError; it is now
        handled by delegating to removefirst().
        """
        if self.isempty():
            print('List is empty')
            return
        if self._size == 1:
            return self.removefirst()
        # Walk to the node just before the tail.
        p = self._head
        while p._next is not self._tail:
            p = p._next
        e = self._tail._element
        self._tail = p
        p._next = None
        self._size -= 1
        return e

    def removeany(self, position):
        """Remove and return the *position*-th element (1-based).

        position <= 1 removes the head (previously it removed the second
        node); out-of-range positions remove the tail; the tail pointer and
        empty-list case are handled.
        """
        if self.isempty():
            print('List is empty')
            return
        if position <= 1 or self._size == 1:
            return self.removefirst()
        if position >= self._size:
            return self.removelast()
        p = self._head
        i = 1
        while i < position - 1:
            p = p._next
            i = i + 1
        e = p._next._element
        p._next = p._next._next
        self._size -= 1
        return e

    def display(self):
        """Print the elements separated by '-->' followed by a newline."""
        p = self._head
        while p:
            print(p._element, end='-->')
            p = p._next
        print()

    def search(self, key):
        """Return the 0-based index of the first element equal to *key*, or -1."""
        p = self._head
        index = 0
        while p:
            if p._element == key:
                return index
            p = p._next
            index = index + 1
        return -1

    def insertsorted(self, e):
        """Insert *e* keeping ascending order (assumes the list is already sorted).

        The tail pointer is now updated when *e* becomes the new last (or
        only) element.
        """
        newest = _Node(e, None)
        if self.isempty():
            self._head = newest
            self._tail = newest
        else:
            p = self._head
            q = None
            while p and p._element < e:
                q = p
                p = p._next
            if q is None:
                # e precedes every existing element.
                newest._next = self._head
                self._head = newest
            else:
                newest._next = q._next
                q._next = newest
                if newest._next is None:
                    self._tail = newest
        self._size += 1
| 2,996 | 977 |
"""Trivia module."""
import urllib3
import json
import random
import html
class Trivia:
    """Defining base class for inheritance."""
    @staticmethod
    def trivia():
        """Get random questions from opentdb trivia API."""
        http = urllib3.PoolManager()
        req_return = http.request('GET', 'https://opentdb.com/api.php?amount=1')
        trivia_data = json.loads(req_return.data.decode('utf-8'))
        # Mix the correct answer in with the incorrect ones before shuffling so
        # its position in the options line is random.
        all_answers = trivia_data['results'][0]['incorrect_answers']
        all_answers.insert(0, trivia_data['results'][0]['correct_answer'])
        random.shuffle(all_answers)
        comma = ","
        shuffled_string = comma.join(all_answers)
        # html.unescape decodes entities such as &quot; in the question text.
        return f"""Trivia:
        {html.unescape(trivia_data['results'][0]['question'])}
        Options: {shuffled_string}
        """
| 806 | 267 |
import os
import sipconfig
#CAS: this is a win32 version, specific to my machine, provided for example.
# The name of the SIP build file generated by SIP and used by the build
# system.
build_file = "blist.sbf"
# Get the SIP configuration information.
config = sipconfig.Configuration()
# Run SIP to generate the code.
os.system(" ".join([config.sip_bin, "-c", ".", "-b", build_file, "sip/blist.sip"]))
# Create the Makefile.
makefile = sipconfig.SIPModuleMakefile(config, build_file)
# Add the library we are wrapping. The name doesn't include any platform
# specific prefixes or extensions (e.g. the "lib" prefix on UNIX, or the
# ".dll" extension on Windows).
makefile.extra_libs = ['buddylist']
# Machine-specific include/lib paths for boost and the wrapped C++ build output.
makefile.extra_include_dirs.append(r'C:\Users\Christopher\workspace\boost_1_42_0')
# MSVC flag enabling standard C++ exception-handling semantics.
makefile.extra_cxxflags.append('//EHsc')
makefile.extra_lib_dirs.extend([r'C:\Users\Christopher\workspace\digsby\ext\src\BuddyList\msvc2008\Release'])
# NOTE(review): reaching into the private _build dict is fragile across
# sipconfig versions, but there is no public API for extra objects/sources.
makefile._build['objects'] += " PythonInterface.obj FileUtils.obj"
makefile._build['sources'] += " PythonInterface.cpp FileUtils.cpp"
# Generate the Makefile itself.
makefile.generate()
| 1,159 | 401 |
'''
This module contains shortcuts to simplify accessing package internals when
using raspicam as a form of library, or scripting it.
'''
from os.path import dirname, join
from .main import Application
# Single source of truth for the package version: read it from the
# version.txt file that ships alongside this module.
with open(join(dirname(__file__), 'version.txt')) as fp:
    __version__ = fp.read().strip()
| 298 | 87 |
# # -*- coding: utf-8 -*-
# from __future__ import unicode_literals
#
# from django.contrib.postgres.fields import JSONField
# from django.contrib.auth.models import User
# from .pagelists.components import COMPONENT_CHOICES
# from .pagelists.content import CONTENT_CHOICES
# from ..languages.models import LANGUAGE_CODE_CHOICES, SUPPORTED_LANGUAGES, DEFAULT_LANGUAGE
#
# from django.db import models
#
# class PageResources(models.Model):
# SUPPORTED_LANGUAGES = SUPPORTED_LANGUAGES
#
# component = models.CharField(max_length=2, choices=COMPONENT_CHOICES)
# content_type = models.CharField(max_length=2, choices=CONTENT_CHOICES)
# page_name = models.CharField(max_length=100)
# data = JSONField()
# language = models.CharField(max_length=2, choices=LANGUAGE_CODE_CHOICES, default=DEFAULT_LANGUAGE)
# language_validated = models.BooleanField(default=False)
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
# created_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name="PagesCreated")
# updated_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name="PagesUpdated")
| 1,196 | 413 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import json
import mock
from dashboard.services import swarming_service
class _SwarmingTest(unittest.TestCase):
    """Shared fixture: patches ServiceAccountHttp with a MagicMock and offers
    helpers to stub 200/500 responses and assert on the request made."""
    def setUp(self):
        patcher = mock.patch('dashboard.common.utils.ServiceAccountHttp')
        self.__http = mock.MagicMock()
        service_account_http = patcher.start()
        service_account_http.return_value = self.__http
        self.addCleanup(patcher.stop)
    def _Set200ReturnValue(self):
        """Make the next request return HTTP 200 with an empty content payload."""
        self.__SetRequestReturnValue({'status': '200'}, {'content': {}})
    def _Set500ReturnValue(self):
        """Make the next request return HTTP 500 with an errors payload."""
        self.__SetRequestReturnValue({'status': '500'}, {'errors': {}})
    def _Assert200Response(self, content):
        self.assertEqual(content, {'content': {}})
    def _AssertRequestMade(self, path, *args, **kwargs):
        """Verify exactly one request was issued to API_BASE_URL + path."""
        self.__http.request.assert_called_once_with(
            swarming_service.API_BASE_URL + path, *args, **kwargs)
    def __SetRequestReturnValue(self, response, content):
        # httplib2-style return shape: (response_headers_dict, body_string).
        self.__http.request.return_value = (response, json.dumps(content))
class BotTest(_SwarmingTest):
    """Covers the bot/<id> endpoints."""
    def testGet(self):
        self._Set200ReturnValue()
        response = swarming_service.Bot('bot_id').Get()
        self._Assert200Response(response)
        self._AssertRequestMade('bot/bot_id/get', 'GET')
    def testTasks(self):
        self._Set200ReturnValue()
        response = swarming_service.Bot('bot_id').Tasks()
        self._Assert200Response(response)
        self._AssertRequestMade('bot/bot_id/tasks', 'GET')
class BotsTest(_SwarmingTest):
    """Covers the bots/list endpoint, including query-string encoding."""
    def testList(self):
        self._Set200ReturnValue()
        response = swarming_service.Bots().List(
            'CkMSPWoQ', {'pool': 'Chrome-perf', 'a': 'b'}, False, 1, True)
        self._Assert200Response(response)
        # Dimensions are URL-encoded as key%3Avalue pairs, sorted by key.
        path = ('bots/list?cursor=CkMSPWoQ&dimensions=a%3Ab&'
                'dimensions=pool%3AChrome-perf&is_dead=false&'
                'limit=1&quarantined=true')
        self._AssertRequestMade(path, 'GET')
class TaskTest(_SwarmingTest):
    """Covers the task/<id> endpoints."""
    def testCancel(self):
        self._Set200ReturnValue()
        response = swarming_service.Task('task_id').Cancel()
        self._Assert200Response(response)
        self._AssertRequestMade('task/task_id/cancel', 'POST')
    def testRequest(self):
        self._Set200ReturnValue()
        response = swarming_service.Task('task_id').Request()
        self._Assert200Response(response)
        self._AssertRequestMade('task/task_id/request', 'GET')
    def testResult(self):
        self._Set200ReturnValue()
        response = swarming_service.Task('task_id').Result()
        self._Assert200Response(response)
        self._AssertRequestMade('task/task_id/result', 'GET')
    def testResultWithPerformanceStats(self):
        # Result(True) adds the include_performance_stats query parameter.
        self._Set200ReturnValue()
        response = swarming_service.Task('task_id').Result(True)
        self._Assert200Response(response)
        self._AssertRequestMade(
            'task/task_id/result?include_performance_stats=true', 'GET')
    def testStdout(self):
        self._Set200ReturnValue()
        response = swarming_service.Task('task_id').Stdout()
        self._Assert200Response(response)
        self._AssertRequestMade('task/task_id/stdout', 'GET')
class TasksTest(_SwarmingTest):
    """Covers tasks/new; the body must round-trip as JSON with a JSON content type."""
    def testNew(self):
        body = {
            'name': 'name',
            'user': 'user',
            'priority': '100',
            'expiration_secs': '600',
            'properties': {
                'inputs_ref': {
                    'isolated': 'isolated_hash',
                },
                'extra_args': ['--output-format=json'],
                'dimensions': [
                    {'key': 'id', 'value': 'bot_id'},
                    {'key': 'pool', 'value': 'Chrome-perf'},
                ],
                'execution_timeout_secs': '3600',
                'io_timeout_secs': '3600',
            },
            'tags': [
                'id:bot_id',
                'pool:Chrome-perf',
            ],
        }
        self._Set200ReturnValue()
        response = swarming_service.Tasks().New(body)
        self._Assert200Response(response)
        self._AssertRequestMade('tasks/new', 'POST',
                                body=json.dumps(body),
                                headers={'Content-Type': 'application/json'})
class FailureTest(_SwarmingTest):
    """Non-200 responses must surface as SwarmingError."""
    def testBotGet(self):
        self._Set500ReturnValue()
        with self.assertRaises(swarming_service.SwarmingError):
            swarming_service.Bot('bot_id').Get()
        self._AssertRequestMade('bot/bot_id/get', 'GET')
| 4,365 | 1,490 |
# Tests the rest calls in picasso/rest.py
# TODO Make a TestLib and add common setup and teardown functions
import time
import unittest
from unittest import TestCase
import requests
from threading import Thread
import ims.common.config as config
config.load()
import ims.picasso.rest as rest
from ims.rpc.server.name_server import start_name_server
from ims.rpc.server.rpc_server import start_rpc_server
import ims.common.constants as constants
import ims.einstein.ceph as ceph
from ims.common.log import trace
from ims.database.database import Database
from ims.einstein.operations import BMI
_cfg = config.get()
# Test fixtures: endpoint, credentials and resource names all come from the
# tests section of the BMI configuration loaded above.
PICASSO_URL = _cfg.tests.picasso_url
CORRECT_HIL_USERNAME = _cfg.tests.correct_hil_username
CORRECT_HIL_PASSWORD = _cfg.tests.correct_hil_password
INCORRECT_HIL_PASSWORD = _cfg.tests.incorrect_hil_password
NODE_NAME = _cfg.tests.node_name
NIC = _cfg.tests.nic
PROJECT = _cfg.tests.project
NETWORK = _cfg.tests.network
EXIST_IMG_NAME = _cfg.tests.exist_img_name
NEW_SNAP_NAME = _cfg.tests.new_snap_name
NOT_EXIST_IMG_NAME = _cfg.tests.not_exist_img_name
NOT_EXIST_SNAP_NAME = _cfg.tests.not_exist_snap_name
# The Coverage Issue for these tests was that no coverage data was
# being generated for the server processes.
# To solve this I made the server processes to run as threads as
# coverage.py covers threads by default.
# The next issue was stopping these threads when the tests were done.
# To solve this I created a child thread that starts the server threads
# as daemon threads and checks a global variable.
# When the global variable is true the thread exits and the server threads
# die as they are daemon threads.
# This successfully results in flushing of coverage data.
threads = []
stop_services = False
def start_services():
    """Run the name server, RPC server and REST API as daemon threads.

    Blocks until ``stop_services`` is set by tearDownModule; because the
    workers are daemons they die with this thread, which lets coverage.py
    flush its data (see the module comment above).
    """
    global threads, stop_services
    stop_services = False
    threads = [Thread(target=start_name_server),
               Thread(target=start_rpc_server),
               Thread(target=rest.start)]
    for t in threads:
        t.daemon = True
        t.start()
    # NOTE(review): sleep(0) only yields the GIL, so this is a busy-wait that
    # spins a core; a small positive interval would be gentler.
    while not stop_services:
        time.sleep(0)
def setUpModule():
    """Launch all BMI services in a background thread before any test runs."""
    t = Thread(target=start_services)
    t.start()
def tearDownModule():
    """Signal start_services() to exit, which kills its daemon server threads."""
    global stop_services
    stop_services = True
class TestProvision(TestCase):
    """
    Tests Rest Provision call by importing an image and calling provision
    """
    @trace
    def setUp(self):
        # Register the project and pre-import the image the call will clone.
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)
    def runTest(self):
        data = {constants.PROJECT_PARAMETER: PROJECT,
                constants.NODE_NAME_PARAMETER: NODE_NAME,
                constants.IMAGE_NAME_PARAMETER: EXIST_IMG_NAME,
                constants.NETWORK_PARAMETER: NETWORK,
                constants.NIC_PARAMETER: NIC}
        res = requests.put(PICASSO_URL + "provision/", data=data,
                           auth=(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD))
        self.assertEqual(res.status_code, 200)
        # Give HIL time to apply the networking change before teardown.
        time.sleep(constants.HIL_CALL_TIMEOUT)
    def tearDown(self):
        self.good_bmi.deprovision(NODE_NAME, NETWORK, NIC)
        self.good_bmi.remove_image(EXIST_IMG_NAME)
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
        time.sleep(constants.HIL_CALL_TIMEOUT)
class TestDeprovision(TestCase):
    """
    Tests Rest Deprovision call by doing previous steps and calling deprovision
    """
    @trace
    def setUp(self):
        # Provision a node first so there is something to deprovision.
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)
        self.good_bmi.provision(NODE_NAME, EXIST_IMG_NAME, NETWORK, NIC)
        time.sleep(constants.HIL_CALL_TIMEOUT)
    def runTest(self):
        data = {constants.PROJECT_PARAMETER: PROJECT,
                constants.NODE_NAME_PARAMETER: NODE_NAME,
                constants.NETWORK_PARAMETER: NETWORK,
                constants.NIC_PARAMETER: NIC}
        res = requests.delete(PICASSO_URL + "deprovision/", data=data,
                              auth=(
                                  CORRECT_HIL_USERNAME,
                                  CORRECT_HIL_PASSWORD))
        self.assertEqual(res.status_code, 200)
        time.sleep(constants.HIL_CALL_TIMEOUT)
    def tearDown(self):
        self.good_bmi.remove_image(EXIST_IMG_NAME)
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
class TestCreateSnapshot(TestCase):
    """
    Calls provision like TestProvision then creates a snapshot using rest call
    """
    @trace
    def setUp(self):
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)
        self.good_bmi.provision(NODE_NAME, EXIST_IMG_NAME, NETWORK, NIC)
        time.sleep(constants.HIL_CALL_TIMEOUT)
    def runTest(self):
        data = {constants.PROJECT_PARAMETER: PROJECT,
                constants.NODE_NAME_PARAMETER: NODE_NAME,
                constants.SNAP_NAME_PARAMETER: NEW_SNAP_NAME}
        res = requests.put(PICASSO_URL + "create_snapshot/", data=data,
                           auth=(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD))
        self.assertEqual(res.status_code, 200)
        # The snapshot must be recorded in the BMI database...
        snaps = self.db.image.fetch_snapshots_from_project(PROJECT)
        has_image = False
        for snapshot in snaps:
            if NEW_SNAP_NAME == snapshot[0]:
                has_image = True
        self.assertTrue(has_image)
        # ...and must actually exist as a ceph image (get_image raises otherwise).
        with ceph.RBD(_cfg.fs,
                      _cfg.iscsi.password) as fs:
            img_id = self.good_bmi.get_ceph_image_name_from_project(
                NEW_SNAP_NAME, PROJECT)
            fs.get_image(img_id)
    def tearDown(self):
        self.good_bmi.deprovision(NODE_NAME, NETWORK, NIC)
        self.good_bmi.remove_image(NEW_SNAP_NAME)
        self.good_bmi.remove_image(EXIST_IMG_NAME)
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
        time.sleep(constants.HIL_CALL_TIMEOUT)
class TestListSnapshots(TestCase):
    """
    Does the same steps as previous test and calls list snapshots rest call

    Fix: the response status was asserted twice in runTest; the duplicate
    assertion has been removed.
    """
    @trace
    def setUp(self):
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)
        self.good_bmi.provision(NODE_NAME, EXIST_IMG_NAME, NETWORK, NIC)
        time.sleep(constants.HIL_CALL_TIMEOUT)
        self.good_bmi.create_snapshot(NODE_NAME, NEW_SNAP_NAME)
    def runTest(self):
        data = {constants.PROJECT_PARAMETER: PROJECT}
        res = requests.post(PICASSO_URL + "list_snapshots/", data=data,
                            auth=(CORRECT_HIL_USERNAME,
                                  CORRECT_HIL_PASSWORD))
        self.assertEqual(res.status_code, 200)
        js = res.json()
        # The only snapshot present should be the one created in setUp.
        self.assertEqual(js[0][0], NEW_SNAP_NAME)
    def tearDown(self):
        self.good_bmi.deprovision(NODE_NAME, NETWORK, NIC)
        self.good_bmi.remove_image(NEW_SNAP_NAME)
        self.good_bmi.remove_image(EXIST_IMG_NAME)
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
        time.sleep(constants.HIL_CALL_TIMEOUT)
@unittest.skip('Same as Remove Image')
class TestRemoveSnapshot(TestCase):
    """
    Skipped: a snapshot is also an image in our terms currently, so
    removing one is identical to removing an image (covered by
    TestRemoveImage).
    """
    def setUp(self):
        pass
    def runTest(self):
        pass
    def tearDown(self):
        pass
class TestListImages(TestCase):
    """
    Imports an image into the project, then checks that the list_images
    REST endpoint reports exactly that image.
    """
    @trace
    def setUp(self):
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)

    def runTest(self):
        payload = {constants.PROJECT_PARAMETER: PROJECT}
        response = requests.post(PICASSO_URL + "list_images/", data=payload,
                                 auth=(CORRECT_HIL_USERNAME,
                                       CORRECT_HIL_PASSWORD))
        body = response.json()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(body[0], EXIST_IMG_NAME)

    def tearDown(self):
        self.good_bmi.remove_image(EXIST_IMG_NAME)
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
class TestRemoveImage(TestCase):
    """
    Imports an image, removes it through the remove_image REST endpoint,
    and checks the call succeeds.
    """
    @trace
    def setUp(self):
        self.db = Database()
        self.db.project.insert(PROJECT, NETWORK)
        self.good_bmi = BMI(CORRECT_HIL_USERNAME, CORRECT_HIL_PASSWORD,
                            PROJECT)
        self.good_bmi.import_ceph_image(EXIST_IMG_NAME)

    def runTest(self):
        payload = {constants.PROJECT_PARAMETER: PROJECT,
                   constants.IMAGE_NAME_PARAMETER: EXIST_IMG_NAME}
        response = requests.delete(PICASSO_URL + "remove_image/",
                                   data=payload,
                                   auth=(CORRECT_HIL_USERNAME,
                                         CORRECT_HIL_PASSWORD))
        self.assertEqual(response.status_code, 200)

    def tearDown(self):
        # The image itself was removed by the test; only project/db cleanup.
        self.db.project.delete_with_name(PROJECT)
        self.db.close()
        self.good_bmi.shutdown()
| 9,871 | 3,356 |
import asyncio
import logging
import logging.handlers
import os
import queue
class Heartbeat:
    """Async context manager that periodically wakes the event loop.

    While active, a background task sleeps in a loop so the loop schedules
    work at least every ``interval`` seconds.
    """

    def __init__(self, interval=.5):
        self._interval = interval
        # Background asyncio task; None while not running.
        self._beat = None

    async def heartbeat(self):
        """Tick forever; terminated by cancellation from stop()."""
        while True:
            await asyncio.sleep(self._interval)

    async def start(self):
        """Launch the heartbeat task if not already running; return self."""
        if self._beat is None:
            self._beat = asyncio.ensure_future(self.heartbeat())
        return self

    async def stop(self):
        """Cancel the heartbeat task and wait until it has wound down."""
        self._beat.cancel()
        while not self._beat.done():
            try:
                await self._beat
            except asyncio.CancelledError:
                continue

    async def __aenter__(self):
        return await self.start()

    async def __aexit__(self, exc_type, exc_value, traceback):
        return await self.stop()
def singleton(class_):
    """Class decorator: cache the first instance and reuse it forever.

    Constructor arguments are honoured only on the first call; subsequent
    calls return the cached instance regardless of their arguments.
    """
    _cache = {}

    def getinstance(*args, **kwargs):
        try:
            return _cache[class_]
        except KeyError:
            _cache[class_] = class_(*args, **kwargs)
            return _cache[class_]
    return getinstance
@singleton
class RotateHandlers:
    """Process-wide registry of callbacks to run when logs are rotated.

    Decorated with @singleton so every caller shares one registry.
    """

    def __init__(self):
        self._callbacks = []

    def add_callback(self, cb):
        """Register *cb* to be invoked on every fire()."""
        self._callbacks.append(cb)

    def fire(self):
        """Invoke all registered rotation callbacks in registration order."""
        for callback in self._callbacks:
            callback()
class OverflowingQueue(queue.Queue):
    """A Queue that silently drops items once full instead of raising.

    Used as a log buffer: if the consumer (log sink) hangs, producers must
    never block or crash, so overflow is simply discarded.
    """

    def put(self, item, block=True, timeout=None):
        """Enqueue *item*; on overflow drop it and return None."""
        try:
            return super().put(item, block, timeout)
        except queue.Full:
            # The sink stopped draining; drop rather than stall the caller.
            return None

    def put_nowait(self, item):
        """Non-blocking put with the same drop-on-overflow semantics."""
        return self.put(item, block=False)
class AsyncLoggingHandler:
    """Context manager providing a non-blocking logging handler.

    Records are pushed onto a bounded queue (overflow is dropped by
    OverflowingQueue) while a background QueueListener thread forwards
    them to the real *handler*.
    """

    def __init__(self, handler, maxsize=1024):
        record_queue = OverflowingQueue(maxsize)
        self._async_handler = logging.handlers.QueueHandler(record_queue)
        self._listener = logging.handlers.QueueListener(record_queue, handler)

    def __enter__(self):
        # Start the forwarding thread and hand back the producer handler.
        self._listener.start()
        return self._async_handler

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush remaining records and stop the forwarding thread.
        self._listener.stop()
def raw_log_handler(verbosity, logfile=None):
    """Build a formatted log handler at the given *verbosity* level.

    With *logfile*, returns a file handler — a WatchedFileHandler on POSIX
    so external log rotation is picked up, a plain FileHandler on Windows —
    and registers a rotation callback with the RotateHandlers singleton.
    Without *logfile*, returns a stderr StreamHandler.
    """
    if logfile:
        if is_nt():
            handler = logging.FileHandler(logfile)
        else:
            handler = logging.handlers.WatchedFileHandler(logfile)

        def rotate_cb():
            # Best effort: FileHandler (Windows branch) has no
            # reopenIfNeeded(), and reopening may fail transiently; never
            # let that propagate into the logging path.
            # Fixed: was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            try:
                handler.reopenIfNeeded()
            except Exception:
                pass
        RotateHandlers().add_callback(rotate_cb)
    else:
        handler = logging.StreamHandler()
    handler.setLevel(verbosity)
    handler.setFormatter(logging.Formatter('%(asctime)s '
                                           '%(levelname)-8s '
                                           '%(name)s: %(message)s',
                                           '%Y-%m-%d %H:%M:%S'))
    return handler
def setup_logger(name, verbosity, handler):
    """Fetch the named logger, set its level, attach *handler*, return it."""
    configured = logging.getLogger(name)
    configured.setLevel(verbosity)
    configured.addHandler(handler)
    return configured
def enable_uvloop():
    """Install uvloop's event loop policy if available.

    Returns True when uvloop was imported and installed, False when the
    package is not present.
    """
    try:
        import uvloop
    except ImportError:
        return False
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    return True
def is_nt():
    """Return True when running on Windows (os.name == 'nt')."""
    return os.name == 'nt'
| 3,199 | 920 |
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
#
# Code based in part on ``litex`` and ``liteiclink``.
# SPDX-License-Identifier: BSD-3-Clause
""" Soft PIPE backend for the Xilinx 7 Series GTP transceivers. """
from amaranth import *
from amaranth.lib.cdc import FFSynchronizer
from .xc7 import DRPInterface, DRPArbiter, DRPFieldController
from .xc7 import GTResetDeferrer, GTPRXPMAResetWorkaround, GTOOBClockDivider
from .lfps import LFPSSquareWaveGenerator, LFPSSquareWaveDetector
from ..pipe import PIPEInterface
Open = Signal
class GTPQuadPLL(Elaboratable):
    """ Wrapper around the GTPE2_COMMON block, configuring one of its two PLLs.

    ``channel`` selects which PLL (PLL0 or PLL1) is used; the other PLL is
    powered down.  Divider settings are derived from ``refclk_freq`` and
    ``linerate`` by :meth:`compute_config`, which raises ValueError if no
    valid configuration exists.
    """
    def __init__(self, refclk, refclk_freq, linerate, channel=0):
        assert channel in [0, 1]
        self.channel = channel
        self._refclk = refclk
        self._refclk_freq = refclk_freq
        self._linerate = linerate
        # Divider configuration ({"n1", "n2", "m", "d", ...}); raises if the
        # requested refclk/linerate combination is not achievable.
        self.config = self.compute_config(refclk_freq, linerate)
        #
        # I/O ports
        #
        self.clk = Signal()
        self.refclk = Signal()
        self.reset = Signal()
        self.lock = Signal()
        self.drp = DRPInterface()
    def elaborate(self, platform):
        # Parameters/ports are written with "PLLx" (the selected PLL) and
        # "PLLy" (the unused PLL) placeholders, substituted below based on
        # self.channel before instantiation.
        gtpe2_params = dict(
            # Common Block Attributes
            p_BIAS_CFG = 0x0000000000050001,
            p_COMMON_CFG = 0x00000000,
            # PLL Attributes
            p_PLL_CLKOUT_CFG = 0x00,
            p_PLLx_CFG = 0x01F03DC,
            p_PLLx_DMON_CFG = 0b0,
            p_PLLx_FBDIV = self.config["n2"],
            p_PLLx_FBDIV_45 = self.config["n1"],
            p_PLLx_INIT_CFG = 0x00001E,
            p_PLLx_LOCK_CFG = 0x1E8,
            p_PLLx_REFCLK_DIV = self.config["m"],
            # Common Block - Dynamic Reconfiguration Port
            i_DRPCLK = ClockSignal("ss"),
            i_DRPADDR = self.drp.addr,
            i_DRPDI = self.drp.di,
            o_DRPDO = self.drp.do,
            i_DRPWE = self.drp.we,
            i_DRPEN = self.drp.en,
            o_DRPRDY = self.drp.rdy,
            # Common Block - Clocking Ports
            i_GTREFCLK0 = self._refclk,
            o_PLLxOUTCLK = self.clk,
            o_PLLxOUTREFCLK = self.refclk,
            # Common Block - PLL Ports
            o_PLLxLOCK = self.lock,
            i_PLLxLOCKEN = 1,
            i_PLLxPD = 0,
            i_PLLxREFCLKSEL = 0b001,
            i_PLLxRESET = self.reset,
            i_PLLyPD = 1,
            # QPLL Ports
            i_BGBYPASSB = 1,
            i_BGMONITORENB = 1,
            i_BGPDB = 1,
            i_BGRCALOVRD = 0b11111,
            i_RCALENB = 1,
        )
        if self.channel == 0:
            pll_x, pll_y = "PLL0", "PLL1"
        else:
            pll_x, pll_y = "PLL1", "PLL0"
        return Instance("GTPE2_COMMON", **{
            name.replace("PLLx", pll_x).replace("PLLy", pll_y): value
            for name, value in gtpe2_params.items()
        })
    @staticmethod
    def compute_config(refclk_freq, linerate):
        """ Search for divider values (n1, n2, m, d) hitting *linerate*.

        Tries all legal divider combinations, keeping the VCO inside its
        1.6-3.3 GHz operating range; returns the first matching config
        dict, or raises ValueError if none exists.
        """
        for n1 in 4, 5:
            for n2 in 1, 2, 3, 4, 5:
                for m in 1, 2:
                    vco_freq = refclk_freq*(n1*n2)/m
                    if 1.6e9 <= vco_freq <= 3.3e9:
                        for d in 1, 2, 4, 8, 16:
                            current_linerate = vco_freq*2/d
                            if current_linerate == linerate:
                                return {"n1": n1, "n2": n2, "m": m, "d": d,
                                        "vco_freq": vco_freq,
                                        "clkin": refclk_freq,
                                        "linerate": linerate}
        msg = "No config found for {:3.2f} MHz refclk / {:3.2f} Gbps linerate."
        raise ValueError(msg.format(refclk_freq/1e6, linerate/1e9))
    def __repr__(self):
        config = self.config
        r = """
        GTPQuadPLL
        ==========
        overview:
        ---------
        +--------------------------------------------------+
        | |
        | +---------------------------+ +-----+ |
        | +-----+ | Phase Frequency Detector | | | |
        CLKIN +----> /M +--> Charge Pump +-> VCO +---> CLKOUT
        | +-----+ | Loop Filter | | | |
        | +---------------------------+ +--+--+ |
        | ^ | |
        | | +-------+ +-------+ | |
        | +----+ /N2 <----+ /N1 <----+ |
        | +-------+ +-------+ |
        +--------------------------------------------------+
        +-------+
        CLKOUT +-> 2/D +-> LINERATE
        +-------+
        config:
        -------
        CLKIN = {clkin}MHz
        CLKOUT = CLKIN x (N1 x N2) / M = {clkin}MHz x ({n1} x {n2}) / {m}
        = {vco_freq}GHz
        LINERATE = CLKOUT x 2 / D = {vco_freq}GHz x 2 / {d}
        = {linerate}GHz
        """.format(clkin = config["clkin"]/1e6,
                   n1 = config["n1"],
                   n2 = config["n2"],
                   m = config["m"],
                   vco_freq = config["vco_freq"]/1e9,
                   d = config["d"],
                   linerate = config["linerate"]/1e9)
        return r
class GTPChannel(Elaboratable):
    """ Wrapper around a single GTPE2_CHANNEL transceiver lane.

    Exposes 8b/10b-encoded Rx/Tx data ports plus PIPE-style control/status
    signals, clocked by a shared GTPQuadPLL.  ``tx_pads``/``rx_pads`` are
    differential pad pairs (``.p``/``.n``); ``ss_clock_frequency`` is the
    frequency of the "ss" clock domain, used by the reset-sequencing and
    OOB clock-divider helpers.
    """
    def __init__(self, qpll, tx_pads, rx_pads, ss_clock_frequency):
        self._qpll = qpll
        self._tx_pads = tx_pads
        self._rx_pads = rx_pads
        self._ss_clock_frequency = ss_clock_frequency
        # For now, always operate at 2x gearing, and using the corresponding width for
        # the internal data path.
        self._io_words = 2
        self._data_width = self._io_words * 10
        #
        # I/O ports.
        #
        # Dynamic reconfiguration port
        self.drp = DRPInterface()
        # Interface clock
        self.pclk = Signal()
        # Reset sequencing
        self.reset = Signal()
        self.tx_ready = Signal()
        self.rx_ready = Signal()
        # Core Rx and Tx lines
        self.tx_data = Signal(self._io_words * 8)
        self.tx_datak = Signal(self._io_words)
        self.rx_data = Signal(self._io_words * 8)
        self.rx_datak = Signal(self._io_words)
        # TX controls
        self.tx_polarity = Signal()
        self.tx_elec_idle = Signal()
        self.tx_gpio_en = Signal()
        self.tx_gpio = Signal()
        # RX controls
        self.rx_polarity = Signal()
        self.rx_eq_training = Signal()
        self.rx_termination = Signal()
        # RX status
        self.rx_valid = Signal()
        self.rx_status = Signal(3)
        self.rx_elec_idle = Signal()
    def elaborate(self, platform):
        m = Module()
        # Aliases.
        qpll = self._qpll
        io_words = self._io_words
        data_width = self._data_width
        #
        # Clocking.
        #
        # Ensure we have a valid PLL/CDR configuration.
        assert qpll.config["linerate"] < 6.6e9
        # From [UG482: Table 4-14]: CDR Recommended Settings for Protocols with SSC
        # (keyed by the PLL output divider "d").
        rxcdr_cfgs = {
            1: 0x0_0000_87FE_2060_2448_1010,
            2: 0x0_0000_47FE_2060_2450_1010,
            4: 0x0_0000_47FE_1060_2450_1010,
        }
        # Generate the PIPE interface clock from the transmit word clock, and use it to drive both
        # the Tx and the Rx FIFOs, to bring both halves of the data bus to the same clock domain.
        # The recovered Rx clock will not match the generated Tx clock; use the recovered word
        # clock to drive the CTC FIFO in the transceiver, which will compensate for the difference.
        txoutclk = Signal()
        m.submodules += Instance("BUFG",
            i_I=txoutclk,
            o_O=self.pclk
        )
        platform.add_clock_constraint(self.pclk, 250e6)
        # Transceiver uses a 25 MHz clock internally, which needs to be derived from
        # the reference clock.  (Loop leaves clk25_div at the first divider that
        # brings the reference clock to <= 25 MHz.)
        for clk25_div in range(1, 33):
            if qpll._refclk_freq / clk25_div <= 25e6:
                break
        # Out of band sequence detector uses an auxiliary clock whose frequency is derived
        # from the properties of the sequences.
        m.submodules.oob_clkdiv = oob_clkdiv = GTOOBClockDivider(self._ss_clock_frequency)
        #
        # Initialization.
        #
        # Per [AR43482], GTP transceivers must not be reset immediately after configuration.
        m.submodules.defer_rst = defer_rst = GTResetDeferrer(self._ss_clock_frequency)
        m.d.comb += [
            defer_rst.tx_i.eq(~qpll.lock | self.reset),
            defer_rst.rx_i.eq(~qpll.lock | self.reset),
        ]
        # Per [UG482], GTP receiver reset must follow a specific sequence.
        m.submodules.rx_pma_rst = rx_pma_rst = GTPRXPMAResetWorkaround(self._ss_clock_frequency)
        m.d.comb += [
            rx_pma_rst.i.eq(defer_rst.rx_o)
        ]
        # Ready outputs assert only once the deferred reset sequence AND the
        # per-direction transceiver resets have both completed.
        tx_rst_done = Signal()
        rx_rst_done = Signal()
        m.d.comb += [
            self.tx_ready.eq(defer_rst.done & tx_rst_done),
            self.rx_ready.eq(defer_rst.done & rx_rst_done),
        ]
        #
        # Dynamic reconfiguration.
        #
        # rx_termination is asynchronous to the "ss" domain; synchronize it
        # before using it to drive the DRP field controller.
        rx_termination = Signal()
        m.submodules += FFSynchronizer(self.rx_termination, rx_termination, o_domain="ss")
        m.submodules.rx_term = rx_term = DRPFieldController(
            addr=0x0011, bits=slice(4, 6), reset=0b10) # RX_CM_SEL
        m.d.comb += [
            rx_term.value.eq(Mux(rx_termination,
                0b11, # Programmable
                0b10)), # Floating
        ]
        # Arbitrate DRP access between the reset workaround, the termination
        # controller, and the externally-exposed DRP port.
        m.submodules.drp_arbiter = drp_arbiter = DRPArbiter()
        drp_arbiter.add_interface(rx_pma_rst.drp)
        drp_arbiter.add_interface(rx_term.drp)
        drp_arbiter.add_interface(self.drp)
        #
        # Core SerDes instantiation.
        #
        m.submodules.gtp = Instance("GTPE2_CHANNEL",
            # Simulation-Only Attributes
            p_SIM_RECEIVER_DETECT_PASS = "TRUE",
            p_SIM_TX_EIDLE_DRIVE_LEVEL = "X",
            p_SIM_RESET_SPEEDUP = "FALSE",
            p_SIM_VERSION = "2.0",
            # RX 8B/10B Decoder Attributes
            p_RX_DISPERR_SEQ_MATCH = "FALSE",
            p_DEC_MCOMMA_DETECT = "TRUE",
            p_DEC_PCOMMA_DETECT = "TRUE",
            p_DEC_VALID_COMMA_ONLY = "TRUE",
            p_UCODEER_CLR = 0b0,
            # RX Byte and Word Alignment Attributes
            p_ALIGN_COMMA_DOUBLE = "FALSE",
            p_ALIGN_COMMA_ENABLE = 0b1111_111111,
            p_ALIGN_COMMA_WORD = 1,
            p_ALIGN_MCOMMA_DET = "TRUE",
            p_ALIGN_MCOMMA_VALUE = 0b0101_111100, # K28.5 RD- 10b code
            p_ALIGN_PCOMMA_DET = "TRUE",
            p_ALIGN_PCOMMA_VALUE = 0b1010_000011, # K28.5 RD+ 10b code
            p_SHOW_REALIGN_COMMA = "TRUE",
            p_RXSLIDE_AUTO_WAIT = 7,
            p_RXSLIDE_MODE = "OFF",
            p_RX_SIG_VALID_DLY = 10,
            # RX Clock Correction Attributes
            p_CBCC_DATA_SOURCE_SEL = "DECODED",
            p_CLK_CORRECT_USE = "TRUE",
            p_CLK_COR_KEEP_IDLE = "FALSE",
            p_CLK_COR_MAX_LAT = 14,
            p_CLK_COR_MIN_LAT = 11,
            p_CLK_COR_PRECEDENCE = "TRUE",
            p_CLK_COR_REPEAT_WAIT = 0,
            p_CLK_COR_SEQ_LEN = 2,
            p_CLK_COR_SEQ_1_ENABLE = 0b1111,
            p_CLK_COR_SEQ_1_1 = 0b01_001_11100, # K28.1 1+8b code
            p_CLK_COR_SEQ_1_2 = 0b01_001_11100, # K28.1 1+8b code
            p_CLK_COR_SEQ_1_3 = 0b0000000000,
            p_CLK_COR_SEQ_1_4 = 0b0000000000,
            p_CLK_COR_SEQ_2_ENABLE = 0b1111,
            p_CLK_COR_SEQ_2_USE = "FALSE",
            p_CLK_COR_SEQ_2_1 = 0b0000000000,
            p_CLK_COR_SEQ_2_2 = 0b0000000000,
            p_CLK_COR_SEQ_2_3 = 0b0000000000,
            p_CLK_COR_SEQ_2_4 = 0b0000000000,
            # RX Channel Bonding Attributes
            p_CHAN_BOND_KEEP_ALIGN = "FALSE",
            p_CHAN_BOND_MAX_SKEW = 1,
            p_CHAN_BOND_SEQ_LEN = 1,
            p_CHAN_BOND_SEQ_1_1 = 0b0000000000,
            p_CHAN_BOND_SEQ_1_2 = 0b0000000000,
            p_CHAN_BOND_SEQ_1_3 = 0b0000000000,
            p_CHAN_BOND_SEQ_1_4 = 0b0000000000,
            p_CHAN_BOND_SEQ_1_ENABLE = 0b1111,
            p_CHAN_BOND_SEQ_2_1 = 0b0000000000,
            p_CHAN_BOND_SEQ_2_2 = 0b0000000000,
            p_CHAN_BOND_SEQ_2_3 = 0b0000000000,
            p_CHAN_BOND_SEQ_2_4 = 0b0000000000,
            p_CHAN_BOND_SEQ_2_ENABLE = 0b1111,
            p_CHAN_BOND_SEQ_2_USE = "FALSE",
            p_FTS_DESKEW_SEQ_ENABLE = 0b1111,
            p_FTS_LANE_DESKEW_CFG = 0b1111,
            p_FTS_LANE_DESKEW_EN = "FALSE",
            # RX Margin Analysis Attributes
            p_ES_CONTROL = 0b000000,
            p_ES_ERRDET_EN = "FALSE",
            p_ES_EYE_SCAN_EN = "TRUE",
            p_ES_HORZ_OFFSET = 0x000,
            p_ES_PMA_CFG = 0b0000000000,
            p_ES_PRESCALE = 0b00000,
            p_ES_QUALIFIER = 0x00000000000000000000,
            p_ES_QUAL_MASK = 0x00000000000000000000,
            p_ES_SDATA_MASK = 0x00000000000000000000,
            p_ES_VERT_OFFSET = 0b000000000,
            # FPGA RX Interface Attributes
            p_RX_DATA_WIDTH = data_width,
            # PMA Attributes
            p_OUTREFCLK_SEL_INV = 0b11,
            p_PMA_RSV = 0x00000333,
            p_PMA_RSV2 = 0x00002040,
            p_PMA_RSV3 = 0b00,
            p_PMA_RSV4 = 0b0000,
            p_RX_BIAS_CFG = 0b0000111100110011,
            p_DMONITOR_CFG = 0x000A00,
            p_RX_CM_SEL = 0b10,
            p_RX_CM_TRIM = 0b1010,
            p_RX_DEBUG_CFG = 0b00000000000000,
            p_RX_OS_CFG = 0b0000010000000,
            p_TERM_RCAL_CFG = 0b100001000010000,
            p_TERM_RCAL_OVRD = 0b000,
            p_TST_RSV = 0x00000000,
            p_RX_CLK25_DIV = clk25_div,
            p_TX_CLK25_DIV = clk25_div,
            # PCI Express Attributes
            p_PCS_PCIE_EN = "FALSE",
            # PCS Attributes
            p_PCS_RSVD_ATTR = 0x0000_0000_0100, # OOB power up
            # RX Buffer Attributes
            p_RXBUF_ADDR_MODE = "FULL",
            p_RXBUF_EIDLE_HI_CNT = 0b1000,
            p_RXBUF_EIDLE_LO_CNT = 0b0000,
            p_RXBUF_EN = "TRUE",
            p_RX_BUFFER_CFG = 0b000000,
            p_RXBUF_RESET_ON_CB_CHANGE = "TRUE",
            p_RXBUF_RESET_ON_COMMAALIGN = "FALSE",
            p_RXBUF_RESET_ON_EIDLE = "FALSE",
            p_RXBUF_RESET_ON_RATE_CHANGE = "TRUE",
            p_RXBUFRESET_TIME = 0b00001,
            p_RXBUF_THRESH_OVFLW = 61,
            p_RXBUF_THRESH_OVRD = "FALSE",
            p_RXBUF_THRESH_UNDFLW = 4,
            p_RXDLY_CFG = 0x001F,
            p_RXDLY_LCFG = 0x030,
            p_RXDLY_TAP_CFG = 0x0000,
            p_RXPH_CFG = 0xC00002,
            p_RXPHDLY_CFG = 0x084020,
            p_RXPH_MONITOR_SEL = 0b00000,
            p_RX_XCLK_SEL = "RXREC",
            p_RX_DDI_SEL = 0b000000,
            p_RX_DEFER_RESET_BUF_EN = "TRUE",
            # CDR Attributes
            p_RXCDR_CFG = rxcdr_cfgs[qpll.config["d"]],
            p_RXCDR_FR_RESET_ON_EIDLE = 0b0,
            p_RXCDR_HOLD_DURING_EIDLE = 0b0,
            p_RXCDR_PH_RESET_ON_EIDLE = 0b0,
            p_RXCDR_LOCK_CFG = 0b001001,
            # RX Initialization and Reset Attributes
            p_RXCDRFREQRESET_TIME = 0b00001,
            p_RXCDRPHRESET_TIME = 0b00001,
            p_RXISCANRESET_TIME = 0b00001,
            p_RXPCSRESET_TIME = 0b00001,
            p_RXPMARESET_TIME = 0b00011,
            # RX OOB Signaling Attributes
            p_RXOOB_CFG = 0b0000110,
            # RX Gearbox Attributes
            p_RXGEARBOX_EN = "FALSE",
            p_GEARBOX_MODE = 0b000,
            # PRBS Detection Attribute
            p_RXPRBS_ERR_LOOPBACK = 0b0,
            # Power-Down Attributes
            p_PD_TRANS_TIME_FROM_P2 = 0x03c,
            p_PD_TRANS_TIME_NONE_P2 = 0x3c,
            p_PD_TRANS_TIME_TO_P2 = 0x64,
            # RX OOB Signaling Attributes
            p_SAS_MAX_COM = 64,
            p_SAS_MIN_COM = 36,
            p_SATA_BURST_SEQ_LEN = 0b0101,
            p_SATA_BURST_VAL = 0b100,
            p_SATA_EIDLE_VAL = 0b100,
            p_SATA_MAX_BURST = 8,
            p_SATA_MAX_INIT = 21,
            p_SATA_MAX_WAKE = 7,
            p_SATA_MIN_BURST = 4,
            p_SATA_MIN_INIT = 12,
            p_SATA_MIN_WAKE = 4,
            # RX Fabric Clock Output Control Attributes
            p_TRANS_TIME_RATE = 0x0E,
            # TX Buffer Attributes
            p_TXBUF_EN = "TRUE",
            p_TXBUF_RESET_ON_RATE_CHANGE = "TRUE",
            p_TXDLY_CFG = 0x001F,
            p_TXDLY_LCFG = 0x030,
            p_TXDLY_TAP_CFG = 0x0000,
            p_TXPH_CFG = 0x0780,
            p_TXPHDLY_CFG = 0x084020,
            p_TXPH_MONITOR_SEL = 0b00000,
            p_TX_XCLK_SEL = "TXOUT",
            # FPGA TX Interface Attributes
            p_TX_DATA_WIDTH = data_width,
            # TX Configurable Driver Attributes
            p_TX_DEEMPH0 = 0b000000,
            p_TX_DEEMPH1 = 0b000000,
            p_TX_DRIVE_MODE = "DIRECT",
            p_TX_EIDLE_ASSERT_DELAY = 0b110,
            p_TX_EIDLE_DEASSERT_DELAY = 0b100,
            p_TX_LOOPBACK_DRIVE_HIZ = "FALSE",
            p_TX_MAINCURSOR_SEL = 0b0,
            p_TX_MARGIN_FULL_0 = 0b1001110,
            p_TX_MARGIN_FULL_1 = 0b1001001,
            p_TX_MARGIN_FULL_2 = 0b1000101,
            p_TX_MARGIN_FULL_3 = 0b1000010,
            p_TX_MARGIN_FULL_4 = 0b1000000,
            p_TX_MARGIN_LOW_0 = 0b1000110,
            p_TX_MARGIN_LOW_1 = 0b1000100,
            p_TX_MARGIN_LOW_2 = 0b1000010,
            p_TX_MARGIN_LOW_3 = 0b1000000,
            p_TX_MARGIN_LOW_4 = 0b1000000,
            p_TX_PREDRIVER_MODE = 0b0,
            p_PMA_RSV5 = 0b0,
            # TX Gearbox Attributes
            p_TXGEARBOX_EN = "FALSE",
            # TX Initialization and Reset Attributes
            p_TXPCSRESET_TIME = 0b00001,
            p_TXPMARESET_TIME = 0b00001,
            # TX Receiver Detection Attributes
            p_TX_RXDETECT_CFG = 0x1832,
            p_TX_RXDETECT_REF = 0b100,
            # JTAG Attributes
            p_ACJTAG_DEBUG_MODE = 0b0,
            p_ACJTAG_MODE = 0b0,
            p_ACJTAG_RESET = 0b0,
            # CDR Attributes
            p_CFOK_CFG = 0x49000040E80,
            p_CFOK_CFG2 = 0b0100000,
            p_CFOK_CFG3 = 0b0100000,
            p_CFOK_CFG4 = 0b0,
            p_CFOK_CFG5 = 0x0,
            p_CFOK_CFG6 = 0b0000,
            p_RXOSCALRESET_TIME = 0b00011,
            p_RXOSCALRESET_TIMEOUT = 0b00000,
            # PMA Attributes
            p_CLK_COMMON_SWING = 0b0,
            p_RX_CLKMUX_EN = 0b1,
            p_TX_CLKMUX_EN = 0b1,
            p_ES_CLK_PHASE_SEL = 0b0,
            p_USE_PCS_CLK_PHASE_SEL = 0b0,
            p_PMA_RSV6 = 0b0,
            p_PMA_RSV7 = 0b0,
            # RX Fabric Clock Output Control Attributes
            p_RXOUT_DIV = qpll.config["d"],
            # TX Fabric Clock Output Control Attributes
            p_TXOUT_DIV = qpll.config["d"],
            # RX Phase Interpolator Attributes
            p_RXPI_CFG0 = 0b000,
            p_RXPI_CFG1 = 0b1,
            p_RXPI_CFG2 = 0b1,
            # RX Equalizer Attributes
            p_ADAPT_CFG0 = 0x00000,
            p_RXLPMRESET_TIME = 0b0001111,
            p_RXLPM_BIAS_STARTUP_DISABLE = 0b0,
            p_RXLPM_CFG = 0b0110,
            p_RXLPM_CFG1 = 0b0,
            p_RXLPM_CM_CFG = 0b0,
            p_RXLPM_GC_CFG = 0b111100010,
            p_RXLPM_GC_CFG2 = 0b001,
            p_RXLPM_HF_CFG = 0b00001111110000,
            p_RXLPM_HF_CFG2 = 0b01010,
            p_RXLPM_HF_CFG3 = 0b0000,
            p_RXLPM_HOLD_DURING_EIDLE = 0b0,
            p_RXLPM_INCM_CFG = 0b1,
            p_RXLPM_IPCM_CFG = 0b0,
            p_RXLPM_LF_CFG = 0b000000001111110000,
            p_RXLPM_LF_CFG2 = 0b01010,
            p_RXLPM_OSINT_CFG = 0b100,
            # TX Phase Interpolator PPM Controller Attributes
            p_TXPI_CFG0 = 0b00,
            p_TXPI_CFG1 = 0b00,
            p_TXPI_CFG2 = 0b00,
            p_TXPI_CFG3 = 0b0,
            p_TXPI_CFG4 = 0b0,
            p_TXPI_CFG5 = 0b000,
            p_TXPI_GREY_SEL = 0b0,
            p_TXPI_INVSTROBE_SEL = 0b0,
            p_TXPI_PPMCLK_SEL = "TXUSRCLK2",
            p_TXPI_PPM_CFG = 0x00,
            p_TXPI_SYNFREQ_PPM = 0b001,
            # LOOPBACK Attributes
            p_LOOPBACK_CFG = 0b0,
            p_PMA_LOOPBACK_CFG = 0b0,
            # RX OOB Signalling Attributes
            p_RXOOB_CLK_CFG = "FABRIC",
            # TX OOB Signalling Attributes
            p_SATA_PLL_CFG = "VCO_3000MHZ",
            p_TXOOB_CFG = 0b0,
            # RX Buffer Attributes
            p_RXSYNC_MULTILANE = 0b0,
            p_RXSYNC_OVRD = 0b0,
            p_RXSYNC_SKIP_DA = 0b0,
            # TX Buffer Attributes
            p_TXSYNC_MULTILANE = 0b0,
            p_TXSYNC_OVRD = 0b0,
            p_TXSYNC_SKIP_DA = 0b0,
            # CPLL Ports
            i_GTRSVD = 0b0000000000000000,
            i_PCSRSVDIN = 0b0000000000000000,
            i_TSTIN = 0b11111111111111111111,
            # Channel - DRP Ports
            i_DRPCLK = ClockSignal("ss"),
            i_DRPADDR = drp_arbiter.shared.addr,
            i_DRPDI = drp_arbiter.shared.di,
            o_DRPDO = drp_arbiter.shared.do,
            i_DRPWE = drp_arbiter.shared.we,
            i_DRPEN = drp_arbiter.shared.en,
            o_DRPRDY = drp_arbiter.shared.rdy,
            # Transceiver Reset Mode Operation
            i_GTRESETSEL = 0,
            i_RESETOVRD = 0,
            # Clocking Ports
            i_PLL0CLK = qpll.clk if qpll.channel == 0 else 0,
            i_PLL0REFCLK = qpll.refclk if qpll.channel == 0 else 0,
            i_PLL1CLK = qpll.clk if qpll.channel == 1 else 0,
            i_PLL1REFCLK = qpll.refclk if qpll.channel == 1 else 0,
            i_RXSYSCLKSEL = 0b00 if qpll.channel == 0 else 0b11,
            i_TXSYSCLKSEL = 0b00 if qpll.channel == 0 else 0b11,
            # Loopback Ports
            i_LOOPBACK = 0b000,
            # PMA Reserved Ports
            i_PMARSVDIN3 = 0b0,
            i_PMARSVDIN4 = 0b0,
            # Power-Down Ports
            i_RXPD = 0,
            i_TXPD = 0b00,
            # RX Initialization and Reset Ports
            i_EYESCANRESET = 0,
            i_GTRXRESET = rx_pma_rst.o,
            i_RXLPMRESET = 0,
            i_RXOOBRESET = 0,
            i_RXPCSRESET = 0,
            i_RXPMARESET = 0,
            o_RXPMARESETDONE = rx_pma_rst.rxpmaresetdone,
            o_RXRESETDONE = rx_rst_done,
            i_RXUSERRDY = 1,
            # Receive Ports
            i_CLKRSVD0 = 0,
            i_CLKRSVD1 = 0,
            i_DMONFIFORESET = 0,
            i_DMONITORCLK = 0,
            i_SIGVALIDCLK = oob_clkdiv.o,
            # Receive Ports - CDR Ports
            i_RXCDRFREQRESET = 0,
            i_RXCDRHOLD = 0,
            o_RXCDRLOCK = Open(),
            i_RXCDROVRDEN = 0,
            i_RXCDRRESET = 0,
            i_RXCDRRESETRSV = 0,
            i_RXOSCALRESET = 0,
            i_RXOSINTCFG = 0b0010,
            o_RXOSINTDONE = Open(),
            i_RXOSINTHOLD = 0,
            i_RXOSINTOVRDEN = 0,
            i_RXOSINTPD = 0,
            o_RXOSINTSTARTED = Open(),
            i_RXOSINTSTROBE = 0,
            o_RXOSINTSTROBESTARTED = Open(),
            i_RXOSINTTESTOVRDEN = 0,
            # Receive Ports - Clock Correction Ports
            o_RXCLKCORCNT = Open(2),
            # Receive Ports - FPGA RX Interface Datapath Configuration
            i_RX8B10BEN = 1,
            # Receive Ports - FPGA RX Interface Ports
            o_RXDATA = self.rx_data,
            i_RXUSRCLK = self.pclk,
            i_RXUSRCLK2 = self.pclk,
            # Receive Ports - Pattern Checker Ports
            o_RXPRBSERR = Open(),
            i_RXPRBSSEL = 0b000,
            i_RXPRBSCNTRESET = 0,
            # Receive Ports - PCI Express Ports
            o_PHYSTATUS = Open(),
            i_RXRATE = 0,
            o_RXSTATUS = self.rx_status,
            o_RXVALID = self.rx_valid,
            # Receive Ports - RX 8B/10B Decoder Ports
            o_RXCHARISCOMMA = Open(4),
            o_RXCHARISK = self.rx_datak,
            o_RXDISPERR = Open(4),
            o_RXNOTINTABLE = Open(4),
            i_SETERRSTATUS = 0,
            # Receive Ports - RX AFE Ports
            i_GTPRXN = self._rx_pads.n,
            i_GTPRXP = self._rx_pads.p,
            i_PMARSVDIN2 = 0b0,
            o_PMARSVDOUT0 = Open(),
            o_PMARSVDOUT1 = Open(),
            # Receive Ports - RX Buffer Bypass Ports
            i_RXBUFRESET = 0,
            o_RXBUFSTATUS = Open(3),
            i_RXDDIEN = 0,
            i_RXDLYBYPASS = 1,
            i_RXDLYEN = 0,
            i_RXDLYOVRDEN = 0,
            i_RXDLYSRESET = 0,
            o_RXDLYSRESETDONE = Open(),
            i_RXPHALIGN = 0,
            o_RXPHALIGNDONE = Open(),
            i_RXPHALIGNEN = 0,
            i_RXPHDLYPD = 0,
            i_RXPHDLYRESET = 0,
            o_RXPHMONITOR = Open(5),
            i_RXPHOVRDEN = 0,
            o_RXPHSLIPMONITOR = Open(5),
            i_RXSYNCALLIN = 0,
            o_RXSYNCDONE = Open(),
            i_RXSYNCIN = 0,
            i_RXSYNCMODE = 0,
            o_RXSYNCOUT = Open(),
            # Receive Ports - RX Byte and Word Alignment Ports
            o_RXBYTEISALIGNED = Open(),
            o_RXBYTEREALIGN = Open(),
            o_RXCOMMADET = Open(),
            i_RXCOMMADETEN = 1,
            i_RXMCOMMAALIGNEN = 1,
            i_RXPCOMMAALIGNEN = 1,
            i_RXSLIDE = 0,
            # Receive Ports - RX Channel Bonding Ports
            o_RXCHANBONDSEQ = Open(),
            o_RXCHANISALIGNED = Open(),
            o_RXCHANREALIGN = Open(),
            i_RXCHBONDEN = 0,
            i_RXCHBONDI = 0b0000,
            i_RXCHBONDLEVEL = 0b000,
            i_RXCHBONDMASTER = 0,
            o_RXCHBONDO = Open(4),
            i_RXCHBONDSLAVE = 0,
            # Receive Ports - RX Decision Feedback Equalizer
            o_DMONITOROUT = Open(15),
            i_RXADAPTSELTEST = 0,
            i_RXDFEXYDEN = 0,
            i_RXOSINTEN = 0b1,
            i_RXOSINTID0 = 0,
            i_RXOSINTNTRLEN = 0,
            o_RXOSINTSTROBEDONE = Open(),
            # Receive Ports - RX Equalizer Ports
            # Equalizer adaptation is held (frozen) except during RX
            # equalization training.
            i_RXLPMHFHOLD = ~self.rx_eq_training,
            i_RXLPMHFOVRDEN = 0,
            i_RXLPMLFHOLD = ~self.rx_eq_training,
            i_RXLPMLFOVRDEN = 0,
            i_RXLPMOSINTNTRLEN = 0,
            i_RXOSHOLD = ~self.rx_eq_training,
            i_RXOSOVRDEN = 0,
            # Receive Ports - RX Fabric Clock Output Control Ports
            o_RXRATEDONE = Open(),
            i_RXRATEMODE = 0b0,
            # Receive Ports - RX Fabric Output Control Ports
            o_RXOUTCLK = Open(),
            o_RXOUTCLKFABRIC = Open(),
            o_RXOUTCLKPCS = Open(),
            i_RXOUTCLKSEL = 0b010,
            # Receive Ports - RX Gearbox Ports
            o_RXDATAVALID = Open(2),
            o_RXHEADER = Open(3),
            o_RXHEADERVALID = Open(),
            o_RXSTARTOFSEQ = Open(2),
            i_RXGEARBOXSLIP = 0,
            # Receive Ports - RX Margin Analysis Ports
            o_EYESCANDATAERROR = Open(),
            i_EYESCANMODE = 0,
            i_EYESCANTRIGGER = 0,
            # Receive Ports - RX OOB Signaling Ports
            o_RXCOMSASDET = Open(),
            o_RXCOMWAKEDET = Open(),
            o_RXCOMINITDET = Open(),
            o_RXELECIDLE = self.rx_elec_idle,
            i_RXELECIDLEMODE = 0b00,
            # Receive Ports - RX Polarity Control Ports
            i_RXPOLARITY = self.rx_polarity,
            # TX Initialization and Reset Ports
            i_CFGRESET = 0,
            i_GTTXRESET = defer_rst.tx_o,
            i_TXPCSRESET = 0,
            i_TXPMARESET = 0,
            o_TXPMARESETDONE = Open(),
            o_TXRESETDONE = tx_rst_done,
            i_TXUSERRDY = 1,
            o_PCSRSVDOUT = Open(),
            # Transmit Ports - Configurable Driver Ports
            o_GTPTXN = self._tx_pads.n,
            o_GTPTXP = self._tx_pads.p,
            i_TXBUFDIFFCTRL = 0b100,
            i_TXDEEMPH = 0,
            i_TXDIFFCTRL = 0b1000,
            i_TXDIFFPD = 0,
            # When GPIO (LFPS) mode is active, the normal serial output is
            # inhibited; the lane is driven via TXPOLARITY below.
            i_TXINHIBIT = self.tx_gpio_en,
            i_TXMAINCURSOR = 0b0000000,
            i_TXPISOPD = 0,
            i_TXPOSTCURSOR = 0b00000,
            i_TXPOSTCURSORINV = 0,
            i_TXPRECURSOR = 0b00000,
            i_TXPRECURSORINV = 0,
            i_PMARSVDIN0 = 0b0,
            i_PMARSVDIN1 = 0b0,
            # Transmit Ports - FPGA TX Interface Datapath Configuration
            i_TX8B10BEN = 1,
            # Transmit Ports - FPGA TX Interface Ports
            i_TXUSRCLK = self.pclk,
            i_TXUSRCLK2 = self.pclk,
            # Transmit Ports - PCI Express Ports
            i_TXELECIDLE = ~self.tx_gpio_en & self.tx_elec_idle,
            i_TXMARGIN = 0,
            i_TXRATE = 0b000,
            i_TXSWING = 0,
            # Transmit Ports - Pattern Generator Ports
            i_TXPRBSSEL = 0b000,
            i_TXPRBSFORCEERR = 0,
            # Transmit Ports - TX 8B/10B Encoder Ports
            i_TX8B10BBYPASS = 0b0000,
            i_TXCHARDISPMODE = 0b0000,
            i_TXCHARDISPVAL = 0b0000,
            i_TXCHARISK = self.tx_datak,
            # Transmit Ports - TX Data Path Interface
            i_TXDATA = self.tx_data,
            # Transmit Ports - TX Buffer Bypass Ports
            i_TXDLYBYPASS = 1,
            i_TXDLYEN = 0,
            i_TXDLYHOLD = 0,
            i_TXDLYOVRDEN = 0,
            i_TXDLYSRESET = 0,
            o_TXDLYSRESETDONE = Open(),
            i_TXDLYUPDOWN = 0,
            i_TXPHALIGN = 0,
            o_TXPHALIGNDONE = Open(),
            i_TXPHALIGNEN = 0,
            i_TXPHDLYPD = 0,
            i_TXPHDLYRESET = 0,
            i_TXPHDLYTSTCLK = 0,
            i_TXPHINIT = 0,
            o_TXPHINITDONE = Open(),
            i_TXPHOVRDEN = 0,
            # Transmit Ports - TX Buffer Ports
            o_TXBUFSTATUS = Open(2),
            # Transmit Ports - TX Buffer and Phase Alignment Ports
            i_TXSYNCALLIN = 0,
            o_TXSYNCDONE = Open(),
            i_TXSYNCIN = 0,
            i_TXSYNCMODE = 0,
            o_TXSYNCOUT = Open(),
            # Transmit Ports - TX Fabric Clock Output Control Ports
            o_TXOUTCLK = txoutclk,
            o_TXOUTCLKFABRIC = Open(),
            o_TXOUTCLKPCS = Open(),
            i_TXOUTCLKSEL = 0b010,
            i_TXRATEMODE = 0,
            o_TXRATEDONE = Open(),
            # Transmit Ports - TX Gearbox Ports
            o_TXGEARBOXREADY = Open(),
            i_TXHEADER = 0b000,
            i_TXSEQUENCE = 0b0000000,
            i_TXSTARTSEQ = 0,
            # Transmit Ports - TX OOB Signalling Ports
            o_TXCOMFINISH = Open(),
            i_TXCOMINIT = 0,
            i_TXCOMSAS = 0,
            i_TXCOMWAKE = 0,
            i_TXPDELECIDLEMODE = 0,
            # Transmit Ports - TX Phase Interpolator PPM Controller Ports
            i_TXPIPPMEN = 0,
            i_TXPIPPMOVRDEN = 0,
            i_TXPIPPMPD = 0,
            i_TXPIPPMSEL = 1,
            i_TXPIPPMSTEPSIZE = 0,
            # Transmit Ports - TX Polarity Control Ports
            # In GPIO mode the lane level is toggled through the polarity
            # input; otherwise the configured polarity is applied.
            i_TXPOLARITY = self.tx_polarity ^ (self.tx_gpio_en & self.tx_gpio),
            # Transmit Ports - TX Receiver Detection Ports
            i_TXDETECTRX = 0,
        )
        return m
class XC7GTPSerDesPIPE(PIPEInterface, Elaboratable):
    """ Wrapper around the core GTP SerDes that adapts it to the PIPE interface.
    The implementation-dependent behavior of the standard PIPE signals is described below:
    width :
        Interface width. Always 2 symbols.
    clk :
        Reference clock for the PHY receiver and transmitter. Could be routed through fabric,
        or connected to the output of an ``IBUFDS_GTE2`` block.
    pclk :
        Clock for the PHY interface. Frequency is always 250 MHz.
    phy_mode :
        PHY operating mode. Only SuperSpeed USB mode is supported.
    elas_buf_mode :
        Elastic buffer mode. Only nominal half-full mode is supported.
    rate :
        Link signaling rate. Only 5 GT/s is supported.
    power_down :
        Power management mode. Only P0 is supported.
    tx_deemph :
        Transmitter de-emphasis level. Only TBD is supported.
    tx_margin :
        Transmitter voltage levels. Only TBD is supported.
    tx_swing :
        Transmitter voltage swing level. Only full swing is supported.
    tx_detrx_lpbk :
    tx_elec_idle :
        Transmit control signals. Loopback and receiver detection are not implemented.
    tx_compliance :
    tx_ones_zeroes :
        These inputs are not implemented.
    power_present :
        This output is not implemented. External logic may drive it if necessary.
    """
    def __init__(self, *, tx_pads, rx_pads, refclk_frequency, ss_clock_frequency):
        # Fixed 2-symbol interface width (see class docstring).
        super().__init__(width=2)
        self._tx_pads = tx_pads
        self._rx_pads = rx_pads
        self._refclk_frequency = refclk_frequency
        self._ss_clock_frequency = ss_clock_frequency
    def elaborate(self, platform):
        m = Module()
        #
        # PLL and SerDes instantiation.
        #
        # QPLL configured for the fixed 5 Gbps SuperSpeed line rate.
        m.submodules.qpll = qpll = GTPQuadPLL(
            refclk = self.clk,
            refclk_freq = self._refclk_frequency,
            linerate = 5e9
        )
        m.submodules.serdes = serdes = GTPChannel(
            qpll = qpll,
            tx_pads = self._tx_pads,
            rx_pads = self._rx_pads,
            ss_clock_frequency = self._ss_clock_frequency
        )
        # Our soft PHY includes some logic that needs to run synchronously to the PIPE clock; create
        # a local clock domain to drive it.
        m.domains.pipe = ClockDomain(local=True, async_reset=True)
        m.d.comb += [
            ClockSignal("pipe") .eq(serdes.pclk),
        ]
        #
        # LFPS generation.
        #
        # LFPS square wave (25 MHz) generated in the 250 MHz PIPE clock
        # domain and injected through the channel's GPIO override path.
        m.submodules.lfps_generator = lfps_generator = LFPSSquareWaveGenerator(25e6, 250e6)
        m.d.comb += [
            serdes.tx_gpio_en .eq(lfps_generator.tx_gpio_en),
            serdes.tx_gpio .eq(lfps_generator.tx_gpio),
        ]
        #
        # PIPE interface signaling.
        #
        m.d.comb += [
            qpll.reset .eq(self.reset),
            serdes.reset .eq(self.reset),
            self.pclk .eq(serdes.pclk),
            serdes.tx_elec_idle .eq(self.tx_elec_idle),
            serdes.rx_polarity .eq(self.rx_polarity),
            serdes.rx_eq_training .eq(self.rx_eq_training),
            serdes.rx_termination .eq(self.rx_termination),
            # Per PIPE, LFPS is requested by asserting tx_detrx_lpbk while
            # in electrical idle.
            lfps_generator.generate .eq(self.tx_detrx_lpbk & self.tx_elec_idle),
            # phy_status deasserts once the transmitter reset has completed.
            self.phy_status .eq(~serdes.tx_ready),
            self.rx_valid .eq(serdes.rx_valid),
            self.rx_status .eq(serdes.rx_status),
            self.rx_elec_idle .eq(serdes.rx_elec_idle),
            serdes.tx_data .eq(self.tx_data),
            serdes.tx_datak .eq(self.tx_datak),
            self.rx_data .eq(serdes.rx_data),
            self.rx_datak .eq(serdes.rx_datak),
        ]
        return m
| 41,012 | 15,317 |
"""Create a powder diffraction figure"""
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import sys
sys.path.append('..')
from plot_diffraction_patterns import powder_diffr_fig
# Input data: integrated 1D measured patterns and reference peak lists.
measured_patterns_dir = "../../../results/intermediate/integrated_1D/PS_0p0V_a"
reference_peaks_dir = "../../../results/intermediate/peaks_references"
# Output figure (SVG).
figure_fn = "../../../results/final/PS_0p0V_a.svg"
# Reference phases whose peak positions are shown alongside the data.
references = ['Pd3.97', 'Pd3.91', 'Pd', 'CuCl']
# 1-based position of the measured-patterns subplot within the figure.
position_subplot_measured=4
#layers = {'Corrosion\nproducts': (0, 149),
# 'Metallic\nglass': (149, 167)}
# Build the multi-panel figure; returns the Figure and the list of axes
# (project helper — see plot_diffraction_patterns.powder_diffr_fig).
fig, axs = powder_diffr_fig(measured_patterns_dir=measured_patterns_dir,
                            position_subplot_measured=position_subplot_measured,
                            reference_peaks_dir=reference_peaks_dir,
                            offset_patterns=100,
                            label_every_nth_pattern=10,#no labels wanted
                            references=references,
                            twotheta_range=[27, 36],
                            linewidth=0.3,
                            #layers=layers,
                            height_ratio_measured_to_reference=7)
# Annotate the measured-data panel (axs is 0-based, hence the -1).
ax_measured = axs[position_subplot_measured -1]
ax_measured.set(ylim=[-13500, 6500])
ax_measured.annotate('Corrosion\nproducts', xy=(1, 0.6),
                     xycoords='axes fraction',
                     xytext=(13, 0), textcoords='offset points', va='top',
                     color='black')
ax_measured.annotate('Metallic\nglass', xy=(1, 0.15),
                     xycoords='axes fraction',
                     xytext=(20, 0), textcoords='offset points', va='top',
                     color='magenta')
# Give the first three reference panels a common y-range.
for i in range(3):
    ax_i = axs[i]
    ax_i.set(ylim=[0, 45])
# Label the Pd reference panel with its lattice parameter.
ax_Pd = axs[2]
ax_Pd.annotate(r'$a = 3.89 \AA$', xy=(1, 0.5), xycoords='axes fraction',
               xytext=(10, 0), textcoords='offset points', va='top',
               color='blue')
ax_CuCl = axs[-1]
ax_CuCl.set(ylim=[0, 21])
#toplabel = ax_measured.annotate('$y = 4.671 mm', xy=)
# Two-theta axis ticks: major every 2 degrees, minor every 0.5.
axs[-1].xaxis.set_major_locator(MultipleLocator(2))
axs[-1].xaxis.set_minor_locator(MultipleLocator(0.5))
fig.savefig(figure_fn)
#plt.grid()
#plt.show()
| 2,202 | 830 |
#!/usr/bin/env python
import os, sys, random, string
from common import *
import rethinkdb as r
# RethinkDB schema names used by makeDB()/main() below.
AC = 'ac' # DB
WORDS = 'words' # TABLE
LINE = 'line' # COLUMN
WORD = 'word' # COLUMN
FREQ = 'freq' # COLUMN
PREFS = 'prefs' # TABLE
PREF = 'pref' # COLUMN
LOWER = 'lower' # COLUMN
UPPER = 'upper' # COLUMN
# Shell template for serving a static directory via nginx (%s = host dir).
NGINX_EXAMPLE = 'docker run -d -p 80:80 -v %s:/usr/share/nginx/html:ro nginx'
def makeDB(host):
conn = r.connect(host, 28015)
dbs = r.db_list().run(conn)
if AC in dbs:
return 'already there'
r.db_create(AC).run(conn)
r.db(AC).table_create(WORDS, primary_key = LINE).run(conn)
r.db(AC).table_create(PREFS, primary_key = PREF).run(conn)
ra = {LINE: None, WORD: None, FREQ: None}
f = open(os.path.join(SCRIPT_DIR, "wordsCSV.txt"), 'r')
for line in f:
line = line.strip()
linesplit = line.split(',')
ra[LINE] = int(linesplit[0])
ra[WORD] = linesplit[1]
ra[FREQ] = int(linesplit[2])
if int(linesplit[0]) % 5000 == 0:
print linesplit[0]
r.db(AC).table(WORDS).insert(ra).run(conn)
f.close()
print "========================"
g = open(os.path.join(SCRIPT_DIR, "rangesCSV.txt"), 'r')
ra = {PREF: None, LOWER: None, UPPER: None}
for line in g:
line = line.strip()
linesplit = line.split(',')
ra[PREF] = linesplit[0]
ra[LOWER] = int(linesplit[1])
ra[UPPER] = int(linesplit[2])
if len(linesplit[0]) == 1:
print linesplit[0]
r.db(AC).table(PREFS).insert(ra).run(conn)
g.close()
return 'initialized'
def main():
    # Build, push and deploy the autocomplete lambda app against a local
    # cluster, then seed its RethinkDB backend.
    # Returns None on success, or an error-message string on failure
    # (the __main__ guard below treats any non-None return as an error).
    # NOTE(review): run/rdjs/wrjs come from `common` (via `from common import *`);
    # presumably shell-execute / read-JSON / write-JSON helpers -- confirm.
    app_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))  # random app/image name
    static_dir = os.path.join(SCRIPT_DIR, 'static')
    root_dir = os.path.join(SCRIPT_DIR, '..', '..')
    cluster_dir = os.path.join(root_dir, 'util', 'cluster')
    builder_dir = os.path.join(root_dir, 'lambda-generator')
    # Absence of the cluster dir means no cluster is up to deploy against.
    if not os.path.exists(cluster_dir):
        return 'cluster not running'
    # build image
    print '='*40
    print 'Building image'
    builder = os.path.join(builder_dir, 'builder.py')
    run(builder + ' -n %s -l %s -c %s -e %s' %
        (app_name,
        os.path.join(SCRIPT_DIR, 'autocomplete.py'),
        os.path.join(SCRIPT_DIR, 'lambda-config.json'),
        os.path.join(SCRIPT_DIR, 'environment.json')))
    # push image to the cluster's local docker registry
    print '='*40
    print 'Pushing image'
    registry = rdjs(os.path.join(cluster_dir, 'registry.json'))
    img = 'localhost:%s/%s' % (registry['host_port'], app_name)
    run('docker tag -f %s %s' % (app_name, img))
    run('docker push ' + img)
    # setup config: point the static frontend at the load balancer URL
    worker0 = rdjs(os.path.join(cluster_dir, 'worker-0.json'))
    balancer = rdjs(os.path.join(cluster_dir, 'loadbalancer-1.json'))
    config_file = os.path.join(static_dir, 'config.json')
    url = ("http://%s:%s/runLambda/%s" %
           (balancer['host_ip'], balancer['host_port'], app_name))
    wrjs(config_file, {'url': url})
    #init DB on the first worker node
    print '='*40
    print 'Init DB'
    makeDB(worker0['ip'])
    # directions for serving the static frontend
    print '='*40
    print 'Consider serving the app with nginx as follows:'
    print NGINX_EXAMPLE % static_dir
    return None
if __name__ == '__main__':
rv = main()
if rv != None:
print 'ERROR: ' + rv
sys.exit(1)
sys.exit(0)
| 3,319 | 1,270 |
# Package metadata for CluSim; consumed by setup tooling and documentation.
__package__ = 'clusim'
__title__ = 'CluSim: A python package for clustering similarity'
__description__ = 'This package implements a series of methods to compare \
disjoint, overlapping, and hierarchical clusterings.'
__copyright__ = '2017, Gates, A.J., Ahn YY'
# One author per line, joined with newlines.
__author__ = """\n""".join([
    'Alexander J Gates <ajgates42@gmail.com>',
    'YY Ahn <yyahn@iu.edu>'
])
__version__ = '0.3'
__release__ = '0.3'
| 416 | 155 |
from typing import Dict
class Z:
    """Trivial wrapper around a single integer value."""

    a: int

    def __init__(self, a: int) -> None:
        # Keep the supplied value on the instance.
        self.a = a
def z(b: str) -> Dict[str, Z]:
    """Return a one-entry dict mapping *b* to a fresh Z(2).

    NOTE(review): the print below formats the function object itself
    (its repr), not the argument -- possibly ``{b}`` was intended; confirm.
    """
    print(f'{z}')
    return {b: Z(2)}
# Demo call: prints the function repr, then the returned dict.
print(z(''))
| 182 | 81 |
from .conn import Conn
from .grad import Grad
from .mse import MSE
from .sad import SAD
| 88 | 30 |
import os
import sys
import json
def handle(req):
    """OpenFaaS handler: return a JSON greeting plus the request query string.

    The request body *req* is ignored; the query string is read from the
    Http_Query environment variable (set by the OpenFaaS watchdog).
    """
    # Demonstrate stderr logging (OpenFaaS surfaces stderr separately).
    sys.stderr.write("This should be an error message.\n")
    query = os.getenv("Http_Query")
    payload = { "Hello": "OpenFaaS", 'test': query }
    return json.dumps(payload)
| 219 | 75 |
import os
import torch
import pickle
import argparse
import torch.optim as optim
from gnn_stack import GNNStack
from link_predictor import LinkPredictor
from torch_geometric.data import DataLoader
from ogb.linkproppred import PygLinkPropPredDataset
from train import train
from online_train import online_train
from online_eval import online_eval
from utils import print_and_log
def passed_arguments():
    """Build and parse the command-line arguments for online-graph training."""
    ap = argparse.ArgumentParser(description="Script to train online graph setting")
    ap.add_argument('--data_path', type=str,
                    default='./dataset/online_init:1000-online_nodes:10-seed:0.pkl',
                    help='Path to data .pkl file')
    ap.add_argument('--exp_dir', type=str, default=None,
                    help="Path to exp dir for model checkpoints and experiment logs")
    ap.add_argument('--init_epochs', type=int, default=100,
                    help="Number of epochs for initial subgraph training")
    ap.add_argument('--online_steps', type=int, default=10,
                    help="Number of gradient steps for online learning.")
    ap.add_argument('--init_lr', type=float, default=1e-2,
                    help="Learning rate for initial graph pre-training")
    ap.add_argument('--online_lr', type=float, default=1e-2,
                    help="Learning rate for online node learning")
    ap.add_argument('--node_dim', type=int, default=256,
                    help='Embedding dimension for nodes')
    ap.add_argument('--init_batch_size', type=int, default=1024 * 64,
                    help='Number of links per batch used in initial pre-training')
    ap.add_argument('--online_batch_size', type=int, default=32,
                    help='Number of links per batch used for online learning')
    return ap.parse_args()
if __name__ == "__main__":
    args = passed_arguments()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Fixed model hyperparameters; the rest come from the CLI.
    hidden_dim = 32
    dropout = 0.5
    num_layers = 4
    optim_wd = 0
    init_train_epochs = args.init_epochs
    num_online_steps = args.online_steps
    init_lr = args.init_lr
    online_lr = args.online_lr
    node_emb_dim = args.node_dim
    init_batch_size = args.init_batch_size
    online_batch_size = args.online_batch_size
    path_to_dataset = args.data_path
    exp_dir = args.exp_dir

    # Get dataset (pre-split pickle with an initial subgraph plus per-node
    # online splits).
    with open(path_to_dataset, 'rb') as f:
        dataset = pickle.load(f)
    init_nodes = dataset['init_nodes'].to(device)
    init_edge_index = dataset['init_edge_index'].to(device)
    init_pos_train = init_edge_index[:, ::2].to(device)  # Relying on interleaved order
    online_node_edge_index = dataset['online']

    # Configure experiment saving directories
    if exp_dir is None:
        exp_dir = "./experiments"
    dir = f"online.init_nodes:{len(init_nodes)}.num_online:{len(online_node_edge_index)}" \
          f".{path_to_dataset.split('-')[2]}" \
          f".epochs:{init_train_epochs}.online_steps:{num_online_steps}" \
          f".layers:{num_layers}.hidden_dim:{hidden_dim}.node_dim:{node_emb_dim}" \
          f".init_lr:{init_lr}.online_lr:{online_lr}.optim_wd:{optim_wd}" \
          f".init_batch_size:{init_batch_size}.online_batch_size:{online_batch_size}"
    exp_dir = os.path.join(exp_dir, dir)
    model_dir = os.path.join(exp_dir, 'checkpoints')
    logs_dir = os.path.join(exp_dir, 'logs')
    os.makedirs(exp_dir, exist_ok=True)
    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(logs_dir, exist_ok=True)
    logfile_path = os.path.join(logs_dir, 'log.txt')
    resfile_val_path = os.path.join(logs_dir, 'res_val.pkl')
    resfile_test_path = os.path.join(logs_dir, 'res_test.pkl')
    # Append to an existing log; line-buffered so progress is visible live.
    logfile = open(logfile_path, "a" if os.path.isfile(logfile_path) else "w", buffering=1)

    # Create embedding, model, and optimizer. The embedding table is sized
    # for the initial nodes plus every online node id.
    emb = torch.nn.Embedding(len(init_nodes) + max(online_node_edge_index) + 1, node_emb_dim).to(device)
    model = GNNStack(node_emb_dim, hidden_dim, hidden_dim, num_layers, dropout, emb=True).to(device)
    link_predictor = LinkPredictor(hidden_dim, hidden_dim, 1, num_layers + 1, dropout).to(device)
    optimizer = optim.Adam(
        list(model.parameters()) + list(link_predictor.parameters()) + list(emb.parameters()),
        lr=init_lr, weight_decay=optim_wd
    )

    # Train on initial subgraph
    for e in range(init_train_epochs):
        loss = train(model, link_predictor, emb.weight[:len(init_nodes)], init_edge_index, init_pos_train.T,
                     init_batch_size, optimizer)
        print_and_log(logfile, f"Epoch {e + 1}/{init_train_epochs}: Loss = {round(loss, 5)}")
        if (e + 1) % 20 == 0:
            torch.save(model.state_dict(), os.path.join(model_dir, f"init_train:{e}.pt"))

    # New optimizer for online learning (don't update GraphSAGE)
    optimizer = optim.Adam(
        list(link_predictor.parameters()) + list(emb.parameters()),
        lr=online_lr, weight_decay=optim_wd
    )
    curr_nodes = init_nodes
    curr_edge_index = init_edge_index  # (2, E)
    val_preds, test_preds = {}, {}
    for n_id, node_split in online_node_edge_index.items():
        train_msg, train_sup, train_neg, valid, valid_neg, test, test_neg = \
            node_split['train_msg'], node_split['train_sup'], node_split['train_neg'], \
            node_split['valid'], node_split['valid_neg'], node_split['test'], node_split['test_neg']
        train_msg = train_msg.to(device)
        train_sup = train_sup.to(device)
        train_neg = train_neg.to(device)
        valid = valid.to(device)
        valid_neg = valid_neg.to(device)
        test = test.to(device)
        test_neg = test_neg.to(device)
        # Add message edges to edge index
        curr_edge_index = torch.cat((curr_edge_index, train_msg.T), dim=1)  # (2, E+Tr_msg)
        # Add new node to list of curr_nodes
        curr_nodes = torch.cat((curr_nodes, torch.as_tensor([n_id], device=device)))
        # Create new embedding for n_id
        # optimizer.param_groups[0]['params'].extend(node_emb.parameters())
        # Warm start embedding for new node
        with torch.no_grad():
            emb.weight[n_id] = emb.weight[curr_nodes].mean(dim=0)
        # Nodes are ordered sequentially (online node ids start at len(init_nodes))
        for t in range(num_online_steps):
            loss = online_train(model, link_predictor, emb.weight[:n_id + 1],
                                curr_edge_index, train_sup, train_neg, online_batch_size, optimizer, device)
            print_and_log(logfile, f"Step {t + 1}/{num_online_steps}: loss = {round(loss, 5)}")
        torch.save(model.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_model.pt"))
        torch.save(emb.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_emb.pt"))
        torch.save(link_predictor.state_dict(), os.path.join(model_dir, f"online_id:{n_id}_lp.pt"))
        val_tp, val_tn, val_fp, val_fn, preds = online_eval(model, link_predictor, emb.weight[:n_id + 1],
                                                            curr_edge_index, valid, valid_neg, online_batch_size)
        val_preds[n_id] = preds
        # Bug fix: the test evaluation previously passed `valid` as its
        # positive edges (while `test` was loaded but never used), so the
        # reported TEST metrics were actually computed on validation edges.
        test_tp, test_tn, test_fp, test_fn, preds = online_eval(model, link_predictor, emb.weight[:n_id + 1],
                                                                curr_edge_index, test, test_neg, online_batch_size)
        test_preds[n_id] = preds
        print_and_log(logfile, f"For node {n_id}")
        print_and_log(logfile, f"VAL accuracy: {(val_tp + val_tn) / (val_tp + val_tn + val_fp + val_fn)}")
        print_and_log(logfile, f"VAL tp: {val_tp}, fn: {val_fn}, tn: {val_tn}, fp: {val_fp}")
        print_and_log(logfile, f"TEST accuracy: {(test_tp + test_tn) / (test_tp + test_tn + test_fp + test_fn)}")
        print_and_log(logfile, f"TEST tp: {test_tp}, fn: {test_fn}, tn: {test_tn}, fp: {test_fp}")

    # Persist per-node prediction dicts for later analysis.
    with open(resfile_val_path, 'wb') as f:
        pickle.dump(val_preds, f)
    with open(resfile_test_path, 'wb') as f:
        pickle.dump(test_preds, f)
    logfile.close()
| 8,132 | 2,776 |
# Module metadata for the simulation package.
__title__ = "simulation"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
import bokeh.plotting
import pandas as pd
import numpy as np
import warnings
import time
import logging
from datetime import datetime as dt
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta as rd
from typing import Callable
# Local imports
from playground import settings
from playground.notifiers import TwitterNotifier
from playground.simulation import Account, helpers
from playground.util import setup_logger
from playground.util_ops import get_delta_callable_for_tf
class ForwardTestSession():
    """An object representing a Forward Testing Simulation."""
    backdata: pd.DataFrame = None        # historical lookback candles
    yesterday: pd.DataFrame = None       # last closed candle from the previous step
    today: pd.DataFrame = None           # candle currently being processed
    data: pd.DataFrame = pd.DataFrame()  # candles accumulated during the session
    initial_capital: float = 1.0
    pair: dict = None
    tf: dict = None
    tracker: list = None                 # equity-tracking records
    logic: Callable = None               # user-supplied trading-logic callback
    logger: logging.Logger
    _name: str = ''
    _tts: str = ''
    _simple_tts: str = ''
    __start_time: dt = None
    __next_candle: dt = None
    __next_analysis: dt = None
    __analysis_throttle: rd = None
    __verbosity: bool = False
    def __init__(self, data, yesterday, initial_capital, pair, tf, logic,):
        """Initiate the ForwardTestSession.
        :param data: An HLOCV+ pandas dataframe with a datetime index
        :type data: pandas.DataFrame
        :param yesterday: An HLOCV+ pandas dataframe with a datetime index
        :type yesterday: pandas.DataFrame
        :param initial_capital: Starting capital to fund account
        :type initial_capital: float
        :param pair: Operating market pair
        :type pair: MarketPair obj
        :param tf: Operating timeframe
        :type tf: str
        :param logic: A function that will be applied to each lookback period of the data
        :type logic: function
        :return: A forwardtesting simulation
        :rtype: ForwardTestSession
        """
        if not isinstance(data, pd.DataFrame):
            raise ValueError("Data must be a pandas dataframe")
        missing = set(['high', 'low', 'open', 'close', 'volume'])-set(data.columns)
        if len(missing) > 0:
            msg = "Missing {0} column(s), dataframe must be HLOCV+".format(list(missing))
            warnings.warn(msg)
        self.tracker = []
        self.backdata = data.copy()
        self.yesterday = yesterday
        self.today = None
        # Bug fix: `sort_index(inplace=True)` returns None, so the original
        # chained assignment set self.backdata to None. Sort without inplace
        # and assign the sorted frame instead.
        self.backdata = self.backdata.set_index('datetime').sort_index(ascending=False)
        self.account = Account(initial_capital=initial_capital, pair=pair, tf=tf)
        self.logic = logic
        self.pair = pair
        self.tf = tf
        # NOTE(review): the third argument (logic.__name__) is ignored by this
        # two-placeholder template -- possibly a ':: {}' was intended; confirm.
        self._simple_tts = '{} - {}\n\n'.format(
            self.pair, self.tf, logic.__name__,
        )
        self._tts = __name__+'\n\n{} - {}\n :: {}\n\n'.format(
            self.pair, self.tf, logic.__name__,
        )
        self._name = __name__+'. {} - {} :: {} :: {}'.format(
            self.pair, self.tf, logic.__name__, str(dt.now().date()),
        ).replace(' ', '')
        self.logger = setup_logger(name=self._name)
        # rd stands for relativedelta
        rd_call: Callable = None
        rd_args: dict = None
        rd_call, rd_args = get_delta_callable_for_tf(tf=self.tf)
        self.__verbosity = settings.FORWARDTESTING_VERBOSITY
        self.__analysis_throttle = rd_call(**rd_args)
        # Schedule the next candle/analysis from the last closed candle time.
        self.__next_candle = (dt.fromtimestamp(self.yesterday.time) + self.__analysis_throttle)
        self.__next_analysis = (self.__next_candle + self.__analysis_throttle)
        self.__start_time = dt.now()
        self.logger.info('Forwardtesting session started for: {}-{} using {} at {} '.format(
            self.pair, self.tf, self.logic.__name__, self.__start_time,
            ),
        )
        self.logger.info('next analysis {}'.format(self.__next_analysis))
    def update_dataset(self, dataset):
        """Replace the lookback dataset used by the trading logic.
        :param dataset: An HLOCV+ pandas dataframe with a datetime index
        :type dataset: pandas.DataFrame
        """
        self.backdata = dataset
    def process(self, today):
        """Process one step of the ForwardTestSession.
        :param today: An HLOCV+ pandas dataframe with the last closed candle
        :type today: pandas.DataFrame
        """
        current_time = dt.now()
        # Throttle: only act once the scheduled analysis time has passed.
        if current_time > (self.__next_analysis):
            self.logger.info(
                'Processing... %-4s - %-4s - %-4s ' + '------------'*10,
                self.pair, self.tf, today.datetime,
            )
            self.logger.info(
                'O: %-6.6g - H: %-6.6g - L: %-6.6g - C: %-6.6g - V: %-6.6g - MRFI:' \
                +' %-6.6g - SMRFI: %-6.6g - RSI: %-6.6g - MFI: %-6.6g - EMA50: %-6.6g - EMA100: %-6.6g', \
                today.open, today.high, today.low, today.close, today.volumeto, today.mrfi,
                today.smrfi, today.rsi, today.mfi, today.ema50, today.ema100,
            )
            date = today.get('datetime')
            equity = self.account.total_value(today.close)
            # NOTE: DataFrame.append is deprecated in modern pandas; kept for
            # compatibility with the pandas version this project pins.
            self.data = self.data.append(today)
            self.data.sort_index(inplace=True, ascending=False)
            # Handle stop loss: close any position whose stop was hit this candle.
            for p in self.account.positions:
                if p.type == "long":
                    if p.stop_hit(today.get('low')):
                        self.account.close_position(p, 1.0, today.get('low'))
                if p.type == "short":
                    if p.stop_hit(today.get('high')):
                        self.account.close_position(p, 1.0, today.get('high'))
            self.account.purge_positions()
            # Update account variables
            self.account.date = date
            self.account.equity.append(equity)
            # Equity tracking
            self.tracker.append({
                'date': date,
                'benchmark_equity': today.get('close'),
                'strategy_equity': equity,
            })
            self.logger.info('Executing trading logic... LookbackData: {} :: Data: {}'.format(
                self.backdata.shape, self.data.shape
            ))
            # Execute trading logic and allow full lookback
            self.logic(
                name=self._name,
                pair=self.pair,
                timeframe=self.tf,
                account=self.account,
                dataset=self.backdata,
                lookback=self.data,
                logger=self.logger,
                last_candle=today,
                _tts=self._tts,
                _simple_tts=self._simple_tts
            )
            # Advance the schedule for the next candle and analysis.
            self.__next_candle = (dt.fromtimestamp(today.time) + self.__analysis_throttle)
            self.__next_analysis = (self.__next_analysis + self.__analysis_throttle)
            self.yesterday = today
            # Cleanup empty positions
            # self.account.purge_positions()
            # ------------------------------------------------------------
    def print_results(self):
        """Log a summary of buy-and-hold vs strategy performance."""
        self.logger.info("-------------- Results ----------------\n")
        being_price = self.data.iloc[0].open
        final_price = self.data.iloc[-1].close
        pc = helpers.percent_change(being_price, final_price)
        tweet_string = "--{}--\n".format(self._name)
        # Bug fix: both placeholders were {0}, so the begin price printed
        # twice and final_price was silently ignored.
        tweet_string += "Begin vs end : {0} {1}\n".format(being_price, final_price)
        tweet_string += "Buy and Hold : {0}%\n".format(round(pc*100, 2))
        tweet_string += "Net Profit   : {0}\n".format(round(helpers.profit(self.account.initial_capital, pc), 2))
        pc = helpers.percent_change(self.account.initial_capital, self.account.total_value(final_price))
        tweet_string += "Strategy     : {0}%\n".format(round(pc*100, 2))
        tweet_string += "Net Profit   : {0}\n".format(round(helpers.profit(self.account.initial_capital, pc), 2))
        longs  = len([t for t in self.account.opened_trades if t.type == 'long'])
        sells  = len([t for t in self.account.closed_trades if t.type == 'long'])
        shorts = len([t for t in self.account.opened_trades if t.type == 'short'])
        covers = len([t for t in self.account.closed_trades if t.type == 'short'])
        tweet_string += "Longs        : {0}\n".format(longs)
        tweet_string += "Sells        : {0}\n".format(sells)
        tweet_string += "Shorts       : {0}\n".format(shorts)
        tweet_string += "Covers       : {0}\n".format(covers)
        tweet_string += "--------------------\n"
        tweet_string += "Total Trades : {0}\n".format(longs + sells + shorts + covers)
        tweet_string += "---------------------------------------"
        self.logger.info(tweet_string)
        #tn = TwitterNotifier()
        #tn.post_results_tweet(tweet_string)
    def _get_results(self):
        """
        Return results as dict.
        # TODO: please.... lol
        # """
        longs  = len([t for t in self.account.opened_trades if t.type == 'long'])
        sells  = len([t for t in self.account.closed_trades if t.type == 'long'])
        shorts = len([t for t in self.account.opened_trades if t.type == 'short'])
        covers = len([t for t in self.account.closed_trades if t.type == 'short'])
        if len(self.data) != 0:
            begin_price = self.data.iloc[0].open
            final_price = self.data.iloc[-1].close
            buy_hold_pc = helpers.percent_change(begin_price, final_price)
            strategy_pc = helpers.percent_change(self.account.initial_capital, self.account.total_value(final_price))
            return {
                'name': self._name,
                'begin_price': begin_price,
                'final_price': final_price,
                'buy_and_hold': {
                    'rate_on_equity': round(buy_hold_pc*100, 2),
                    'net_profit': round(helpers.profit(self.account.initial_capital, buy_hold_pc), 2),
                },
                'strategy':{
                    'rate_on_equity': round(strategy_pc*100, 2),
                    'net_profit': round(helpers.profit(self.account.initial_capital, strategy_pc), 2),
                    'long_count': longs,
                    'sell_count': sells,
                    'short_count': shorts,
                    'cover_count': covers,
                    'total': longs + sells + shorts + covers,
                },
                'positions': self.account._get_positions(),
            }
        else:
            # No candles processed yet: report zeroed performance figures.
            begin_price = 'N/A'
            final_price = 'N/A'
            buy_hold_pc = 'N/A'
            strategy_pc = 'N/A'
            return {
                'name': self._name,
                'begin_price': begin_price,
                'final_price': final_price,
                'buy_and_hold': {
                    'rate_on_equity': 0,
                    'net_profit': 0,
                },
                'strategy':{
                    'rate_on_equity': 0,
                    'net_profit': 0,
                    'long_count': longs,
                    'sell_count': sells,
                    'short_count': shorts,
                    'cover_count': covers,
                    'total': longs + sells + shorts + covers,
                },
                'positions': self.account._get_positions(),
            }
    def _get_longs(self):
        return self.account._get_longs()
    def _get_shorts(self):
        return self.account._get_shorts()
    def chart(self, show_trades=False, title="Equity Curve"):
        """Chart results.
        :param show_trades: Show trades on plot
        :type show_trades: bool
        :param title: Plot title
        :type title: str
        """
        # Bug fix: the original format string mixed automatic ('{}') and
        # manual ('{0}') field numbering, which raises ValueError at runtime.
        bokeh.plotting.output_file("{0}chart-{1}.html".format(settings.FORWARDTESTS_CHARTS_FOLDER, self._name), title=title)
        p = bokeh.plotting.figure(x_axis_type="datetime", plot_width=1000, plot_height=400, title=title)
        p.grid.grid_line_alpha = 0.3
        p.xaxis.axis_label = 'Date'
        p.yaxis.axis_label = 'Equity'
        shares = self.account.initial_capital/self.data.iloc[-1].open
        base_equity = [price*shares for price in self.data.open]
        p.line(self.data.datetime, base_equity, color='#CAD8DE', legend_label='Buy and Hold')
        p.line(self.data.datetime, self.account.equity, color='#49516F', legend_label='Strategy')
        p.legend.location = "top_left"
        if show_trades:
            for trade in self.account.opened_trades:
                try:
                    x = time.mktime(trade.date.timetuple())*1000
                    y = self.account.equity[np.where(self.data.datetime == trade.date)[0][0]]
                    if trade.type == 'long': p.circle(x, y, size=6, color='green', alpha=0.5)
                    elif trade.type == 'short': p.circle(x, y, size=6, color='red', alpha=0.5)
                # Trades whose date is absent from the data are skipped;
                # narrowed from a bare `except` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                except Exception:
                    pass
            for trade in self.account.closed_trades:
                try:
                    x = time.mktime(trade.date.timetuple())*1000
                    y = self.account.equity[np.where(self.data.datetime == trade.date)[0][0]]
                    if trade.type == 'long': p.circle(x, y, size=6, color='blue', alpha=0.5)
                    elif trade.type == 'short': p.circle(x, y, size=6, color='orange', alpha=0.5)
                except Exception:
                    pass
        bokeh.plotting.show(p)
"""
test evfuncs module
"""
import os
from glob import glob
import unittest
import numpy as np
from scipy.io import loadmat
import evfuncs
class TestEvfuncs(unittest.TestCase):
    """Integration tests for evfuncs against the bundled gy6or6 test data.

    Each test loads real files from test_data/gy6or6_032312_subset and
    checks return types and, where .mat fixtures exist, numerical agreement
    with the original Matlab implementation. The exact `type(x) == T`
    comparisons are deliberate: they assert the precise type, not subclasses.
    """
    def setUp(self):
        # Directory holding the .rec/.cbin/.not.mat/.mat fixtures.
        self.test_data_dir = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            '.', 'test_data',
            'gy6or6_032312_subset',
        )
    def test_readrecf(self):
        """readrecf parses every .rec file into a dict with typed fields."""
        rec_files = sorted(glob(os.path.join(self.test_data_dir, '*.rec')))
        for rec_file in rec_files:
            rec_dict = evfuncs.readrecf(rec_file)
            self.assertTrue('header' in rec_dict)
            self.assertTrue(type(rec_dict['header']) == list)
            self.assertTrue('sample_freq' in rec_dict)
            self.assertTrue(type(rec_dict['sample_freq']) == int
                            or type(rec_dict['sample_freq']) == float)
            self.assertTrue('num_channels' in rec_dict)
            self.assertTrue(type(rec_dict['num_channels']) == int)
            self.assertTrue('num_samples' in rec_dict)
            self.assertTrue(type(rec_dict['num_samples']) == int)
            self.assertTrue('iscatch' in rec_dict)
            self.assertTrue('outfile' in rec_dict)
            self.assertTrue('time_before' in rec_dict)
            self.assertTrue(type(rec_dict['time_before']) == float)
            self.assertTrue('time_after' in rec_dict)
            self.assertTrue(type(rec_dict['time_after']) == float)
            self.assertTrue('thresholds' in rec_dict)
            self.assertTrue(type(rec_dict['thresholds']) == list)
            self.assertTrue(all(
                [type(thresh) == float for thresh in rec_dict['thresholds']]
            ))
            self.assertTrue('feedback_info' in rec_dict)
            self.assertTrue(type(rec_dict['feedback_info']) == dict)
    def test_load_cbin(self):
        """load_cbin returns raw audio as a big-endian int16 array plus rate."""
        cbins = sorted(glob(os.path.join(self.test_data_dir, '*.cbin')))
        for cbin in cbins:
            dat, fs = evfuncs.load_cbin(cbin)
            self.assertTrue(type(dat) == np.ndarray)
            self.assertTrue(dat.dtype == '>i2')  # should be big-endian 16 bit
            self.assertTrue(type(fs) == int)
    def test_load_notmat(self):
        """load_notmat parses annotation .not.mat files into a typed dict."""
        notmats = sorted(glob(os.path.join(self.test_data_dir, '*.not.mat')))
        for notmat in notmats:
            notmat_dict = evfuncs.load_notmat(notmat)
            self.assertTrue(type(notmat_dict) is dict)
            self.assertTrue('onsets' in notmat_dict)
            self.assertTrue(type(notmat_dict['onsets']) == np.ndarray)
            self.assertTrue(notmat_dict['onsets'].dtype == float)
            self.assertTrue('offsets' in notmat_dict)
            self.assertTrue(type(notmat_dict['offsets']) == np.ndarray)
            self.assertTrue(notmat_dict['offsets'].dtype == float)
            self.assertTrue('labels' in notmat_dict)
            self.assertTrue(type(notmat_dict['labels']) == str)
            self.assertTrue('Fs' in notmat_dict)
            self.assertTrue(type(notmat_dict['Fs']) == int)
            self.assertTrue('fname' in notmat_dict)
            self.assertTrue(type(notmat_dict['fname']) == str)
            self.assertTrue('min_int' in notmat_dict)
            self.assertTrue(type(notmat_dict['min_int']) == int)
            self.assertTrue('min_dur' in notmat_dict)
            self.assertTrue(type(notmat_dict['min_dur']) == int)
            self.assertTrue('threshold' in notmat_dict)
            self.assertTrue(type(notmat_dict['threshold']) == int)
            self.assertTrue('sm_win' in notmat_dict)
            self.assertTrue(type(notmat_dict['sm_win']) == int)
    def test_bandpass_filtfilt_works(self):
        """bandpass_filtfilt matches the Matlab 'filtsong' fixture output."""
        cbins = sorted(glob(os.path.join(self.test_data_dir,
                                         '*.cbin')))
        filtsong_mat_files = sorted(glob(os.path.join(self.test_data_dir,
                                                      '*filtsong*.mat')))
        for cbin, filtsong_mat_file in zip(cbins, filtsong_mat_files):
            dat, fs = evfuncs.load_cbin(cbin)
            filtsong = evfuncs.bandpass_filtfilt(dat, fs)
            self.assertTrue(type(filtsong) == np.ndarray)
            filtsong_mat = loadmat(filtsong_mat_file)
            filtsong_mat = np.squeeze(filtsong_mat['filtsong'])
            self.assertTrue(np.allclose(filtsong,
                                        filtsong_mat))
    def test_smooth_data(self):
        """smooth_data depends on freq_cutoffs and matches the Matlab fixture."""
        cbins = sorted(glob(os.path.join(self.test_data_dir, '*.cbin')))
        smooth_data_mat_files = sorted(glob(os.path.join(self.test_data_dir,
                                                         '*smooth_data*.mat')))
        for cbin, smooth_data_mat_file in zip(cbins, smooth_data_mat_files):
            dat, fs = evfuncs.load_cbin(cbin)
            smoothed = evfuncs.smooth_data(dat, fs, freq_cutoffs=None)
            smoothed_500_10k = evfuncs.smooth_data(dat, fs,
                                                   freq_cutoffs=(500, 10000))
            self.assertTrue(type(smoothed) == np.ndarray)
            self.assertTrue(type(smoothed_500_10k) == np.ndarray)
            # Different cutoffs must actually change the output.
            self.assertTrue(not np.all(np.equal(smoothed, smoothed_500_10k)))
            smooth_data_mat = loadmat(smooth_data_mat_file)
            smooth_data_mat = np.squeeze(smooth_data_mat['sm'])
            self.assertTrue(np.allclose(smoothed_500_10k,
                                        smooth_data_mat))
    def test_segment_song(self):
        """segment_song onsets/offsets agree with SegmentNotes.m to < 1 ms."""
        cbins = sorted(glob(os.path.join(self.test_data_dir, '*.cbin')))
        notmats = sorted(glob(os.path.join(self.test_data_dir, '*.not.mat')))
        segment_mats = sorted(glob(os.path.join(self.test_data_dir,
                                                '*unedited_SegmentNotes_output.mat')))
        for cbin, notmat, segment_mat in zip(cbins, notmats, segment_mats):
            dat, fs = evfuncs.load_cbin(cbin)
            smooth = evfuncs.smooth_data(dat, fs)
            nmd = evfuncs.load_notmat(notmat)
            # Annotation durations are stored in ms; segment_song expects s.
            min_syl_dur = nmd['min_dur'] / 1000
            min_silent_dur = nmd['min_int'] / 1000
            threshold = nmd['threshold']
            onsets, offsets = evfuncs.segment_song(smooth, fs,
                                                   threshold, min_syl_dur, min_silent_dur)
            segment_dict = loadmat(segment_mat, squeeze_me=True)
            onsets_mat = segment_dict['onsets']
            offsets_mat = segment_dict['offsets']
            # set tolerances for numpy.allclose check.
            # difference np.abs(offsets - offsets_mat) is usually ~0.00003125...
            # We just don't want error to be larger than a millisecond
            # By trial and error, I find that these vals for tolerance result in
            # about that ceiling
            atol = 0.0005
            rtol = 0.00001
            # i.e., 0.0005 + 0.00001 * some_onsets_or_offset_array ~ [0.0005, 0.0005, ...]
            self.assertTrue(np.allclose(onsets, onsets_mat, rtol, atol))
            self.assertTrue(np.allclose(offsets, offsets_mat, rtol, atol))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 7,117 | 2,362 |
# Service configuration constants. The empty values are deployment secrets /
# environment-specific settings that must be filled in before use.
main_url = "http://www.portalcinema.com.ua/"  # base URL of the cinema portal
info_url = 'products/index/getinfo'  # relative endpoint for product info
poster_url = 'uploads/products/main/'  # relative path to poster images
access_token = ''  # presumably a messenger/page access token -- set before deploy
verify_token = ''  # webhook verification token -- set before deploy
webhook_url = ''  # public callback URL for the webhook -- set before deploy
allowed_host = ''  # host permitted to call this service -- set before deploy
test_id = ''  # presumably a test recipient/user id -- TODO confirm
| 203 | 77 |
# -*- coding: utf-8 -*-
"""
Recursive Optimal Linear Estimator of Quaternion
================================================
This is a modified `OLEQ <./oleq.html>`_, where a recursive estimation of the
attitude is made with the measured angular velocity [Zhou2018]_. This
estimation is set as the initial value for the OLEQ estimation, simplifying the
rotational operations.
First, the quaternion :math:`\\mathbf{q}_\\omega` is estimated from the angular
velocity, :math:`\\boldsymbol\\omega=\\begin{bmatrix}\\omega_x & \\omega_y &
\\omega_z \\end{bmatrix}^T`, measured by the gyroscopes, in rad/s, at a time
:math:`t` as:
.. math::
\\mathbf{q}_\\omega = \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} =
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
Then, the attitude is "corrected" through OLEQ using a single multiplication of
its rotation operator:
.. math::
\\mathbf{q}_\\mathbf{ROLEQ} = \\frac{1}{2}\\Big(\\mathbf{I}_4 + \\sum_{i=1}^na_i\\mathbf{W}_i\\Big)\\mathbf{q}_\\omega
where each :math:`\\mathbf{W}` (one for accelerations and one for magnetic
field) is built from their reference vectors, :math:`D^r`, and measurements,
:math:`D^b`, exactly as in OLEQ:
.. math::
\\begin{array}{rcl}
\\mathbf{W} &=& D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3 \\\\ && \\\\
\\mathbf{M}_1 &=&
\\begin{bmatrix}
D_x^b & 0 & D_z^b & -D_y^b \\\\
0 & D_x^b & D_y^b & D_z^b \\\\
D_z^b & D_y^b & -D_x^b & 0 \\\\
-D_y^b & D_z^b & 0 & -D_x^b
\\end{bmatrix} \\\\
\\mathbf{M}_2 &=&
\\begin{bmatrix}
D_y^b & -D_z^b & 0 & D_x^b \\\\
-D_z^b & -D_y^b & D_x^b & 0 \\\\
0 & D_x^b & D_y^b & D_z^b \\\\
D_x^b & 0 & D_z^b & -D_y^b
\\end{bmatrix} \\\\
\\mathbf{M}_3 &=&
\\begin{bmatrix}
D_z^b & D_y^b & -D_x^b & 0 \\\\
D_y^b & -D_z^b & 0 & D_x^b \\\\
-D_x^b & 0 & -D_z^b & D_y^b \\\\
0 & D_x^b & D_y^b & D_z^b
\\end{bmatrix}
\\end{array}
It is noticeable that, for OLEQ, a random quaternion was used as a starting
value for an iterative procedure to find the optimal quaternion. Here, that
initial value is now :math:`\\mathbf{q}_\\omega` and a simple product (instead
of a large iterative product) is required.
In this way, the quaternions are recursively computed with much fewer
computations, and the accuracy is maintained.
For this case, however the three sensor data (gyroscopes, accelerometers and
magnetometers) have to be provided, along with the an initial quaternion,
:math:`\\mathbf{q}_0` from which the attitude will be built upon.
References
----------
.. [Zhou2018] Zhou, Z.; Wu, J.; Wang, J.; Fourati, H. Optimal, Recursive and
Sub-Optimal Linear Solutions to Attitude Determination from Vector
Observations for GNSS/Accelerometer/Magnetometer Orientation Measurement.
Remote Sens. 2018, 10, 377.
(https://www.mdpi.com/2072-4292/10/3/377)
"""
import numpy as np
from ..common.orientation import ecompass
from ..common.mathfuncs import cosd, sind
class ROLEQ:
"""
Recursive Optimal Linear Estimator of Quaternion
Uses OLEQ to estimate the initial attitude.
Parameters
----------
gyr : numpy.ndarray, default: None
N-by-3 array with measurements of angular velocity in rad/s.
acc : numpy.ndarray, default: None
N-by-3 array with measurements of acceleration in in m/s^2.
mag : numpy.ndarray, default: None
N-by-3 array with measurements of magnetic field in mT.
Attributes
----------
gyr : numpy.ndarray
N-by-3 array with N gyroscope samples.
acc : numpy.ndarray
N-by-3 array with N accelerometer samples.
mag : numpy.ndarray
N-by-3 array with N magnetometer samples.
frequency : float
        Sampling frequency in Hertz
Dt : float
Sampling step in seconds. Inverse of sampling frequency.
Q : numpy.array, default: None
M-by-4 Array with all estimated quaternions, where M is the number of
samples. Equal to None when no estimation is performed.
Raises
------
ValueError
When dimension of input arrays ``gyr``, ``acc`` or ``mag`` are not
equal.
Examples
--------
>>> gyr_data.shape, acc_data.shape, mag_data.shape # NumPy arrays with sensor data
((1000, 3), (1000, 3), (1000, 3))
>>> from ahrs.filters import ROLEQ
>>> orientation = ROLEQ(gyr=gyr_data, acc=acc_data, mag=mag_data)
>>> orientation.Q.shape # Estimated attitude
(1000, 4)
"""
    def __init__(self,
        gyr: np.ndarray = None,
        acc: np.ndarray = None,
        mag: np.ndarray = None,
        weights: np.ndarray = None,
        magnetic_ref: np.ndarray = None,
        frame: str = 'NED',
        **kwargs
        ):
        self.gyr = gyr
        self.acc = acc
        self.mag = mag
        # Weights for the two vector observations (accelerometer, magnetometer);
        # defaults to equal weighting.
        self.a = weights if weights is not None else np.ones(2)
        self.Q = None
        # Sampling parameters: Dt defaults to the inverse of `frequency`.
        self.frequency = kwargs.get('frequency', 100.0)
        self.Dt = kwargs.get('Dt', 1.0/self.frequency)
        # Optional initial quaternion; when None, _compute_all builds one
        # from the first accelerometer/magnetometer samples.
        self.q0 = kwargs.get('q0')
        self.frame = frame
        # Reference measurements
        self._set_reference_frames(magnetic_ref, self.frame)
        # Estimate all quaternions if data is given
        if self.acc is not None and self.gyr is not None and self.mag is not None:
            self.Q = self._compute_all()
def _set_reference_frames(self, mref: float, frame: str = 'NED'):
if frame.upper() not in ['NED', 'ENU']:
raise ValueError(f"Invalid frame '{frame}'. Try 'NED' or 'ENU'")
#### Magnetic Reference Vector ####
if mref is None:
# Local magnetic reference of Munich, Germany
from ..common.constants import MUNICH_LATITUDE, MUNICH_LONGITUDE, MUNICH_HEIGHT
from ..utils.wmm import WMM
wmm = WMM(latitude=MUNICH_LATITUDE, longitude=MUNICH_LONGITUDE, height=MUNICH_HEIGHT)
cd, sd = cosd(wmm.I), sind(wmm.I)
self.m_ref = np.array([sd, 0.0, cd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd])
elif isinstance(mref, (int, float)):
# Use given magnetic dip angle (in degrees)
cd, sd = cosd(mref), sind(mref)
self.m_ref = np.array([sd, 0.0, cd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd])
else:
self.m_ref = np.copy(mref)
self.m_ref /= np.linalg.norm(self.m_ref)
#### Gravitational Reference Vector ####
self.a_ref = np.array([0.0, 0.0, -1.0]) if frame.upper() == 'NED' else np.array([0.0, 0.0, 1.0])
def _compute_all(self) -> np.ndarray:
"""
Estimate the quaternions given all data.
Attributes ``gyr``, ``acc`` and ``mag`` must contain data.
Returns
-------
Q : numpy.ndarray
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.gyr.shape:
raise ValueError("acc and gyr are not the same size")
if self.acc.shape != self.mag.shape:
raise ValueError("acc and mag are not the same size")
num_samples = np.atleast_2d(self.acc).shape[0]
if num_samples < 2:
raise ValueError("ROLEQ needs at least 2 samples of each sensor")
Q = np.zeros((num_samples, 4))
Q[0] = ecompass(-self.acc[0], self.mag[0], frame=self.frame, representation='quaternion') if self.q0 is None else self.q0
for t in range(1, num_samples):
Q[t] = self.update(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])
return Q
def attitude_propagation(self, q: np.ndarray, omega: np.ndarray, dt: float) -> np.ndarray:
"""
Attitude estimation from previous quaternion and current angular velocity.
.. math::
\\mathbf{q}_\\omega = \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} =
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
omega : numpy.ndarray
Angular velocity, in rad/s.
dt : float
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q : numpy.ndarray
Attitude as a quaternion.
"""
Omega_t = np.array([
[0.0, -omega[0], -omega[1], -omega[2]],
[omega[0], 0.0, omega[2], -omega[1]],
[omega[1], -omega[2], 0.0, omega[0]],
[omega[2], omega[1], -omega[0], 0.0]])
q_omega = (np.identity(4) + 0.5*dt*Omega_t) @ q # (eq. 37)
return q_omega/np.linalg.norm(q_omega)
def WW(self, Db, Dr):
"""
W Matrix
.. math::
\\mathbf{W} = D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3
Parameters
----------
Db : numpy.ndarray
Normalized tri-axial observations vector.
Dr : numpy.ndarray
Normalized tri-axial reference vector.
Returns
-------
W_matrix : numpy.ndarray
W Matrix.
"""
bx, by, bz = Db
rx, ry, rz = Dr
M1 = np.array([
[bx, 0.0, bz, -by],
[0.0, bx, by, bz],
[bz, by, -bx, 0.0],
[-by, bz, 0.0, -bx]]) # (eq. 18a)
M2 = np.array([
[by, -bz, 0.0, bx],
[-bz, -by, bx, 0.0],
[0.0, bx, by, bz],
[bx, 0.0, bz, -by]]) # (eq. 18b)
M3 = np.array([
[bz, by, -bx, 0.0],
[by, -bz, 0.0, bx],
[-bx, 0.0, -bz, by],
[0.0, bx, by, bz]]) # (eq. 18c)
return rx*M1 + ry*M2 + rz*M3 # (eq. 20)
def oleq(self, acc: np.ndarray, mag: np.ndarray, q_omega: np.ndarray) -> np.ndarray:
"""
OLEQ with a single rotation by R.
Parameters
----------
acc : numpy.ndarray
Sample of tri-axial Accelerometer.
mag : numpy.ndarray
Sample of tri-axial Magnetometer.
q_omega : numpy.ndarray
Preceding quaternion estimated with angular velocity.
Returns
-------
q : numpy.ndarray
Final quaternion.
"""
a_norm = np.linalg.norm(acc)
m_norm = np.linalg.norm(mag)
if not a_norm > 0 or not m_norm > 0: # handle NaN
return q_omega
acc = np.copy(acc) / np.linalg.norm(acc)
mag = np.copy(mag) / np.linalg.norm(mag)
sum_aW = self.a[0]*self.WW(acc, self.a_ref) + self.a[1]*self.WW(mag, self.m_ref) # (eq. 31)
R = 0.5*(np.identity(4) + sum_aW) # (eq. 33)
q = R @ q_omega # (eq. 25)
return q / np.linalg.norm(q)
def update(self, q: np.ndarray, gyr: np.ndarray, acc: np.ndarray, mag: np.ndarray, dt: float = None) -> np.ndarray:
"""
Update Attitude with a Recursive OLEQ
Parameters
----------
q : numpy.ndarray
A-priori quaternion.
gyr : numpy.ndarray
Sample of angular velocity in rad/s
acc : numpy.ndarray
Sample of tri-axial Accelerometer in m/s^2
mag : numpy.ndarray
Sample of tri-axial Magnetometer in mT
dt : float, default: None
Time step, in seconds, between consecutive Quaternions.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
dt = self.Dt if dt is None else dt
q_g = self.attitude_propagation(q, gyr, dt) # Quaternion from previous quaternion and angular velocity
q = self.oleq(acc, mag, q_g) # Second stage: Estimate with OLEQ
return q
| 12,868 | 4,655 |
from bitmovin_api_sdk.encoding.configurations.video.vp9.customdata.customdata_api import CustomdataApi
| 103 | 34 |
import concurrent.futures
import os
import subprocess
from random import randrange
from time import sleep

from tester import test_case, Env, NodeConfig, Id, TEST_CHECK, TEST_CHECK_EQUAL,\
                   ClientType, get_distributor_address_path, TransactionStatusCode
def log_subprocess_output(pipe, env):
    """Forward every line produced by the helper script to the test logger.

    The pipe yields raw bytes; decode them first so the log shows readable
    text instead of a bytes repr such as ``b'...\\n'`` (the original logged
    the bytes object directly).
    """
    for raw_line in iter(pipe.readline, b''):  # b'\n'-separated lines
        env.logger.info(f"start_bad_network.sh: {raw_line.decode(errors='replace').rstrip()}")
def run_bad_nodes(env):
    """Run the bad-network start script and return clients for its nodes.

    Returns
    -------
    tuple
        ``(bad_client_pool, bad_node_ids)`` — gRPC clients and their ids.

    Raises
    ------
    RuntimeError
        If the start script exits with a non-zero code. (The original
        returned the bare int ``1`` here, which made every caller's
        2-tuple unpacking fail with an unrelated TypeError.)
    """
    script_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                "..", "..", "test", "integration_test", "tester", "start_bad_network.sh"))
    process = subprocess.Popen([script_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    with process.stdout:
        log_subprocess_output(process.stdout, env)
    exitcode = process.wait()
    if exitcode != 0:
        env.logger.debug(f"Script exit code = {exitcode}")
        raise RuntimeError(f"start_bad_network.sh failed with exit code {exitcode}")

    # All bad nodes share the same ports but live at different addresses.
    bad_node_ids = [
        Id(20203, grpc_port=50051, absolute_address="192.168.100.141"),
        Id(20203, grpc_port=50051, absolute_address="192.168.100.142"),
        Id(20203, grpc_port=50051, absolute_address="192.168.100.143"),
    ]
    env.logger.info("Get client from bad network pool:")
    # `node_id` instead of the original `id`, which shadowed the builtin.
    bad_client_pool = [env.get_grpc_client_to_outside_node(node_id) for node_id in bad_node_ids]
    return bad_client_pool, bad_node_ids
@test_case("connect_node_to_bad_network")
def main(env: Env) -> int:
    """Connect a new node to one bad-network node and verify that a single
    transfer propagates to every node of the bad pool.

    Removed locals that were never read (``update_time``,
    ``transaction_update_time``, ``max_update_request``).
    """
    sync_port = 20100
    grpc_port = 50100
    amount = randrange(1000)
    timeout = 2
    wait_time = 1
    env.logger.debug(f"Random amount for test = {amount}")
    bad_client_pool, bad_node_ids = run_bad_nodes(env)
    main_id = Id(sync_port, grpc_port=grpc_port)
    env.logger.info("Start main node with connecting to bad network nodes:")
    # Connect only to the first node of the bad pool, because its IP belongs
    # to the good network. If we connected to all ids, the bad-pool nodes
    # would synchronize across both network cards.
    env.start_node(NodeConfig(main_id, nodes=[bad_node_ids[0], ]))
    main_client = env.get_client(ClientType.LEGACY_GRPC, main_id)
    env.logger.info("Check all nodes:")
    TEST_CHECK(main_client.connection_test())
    for client in bad_client_pool:
        TEST_CHECK(client.connection_test())
    env.logger.info("All nodes started success.")
    address = main_client.generate_keys(keys_path="keys")
    distributor_address = main_client.load_address(keys_path=get_distributor_address_path())
    TEST_CHECK_EQUAL(main_client.get_balance(address=address.address, timeout=timeout, wait=wait_time), 0)
    env.logger.info("New address created.")
    transaction = main_client.transfer(to_address=address.address, amount=amount,
                                       from_address=distributor_address, fee=0, wait=wait_time, timeout=timeout)
    TEST_CHECK_EQUAL(transaction.status_code, TransactionStatusCode.PENDING)
    TEST_CHECK(main_client.transaction_success_wait(transaction=transaction))
    TEST_CHECK_EQUAL(main_client.get_balance(address=address.address, timeout=timeout, wait=wait_time), amount)
    env.logger.info("Main client transaction checked success.")
    # Every bad-network node must have seen the transfer as well.
    for client in bad_client_pool:
        TEST_CHECK_EQUAL(client.get_balance(address=address.address, timeout=timeout, wait=wait_time), amount)
    env.logger.info("Test ended success.")
    return 0
@test_case("double_connection_in_bad_network")
def main(env: Env) -> int:
    """Connect a new node to ALL bad-network nodes at once and verify that a
    single transfer still propagates everywhere.

    Removed locals that were never read (``update_time``,
    ``transaction_update_time``, ``max_update_request``).
    """
    sync_port = 20100
    grpc_port = 50100
    amount = randrange(1000)
    timeout = 2
    wait_time = 1
    env.logger.debug(f"Random amount for test = {amount}")
    bad_client_pool, bad_node_ids = run_bad_nodes(env)
    main_id = Id(sync_port, grpc_port=grpc_port)
    env.logger.info("Start main node with connecting to bad network nodes:")
    # Connect to all nodes of the bad pool, so the bad-pool nodes
    # synchronize across both network cards.
    env.start_node(NodeConfig(main_id, nodes=bad_node_ids))
    main_client = env.get_client(ClientType.LEGACY_GRPC, main_id)
    env.logger.info("Check all nodes:")
    TEST_CHECK(main_client.connection_test())
    for client in bad_client_pool:
        TEST_CHECK(client.connection_test())
    env.logger.info("All nodes started success.")
    address = main_client.generate_keys(keys_path="keys")
    distributor_address = main_client.load_address(keys_path=get_distributor_address_path())
    TEST_CHECK_EQUAL(main_client.get_balance(address=address.address, timeout=timeout, wait=wait_time), 0)
    env.logger.info("New address created.")
    transaction = main_client.transfer(to_address=address.address, amount=amount,
                                       from_address=distributor_address, fee=0, wait=wait_time, timeout=timeout)
    TEST_CHECK_EQUAL(transaction.status_code, TransactionStatusCode.PENDING)
    TEST_CHECK(main_client.transaction_success_wait(transaction=transaction))
    TEST_CHECK_EQUAL(main_client.get_balance(address=address.address, timeout=timeout, wait=wait_time), amount)
    env.logger.info("Main client transaction checked success.")
    # Every bad-network node must have seen the transfer as well.
    for client in bad_client_pool:
        TEST_CHECK_EQUAL(client.get_balance(address=address.address, timeout=timeout, wait=wait_time), amount)
    env.logger.info("Test ended success.")
    return 0
def node_transfers(client, addresses, transaction_wait, finish_address, amount, timeout,
                   wait_time, transaction_update_time, max_update_request):
    """Issue a batch of transfers from rotating source addresses to
    ``finish_address`` and wait for all of them to succeed.

    BUGFIX: the loop body referenced the undefined names ``node`` and ``env``
    (NameError on the first iteration); it now uses ``client`` and drops the
    impossible log call. The extra parameters are kept for caller
    compatibility even though this helper does not read them.

    NOTE(review): ``to_address`` is computed by the rotation but every
    transfer is sent to ``finish_address`` — confirm whether the rotation was
    meant to pick the destination.
    """
    shift = len(addresses) - 1
    pos = 0
    from_address = addresses[pos]
    transactions = []
    for _ in range(len(addresses) * 5):
        pos = (pos + shift) % len(addresses)
        to_address = addresses[pos]
        transactions.append(client.transfer(to_address=finish_address.address, amount=amount,
                                            from_address=from_address, fee=0, wait=wait_time, timeout=timeout))
        TEST_CHECK_EQUAL(transactions[-1].status_code, TransactionStatusCode.PENDING)
    for transaction in transactions:
        TEST_CHECK(client.transaction_success_wait(transaction=transaction))
@test_case("transaction_stress_in_bad_network")
def main(env: Env) -> int:
    """Stress the bad network with concurrent transfers and verify balances.

    BUGFIXES: ``evn`` -> ``env``; undefined ``number_addresses`` replaced by
    ``len(addresses)``; undefined ``start_amount`` replaced by
    ``start_balance``; undefined ``transaction_wait`` replaced by
    ``wait_time``; several log strings were missing their ``f`` prefix and so
    printed the literal ``{...}`` placeholders; ``concurrent.futures`` is now
    imported at the top of the file.
    """
    amount = randrange(1000)
    start_balance = 5 * amount
    finish_balance = start_balance - amount
    timeout = 2
    wait_time = 1
    transaction_update_time = 2
    max_update_request = 10
    number_addresses_per_thread = 5
    env.logger.debug(f"Random amount for test = {amount}")
    bad_client_pool, bad_node_ids = run_bad_nodes(env)
    env.logger.info("Check all nodes:")
    for client in bad_client_pool:
        TEST_CHECK(client.connection_test())
    env.logger.info("All nodes started success.")
    addresses = [bad_client_pool[0].generate_keys(keys_path=f"keys{i}")
                 for i in range(1, number_addresses_per_thread * len(bad_client_pool) + 1)]
    distributor_address = bad_client_pool[0].load_address(keys_path=get_distributor_address_path())
    for address in addresses:
        TEST_CHECK_EQUAL(bad_client_pool[0].get_balance(address=address.address, timeout=timeout, wait=wait_time), 0)
        env.logger.info(f"Balance of ${address.address} 0")
    env.logger.info(f"New {len(addresses)} addresses created.")
    # Fund every address with the initial balance from the distributor.
    for address in addresses:
        transaction = bad_client_pool[0].transfer(to_address=address.address, amount=start_balance,
                                                  from_address=distributor_address, fee=0, wait=wait_time, timeout=timeout)
        TEST_CHECK_EQUAL(transaction.status_code, TransactionStatusCode.PENDING)
        TEST_CHECK(bad_client_pool[0].transaction_success_wait(transaction=transaction))
    for client in bad_client_pool:
        for address in addresses:
            TEST_CHECK_EQUAL(client.get_balance(address=address.address, timeout=timeout, wait=wait_time), start_balance)
        env.logger.info(f"Node {client.name} check initialize balance success")
    env.logger.info(f"Initialize transfers success, current balance {start_balance}")
    # One worker per bad node, each handling its own slice of addresses.
    with concurrent.futures.ThreadPoolExecutor(len(bad_client_pool)) as executor:
        threads = []
        for i in range(len(bad_client_pool)):
            first_address_number = i * number_addresses_per_thread
            last_address_number = first_address_number + number_addresses_per_thread
            threads.append(
                executor.submit(node_transfers, bad_client_pool[i],
                                addresses[first_address_number:last_address_number], wait_time,
                                addresses[-1], amount, timeout, wait_time, transaction_update_time, max_update_request))
        for thread in threads:
            thread.result()
    env.logger.info(f"Check finish_balance (in this test {finish_balance})")
    for address in addresses[:-1]:
        TEST_CHECK_EQUAL(bad_client_pool[0].get_balance(address=address.address, timeout=timeout,
                                                        wait=wait_time), finish_balance)
    last_address_finish_balance = start_balance + amount * len(bad_client_pool) * number_addresses_per_thread
    env.logger.info(f"Check balance on last address start_balance + all transfers {last_address_finish_balance}")
    TEST_CHECK_EQUAL(bad_client_pool[0].get_balance(address=addresses[-1].address, timeout=timeout,
                                                    wait=wait_time), last_address_finish_balance)
    return 0
| 9,475 | 3,150 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
from ambari_commons.exceptions import FatalException
from mock.mock import patch, MagicMock, call
# Import `sudo` while pretending to be root so it binds its root code paths,
# then re-evaluate the module under that patch.
with patch.object(os, "geteuid", new=MagicMock(return_value=0)):
    from resource_management.core import sudo
    reload(sudo)
import operator
import platform
import StringIO
from unittest import TestCase
# Make path resolution inside the server code treat "" as the filesystem root.
os.environ["ROOT"] = ""
from only_for_platform import get_platform, os_distro_value, PLATFORM_WINDOWS
from ambari_commons import os_utils
if get_platform() != PLATFORM_WINDOWS:
    pass
import shutil
# Work on a throw-away copy of the stock ambari.properties for the whole module.
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
# We have to use this import HACK because the filename contains a dash
_search_file = os_utils.search_file
def search_file_proxy(filename, searchpatch, pathsep=os.pathsep):
    """Stand-in for ``os_utils.search_file`` used by these tests.

    Any lookup of ambari.properties is pinned to the /tmp copy created at
    module import; everything else is delegated to the saved original.
    """
    if "ambari.properties" in filename:
        return "/tmp/ambari.properties"
    # Fall through to the original implementation captured in _search_file.
    # (No `global` statement needed: the name is only read, never rebound.)
    return _search_file(filename, searchpatch, pathsep)
# Install the proxy before any ambari-server module is imported below.
os_utils.search_file = search_file_proxy
# Import the ambari-server entry module and its submodules under heavy
# patching: OS, filesystem and PostgreSQL probes must not touch the build host.
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
    with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
        with patch("os.access", return_value = MagicMock(return_value=True)):
            with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
                with patch("platform.linux_distribution", return_value = os_distro_value):
                    with patch("os.symlink"):
                        with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']):
                            # The module filename contains a dash, hence __import__.
                            _ambari_server_ = __import__('ambari-server')
                            with patch("__builtin__.open"):
                                from ambari_server.properties import Properties
                                from ambari_server.serverConfiguration import configDefaults, JDBC_RCA_PASSWORD_FILE_PROPERTY, JDBC_PASSWORD_PROPERTY, \
                                    JDBC_RCA_PASSWORD_ALIAS, SSL_TRUSTSTORE_PASSWORD_PROPERTY, SECURITY_IS_ENCRYPTION_ENABLED, \
                                    SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED, SSL_TRUSTSTORE_PASSWORD_ALIAS, SECURITY_KEY_ENV_VAR_NAME
                                from ambari_server.setupSecurity import get_alias_string, setup_sensitive_data_encryption, sensitive_data_encryption
                                from ambari_server.serverClassPath import ServerClassPath
# Pin the detected distribution and the PostgreSQL probes for the whole class
# so these tests behave identically on any build host.
@patch.object(platform, "linux_distribution", new = MagicMock(return_value=('Redhat', '6.4', 'Final')))
@patch("ambari_server.dbConfiguration_linux.get_postgre_hba_dir", new = MagicMock(return_value = "/var/lib/pgsql/data"))
@patch("ambari_server.dbConfiguration_linux.get_postgre_running_status", new = MagicMock(return_value = "running"))
class TestSensitiveDataEncryption(TestCase):
    """Tests for the sensitive-data encryption commands in ambari_server.setupSecurity."""
def setUp(self):
out = StringIO.StringIO()
sys.stdout = out
    def tearDown(self):
        # Restore the interpreter's original stdout after each test.
        sys.stdout = sys.__stdout__
    @patch("os.path.isdir", new = MagicMock(return_value=True))
    @patch("os.access", new = MagicMock(return_value=True))
    @patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell", new = MagicMock(return_value = 'test' + os.pathsep + 'path12'))
    @patch("ambari_server.setupSecurity.find_jdk")
    @patch("ambari_server.setupSecurity.run_os_command")
    def test_sensitive_data_encryption(self, run_os_command_mock, get_ambari_properties_method, find_jdk_mock):
        """sensitive_data_encryption builds and runs the Java encryption command line."""
        find_jdk_mock.return_value = "/"
        environ = os.environ.copy()
        run_os_command_mock.return_value = 0,"",""
        properties = Properties()
        get_ambari_properties_method.return_value = properties
        options = self._create_empty_options_mock()
        # "encription" [sic] is only the action argument forwarded to the Java tool.
        sensitive_data_encryption(options, "encription")
        run_os_command_mock.assert_called_with('None -cp test:path12 org.apache.ambari.server.security.encryption.SensitiveDataEncryption encription > /var/log/ambari-server/ambari-server.out 2>&1', environ)
        pass
    @patch("ambari_server.setupSecurity.print_error_msg")
    @patch("ambari_server.setupSecurity.find_jdk")
    def test_sensitive_data_encryption_nojdk(self, find_jdk_mock, print_mock):
        """Without a JDK the command cannot run: expect code 1 and an error message."""
        find_jdk_mock.return_value = None
        options = self._create_empty_options_mock()
        code = sensitive_data_encryption(options, "encription")
        self.assertEquals(code, 1)
        print_mock.assert_called_with("No JDK found, please run the \"setup\" "
                                      "command to install a JDK automatically or install any "
                                      "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
        pass
    @patch("os.path.isdir", new = MagicMock(return_value=True))
    @patch("os.access", new = MagicMock(return_value=True))
    @patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell", new = MagicMock(return_value = 'test' + os.pathsep + 'path12'))
    @patch("ambari_server.setupSecurity.find_jdk")
    @patch("ambari_server.setupSecurity.get_ambari_properties")
    @patch("ambari_server.setupSecurity.run_os_command")
    def test_sensitive_data_decryption_not_persisted(self, run_os_command_mock, get_ambari_properties_method, find_jdk_mock):
        """Decryption with a non-persisted key passes the master key via the environment."""
        find_jdk_mock.return_value = "/"
        environ = os.environ.copy()
        master = "master"
        # The master key is handed over through the security env variable.
        environ[SECURITY_KEY_ENV_VAR_NAME] = master
        run_os_command_mock.return_value = 0,"",""
        properties = Properties()
        get_ambari_properties_method.return_value = properties
        options = self._create_empty_options_mock()
        sensitive_data_encryption(options, "decryption", master)
        run_os_command_mock.assert_called_with('None -cp test:path12 org.apache.ambari.server.security.encryption.SensitiveDataEncryption decryption > /var/log/ambari-server/ambari-server.out 2>&1', environ)
        pass
    @patch("ambari_server.setupSecurity.get_is_persisted")
    @patch("ambari_server.setupSecurity.get_is_secure")
    @patch("os.path.exists")
    @patch("ambari_server.setupSecurity.read_ambari_user")
    @patch("ambari_server.setupSecurity.save_passwd_for_alias")
    @patch("ambari_server.setupSecurity.read_passwd_for_alias")
    @patch("ambari_server.setupSecurity.update_properties_2")
    @patch("ambari_server.setupSecurity.save_master_key")
    @patch("ambari_server.setupSecurity.get_validated_string_input")
    @patch("ambari_server.setupSecurity.get_YN_input")
    @patch("ambari_server.setupSecurity.search_file")
    @patch("ambari_server.setupSecurity.get_ambari_properties")
    @patch("ambari_server.setupSecurity.is_root")
    @patch("ambari_server.setupSecurity.sensitive_data_encryption")
    @patch("ambari_server.setupSecurity.get_original_master_key")
    def test_reset_master_key_not_persisted(self, get_original_master_key_mock, sensitive_data_encryption_metod, is_root_method,
                                            get_ambari_properties_method,
                                            search_file_message, get_YN_input_method,
                                            get_validated_string_input_method, save_master_key_method,
                                            update_properties_method, read_passwd_for_alias_method,
                                            save_passwd_for_alias_method,
                                            read_ambari_user_method,
                                            exists_mock, get_is_secure_method,
                                            get_is_persisted_method):
        """Resetting a non-persisted master key must decrypt with the old key,
        re-encrypt with the new one, and re-alias all stored passwords without
        ever saving the key to disk."""
        is_root_method.return_value = True
        search_file_message.return_value = False
        read_ambari_user_method.return_value = None
        p = Properties()
        FAKE_PWD_STRING = '${alias=fakealias}'
        p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
        get_ambari_properties_method.return_value = p
        master_key = "aaa"
        # Scripted answers to the successive yes/no prompts of the setup flow.
        get_YN_input_method.side_effect = [False, True, False]
        get_validated_string_input_method.return_value = master_key
        get_original_master_key_mock.return_value = master_key
        read_passwd_for_alias_method.return_value = "fakepassword"
        save_passwd_for_alias_method.return_value = 0
        exists_mock.return_value = False
        get_is_secure_method.return_value = True
        get_is_persisted_method.return_value = (False, "")
        options = self._create_empty_options_mock()
        setup_sensitive_data_encryption(options)
        # Decrypt with the old key first, then encrypt with the new one.
        calls = [call(options, "decryption", master_key), call(options, "encryption", master_key)]
        sensitive_data_encryption_metod.assert_has_calls(calls)
        self.assertFalse(save_master_key_method.called)
        self.assertTrue(get_original_master_key_mock.called)
        self.assertTrue(get_YN_input_method.called)
        self.assertTrue(get_validated_string_input_method.called)
        self.assertTrue(update_properties_method.called)
        self.assertTrue(read_passwd_for_alias_method.called)
        # NOTE(review): assertTrue(2, x) is always true; likely meant
        # assertEquals(2, x) — confirm the expected call counts before fixing.
        self.assertTrue(2, read_passwd_for_alias_method.call_count)
        self.assertTrue(2, save_passwd_for_alias_method.call_count)
        self.assertFalse(save_master_key_method.called)
        result_expected = {JDBC_PASSWORD_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           JDBC_RCA_PASSWORD_FILE_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           SSL_TRUSTSTORE_PASSWORD_PROPERTY:
                             get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
                           SECURITY_IS_ENCRYPTION_ENABLED: 'true',
                           SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
        # Compare the property dicts order-insensitively.
        sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
        sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
                          key=operator.itemgetter(0))
        self.assertEquals(sorted_x, sorted_y)
        pass
    @patch("ambari_server.setupSecurity.get_is_persisted")
    @patch("ambari_server.setupSecurity.get_is_secure")
    @patch("os.path.exists")
    @patch("ambari_server.setupSecurity.read_ambari_user")
    @patch("ambari_server.setupSecurity.save_passwd_for_alias")
    @patch("ambari_server.setupSecurity.read_passwd_for_alias")
    @patch("ambari_server.setupSecurity.update_properties_2")
    @patch("ambari_server.setupSecurity.save_master_key")
    @patch("ambari_server.setupSecurity.get_YN_input")
    @patch("ambari_server.setupSecurity.search_file")
    @patch("ambari_server.setupSecurity.get_ambari_properties")
    @patch("ambari_server.setupSecurity.is_root")
    @patch("ambari_server.setupSecurity.sensitive_data_encryption")
    @patch("ambari_server.setupSecurity.get_original_master_key")
    def test_encrypt_part_not_persisted(self, get_original_master_key_mock, sensitive_data_encryption_metod, is_root_method,
                                        get_ambari_properties_method,
                                        search_file_message, get_YN_input_method,
                                        save_master_key_method,
                                        update_properties_method, read_passwd_for_alias_method,
                                        save_passwd_for_alias_method,
                                        read_ambari_user_method,
                                        exists_mock, get_is_secure_method,
                                        get_is_persisted_method):
        """With one password already aliased, only an encryption pass runs:
        no preceding decryption and no key saved to disk."""
        is_root_method.return_value = True
        search_file_message.return_value = False
        read_ambari_user_method.return_value = None
        p = Properties()
        FAKE_PWD_STRING = '${alias=fakealias}'
        # JDBC password is already stored as an alias; the others are not.
        p.process_pair(JDBC_PASSWORD_PROPERTY, get_alias_string(JDBC_RCA_PASSWORD_ALIAS))
        p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
        get_ambari_properties_method.return_value = p
        master_key = "aaa"
        # Scripted answers to the successive yes/no prompts of the setup flow.
        get_YN_input_method.side_effect = [False, False, False]
        get_original_master_key_mock.return_value = master_key
        read_passwd_for_alias_method.return_value = "fakepassword"
        save_passwd_for_alias_method.return_value = 0
        exists_mock.return_value = False
        get_is_secure_method.return_value = True
        get_is_persisted_method.return_value = (False, "filePath")
        options = self._create_empty_options_mock()
        setup_sensitive_data_encryption(options)
        # Only the encryption pass is expected here.
        calls = [call(options, "encryption", master_key)]
        sensitive_data_encryption_metod.assert_has_calls(calls)
        self.assertFalse(save_master_key_method.called)
        self.assertTrue(get_YN_input_method.called)
        self.assertTrue(get_original_master_key_mock.called)
        self.assertTrue(update_properties_method.called)
        self.assertTrue(read_passwd_for_alias_method.called)
        # NOTE(review): assertTrue(2, x) is always true; likely meant
        # assertEquals(2, x) — confirm the expected call counts before fixing.
        self.assertTrue(2, read_passwd_for_alias_method.call_count)
        self.assertTrue(2, save_passwd_for_alias_method.call_count)
        self.assertFalse(save_master_key_method.called)
        result_expected = {JDBC_PASSWORD_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           JDBC_RCA_PASSWORD_FILE_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           SSL_TRUSTSTORE_PASSWORD_PROPERTY:
                             get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
                           SECURITY_IS_ENCRYPTION_ENABLED: 'true',
                           SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
        # Compare the property dicts order-insensitively.
        sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
        sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
                          key=operator.itemgetter(0))
        self.assertEquals(sorted_x, sorted_y)
        pass
    @patch("ambari_server.setupSecurity.get_is_persisted")
    @patch("ambari_server.setupSecurity.get_is_secure")
    @patch("os.path.exists")
    @patch("ambari_server.setupSecurity.read_ambari_user")
    @patch("ambari_server.setupSecurity.save_passwd_for_alias")
    @patch("ambari_server.setupSecurity.read_passwd_for_alias")
    @patch("ambari_server.setupSecurity.save_master_key")
    @patch("ambari_server.setupSecurity.get_YN_input")
    @patch("ambari_server.setupSecurity.search_file")
    @patch("ambari_server.setupSecurity.get_ambari_properties")
    @patch("ambari_server.setupSecurity.is_root")
    @patch("ambari_server.setupSecurity.get_original_master_key")
    def test_decrypt_missed_masterkey_not_persisted(self, get_original_master_key_mock, is_root_method,
                                                    get_ambari_properties_method,
                                                    search_file_message, get_YN_input_method,
                                                    save_master_key_method,
                                                    read_passwd_for_alias_method,
                                                    save_passwd_for_alias_method,
                                                    read_ambari_user_method,
                                                    exists_mock, get_is_secure_method,
                                                    get_is_persisted_method):
        """When the original master key cannot be obtained, setup must abort
        with exit code 1 and save nothing."""
        is_root_method.return_value = True
        search_file_message.return_value = False
        read_ambari_user_method.return_value = None
        p = Properties()
        FAKE_PWD_STRING = '${alias=fakealias}'
        p.process_pair(JDBC_PASSWORD_PROPERTY, get_alias_string(JDBC_RCA_PASSWORD_ALIAS))
        p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
        get_ambari_properties_method.return_value = p
        # Scripted answers to the yes/no prompts of the setup flow.
        get_YN_input_method.side_effect = [True, False]
        # The master key cannot be recovered — decryption is impossible.
        get_original_master_key_mock.return_value = None
        read_passwd_for_alias_method.return_value = "fakepassword"
        save_passwd_for_alias_method.return_value = 0
        exists_mock.return_value = False
        get_is_secure_method.return_value = True
        get_is_persisted_method.return_value = (False, "filePath")
        options = self._create_empty_options_mock()
        self.assertTrue(setup_sensitive_data_encryption(options) == 1)
        self.assertFalse(save_master_key_method.called)
        self.assertTrue(get_YN_input_method.called)
        pass
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_setup_sensitive_data_encryption_no_ambari_prop_not_root(self, is_root_method, get_ambari_properties_method):
is_root_method.return_value = False
get_ambari_properties_method.return_value = -1
options = self._create_empty_options_mock()
try:
setup_sensitive_data_encryption(options)
self.fail("Should throw exception")
except FatalException as fe:
self.assertTrue('Failed to read properties file.' == fe.reason)
pass
pass
    @patch("os.path.exists")
    @patch("ambari_server.setupSecurity.get_is_secure")
    @patch("ambari_server.setupSecurity.get_is_persisted")
    @patch("ambari_server.setupSecurity.remove_password_file")
    @patch("ambari_server.setupSecurity.save_passwd_for_alias")
    @patch("ambari_server.setupSecurity.read_master_key")
    @patch("ambari_server.setupSecurity.read_ambari_user")
    @patch("ambari_server.setupSecurity.update_properties_2")
    @patch("ambari_server.setupSecurity.save_master_key")
    @patch("ambari_server.setupSecurity.get_YN_input")
    @patch("ambari_server.setupSecurity.get_ambari_properties")
    @patch("ambari_server.setupSecurity.is_root")
    @patch("ambari_server.setupSecurity.sensitive_data_encryption")
    @patch("ambari_server.setupSecurity.adjust_directory_permissions")
    def test_setup_sensitive_data_encryption_not_persist(self, adjust_directory_permissions_mock, sensitive_data_encryption_metod, is_root_method,
                                                         get_ambari_properties_method, get_YN_input_method, save_master_key_method,
                                                         update_properties_method,
                                                         read_ambari_user_method, read_master_key_method,
                                                         save_passwd_for_alias_method, remove_password_file_method,
                                                         get_is_persisted_method, get_is_secure_method, exists_mock):
        """First-time encryption without persisting the key: passwords are
        aliased, plaintext files removed, key not written to disk."""
        is_root_method.return_value = True
        p = Properties()
        # Passwords start in plaintext form, not as aliases.
        FAKE_PWD_STRING = "fakepasswd"
        p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
        p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
        get_ambari_properties_method.return_value = p
        master_key = "aaa"
        read_master_key_method.return_value = master_key
        # Answer "no" to persisting the key.
        get_YN_input_method.return_value = False
        read_ambari_user_method.return_value = "asd"
        save_passwd_for_alias_method.return_value = 0
        get_is_persisted_method.return_value = (True, "filepath")
        get_is_secure_method.return_value = False
        exists_mock.return_value = False
        options = self._create_empty_options_mock()
        setup_sensitive_data_encryption(options)
        self.assertTrue(get_YN_input_method.called)
        self.assertTrue(read_master_key_method.called)
        self.assertTrue(read_ambari_user_method.called)
        self.assertTrue(update_properties_method.called)
        self.assertFalse(save_master_key_method.called)
        self.assertTrue(save_passwd_for_alias_method.called)
        self.assertEquals(2, save_passwd_for_alias_method.call_count)
        self.assertTrue(remove_password_file_method.called)
        self.assertTrue(adjust_directory_permissions_mock.called)
        sensitive_data_encryption_metod.assert_called_with(options, "encryption", master_key)
        result_expected = {JDBC_PASSWORD_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           JDBC_RCA_PASSWORD_FILE_PROPERTY:
                             get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
                           SSL_TRUSTSTORE_PASSWORD_PROPERTY:
                             get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
                           SECURITY_IS_ENCRYPTION_ENABLED: 'true',
                           SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
        # Compare the property dicts order-insensitively.
        sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
        sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
                          key=operator.itemgetter(0))
        self.assertEquals(sorted_x, sorted_y)
        pass
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
def test_setup_sensitive_data_encryption_persist(self, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method, save_master_key_method,
update_properties_method,
read_ambari_user_method, read_master_key_method,
get_is_persisted_method, get_is_secure_method, exists_mock,
save_passwd_for_alias_method):
is_root_method.return_value = True
p = Properties()
FAKE_PWD_STRING = "fakepasswd"
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
search_file_message.return_value = "propertiesfile"
master_key = "aaa"
read_master_key_method.return_value = master_key
get_YN_input_method.return_value = True
read_ambari_user_method.return_value = None
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = False
exists_mock.return_value = False
save_passwd_for_alias_method.return_value = 0
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(read_ambari_user_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(save_master_key_method.called)
sensitive_data_encryption_metod.assert_called_with(options, "encryption")
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.read_master_key")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
def test_reset_master_key_persisted(self, get_is_persisted_method, get_is_secure_method, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method,
save_master_key_method, update_properties_method,
read_passwd_for_alias_method, save_passwd_for_alias_method,
read_ambari_user_method, exists_mock,
read_master_key_method):
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
master_key = "aaa"
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = True
get_YN_input_method.side_effect = [False, True, True]
read_master_key_method.return_value = master_key
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "decryption"), call(options, "encryption")]
sensitive_data_encryption_metod.assert_has_calls(calls)
self.assertTrue(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertTrue(2, read_passwd_for_alias_method.call_count)
self.assertTrue(2, save_passwd_for_alias_method.call_count)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.sensitive_data_encryption")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
def test_decrypt_sensitive_data_persister(self, get_is_persisted_method, get_is_secure_method, sensitive_data_encryption_metod, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method,
update_properties_method,
read_passwd_for_alias_method, save_passwd_for_alias_method,
read_ambari_user_method, exists_mock):
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = True
get_YN_input_method.side_effect = [True, False]
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
options = self._create_empty_options_mock()
setup_sensitive_data_encryption(options)
calls = [call(options, "decryption")]
sensitive_data_encryption_metod.assert_has_calls(calls)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertTrue(2, read_passwd_for_alias_method.call_count)
self.assertTrue(2, save_passwd_for_alias_method.call_count)
result_expected = {JDBC_PASSWORD_PROPERTY: "fakepassword",
JDBC_RCA_PASSWORD_FILE_PROPERTY: "fakepassword",
SSL_TRUSTSTORE_PASSWORD_PROPERTY: "fakepassword",
SECURITY_IS_ENCRYPTION_ENABLED: 'false',
SECURITY_SENSITIVE_DATA_ENCRYPTON_ENABLED: 'false'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
def _create_empty_options_mock(self):
options = MagicMock()
options.ldap_enabled = None
options.ldap_enabled_ambari = None
options.ldap_manage_services = None
options.ldap_enabled_services = None
options.ldap_url = None
options.ldap_primary_host = None
options.ldap_primary_port = None
options.ldap_secondary_url = None
options.ldap_secondary_host = None
options.ldap_secondary_port = None
options.ldap_ssl = None
options.ldap_user_class = None
options.ldap_user_attr = None
options.ldap_user_group_member_attr = None
options.ldap_group_class = None
options.ldap_group_attr = None
options.ldap_member_attr = None
options.ldap_dn = None
options.ldap_base_dn = None
options.ldap_manager_dn = None
options.ldap_manager_password = None
options.ldap_save_settings = None
options.ldap_referral = None
options.ldap_bind_anonym = None
options.ldap_force_setup = None
options.ambari_admin_username = None
options.ambari_admin_password = None
options.ldap_sync_admin_name = None
options.ldap_sync_username_collisions_behavior = None
options.ldap_sync_disable_endpoint_identification = None
options.ldap_force_lowercase_usernames = None
options.ldap_pagination_enabled = None
options.ldap_sync_admin_password = None
options.custom_trust_store = None
options.trust_store_type = None
options.trust_store_path = None
options.trust_store_password = None
options.security_option = None
options.api_ssl = None
options.api_ssl_port = None
options.import_cert_path = None
options.import_cert_alias = None
options.pem_password = None
options.import_key_path = None
options.master_key = None
options.master_key_persist = None
options.jaas_principal = None
options.jaas_keytab = None
return options
| 32,488 | 10,532 |
# Demo Python Operators
'''
Python Bitwise Operators
~ NOT Inverts all the bits
'''
a, b = 60, 13          # 0b111100 and 0b1101
variable1 = ~a         # bitwise NOT: ~x == -x - 1
variable2 = ~b


def _bits(value):
    # Binary rendering shared by every printout below.
    return format(value, 'b')


print("Variable 'a': ", a, "Variable 'a' en binario: ", _bits(a))
print("Procesando '~a': ", variable1, "Resultado en binario: ", _bits(variable1))
print("Variable 'b': ", b, "Variable 'b' en binario: ", _bits(b))
print("Procesando '~b': ", variable2, "Resultado en binario: ", _bits(variable2))
print("Procesando 'a & b': ", a & b, "Resultado en binario: ", _bits(a & b))
print("Procesando 'a | b': ", a | b, "Resultado en binario: ", _bits(a | b))
print("Procesando 'a ^ b': ", a ^ b, "Resultado en binario: ", _bits(a ^ b))
from mir.tools.mir_storage_ops import MirStorageOps
class TestDatasetController:
    def test_get_dataset_info(self, test_client, mocker):
        """GET dataset info returns the mocked mir payload, json-normalized."""
        user_id, repo_id, branch_id = "user_id", "repo_id", "branch_id"
        dataset_payload = {
            "class_names_count": {'cat': 34},
            "class_ids_count": {3: 34},
            "ignored_labels": {'cat': 5},
            "negative_info": {
                "negative_images_cnt": 0,
                "project_negative_images_cnt": 0,
            },
            "total_images_cnt": 1,
        }
        mocker.patch.object(MirStorageOps, "load_single_dataset", return_value=dataset_payload)
        resp = test_client.get(f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/datasets")
        assert resp.status_code == 200
        # json.dumps turns the int class id 3 into the string "3"; everything
        # else round-trips unchanged.
        expected = dict(dataset_payload, class_ids_count={'3': 34})
        assert resp.json()["result"] == expected
| 1,398 | 424 |
from flask_taxonomies.constants import INCLUDE_DATA, INCLUDE_ANCESTORS, INCLUDE_URL, INCLUDE_SELF, \
INCLUDE_ANCESTOR_LIST, INCLUDE_ANCESTOR_TAG, INCLUDE_PARENT, INCLUDE_LEVEL
# Default representation config for flask-taxonomies: the "taxonomy"
# representation enables every INCLUDE_* flag imported above, excludes
# nothing, and applies no field selection or extra options.
FLASK_TAXONOMIES_REPRESENTATION = {
    "taxonomy": {
        'include': [INCLUDE_DATA, INCLUDE_ANCESTORS, INCLUDE_URL, INCLUDE_SELF,
                    INCLUDE_ANCESTOR_LIST, INCLUDE_ANCESTOR_TAG, INCLUDE_PARENT, INCLUDE_LEVEL],
        'exclude': [],
        'select': None,
        'options': {}
    }
}
| 489 | 216 |
from core import app, data, utils
from core.ctrl import mailer
def list_notifications(filter_data, sort_data=None):
    """Return notifications matching *filter_data*, optionally sorted by *sort_data*."""
    return data.ex({
        'collection': 'notifications',
        'filter': filter_data,
        'sort': sort_data,
    })
def email(case, template, subject, html_message_data, att=None):
    """Send a templated e-mail to every user who opted into *case*.

    case: users-collection boolean field selecting recipients.
    template: mailer template name rendered per user.
    subject: e-mail subject line.
    html_message_data: base template context; a 'user' entry is added per
        recipient.
    att: optional attachment passed through to mailer.send.

    Skipped entirely when running as the 'system' user.
    """
    if app.config['user']['username'] != 'system':
        valid_users = data.collect(data.ex({
            'collection': 'users',
            'filter': {
                case: True
            }
        }))
        for user in valid_users:
            # Build a per-user copy: the original mutated the caller's dict,
            # leaking the last recipient back via html_message_data['user'].
            per_user_data = dict(html_message_data)
            per_user_data['user'] = user
            html_message = mailer.assign_template(
                template, per_user_data)
            mailer.send(
                user['email'], subject, html_message, att)
def db(obj_type, obj_id, message, json_data=''):
    """Persist a notification document for the current user."""
    record = {
        'user_id': app.config['user']['_id'],
        'created_at': utils.now(),
        'obj_type': obj_type,
        'obj_id': obj_id,
        'message': message,
        'json_data': json_data,
    }
    app.db['notifications'].insert_one(record)
| 1,145 | 345 |
# Generated by Django 2.0.6 on 2018-06-17 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional free-text 'close_reason'
    # column to the Ticket model.
    dependencies = [
        ('tickets', '0009_auto_20180522_2318'),
    ]
    operations = [
        migrations.AddField(
            model_name='ticket',
            name='close_reason',
            # blank=True makes the field optional in forms/admin.
            field=models.TextField(blank=True, verbose_name='close reason'),
        ),
    ]
| 416 | 148 |
__author__ = 'rcj1492'
__created__ = '2016.02'
__license__ = 'MIT' | 66 | 36 |
class AuthCommand(object):
    """Prints the URL a user must open to authenticate / claim recorded asciicasts."""

    def __init__(self, api_url, api_token):
        self.api_url = api_url
        self.api_token = api_token

    def execute(self):
        auth_url = '{0}/connect/{1}'.format(self.api_url, self.api_token)
        message = ('Open following URL in your browser to authenticate and/or '
                   'claim recorded asciicasts:\n%s' % auth_url)
        print(message)
# -*- coding: utf-8 -*-
"""
Classes for validating and parsing hOCR, close to the spec.
"""
from .spec import HocrSpec
from .validate import HocrValidator
| 156 | 54 |
#!/usr/bin/env python
import sys, os
# Make the repository root importable so this example runs without installing
# the messagebird package.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import messagebird
# Uncomment and fill in these constants to run the example; the NameError
# probes below print a friendly message when they are missing.
# ACCESS_KEY = ''
# MESSAGE_ID = ''
try:
  ACCESS_KEY
except NameError:
  print('You need to set an ACCESS_KEY constant in this file')
  sys.exit(1)
try:
  MESSAGE_ID
except NameError:
  print('You need to set a MESSAGE_ID constant in this file')
  sys.exit(1)
try:
  # Create a MessageBird client with the specified ACCESS_KEY.
  client = messagebird.Client(ACCESS_KEY)
  # Fetch the Message object for the specified MESSAGE_ID.
  msg = client.message(MESSAGE_ID)
  # Print the object information.
  print('\nThe following information was returned as a Message object:\n')
  print('  id : %s' % msg.id)
  print('  href : %s' % msg.href)
  print('  direction : %s' % msg.direction)
  print('  type : %s' % msg.type)
  print('  originator : %s' % msg.originator)
  print('  body : %s' % msg.body)
  print('  reference : %s' % msg.reference)
  print('  validity : %s' % msg.validity)
  print('  gateway : %s' % msg.gateway)
  print('  typeDetails : %s' % msg.typeDetails)
  print('  datacoding : %s' % msg.datacoding)
  print('  mclass : %s' % msg.mclass)
  print('  scheduledDatetime : %s' % msg.scheduledDatetime)
  print('  createdDatetime : %s' % msg.createdDatetime)
  print('  recipients : %s\n' % msg.recipients)
except messagebird.client.ErrorException as e:
  # The API reports problems as a list of structured errors.
  print('\nAn error occured while requesting a Message object:\n')
  for error in e.errors:
    print('  code        : %d' % error.code)
    print('  description : %s' % error.description)
    print('  parameter   : %s\n' % error.parameter)
| 1,765 | 597 |
import os
import os.path
import shutil
import urllib.parse
def openReadFile(filePath):
    # Return the full text of filePath. On any failure, report and abort the
    # whole run (callers rely on never receiving a partial/None result).
    try:
        # 'with' guarantees the handle is closed even if read() raises;
        # the original leaked the descriptor in that case.
        with open(filePath) as sourceHandle:
            return sourceHandle.read()
    except Exception:
        print("Problem reading file, aborting.")
        quit()
def createBackups(sourceFile, sourceFolder):
    # Create a fresh ../backup directory (relative to sourceFolder) holding a
    # copy of the source document plus the entire images folder.
    # NOTE(review): chdir changes the process-wide working directory and is
    # never restored — later relative paths in this script depend on it.
    os.chdir(sourceFolder)
    os.chdir("../")
    if os.path.isdir(f'{os.getcwd()}/backup'):
        # Drop any previous backup so makedirs/copytree below cannot collide.
        shutil.rmtree(f'{os.getcwd()}/backup')
    else:
        pass
    try:
        os.makedirs("backup")
        shutil.copy(sourceFile, f'{os.getcwd()}/backup')
        shutil.copytree(sourceFolder, f'{os.getcwd()}/backup/images')
        print("• Backups created.")
    except Exception as e:
        # Backups are mandatory: abort the whole run if they cannot be made.
        print(f'!!! Exception raised:\n{e}\n-= Shutting Down Process =-')
        quit()
def assembleNewNames(prefixString, filteredImages):
    """Pair each unique image reference with a new zero-padded name.

    Returns a list of (original_ref, new_name) tuples. Duplicate references
    still consume a sequence number but produce no tuple, matching the
    original numbering behavior.
    """
    renamed = []
    seen = set()
    for counter, image in enumerate(filteredImages, start=1):
        if image in seen:
            print(f'- Skipping duplicate reference: {image}')
            continue
        extension = ""
        for candidate in (".jpg", ".png", ".gif"):
            if candidate in image:
                extension = candidate
                break
        # {:03d} reproduces the original manual 00/0 padding for any counter.
        renamed.append((image, f'{prefixString}_{counter:03d}{extension}'))
        seen.add(image)
    return renamed
def updateSourceFile(workingFile, renamedImageSets):
    """Return *workingFile* text with every old image reference replaced by its new name."""
    for old_name, new_name in renamedImageSets:
        try:
            print(f'-- Replacing {old_name} with {new_name}')
            workingFile = workingFile.replace(old_name, new_name)
        except Exception as e:
            # Best-effort: report the failed replacement and keep going.
            print(f'!!! Exception replacing {old_name} with reason given:\n{e}')
    return workingFile
def saveSourceDocument(sourceFile, updatedFile):
    # Overwrite sourceFile with the updated document text. 'with' guarantees
    # the handle is closed/flushed even if write() raises; the original
    # leaked the descriptor in that case.
    try:
        with open(sourceFile, "w") as rewriteHandle:
            rewriteHandle.write(updatedFile)
    except Exception as e:
        # Best-effort: report the failure but let the caller continue.
        print(f'!!! Exception raised while saving:\n{e}')
def renameImageFiles(sourceFolder, renamedImageSets):
    # Rename each image file on disk to its new name. If the plain rename
    # fails with OSError, retry with the URL-decoded source name (document
    # references may be percent-encoded, e.g. "my%20image.jpg").
    for imageRef in renamedImageSets:
        try:
            os.rename(f'{sourceFolder}/{imageRef[0]}', f'{sourceFolder}/{imageRef[1]}')
            print(f'-- Image file {imageRef[0]} being renamed {imageRef[1]}')
        except OSError:
            # NOTE(review): unquote is applied to the whole joined path —
            # assumes sourceFolder itself contains no percent-escapes.
            os.rename(urllib.parse.unquote(f'{sourceFolder}/{imageRef[0]}'), f'{sourceFolder}/{imageRef[1]}')
            print(f'-- Image file {urllib.parse.unquote(imageRef[0])} being renamed {imageRef[1]}')
        except Exception as e:
            # Best-effort: report and continue with the remaining files.
            print(f'!!! Exception raised for {imageRef[0]}: {e}')
from zeropdk.pcell import (
PCell,
PCellParameter,
TypeDouble,
TypeInt,
TypeLayer,
TypePoint,
Port,
ParamContainer,
)
from zeropdk.layout import insert_shape
from zeropdk.layout.polygons import rectangle
from klayout.db import DPoint, DVector
# --- Shared PCell parameter definitions --------------------------------------
# Module-level PCellParameter objects reused by the cell classes below
# (OrientedCell, DCPad, DCPadArray). Lengths are in micrometres (um).
pad_width = PCellParameter(
    name="pad_width",
    type=TypeDouble,
    description="Width of electrical pad.",
    default=120,
    unit="um",
)
pad_height = PCellParameter(
    name="pad_height",
    type=TypeDouble,
    description="Height of electrical pad.",
    default=120,
    unit="um",
)
port_width = PCellParameter(
    name="port_width",
    type=TypeDouble,
    description="Port width (same as trace width)",
    default=20,
    unit="um",
)
pad_array_count = PCellParameter(
    name="pad_array_count", type=TypeInt, description="Number of pads", default=10
)
pad_array_pitch = PCellParameter(
    name="pad_array_pitch",
    type=TypeDouble,
    description="Pad array pitch",
    default=150,
    unit="um",
)
# Coordinate-frame parameters: cells are drawn relative to an origin with
# configurable (ex, ey) axis unit vectors.
origin = PCellParameter(name="origin", type=TypePoint, description="Origin", default=DPoint(0, 0))
ex = PCellParameter(
    name="ex", type=TypePoint, description="x-axis unit vector", default=DPoint(1, 0)
)
ey = PCellParameter(
    name="ey", type=TypePoint, description="y-axis unit vector", default=DPoint(0, 1)
)
# Target layers for the pad metal and its passivation opening.
layer_metal = PCellParameter(name="layer_metal", type=TypeLayer, description="Metal Layer")
layer_opening = PCellParameter(name="layer_opening", type=TypeLayer, description="Open Layer")
class OrientedCell(PCell):
    """A standard cell that has the following parameters:

    - origin: Point
    - ex: unit vector of x axis
    - ey: unit vector of y axis
    """

    params = ParamContainer(origin, ex, ey)

    def origin_ex_ey(self):
        """Return the cell frame as (DPoint origin, DVector ex, DVector ey)."""
        cell_params = self.params
        return (
            DPoint(cell_params["origin"]),
            DVector(cell_params.ex),
            DVector(cell_params.ey),
        )
class DCPad(OrientedCell):
    """A standard DC pad.
    Ports: el0
    """
    params = ParamContainer(pad_width, pad_height, port_width, layer_metal, layer_opening)
    def draw(self, cell):
        # Draw the metal pad plus a slightly smaller opening window and
        # return (cell, ports) with the single electrical port "el0".
        layout = cell.layout()
        origin, ex, ey = self.origin_ex_ey()
        cp = self.params
        def make_shape_from_dpolygon(dpoly, resize_dx, dbu, layer):
            # Grow/shrink the polygon by resize_dx (um) and insert it into layer.
            dpoly.resize(resize_dx, dbu)
            # if resize_dx > dbu:
            # dpoly.round_corners(resize_dx, 100)
            insert_shape(cell, layer, dpoly)
            return dpoly
        def make_pad(origin, pad_width, pad_height, ex, ey):
            pad_square = rectangle(origin, pad_width, pad_height, ex, ey)
            make_shape_from_dpolygon(pad_square, 0, layout.dbu, cp.layer_metal)
            # Opening shrunk by 2.5 um — presumably so the opening sits
            # inside the metal edge; confirm against process rules.
            make_shape_from_dpolygon(pad_square, -2.5, layout.dbu, cp.layer_opening)
        # Pad is centered above origin along ey; port sits at the pad's
        # bottom edge pointing outward (-ey).
        make_pad(origin + cp.pad_height * ey / 2, cp.pad_width, cp.pad_height, ex, ey)
        port = Port("el0", origin + cp.port_width * ey / 2, -ey, cp.port_width, "el_dc")
        return cell, {"el0": port}
class DCPadArray(DCPad):
    """Linear array of DC pads spaced along ex by pad_array_pitch.

    Ports: el_0 ... el_{pad_array_count - 1}
    """

    params = ParamContainer(pad_array_count, pad_array_pitch)

    def draw(self, cell):
        cp = self.params
        origin, ex, _ = self.origin_ex_ey()
        ports = {}
        for idx in range(cp.pad_array_count):
            # Each pad inherits this array's parameters and is shifted along ex.
            pad = DCPad(name=f"pad_{idx}", params=cp)
            placed_ports = pad.place_cell(cell, origin + cp.pad_array_pitch * idx * ex)
            ports[f"el_{idx}"] = placed_ports["el0"].rename(f"el_{idx}")
        return cell, ports
| 3,554 | 1,236 |
# -*- coding: utf-8 -*-
"""
Functions and classes for manipulating 10X Visium spatial transcriptomic (ST) and
histological imaging data
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
import scanpy as sc
sc.set_figure_params(dpi=100, dpi_save=400)
sns.set_style("white")
plt.rcParams["font.family"] = "monospace"
from math import ceil
from matplotlib.lines import Line2D
from scipy.spatial import cKDTree
from scipy.interpolate import interpnd, griddata
from sklearn.metrics.pairwise import euclidean_distances
def bin_threshold(mat, threshmin=None, threshmax=0.5):
    """
    Generate binary segmentation from probabilities

    Parameters
    ----------
    mat : np.array
        The data
    threshmin : float or None
        Minimum value on [0,1] to assign binary IDs from probabilities.
        Values lower than threshmin -> 1. None disables the lower bound.
    threshmax : float or None
        Maximum value on [0,1] to assign binary IDs from probabilities. Values
        higher than threshmax -> 1. Values within [threshmin, threshmax] -> 0.
        None disables the upper bound.

    Returns
    -------
    a : np.ma.array
        Thresholded matrix of 0s and 1s
    """
    a = np.ma.array(mat, copy=True)
    mask = np.zeros(a.shape, dtype=bool)
    # Compare against None explicitly: the original truthiness checks
    # silently ignored a legitimate threshold value of 0.
    if threshmin is not None:
        mask |= (a < threshmin).filled(False)
    if threshmax is not None:
        mask |= (a > threshmax).filled(False)
    # Out-of-range pixels become 1, in-range pixels become 0.
    a[mask] = 1
    a[~mask] = 0
    return a
def map_pixels(adata, filter_label="in_tissue", img_key="hires", library_id=None):
    """
    Map spot IDs to 'pixel space' by assigning spot ID values to evenly spaced grid

    Parameters
    ----------
    adata : AnnData.anndata
        The data
    filter_label : str or None
        adata.obs column key that contains binary labels for filtering barcodes. If
        None, do not filter.
    img_key : str
        adata.uns key containing the image to use for mapping
    library_id : str or None
        Key into adata.uns["spatial"]; if None, the first library found
        there is used.

    Returns
    -------
    adata : AnnData.anndata
        with the following attributes:
        adata.uns["pixel_map_df"] : pd.DataFrame
            Long-form dataframe of Visium spot barcode IDs, pixel coordinates, and
            .obs metadata
        adata.uns["pixel_map"] : np.array
            Pixel space array of Visium spot barcode IDs
    """
    adata.uns["pixel_map_params"] = {
        "img_key": img_key
    } # create params dict for future use
    # add library_id key to params
    if library_id is None:
        # chained assignment: the default library is stored in both the local
        # variable and the params dict
        library_id = adata.uns["pixel_map_params"]["library_id"] = list(
            adata.uns["spatial"].keys()
        )[0]
    else:
        adata.uns["pixel_map_params"]["library_id"] = library_id
    # first get center-to-face pixel distance of hexagonal Visium spots
    # NOTE(review): euclidean_distances builds the full n_spots x n_spots
    # matrix — fine for one Visium capture, memory-heavy beyond that.
    dist = euclidean_distances(adata.obsm["spatial"])
    adata.uns["pixel_map_params"]["ctr_to_face"] = (
        np.unique(dist)[np.unique(dist) != 0].min() / 2
    )
    # also save center-to-vertex pixel distance as vadata attribute
    # (hexagon geometry: ctr_to_vert = ctr_to_face / cos(30 deg))
    adata.uns["pixel_map_params"]["ctr_to_vert"] = adata.uns["pixel_map_params"][
        "ctr_to_face"
    ] / np.cos(30 * (np.pi / 180))
    # get the spot radius from adata.uns["spatial"] as well
    adata.uns["pixel_map_params"]["radius"] = (
        adata.uns["spatial"][library_id]["scalefactors"]["spot_diameter_fullres"] / 2
    )
    # get scale factor from adata.uns["spatial"]
    adata.uns["pixel_map_params"]["scalef"] = adata.uns["spatial"][library_id][
        "scalefactors"
    ][f"tissue_{img_key}_scalef"]
    # determine pixel bounds from spot coords, adding center-to-face distance
    adata.uns["pixel_map_params"]["xmin_px"] = int(
        np.floor(
            adata.uns["pixel_map_params"]["scalef"]
            * (
                adata.obsm["spatial"][:, 0].min()
                - adata.uns["pixel_map_params"]["radius"]
            )
        )
    )
    adata.uns["pixel_map_params"]["xmax_px"] = int(
        np.ceil(
            adata.uns["pixel_map_params"]["scalef"]
            * (
                adata.obsm["spatial"][:, 0].max()
                + adata.uns["pixel_map_params"]["radius"]
            )
        )
    )
    adata.uns["pixel_map_params"]["ymin_px"] = int(
        np.floor(
            adata.uns["pixel_map_params"]["scalef"]
            * (
                adata.obsm["spatial"][:, 1].min()
                - adata.uns["pixel_map_params"]["radius"]
            )
        )
    )
    adata.uns["pixel_map_params"]["ymax_px"] = int(
        np.ceil(
            adata.uns["pixel_map_params"]["scalef"]
            * (
                adata.obsm["spatial"][:, 1].max()
                + adata.uns["pixel_map_params"]["radius"]
            )
        )
    )
    print("Creating pixel grid and mapping to nearest barcode coordinates")
    # define grid for pixel space
    grid_y, grid_x = np.mgrid[
        adata.uns["pixel_map_params"]["ymin_px"] : adata.uns["pixel_map_params"][
            "ymax_px"
        ],
        adata.uns["pixel_map_params"]["xmin_px"] : adata.uns["pixel_map_params"][
            "xmax_px"
        ],
    ]
    # map barcodes to pixel coordinates
    pixel_coords = np.column_stack((grid_x.ravel(order="C"), grid_y.ravel(order="C")))
    # nearest-neighbor interpolation: every pixel receives the barcode of the
    # closest spot center (no distance cutoff here; see trim_image)
    barcode_list = griddata(
        np.multiply(adata.obsm["spatial"], adata.uns["pixel_map_params"]["scalef"]),
        adata.obs_names,
        (pixel_coords[:, 0], pixel_coords[:, 1]),
        method="nearest",
    )
    # save grid_x and grid_y to adata.uns
    adata.uns["grid_x"], adata.uns["grid_y"] = grid_x, grid_y
    # put results into DataFrame for filtering and reindexing
    print("Saving barcode mapping to adata.uns['pixel_map_df'] and adding metadata")
    adata.uns["pixel_map_df"] = pd.DataFrame(pixel_coords, columns=["x", "y"])
    # add barcodes to long-form dataframe
    adata.uns["pixel_map_df"]["barcode"] = barcode_list
    # merge master df with self.adata.obs for metadata
    adata.uns["pixel_map_df"] = adata.uns["pixel_map_df"].merge(
        adata.obs, how="outer", left_on="barcode", right_index=True
    )
    # filter using label from adata.obs if desired (i.e. "in_tissue")
    if filter_label is not None:
        print(
            "Filtering barcodes using labels in self.adata.obs['{}']".format(
                filter_label
            )
        )
        # set empty pixels (no Visium spot) to "none"
        adata.uns["pixel_map_df"].loc[
            adata.uns["pixel_map_df"][filter_label] == 0,
            "barcode",
        ] = "none"
        # subset the entire anndata object using filter_label
        adata = adata[adata.obs[filter_label] == 1, :].copy()
        print("New size: {} spots x {} genes".format(adata.n_obs, adata.n_vars))
    print("Done!")
    return adata
def trim_image(
    adata, distance_trim=False, threshold=None, channels=None, plot_out=True, **kwargs
):
    """
    Trim pixels in image using pixel map output from Visium barcodes

    Modifies *adata* in place (no return value).

    Parameters
    ----------
    adata : AnnData.anndata
        The data
    distance_trim : bool
        Manually trim pixels by distance to nearest Visium spot center
    threshold : int or None
        Number of pixels from nearest Visium spot center to call barcode ID. Ignored
        if `distance_trim==False`.
    channels : list of str or None
        Names of image channels in axis order. If None, channels are named "ch_0",
        "ch_1", etc.
    plot_out : bool
        Plot final trimmed image
    **kwargs
        Arguments to pass to `show_pita()` function if `plot_out==True`

    Returns
    -------
    adata.uns["pixel_map_trim"] : np.array
        Contains image with unused pixels set to `np.nan`
    adata.obsm["spatial_trim"] : np.array
        Contains spatial coords with adjusted pixel values after image cropping
    """
    assert (
        adata.uns["pixel_map_params"] is not None
    ), "Pixel map not yet created. Run map_pixels() first."
    print(
        "Cropping image to pixel dimensions and adding values to adata.uns['pixel_map_df']"
    )
    # Crop the full image to the pixel bounding box computed by map_pixels();
    # transpose puts the image into (x, y, channel) order to match the grid.
    cropped = adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]][
        "images"
    ][adata.uns["pixel_map_params"]["img_key"]].transpose(1, 0, 2)[
        int(adata.uns["pixel_map_params"]["xmin_px"]) : int(
            (adata.uns["pixel_map_params"]["xmax_px"])
        ),
        int(adata.uns["pixel_map_params"]["ymin_px"]) : int(
            (adata.uns["pixel_map_params"]["ymax_px"])
        ),
    ]
    # crop x,y coords and save to .obsm as well
    print("Cropping Visium spot coordinates and saving to adata.obsm['spatial_trim']")
    adata.obsm["spatial_trim"] = adata.obsm["spatial"] - np.repeat(
        [
            [
                adata.uns["pixel_map_params"]["xmin_px"],
                adata.uns["pixel_map_params"]["ymin_px"],
            ]
        ],
        adata.obsm["spatial"].shape[0],
        axis=0,
    )
    # manual trimming of pixels by distance if desired
    if distance_trim:
        print("Calculating pixel distances from spot centers for thresholding")
        # KD-tree query gives each grid pixel its distance to the nearest
        # spot center
        tree = cKDTree(adata.obsm["spatial"])
        xi = interpnd._ndim_coords_from_arrays(
            (adata.uns["grid_x"], adata.uns["grid_y"]),
            ndim=adata.obsm["spatial"].shape[1],
        )
        dists, _ = tree.query(xi)
        # determine distance threshold
        if threshold is None:
            # default: just beyond the hexagon's center-to-vertex distance
            threshold = int(adata.uns["pixel_map_params"]["ctr_to_vert"] + 1)
            print(
                "Using distance threshold of {} pixels from adata.uns['pixel_map_params']['ctr_to_vert']".format(
                    threshold
                )
            )
        dist_mask = bin_threshold(dists, threshmax=threshold)
        if plot_out:
            # plot pixel distances from spot centers on image
            show_pita(pita=dists, figsize=(4, 4))
            # plot binary thresholded image
            show_pita(pita=dist_mask, figsize=(4, 4))
        print(
            "Trimming pixels by spot distance and adjusting labels in adata.uns['pixel_map_df']"
        )
        mask_df = pd.DataFrame(dist_mask.T.ravel(order="F"), columns=["manual_trim"])
        adata.uns["pixel_map_df"] = adata.uns["pixel_map_df"].merge(
            mask_df, left_index=True, right_index=True
        )
        adata.uns["pixel_map_df"].loc[
            adata.uns["pixel_map_df"]["manual_trim"] == 1, ["barcode"]
        ] = "none"  # set empty pixels to empty barcode
        adata.uns["pixel_map_df"].drop(
            columns="manual_trim", inplace=True
        )  # remove unneeded label
    if channels is None:
        # if channel names not specified, name them numerically
        channels = ["ch_{}".format(x) for x in range(cropped.shape[2])]
    # cast image intensity values to long-form and add to adata.uns["pixel_map_df"]
    rgb = pd.DataFrame(
        np.column_stack(
            [cropped[:, :, x].ravel(order="F") for x in range(cropped.shape[2])]
        ),
        columns=channels,
    )
    adata.uns["pixel_map_df"] = adata.uns["pixel_map_df"].merge(
        rgb, left_index=True, right_index=True
    )
    adata.uns["pixel_map_df"].loc[
        adata.uns["pixel_map_df"]["barcode"] == "none", channels
    ] = np.nan  # set empty pixels to invalid image intensity value
    # calculate mean image values for each channel and create .obsm key
    adata.obsm["image_means"] = (
        adata.uns["pixel_map_df"]
        .loc[adata.uns["pixel_map_df"]["barcode"] != "none", ["barcode"] + channels]
        .groupby("barcode")
        .mean()
        .values
    )
    print(
        "Saving cropped and trimmed image to adata.uns['spatial']['{}']['images']['{}_trim']".format(
            adata.uns["pixel_map_params"]["library_id"],
            adata.uns["pixel_map_params"]["img_key"],
        )
    )
    # Rebuild the (y, x, channel) image from the long-form dataframe, one
    # pivot per channel stacked along the last axis.
    adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]]["images"][
        "{}_trim".format(adata.uns["pixel_map_params"]["img_key"])
    ] = np.dstack(
        [
            adata.uns["pixel_map_df"]
            .pivot(index="y", columns="x", values=[channels[x]])
            .values
            for x in range(len(channels))
        ]
    )
    # save scale factor as well (trimmed image keeps the source scale)
    adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]]["scalefactors"][
        "tissue_{}_trim_scalef".format(adata.uns["pixel_map_params"]["img_key"])
    ] = adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]][
        "scalefactors"
    ][
        "tissue_{}_scalef".format(adata.uns["pixel_map_params"]["img_key"])
    ]
    # plot results if desired
    if plot_out:
        # NOTE(review): show_pita is defined elsewhere in this module.
        if len(channels) == 3:
            show_pita(
                pita=adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]][
                    "images"
                ]["{}_trim".format(adata.uns["pixel_map_params"]["img_key"])],
                RGB=True,
                label=channels,
                **kwargs,
            )
        else:
            show_pita(
                pita=adata.uns["spatial"][adata.uns["pixel_map_params"]["library_id"]][
                    "images"
                ]["{}_trim".format(adata.uns["pixel_map_params"]["img_key"])],
                RGB=False,
                label=channels,
                **kwargs,
            )
    print("Done!")
def assemble_pita(
    adata, features=None, use_rep=None, layer=None, plot_out=True, histo=None, **kwargs
):
    """
    Cast feature into pixel space to construct gene expression image ("pita")

    Parameters
    ----------
    adata : AnnData.anndata
        the data; must have been processed by `map_pixels()` so that
        `adata.uns["pixel_map_params"]` and `adata.uns["pixel_map_df"]` exist
    features : list of int or str
        Names or indices of features to cast onto spot image. If `None`, cast all
        features. If `plot_out`, first feature in list will be plotted. If not
        specified and `plot_out`, first feature (index 0) will be plotted.
    use_rep : str
        Key from `adata.obsm` to use for plotting, or ".obs"/"obs" to pull named
        columns from `adata.obs`. If `None`, use `adata.X`.
    layer : str
        Key from `adata.layers` to use for plotting. Ignored if `use_rep` is not `None`
    plot_out : bool
        Show resulting image?
    histo : str or `None`, optional (default=`None`)
        Histology image to show along with pita in gridspec (i.e. "hires",
        "hires_trim", "lowres"). If `None` or if `plot_out`==`False`, ignore.
    **kwargs
        Arguments to pass to `show_pita()` function

    Returns
    -------
    assembled : np.array
        Image of desired expression in pixel space
    """
    # check key membership first so a missing pixel map raises the intended
    # AssertionError message instead of an uninformative KeyError
    assert (
        "pixel_map_params" in adata.uns
        and adata.uns["pixel_map_params"] is not None
    ), "Pixel map not yet created. Run map_pixels() first."
    # coerce features to list if only single string
    if features and not isinstance(features, list):
        features = [features]
    if use_rep is None:
        # use all genes if no gene features specified
        if not features:
            features = adata.var_names  # [adata.var.highly_variable == 1].tolist()
        if layer is None:
            # pull feature values directly from adata.X
            print("Assembling pita with {} features from adata.X".format(len(features)))
            mapper = pd.DataFrame(
                adata.X[:, [adata.var_names.get_loc(x) for x in features]],
                index=adata.obs_names,
            )
        else:
            # pull feature values from the requested layer instead of adata.X
            print(
                "Assembling pita with {} features from adata.layers['{}']".format(
                    len(features), layer
                )
            )
            mapper = pd.DataFrame(
                adata.layers[layer][:, [adata.var_names.get_loc(x) for x in features]],
                index=adata.obs_names,
            )
    elif use_rep in [".obs", "obs"]:
        # pull named (or integer-indexed) columns from adata.obs
        assert features is not None, "Must provide feature(s) from adata.obs"
        print("Assembling pita with {} features from adata.obs".format(len(features)))
        if all(isinstance(x, int) for x in features):
            mapper = adata.obs.iloc[:, features].copy()
        else:
            mapper = adata.obs[features].copy()
        features = None  # set features to None in case show==True
    else:
        # pull values from an adata.obsm representation (e.g. PCA, NMF usages)
        if not features:
            print(
                "Assembling pita with {} features from adata.obsm['{}']".format(
                    adata.obsm[use_rep].shape[1], use_rep
                )
            )
            mapper = pd.DataFrame(adata.obsm[use_rep], index=adata.obs_names)
        else:
            # obsm arrays have no names, so only integer indices are meaningful
            assert all(
                isinstance(x, int) for x in features
            ), "Features must be integer indices if using rep from adata.obsm"
            print(
                "Assembling pita with {} features from adata.obsm['{}']".format(
                    len(features), use_rep
                )
            )
            mapper = pd.DataFrame(
                adata.obsm[use_rep][:, features], index=adata.obs_names
            )
    # cast barcodes into pixel dimensions for reindexing
    print("Casting barcodes to pixel dimensions and saving to adata.uns['pixel_map']")
    pixel_map = (
        adata.uns["pixel_map_df"].pivot(index="y", columns="x", values="barcode").values
    )
    # reindex feature values row-by-row against the barcode grid to build the image;
    # pixels whose barcode is absent from mapper become NaN
    assembled = np.array(
        [mapper.reindex(index=pixel_map[x], copy=True) for x in range(len(pixel_map))]
    ).squeeze()
    if plot_out:
        # determine where the histo image is in anndata
        if histo is not None:
            assert (
                histo
                in adata.uns["spatial"][list(adata.uns["spatial"].keys())[0]][
                    "images"
                ].keys()
            ), "Must provide one of {} for histo".format(
                adata.uns["spatial"][list(adata.uns["spatial"].keys())[0]][
                    "images"
                ].keys()
            )
            histo = adata.uns["spatial"][list(adata.uns["spatial"].keys())[0]][
                "images"
            ][histo]
        show_pita(pita=assembled, features=features, histo=histo, **kwargs)
    print("Done!")
    return assembled
def show_pita(
    pita,
    features=None,
    RGB=False,
    histo=None,
    label="feature",
    ncols=4,
    figsize=(7, 7),
    save_to=None,
    **kwargs,
):
    """
    Plot assembled pita using `plt.imshow()`

    Parameters
    ----------
    pita : np.array
        Image of desired expression in pixel space from `.assemble_pita()`.
        Must be 2-D (single feature) or 3-D (multiple features / RGB channels).
    features : list of int, optional (default=`None`)
        List of features by index (1-based) to show in plot. If `None`, use all
        features.
    RGB : bool, optional (default=`False`)
        Treat 3-dimensional array as RGB image
    histo : np.array or `None`, optional (default=`None`)
        Histology image to show along with pita in gridspec. If `None`, ignore.
    label : str
        What to title each panel of the gridspec (i.e. "PC" or "usage") or each
        channel in RGB image. Can also pass list of names e.g. ["NeuN","GFAP",
        "DAPI"] corresponding to channels.
    ncols : int
        Number of columns for gridspec
    figsize : tuple of float
        Size in inches of output figure (single-panel plots only; gridspec
        figures are sized from `ncols` and the number of panels)
    save_to : str or None
        Path to image file to save results. if `None`, show figure.
    **kwargs
        Arguments to pass to `plt.imshow()` function

    Returns
    -------
    Matplotlib object (if plotting one feature or RGB) or gridspec object (for
    multiple features). Saves plot to file if `save_to` is not `None`.
    """
    # validate dimensionality up front: only 2-D and 3-D arrays are plottable
    assert pita.ndim > 1, "Pita does not have enough dimensions: {} given".format(
        pita.ndim
    )
    assert pita.ndim < 4, "Pita has too many dimensions: {} given".format(pita.ndim)
    # if only one feature (2D), plot it quickly
    if (pita.ndim == 2) and histo is None:
        fig = plt.figure(figsize=figsize)
        plt.imshow(pita, **kwargs)
        # hide tick labels and spines for a clean image panel
        plt.tick_params(labelbottom=False, labelleft=False)
        sns.despine(bottom=True, left=True)
        plt.colorbar(shrink=0.8)
        plt.tight_layout()
        if save_to:
            plt.savefig(fname=save_to, transparent=True, bbox_inches="tight", dpi=800)
        return fig
    # single feature plus histology: side-by-side 1x2 gridspec
    if (pita.ndim == 2) and histo is not None:
        n_rows, n_cols = 1, 2  # two images here, histo and pita
        fig = plt.figure(figsize=(ncols * n_cols, ncols * n_rows))
        # arrange axes as subplots
        gs = gridspec.GridSpec(n_rows, n_cols, figure=fig)
        # add plots to axes
        ax = plt.subplot(gs[0])
        im = ax.imshow(histo, **kwargs)
        ax.tick_params(labelbottom=False, labelleft=False)
        sns.despine(bottom=True, left=True)
        ax.set_title(
            label="Histology",
            loc="left",
            fontweight="bold",
            fontsize=16,
        )
        # pita panel with its own colorbar
        ax = plt.subplot(gs[1])
        im = ax.imshow(pita, **kwargs)
        ax.tick_params(labelbottom=False, labelleft=False)
        sns.despine(bottom=True, left=True)
        cbar = plt.colorbar(im, shrink=0.8)
        fig.tight_layout()
        if save_to:
            plt.savefig(fname=save_to, transparent=True, bbox_inches="tight", dpi=800)
        return fig
    if RGB:
        # if third dim has 3 features, treat as RGB and plot it quickly
        # NOTE(review): if this assert fails while features is None, the
        # len(features) in the message raises TypeError instead — confirm intent
        assert (pita.ndim == 3) & (
            pita.shape[2] == 3
        ), "Need 3 dimensions and 3 given features for an RGB image; shape = {}; features given = {}".format(
            pita.shape, len(features)
        )
        print("Plotting pita as RGB image")
        if isinstance(label, str):
            # if label is single string, name channels numerically
            channels = ["{}_{}".format(label, x) for x in range(pita.shape[2])]
        else:
            # label is a list of channel names; must match the 3 RGB channels
            assert (
                len(label) == 3
            ), "Please pass 3 channel names for RGB plot; {} labels given: {}".format(
                len(label), label
            )
            channels = label
        if histo is not None:
            # RGB pita plus histology: side-by-side 1x2 gridspec
            n_rows, n_cols = 1, 2  # two images here, histo and RGB
            fig = plt.figure(figsize=(ncols * n_cols, ncols * n_rows))
            # arrange axes as subplots
            gs = gridspec.GridSpec(n_rows, n_cols, figure=fig)
            # add plots to axes
            ax = plt.subplot(gs[0])
            im = ax.imshow(histo, **kwargs)
            ax.tick_params(labelbottom=False, labelleft=False)
            sns.despine(bottom=True, left=True)
            ax.set_title(
                label="Histology",
                loc="left",
                fontweight="bold",
                fontsize=16,
            )
            ax = plt.subplot(gs[1])
            im = ax.imshow(pita, **kwargs)
            # add legend for channel IDs (solid R/G/B lines mapped to names)
            custom_lines = [
                Line2D([0], [0], color=(1, 0, 0), lw=5),
                Line2D([0], [0], color=(0, 1, 0), lw=5),
                Line2D([0], [0], color=(0, 0, 1), lw=5),
            ]
            plt.legend(custom_lines, channels, fontsize="medium")
            ax.tick_params(labelbottom=False, labelleft=False)
            sns.despine(bottom=True, left=True)
            fig.tight_layout()
            if save_to:
                plt.savefig(
                    fname=save_to, transparent=True, bbox_inches="tight", dpi=800
                )
            return fig
        else:
            # RGB pita alone, single panel
            fig = plt.figure(figsize=figsize)
            plt.imshow(pita, **kwargs)
            # add legend for channel IDs (solid R/G/B lines mapped to names)
            custom_lines = [
                Line2D([0], [0], color=(1, 0, 0), lw=5),
                Line2D([0], [0], color=(0, 1, 0), lw=5),
                Line2D([0], [0], color=(0, 0, 1), lw=5),
            ]
            plt.legend(custom_lines, channels, fontsize="medium")
            plt.tick_params(labelbottom=False, labelleft=False)
            sns.despine(bottom=True, left=True)
            plt.tight_layout()
            if save_to:
                plt.savefig(
                    fname=save_to, transparent=True, bbox_inches="tight", dpi=800
                )
            return fig
    # if pita has multiple features, plot them in gridspec
    if isinstance(features, int):  # force features into list if single integer
        features = [features]
    # if no features are given, use all of them
    if features is None:
        # features are 1-based here; panels index pita with feature - 1 below
        features = [x + 1 for x in range(pita.shape[2])]
    else:
        assert (
            pita.ndim > 2
        ), "Not enough features in pita: shape {}, expecting 3rd dim with length {}".format(
            pita.shape, len(features)
        )
        assert (
            len(features) <= pita.shape[2]
        ), "Too many features given: pita has {}, expected {}".format(
            pita.shape[2], len(features)
        )
    if isinstance(label, str):
        # if label is single string, name channels numerically
        labels = ["{}_{}".format(label, x) for x in features]
    else:
        # one custom label per requested feature
        assert len(label) == len(
            features
        ), "Please provide the same number of labels as features; {} labels given, {} features given.".format(
            len(label), len(features)
        )
        labels = label
    # calculate gridspec dimensions
    if histo is not None:
        labels = ["Histology"] + labels  # append histo to front of labels
        # histo occupies one extra panel, so size the grid for len(features) + 1
        if len(features) + 1 <= ncols:
            n_rows, n_cols = 1, len(features) + 1
        else:
            n_rows, n_cols = ceil((len(features) + 1) / ncols), ncols
    else:
        if len(features) <= ncols:
            n_rows, n_cols = 1, len(features)
        else:
            n_rows, n_cols = ceil(len(features) / ncols), ncols
    fig = plt.figure(figsize=(ncols * n_cols, ncols * n_rows))
    # arrange axes as subplots
    gs = gridspec.GridSpec(n_rows, n_cols, figure=fig)
    # add plots to axes
    i = 0
    if histo is not None:
        # add histology plot to first axes
        ax = plt.subplot(gs[i])
        im = ax.imshow(histo, **kwargs)
        ax.tick_params(labelbottom=False, labelleft=False)
        sns.despine(bottom=True, left=True)
        ax.set_title(
            label=labels[i],
            loc="left",
            fontweight="bold",
            fontsize=16,
        )
        i = i + 1
    # one panel (with colorbar) per requested feature; feature indices are
    # 1-based, hence the feature - 1 when slicing pita
    for feature in features:
        ax = plt.subplot(gs[i])
        im = ax.imshow(pita[:, :, feature - 1], **kwargs)
        ax.tick_params(labelbottom=False, labelleft=False)
        sns.despine(bottom=True, left=True)
        ax.set_title(
            label=labels[i],
            loc="left",
            fontweight="bold",
            fontsize=16,
        )
        cbar = plt.colorbar(im, shrink=0.8)
        i = i + 1
    fig.tight_layout()
    if save_to:
        plt.savefig(fname=save_to, transparent=True, bbox_inches="tight", dpi=800)
    return fig
| 26,374 | 8,436 |