11592797
|
from vergeml import ModelPlugin, model, train, predict, option, VergeMLError
from vergeml.display import DISPLAY
import numpy as np
import os
import os.path
import random
import csv
_TEMPLATE = """\
model: imagenet
# Uncomment for better ml list output:
# list:
# columns:
# - model
# - status
# - num-samples
# - epochs
# - auc
# - test-acc
# To get precision/recall/f1 for your positive class, add this:
# - <label>-precision
# - <label>-recall
# - <label>-f1
"""
@model('imagenet', descr='Image classifier model, with weights pre-trained on ImageNet.')
class ImageNetModelPlugin(ModelPlugin):
@train('train', descr='Train an image classifier.')
@option('epochs', 5)
@option('architecture', 'resnet-50', 'Name of the pretrained network.')
@option('variant', 'auto', 'Network variant.')
@option('size', "auto", 'Image input size.', type='Union[int,str]')
@option('alpha', 1.0, 'Network alpha value.')
@option('layers', 1, 'Number of layers to add.')
@option('output-layer', 'last', 'Name or index of the output layer.', type='Union[int,str]')
@option('batch-size', 64)
@option('optimizer', 'sgd', validate=('adam', 'sgd'))
@option('learning-rate', 0.0001)
@option('decay', 0.)
@option('dropout', 0.)
@option('early-stopping-delta', 0.0, 'Early stopping delta.')
@option('early-stopping-patience', 0, 'Early stopping patience (0 means off).')
@option('name', None, type='Optional[str]', descr='Optional name of the AI.')
# supported features:
# - retrain a model
# . model name
# - finetune
# . model name
def train(self, args, env):
from vergeml.sources.features import get_image_size, evaluate_args
evaluate_args(args['architecture'], env.get('trainings-dir'), args['variant'], args['alpha'], args['size'])
# configure libraries
env.configure('keras')
self.model = ImageNetModel()
size = get_image_size(args['architecture'], args['variant'], args['size'])
# gather arguments
trainargs = dict(xy_train=env.data.load('train', view='batch', layout='arrays', batch_size=args['batch-size'],
randomize=True, infinite=True),
xy_val=list(env.data.load('val', view='list', layout='arrays')),
xy_test=list(env.data.load('test', view='list', layout='arrays')),
labels=env.data.meta['labels'])
# set up hyperparameters
hyperparameters = args.copy()
hyperparameters.update({'labels': env.data.meta['labels'], 'size': size})
env.start_training(name=args['name'], hyperparameters=hyperparameters)
trainargs.update(env.args_for(self.model.train, args))
trainargs['callbacks'] = [env.keras_callback()]
# train
final_results = self.model.train(**trainargs)
# done
env.end_training(final_results)
@predict('predict', descr="Predict image labels.")
@option('@AI')
@option('labels', default=5, type=int, validate='>0', descr="The number of labels to predict.")
@option('resize', default='fill', type=str, validate=('fill', 'aspect-fill', 'aspect-fit'), descr="Resize Mode.")
@option('compact', default=False, descr="Show results in a compact representation.", flag=True, command_line=True)
@option('<files>', type='List[File]', descr="The images to predict.")
def predict(self, args, env):
if not self.model:
self.load(env)
files = args['<files>']
res = []
for ix, f in enumerate(files):
results = self.model.predict(f, k=args['labels'], resize_mode=args['resize'])
res.append(results)
if args['compact']:
DISPLAY.print("{}\t{}".format(f, results['prediction'][0]['label']))
else:
DISPLAY.print(f)
for pred in results['prediction']:
DISPLAY.print("{:>6.2f}% {}".format(pred['probability']*100, pred['label']))
if ix + 1 < len(files):
DISPLAY.print("")
return res
def load(self, env):
env.configure('keras')
# load the AI
self.model = ImageNetModel()
input_size = env.get("hyperparameters.size")
architecture = env.get("hyperparameters.architecture")
        self.model.load(env.checkpoints_dir(), architecture, input_size)
def set_defaults(self, cmd, args, env):
if cmd in ('train', 'preprocess'):
for k in ('input', 'output'):
type_ = env.get(f"data.{k}.type")
output_layer = args.get('output-layer', 'last')
architecture = args.get('architecture', 'resnet-50')
variant = args.get('variant', 'auto')
size = args.get('size', 'auto')
alpha = args.get('alpha', 1.0)
# Output has to be labeled-image-features.
if (k == 'input' and type_ in (None, 'labeled-image-features')) or k == 'output':
env.set(f"data.{k}.type", 'labeled-image-features')
env.set(f"data.{k}.output-layer", output_layer)
env.set(f"data.{k}.architecture", architecture)
env.set(f"data.{k}.variant", variant)
env.set(f"data.{k}.size", size)
env.set(f"data.{k}.alpha", alpha)
def project_file_template(self):
return _TEMPLATE
class ImageNetModel:
labels = None
model = None
trained_model = None
base_model = None
image_size = None
preprocess_input = None
def load(self, model_dir, architecture, image_size):
from keras.models import load_model
from vergeml.sources.features import get_preprocess_input
labels_txt = os.path.join(model_dir, "labels.txt")
if not os.path.exists(labels_txt):
raise VergeMLError("labels.txt not found: {}".format(labels_txt))
model_h5 = os.path.join(model_dir, "model.h5")
if not os.path.exists(model_h5):
raise VergeMLError("model.h5 not found: {}".format(model_h5))
with open(labels_txt, "r") as f:
self.labels = f.read().splitlines()
self.model = load_model(model_h5)
self.image_size = image_size
self.preprocess_input = get_preprocess_input(architecture)
def train(self,
labels,
xy_train,
xy_val,
xy_test,
epochs=20,
batch_size=64,
architecture="resnet-50",
variant="auto",
size="auto",
alpha=1.0,
layers=1,
output_layer="last",
optimizer='sgd',
learning_rate=0.0001,
decay=0.,
dropout=0.,
early_stopping_delta=0.,
early_stopping_patience=0,
random_seed=42,
callbacks=[],
trainings_dir="trainings",
checkpoints_dir="checkpoints",
stats_dir='stats'):
from vergeml.sources.features import get_imagenet_architecture, get_image_size, get_preprocess_input
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from keras.models import Model
from keras import optimizers
if architecture.startswith("@"):
raise NotImplementedError
self.labels = labels
nclasses = len(self.labels)
num_batches = len(xy_train)
self.image_size = get_image_size(architecture, variant, size)
self.preprocess_input = get_preprocess_input(architecture)
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
self.base_model = get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet')
input_size = np.array(self.base_model.layers[-1].output_shape[1:]).prod()
input_layer = Input(shape=(input_size,))
x = _makenet(input_layer, layers, dropout, random_seed)
output_layer = Dense(nclasses, activation="softmax", name="predictions")(x)
        self.trained_model = Model(inputs=input_layer, outputs=output_layer)
if optimizer == 'adam':
optimizer = optimizers.Adam(lr=learning_rate, decay=decay)
else:
optimizer = optimizers.SGD(lr=learning_rate, decay=decay, momentum=0.9)
self.trained_model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
callbacks = callbacks.copy()
has_val_step = bool(len(xy_val[0]))
# during training, save the new layers only
checkpoint = ModelCheckpoint(os.path.join(checkpoints_dir, "last_layers.h5"),
monitor='val_acc',
verbose=0,
save_best_only=has_val_step,
save_weights_only=True,
mode='auto',
period=1)
callbacks.append(checkpoint)
if early_stopping_delta:
callbacks.append(EarlyStopping(min_delta=early_stopping_delta, patience=early_stopping_patience))
callbacks.append(TensorBoard(log_dir=stats_dir))
try:
self.trained_model.fit_generator(xy_train,
epochs=epochs,
verbose=0,
validation_data=xy_val if has_val_step else None,
steps_per_epoch=num_batches,
callbacks=callbacks)
except KeyboardInterrupt:
pass
history = self.trained_model.history # pylint: disable=E1101
        # initialized up front so the code below stays safe even when training
        # was interrupted before completing the first epoch
        pred_test, final_results = None, {}
if hasattr(history, 'epoch') and len(history.epoch):
# load the best weights
self.trained_model.load_weights(os.path.join(checkpoints_dir, "last_layers.h5"))
pred_test, final_results = self._evaluate_final(self.trained_model, xy_test, batch_size, history)
self.model = _save(self.trained_model, self.base_model, layers, labels, random_seed, checkpoints_dir)
if pred_test is not None:
# save predictions and ground truth values for metrics like ROC etc.
path = os.path.join(stats_dir, "predictions.csv")
with open(path, "w", newline='') as f:
writer = csv.writer(f, dialect="excel")
_, y_test = xy_test
for pred, y in zip(pred_test, y_test):
row = pred.tolist() + y.tolist()
writer.writerow(row)
return final_results
def predict(self, f, k=5, resize_mode='fill'):
from keras.preprocessing import image
from vergeml.img import resize_image
filename = os.path.basename(f)
if not os.path.exists(f):
return dict(filename=filename, prediction=[])
img = image.load_img(f)
img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = self.preprocess_input(x)
preds = self.model.predict(x)
pred = self._decode(preds, top=k)[0]
        prediction = [dict(probability=float(perc), label=klass) for _, klass, perc in pred]
return dict(filename=filename, prediction=prediction)
def _decode(self, preds, top):
preds = list(preds[0])
dec = list(zip([None] * len(self.labels), self.labels, preds))
dec = sorted(dec, key=lambda x: x[2], reverse=True)
return [dec[:top]]
def _evaluate_final(self, model, xy_test, batch_size, history):
res = {}
pred_test = None
if 'val_acc' in history.history:
res['val_acc'] = max(history.history['val_acc'])
rev_ix = -1 - list(reversed(history.history['val_acc'])).index(res['val_acc'])
res['val_loss'] = history.history['val_loss'][rev_ix]
res['acc'] = history.history['acc'][-1]
res['loss'] = history.history['loss'][-1]
if len(xy_test[0]):
from sklearn.metrics import classification_report, roc_auc_score
# evaluate with test data
x_test, y_test = xy_test
pred_test = model.predict(x_test, batch_size=batch_size, verbose=0)
test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
res['test_loss'] = test_loss
res['test_acc'] = test_acc
report = classification_report(y_true = np.argmax(y_test, axis=1),
y_pred = np.argmax(pred_test, axis=1),
target_names=self.labels,
digits=4,
output_dict=True)
            res['auc'] = roc_auc_score(y_test.astype(int), pred_test)
for label in self.labels:
stats = report[label]
res[label+"-precision"] = stats['precision']
res[label+"-recall"] = stats['recall']
res[label+"-f1"] = stats['f1-score']
return pred_test, res
def _makenet(x, num_layers, dropout, random_seed):
from keras.layers import Dense, Dropout
dropout_seeder = random.Random(random_seed)
for i in range(num_layers - 1):
# add intermediate layers
if dropout:
x = Dropout(dropout, seed=dropout_seeder.randint(0, 10000))(x)
x = Dense(1024, activation="relu", name='dense_layer_{}'.format(i))(x)
if dropout:
# add the final dropout layer
x = Dropout(dropout, seed=dropout_seeder.randint(0, 10000))(x)
return x
def _save(model, base_model, layers, labels, random_seed, checkpoints_dir):
from keras.layers import Flatten, Dense
from keras import Model
nclasses = len(labels)
x = Flatten()(base_model.output)
x = _makenet(x, layers, dropout=None, random_seed=random_seed)
predictions = Dense(nclasses, activation="softmax", name="predictions")(x)
model_final = Model(inputs=base_model.input, outputs=predictions)
for i in range(layers - 1):
weights = model.get_layer(name='dense_layer_{}'.format(i)).get_weights()
model_final.get_layer(name='dense_layer_{}'.format(i)).set_weights(weights)
weights = model.get_layer(name='predictions').get_weights()
model_final.get_layer(name='predictions').set_weights(weights)
model_final.save(os.path.join(checkpoints_dir, "model.h5"))
with open(os.path.join(checkpoints_dir, "labels.txt"), "w") as f:
f.write("\n".join(labels))
return model_final
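# Usage sketch for the model above: a minimal sketch assuming a checkpoint
# directory produced by the train command (containing model.h5 and labels.txt);
# the paths and the input size below are illustrative.
if __name__ == "__main__":
    demo = ImageNetModel()
    demo.load("checkpoints", architecture="resnet-50", image_size=224)
    result = demo.predict("cat.jpg", k=3, resize_mode="fill")
    for p in result["prediction"]:
        print("{:>6.2f}% {}".format(p["probability"] * 100, p["label"]))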
|
11592842
|
import base64
import hmac
import hashlib
import time
import requests
from requests.auth import AuthBase
from bitcoin_arbitrage.monitor import settings
from bitcoin_arbitrage.monitor.currency import CurrencyPair
from bitcoin_arbitrage.monitor.exchange import Exchange, BTCAmount
from bitcoin_arbitrage.monitor.log import setup_logger
from bitcoin_arbitrage.monitor.order import Order, OrderState, OrderId
logger = setup_logger('gdax')
# Create custom authentication
class GdaxAuth(AuthBase):
def __init__(self, key: str, secret: str, passphrase: str):
self.api_key = key
self.secret_key = secret
self.passphrase = passphrase
def __call__(self, request):
        # GDAX signs requests with a unix-epoch timestamp
        timestamp = str(time.time())
        body = request.body or ''
        if isinstance(body, bytes):
            body = body.decode('utf-8')
        message = timestamp + request.method + request.path_url + body
        hmac_key = base64.b64decode(self.secret_key)
        signature = hmac.new(hmac_key, message.encode('utf-8'), hashlib.sha256)
        signature_b64 = base64.b64encode(signature.digest()).decode('utf-8')
request.headers.update({
'CB-ACCESS-SIGN': signature_b64,
'CB-ACCESS-TIMESTAMP': timestamp,
'CB-ACCESS-KEY': self.api_key,
'CB-ACCESS-PASSPHRASE': self.passphrase,
'Content-Type': 'application/json'
})
return request
class Gdax(Exchange):
base_url = "https://api.gdax.com"
currency_pair_api_representation = {
CurrencyPair.BTC_USD: "BTC-USD",
CurrencyPair.BTC_EUR: "BTC-EUR",
CurrencyPair.ETH_USD: "ETH-USD",
CurrencyPair.ETH_EUR: "ETH-EUR",
}
def __init__(self,
currency_pair: CurrencyPair,
api_key: str=settings.GDAX_KEY,
secret_key: str=settings.GDAX_SECRET,
passphrase: str=settings.GDAX_PASSPHRASE):
super().__init__(currency_pair)
self.auth = GdaxAuth(api_key, secret_key, passphrase)
@property
def ticker_url(self) -> str:
return f"{self.base_url}/products/{self.currency_pair_api_representation[self.currency_pair]}/ticker"
def _place_limit_order(self, side: str, amount: BTCAmount, limit: float) -> OrderId:
url = f"{self.base_url}/orders/"
data = {
'product_id': self.currency_pair_api_representation.get(self.currency_pair),
'side': side,
'size': amount,
'price': limit,
}
response = requests.post(url, json=data, auth=self.auth)
json = response.json()
order_id = json.get('id')
return order_id
def limit_sell_order(self, amount: BTCAmount, limit: float) -> Order:
order_id = self._place_limit_order('sell', amount, limit)
return Order(exchange=self, order_id=order_id)
def limit_buy_order(self, amount: BTCAmount, limit: float) -> Order:
order_id = self._place_limit_order('buy', amount, limit)
return Order(exchange=self, order_id=order_id)
def get_order_state(self, order: Order) -> OrderState:
url = f'{self.base_url}/orders/{order.order_id}'
response = requests.get(url, auth=self.auth)
if response.status_code == 404:
logger.info(f'Order {order} doesn\'t return a status, it might be cancelled')
return OrderState.CANCELLED
        state_string = response.json().get('state')
        if state_string in ['done', 'settled']:
            return OrderState.DONE
        elif state_string in ['open', 'pending']:
            return OrderState.PENDING
        # assumption: treat unrecognized states as still in flight rather than
        # silently returning None
        return OrderState.PENDING
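# Usage sketch: a minimal sketch assuming valid GDAX credentials in settings;
# the amount and limit price below are illustrative placeholders.
if __name__ == "__main__":
    exchange = Gdax(CurrencyPair.BTC_USD)
    order = exchange.limit_buy_order(amount=0.01, limit=25000.0)
    print(exchange.get_order_state(order))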
|
11592867
|
import asyncio
from discord.ext import commands, tasks
from IreneUtility.util import u_logger as log
import datetime
import pytz
import discord
import typing
from IreneUtility.Utility import Utility
# noinspection PyBroadException,PyPep8
class Reminder(commands.Cog):
def __init__(self, ex):
self.ex: Utility = ex
self.set_timezone_format = "settimezone (timezone abbreviation) (country code)"
# TODO: add remindlater command or reacts to the reminder
@commands.command(aliases=["listreminds", "reminders", "reminds"])
async def listreminders(self, ctx):
"""
Lists out all of your reminders.
[Format: %listreminders]
"""
remind_list = await self.ex.u_reminder.get_reminders(ctx.author.id)
user_timezone = await self.ex.u_reminder.get_user_timezone(ctx.author.id)
if not remind_list:
msg = await self.ex.get_msg(ctx, "reminder", "no_reminders", ['name', ctx.author.display_name])
return await ctx.send(msg)
m_embed = await self.ex.create_embed(title="Reminders List")
embed_list = []
remind_number = 1
index_number = 1
for remind_id, remind_reason, remind_time in remind_list:
await asyncio.sleep(0)
if user_timezone:
remind_time = remind_time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone(user_timezone))
m_embed.add_field(name=f"{index_number}) {remind_reason}",
value=f"{await self.ex.u_reminder.get_locale_time(remind_time,user_timezone)}",
inline=False)
remind_number += 1
index_number += 1
            if remind_number == 11:
                embed_list.append(m_embed)
                m_embed = await self.ex.create_embed(title="Reminders List")
                remind_number = 1
        # only append the trailing embed if it actually holds fields
        if remind_number > 1:
            embed_list.append(m_embed)
msg = await ctx.send(embed=embed_list[0])
await self.ex.check_left_or_right_reaction_embed(msg, embed_list)
@commands.command(aliases=["removeremind"])
async def removereminder(self, ctx, reminder_index: int):
"""
Remove one of your reminders.
[Format: %removereminder (reminder index)]
"""
reminders = await self.ex.u_reminder.get_reminders(ctx.author.id)
user_timezone = await self.ex.u_reminder.get_user_timezone(ctx.author.id)
if not reminders:
msg = await self.ex.get_msg(ctx, "reminder", "no_reminders", ['name', ctx.author.display_name])
return await ctx.send(msg)
else:
try:
remind_id, remind_reason, remind_time = reminders[reminder_index-1]
if user_timezone:
remind_time = remind_time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone(user_timezone))
msg = await self.ex.get_msg(ctx, "reminder", "remove_reminder",
[['name', ctx.author.display_name],
['reason', remind_reason],
['time', await self.ex.u_reminder.get_locale_time(remind_time,
user_timezone)]])
await ctx.send(msg)
await self.ex.u_reminder.remove_user_reminder(ctx.author.id, remind_id)
            except Exception:
msg = await self.ex.get_msg(ctx, "reminder", "index_not_found",
[['name', ctx.author.display_name], ['index', reminder_index]])
return await ctx.send(msg)
@commands.command(aliases=["remind"])
async def remindme(self, ctx, *, user_input):
"""
Create a reminder to do a task at a certain time.
[Format: %remindme to ______ at 9PM
or
%remindme to ____ in 6hrs 30mins]
"""
reminders = await self.ex.u_reminder.get_reminders(ctx.author.id)
user_timezone = await self.ex.u_reminder.get_user_timezone(ctx.author.id)
if reminders:
if len(reminders) >= self.ex.keys.reminder_limit:
msg = await self.ex.get_msg(ctx, "reminder", "max_reminders", [["name", ctx.author.display_name],
["reminder_limit",
self.ex.keys.reminder_limit]])
return await ctx.send(msg)
server_prefix = await self.ex.get_server_prefix(ctx)
# msgs are repeated numerous times. setting the values beforehand.
incorrect_format_msg = await self.ex.get_msg(ctx, "reminder", "incorrect_format",
[["name", ctx.author.display_name],
["server_prefix", server_prefix]])
try:
is_relative_time, type_index = await self.ex.u_reminder.determine_time_type(user_input)
except self.ex.exceptions.ImproperFormat:
return await ctx.send(incorrect_format_msg)
if is_relative_time is None:
return await ctx.send(incorrect_format_msg)
remind_reason = await self.ex.u_reminder.process_reminder_reason(user_input, type_index)
try:
remind_time = await self.ex.u_reminder.process_reminder_time(user_input, type_index, is_relative_time,
ctx.author.id)
except self.ex.exceptions.ImproperFormat:
msg = await self.ex.get_msg(ctx, "reminder", "incorrect_time_format", ["name", ctx.author.display_name])
return await ctx.send(msg)
except self.ex.exceptions.TooLarge:
msg = await self.ex.get_msg(ctx, "reminder", "too_long", ['name', ctx.author.display_name])
return await ctx.send(msg)
except self.ex.exceptions.NoTimeZone:
msg = await self.ex.get_msg(ctx, "reminder", "no_timezone", [['name', ctx.author.display_name],
['server_prefix', server_prefix],
['format', self.set_timezone_format]])
return await ctx.send(msg)
await self.ex.u_reminder.set_reminder(remind_reason, remind_time, ctx.author.id)
msg = await self.ex.get_msg(ctx, "reminder", "will_remind", [['name', ctx.author.display_name],
['reason', remind_reason],
['time', await self.ex.u_reminder.get_locale_time(
remind_time, user_timezone)]])
        # we should put the msg in an embed so custom input can't mention @everyone.
embed = await self.ex.create_embed(title="Reminder", title_desc=msg)
return await ctx.send(embed=embed)
@commands.command(aliases=['gettz', 'time'])
async def gettimezone(self, ctx, user_input: typing.Union[discord.Member, str] = None):
"""
Get your current set timezone.
[Format: %gettimezone]
"""
server_prefix = await self.ex.get_server_prefix(ctx)
if isinstance(user_input, str):
try:
timezone_input = await self.ex.u_reminder.process_timezone_input(user_input)
current_time = await self.ex.u_reminder.format_time('%I:%M:%S %p', timezone_input)
msg = await self.ex.get_msg(ctx, "reminder", "current_time", [["name", ctx.author.display_name],
["tz", timezone_input],
["time", current_time]])
return await ctx.send(msg)
            except Exception:
msg = await self.ex.get_msg(ctx, "reminder", "incorrect_tz_input")
return await ctx.send(msg)
elif isinstance(user_input, discord.Member):
user = user_input
elif not user_input:
user = None
else:
msg = await self.ex.get_msg(ctx, "reminder", "incorrect_tz_input")
return await ctx.send(msg)
if not user:
user = ctx.author
user_timezone = await self.ex.u_reminder.get_user_timezone(user.id)
if not user_timezone:
msg = await self.ex.get_msg(ctx, "reminder", "no_timezone", [['name', user.display_name],
['server_prefix', server_prefix],
['format', self.set_timezone_format]])
return await ctx.send(msg)
current_time = await self.ex.u_reminder.format_time('%I:%M:%S %p', user_timezone)
timezone_abbrev = await self.ex.u_reminder.format_time('UTC%z', user_timezone)
msg = await self.ex.get_msg(ctx, "reminder", "user_time", [["name", user.display_name], ["time", current_time],
["tz", f"{user_timezone} {timezone_abbrev}"]])
return await ctx.send(msg)
@commands.command(aliases=['settz'])
async def settimezone(self, ctx, timezone_name=None, country_code=None):
"""
Set your local timezone with a timezone abbreviation and country code.
[Format: %settimezone (timezone name) (country code)]
"""
if not timezone_name and not country_code:
await self.ex.u_reminder.remove_user_timezone(ctx.author.id)
msg = await self.ex.get_msg(ctx, "reminder", "remove_tz", ["name", ctx.author.display_name])
return await ctx.send(msg)
server_prefix = await self.ex.get_server_prefix(ctx)
user_timezone = await self.ex.u_reminder.process_timezone_input(timezone_name, country_code)
if not user_timezone:
msg = await self.ex.get_msg(ctx, "reminder", "invalid_tz", [["name", ctx.author.display_name],
["server_prefix", server_prefix],
["format", self.set_timezone_format]])
return await ctx.send(msg)
timezone_utc = await self.ex.u_reminder.format_time('UTC%z', user_timezone)
native_time = await self.ex.u_reminder.format_time('%c', user_timezone)
await self.ex.u_reminder.set_user_timezone(ctx.author.id, user_timezone)
msg = await self.ex.get_msg(ctx, "reminder", "add_tz", [["name", ctx.author.display_name],
["tz", f"{user_timezone} {timezone_utc}"],
["time", native_time]])
return await ctx.send(msg)
@tasks.loop(seconds=5, minutes=0, hours=0, reconnect=True)
async def reminder_loop(self):
"""Process for checking for reminders and sending them out if they are past overdue."""
while not self.ex.irene_cache_loaded:
await asyncio.sleep(1)
try:
# we do not want dictionary to change size during iteration, so we will make a copy.
users_copy = self.ex.cache.users.copy()
for user in users_copy.values():
if not user.reminders:
continue
for remind_id, remind_reason, remind_time in user.reminders:
try:
current_time = datetime.datetime.now(remind_time.tzinfo)
if current_time < remind_time:
continue
dm_channel = await self.ex.get_dm_channel(user_id=user.id)
if dm_channel:
title_desc = await self.ex.get_msg(user, "reminder", "remind_dm", ["reason", remind_reason])
embed = await self.ex.create_embed(title="Reminder", title_desc=title_desc)
await dm_channel.send(embed=embed)
await self.ex.u_reminder.remove_user_reminder(user.id, remind_id)
except discord.Forbidden as e:
# likely forbidden error -> do not have access to dm user
log.useless(f"{e} (discord.Forbidden) - Likely do not have access to dm user {user.id}",
method=self.reminder_loop)
                        # remove the reminder since we do not want to keep retrying someone we can't DM.
await self.ex.u_reminder.remove_user_reminder(user.id, remind_id)
except Exception as e:
log.console(f"{e} - Reminder Loop")
except Exception as e:
log.useless(f"{e} (Exception) - Reminder.reminder_loop")
|
11592882
|
from __future__ import unicode_literals
class PreindexNotFinished(Exception):
"""Thrown when a preindex of the database is not finished in a certain time period"""
class NoHostsMatch(Exception):
pass
|
11592895
|
import numpy as np
import pytest
from pandas.core.arrays import TimedeltaArray
class TestTimedeltaArrayConstructor:
def test_only_1dim_accepted(self):
# GH#25282
arr = np.array([0, 1, 2, 3], dtype="m8[h]").astype("m8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 3-dim, we allow 2D to sneak in for ops purposes GH#29853
TimedeltaArray(arr.reshape(2, 2, 1))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
TimedeltaArray(arr[[0]].squeeze())
def test_freq_validation(self):
# ensure that the public constructor cannot create an invalid instance
arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10 ** 9
msg = (
"Inferred frequency None from passed values does not "
"conform to passed frequency D"
)
with pytest.raises(ValueError, match=msg):
TimedeltaArray(arr.view("timedelta64[ns]"), freq="D")
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
TimedeltaArray([1, 2, 3])
def test_other_type_raises(self):
with pytest.raises(ValueError, match="dtype bool cannot be converted"):
TimedeltaArray(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
# TODO: why TypeError for 'category' but ValueError for i8?
with pytest.raises(
ValueError, match=r"category cannot be converted to timedelta64\[ns\]"
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
with pytest.raises(
ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]"
):
TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64"))
def test_copy(self):
data = np.array([1, 2, 3], dtype="m8[ns]")
arr = TimedeltaArray(data, copy=False)
assert arr._data is data
arr = TimedeltaArray(data, copy=True)
assert arr._data is not data
assert arr._data.base is not data
def test_from_sequence_dtype(self):
msg = "dtype .*object.* cannot be converted to timedelta64"
with pytest.raises(ValueError, match=msg):
TimedeltaArray._from_sequence([], dtype=object)
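    # A passing counterpart to the failure cases above: a quick sanity sketch
    # showing the constructor accepts 1-dimensional ns-resolution data.
    def test_valid_construction_sketch(self):
        data = np.array([1, 2, 3], dtype="m8[ns]")
        arr = TimedeltaArray(data)
        assert len(arr) == 3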
|
11592907
|
import numpy as np
from ..constants import LEN_CONV
class XSF(object):
xsf_units = 'Angstrom'
def __init__(self, filexsf):
self.filexsf = filexsf
self.title = ''
self.cutoffvars = {}
def write(self, system, field=None):
"""
Write a system object into an xsf file.
Not all specifications of the xsf file format are implemented, they will
be added as needed.
So far it can:
- write the system cell and atoms
- write the 1D/2D/3D grid data
"""
title = system.name
cell = system.cell
ions = system.ions
# it can be useful to override the plot inside the system object,
# for example if we want to plot a 2D/3D custom cut of the density grid
if field is None:
field = system.field
with open(self.filexsf, 'w') as fileout:
self._write_header(fileout, title)
self._write_cell(fileout, cell)
self._write_coord(fileout, ions)
self._write_datagrid(fileout, field)
return 0
def _write_header(self, fileout, title):
mywrite(fileout, ("# ", title))
mywrite(fileout, "CRYSTAL \n", True)
def _write_cell(self, fileout, cell):
mywrite(fileout, "PRIMVEC", True)
for ilat in range(3):
latt = cell.lattice[:, ilat] * LEN_CONV["Bohr"][self.xsf_units]
mywrite(fileout, latt, True)
def _write_coord(self, fileout, ions):
mywrite(fileout, "PRIMCOORD", True)
mywrite(fileout, (len(ions), 1), True)
for iat, atom in enumerate(ions):
#mywrite(fileout, (atom.label, atom.pos.conv(self.xsf_units)), True)
mywrite(fileout, (atom.label, atom.pos*LEN_CONV["Bohr"][self.xsf_units]), True)
    def _write_datagrid(self, fileout, plot):
        ndim = plot.span  # 2D or 3D grid?
        if ndim < 2:
            return  # the XSF format doesn't support 1D data grids
val_per_line = 5
values = plot.get_values_flatarray(pad=1, order='F')
mywrite(fileout, "BEGIN_BLOCK_DATAGRID_{}D".format(ndim), True)
mywrite(fileout, "{}d_datagrid_from_pbcpy".format(ndim), True)
mywrite(fileout, "BEGIN_DATAGRID_{}D".format(ndim), True)
nnr = len(values)
origin = plot.grid.origin * LEN_CONV["Bohr"][self.xsf_units]
        if ndim == 3:
            mywrite(fileout, (plot.grid.nr[0] + 1, plot.grid.nr[1] + 1, plot.grid.nr[2] + 1), True)
        elif ndim == 2:
            mywrite(fileout, (plot.grid.nr[0] + 1, plot.grid.nr[1] + 1), True)
mywrite(fileout, origin, True) # TODO, there might be an actual origin if we're dealing with a custom cut of the grid
for ilat in range(ndim):
latt = plot.grid.lattice[:, ilat] * LEN_CONV["Bohr"][self.xsf_units]
mywrite(fileout, latt, True)
        nlines = nnr // val_per_line
        for iline in range(nlines):
            igrid = iline * val_per_line
            mywrite(fileout, values[igrid:igrid + val_per_line], True)
        igrid = nlines * val_per_line
        if igrid < nnr:  # write the remainder, if any
            mywrite(fileout, values[igrid:nnr], True)
mywrite(fileout, "END_DATAGRID_{}D".format(ndim), True)
mywrite(fileout, "END_BLOCK_DATAGRID_{}D".format(ndim), True)
def mywrite(fileobj, iterable, newline=False):
if newline:
fileobj.write('\n ')
if isinstance(iterable, (np.ndarray, list, tuple)):
for ele in iterable:
mywrite(fileobj, ele)
# fileobj.write(str(ele)+' ')
else:
fileobj.write(str(iterable) + ' ')
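# Usage sketch: a minimal sketch assuming a `system` object from the
# surrounding package that exposes .name, .cell, .ions and .field, matching
# the attributes write() reads above.
# xsf = XSF("density.xsf")
# xsf.write(system)  # writes the cell, the atoms and the 2D/3D data grid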
|
11592956
|
import handlers
from router import any_method
routes = [(any_method, "*.py", handlers.python_script_handler),
("GET", "*.asis", handlers.as_is_handler),
("GET", "*", handlers.file_handler),
]
|
11593021
|
load("@bazel_gazelle//:deps.bzl", "go_repository")
def kube_state_metrics_dependencies():
go_repository(
name = "com_github_kubernetes_kube_state_metrics",
commit = "4c0e83b3407e489eda34c26f7794ec69856ccd76", # v1.7.2
importpath = "k8s.io/kube-state-metrics",
)
|
11593078
|
import logging
import threading
from contextlib import closing, contextmanager
from datetime import datetime
from io import BytesIO
import dropbox
import six
from dropbox import Dropbox
from dropbox.exceptions import ApiError
from dropbox.files import (DownloadError, FileMetadata, FolderMetadata,
LookupError, WriteMode)
from fs import errors
from fs.base import FS
from fs.enums import ResourceType, Seek
from fs.info import Info
from fs.mode import Mode
from fs.subfs import SubFS
from fs.time import datetime_to_epoch, epoch_to_datetime
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class DropboxFile(BytesIO):
def __init__(self, dropbox, path, mode):
self.dropbox = dropbox
self.path = path
self.mode = mode
self._lock = threading.RLock()
initialData = None
self.rev = None
try:
metadata, response = self.dropbox.files_download(self.path)
self.rev = metadata.rev
with closing(response):
if self.mode.appending or (
self.mode.reading and not self.mode.truncate
):
initialData = response.content
except ApiError:
pass
super(DropboxFile, self).__init__(initialData)
if self.mode.appending and initialData is not None:
# seek to the end
self.seek(len(initialData))
if six.PY2:
def __length_hint__(self):
return len(self.getvalue())
else:
def __length_hint__(self):
return self.getbuffer().nbytes
def truncate(self, size=None):
super(DropboxFile, self).truncate(size)
data_size = self.__length_hint__()
if size and data_size < size:
self.write(b"\0" * (size - data_size))
self.seek(data_size)
return size or data_size
def close(self):
if not self.mode.writing:
super(DropboxFile, self).close()
return
if self.rev is None:
writeMode = WriteMode("add")
else:
writeMode = WriteMode("update", self.rev)
metadata = self.dropbox.files_upload(
self.getvalue(),
self.path,
mode=writeMode,
autorename=False,
client_modified=datetime.utcnow(),
mute=False,
)
self.path = None
self.mode = None
self.dropbox = None
super(DropboxFile, self).close()
    def write(self, data):
        if not self.mode.writing:
            raise IOError("File is not in write mode")
        return super(DropboxFile, self).write(data)
    def read(self, size=None):
        if not self.mode.reading:
            raise IOError("File is not in read mode")
        return super(DropboxFile, self).read(size)
def readable(self):
return self.mode.reading
def writable(self):
return self.mode.writing
class DropboxFS(FS):
_meta = {
"case_insensitive": False,
"invalid_path_chars": "\0",
"network": True,
"read_only": False,
"thread_safe": True,
"unicode_paths": True,
"virtual": False,
}
def __init__(self, accessToken, session=None):
super(DropboxFS, self).__init__()
self._lock = threading.RLock()
self.dropbox = Dropbox(accessToken, session=session)
def fix_path(self, path):
if isinstance(path, bytes):
try:
path = path.decode("utf-8")
except AttributeError:
pass
if not path.startswith("/"):
path = "/" + path
if path == "." or path == "./":
path = "/"
path = self.validatepath(path)
return path
def __repr__(self):
return "<DropboxDriveFS>"
def _infoFromMetadata(self, metadata):
rawInfo = {
"basic": {
"name": metadata.name,
"is_dir": isinstance(metadata, FolderMetadata),
}
}
if isinstance(metadata, FileMetadata):
rawInfo.update(
{"details": {"size": metadata.size, "type": ResourceType.file}}
)
else:
rawInfo.update({"details": {"type": ResourceType.directory}})
return Info(rawInfo)
def getinfo(self, path, namespaces=None):
_path = self.fix_path(path)
if _path == "/":
info_dict = {
"basic": {"name": "", "is_dir": True},
"details": {"type": ResourceType.directory},
}
return Info(info_dict)
try:
metadata = self.dropbox.files_get_metadata(
_path, include_media_info=True
)
except ApiError as e:
raise errors.ResourceNotFound(path=path, exc=e)
return self._infoFromMetadata(metadata)
def setinfo(self, path, info):
if not self.exists(path):
raise errors.ResourceNotFound(path)
def listdir(self, path):
_path = self.fix_path(path)
if _path == "/":
_path = ""
if not self.exists(_path):
raise errors.ResourceNotFound(path)
meta = self.getinfo(_path)
if meta.is_file:
raise errors.DirectoryExpected(path)
result = self.dropbox.files_list_folder(_path, include_media_info=True)
allEntries = result.entries
while result.has_more:
result = self.dropbox.files_list_folder_continue(result.cursor)
allEntries += result.entries
return [x.name for x in allEntries]
def makedir(self, path, permissions=None, recreate=False):
path = self.fix_path(path)
if self.exists(path) and not recreate:
raise errors.DirectoryExists(path)
if path == "/":
return SubFS(self, path)
if self.exists(path):
meta = self.getinfo(path)
if meta.is_dir:
                if not recreate:
raise errors.DirectoryExists(path)
else:
return SubFS(self, path)
if meta.is_file:
raise errors.DirectoryExpected(path)
ppath = self.get_parent(path)
if not self.exists(ppath):
raise errors.ResourceNotFound(ppath)
        try:
            self.dropbox.files_create_folder_v2(path)
        except ApiError as e:
            raise errors.DirectoryExpected(path=path, exc=e)
return SubFS(self, path)
def openbin(self, path, mode="r", buffering=-1, **options):
path = self.fix_path(path)
_mode = Mode(mode)
mode = _mode
_mode.validate_bin()
_path = self.validatepath(path)
log.debug("openbin: %s, %s", path, mode)
with self._lock:
try:
info = self.getinfo(_path)
except errors.ResourceNotFound:
if not _mode.create:
raise
# Target doesn't exist and we're in create mode. Ensure the
# parent is an existing directory before we try to create a file
# in it.
parent_path = self.get_parent(_path)
# Can't use self.isdir() because it doesn't crash if the
# directory doesn't exist, and we don't want to stat a file twice
# if we can avoid it.
info = self.getinfo(parent_path)
if not info.is_dir:
raise errors.DirectoryExpected(parent_path)
return DropboxFile(self.dropbox, path, mode)
# Target exists.
if info.is_dir:
raise errors.FileExpected(path)
if _mode.exclusive:
raise errors.FileExists(path)
return DropboxFile(self.dropbox, path, mode)
def remove(self, path):
_path = self.fix_path(path)
try:
info = self.getinfo(path)
if info.is_dir:
raise errors.FileExpected(path=path)
self.dropbox.files_delete_v2(_path)
except ApiError as e:
if isinstance(e.error._value, LookupError):
raise errors.ResourceNotFound(path=path)
log.debug(e)
raise errors.FileExpected(path=path, exc=e)
def removedir(self, path):
_path = self.fix_path(path)
if _path == "/":
raise errors.RemoveRootError()
try:
info = self.getinfo(path)
if not info.is_dir:
raise errors.DirectoryExpected(path=path)
if len(self.listdir(path)) > 0:
raise errors.DirectoryNotEmpty(path=path)
self.dropbox.files_delete_v2(_path)
except ApiError as e:
if isinstance(e.error._value, LookupError):
raise errors.ResourceNotFound(path=path)
            raise errors.DirectoryExpected(path=path, exc=e)
def copy(self, src_path, dst_path, overwrite=False):
src_path = self.fix_path(src_path)
dst_path = self.fix_path(dst_path)
try:
src_meta = self.getinfo(src_path)
if src_meta.is_dir:
raise errors.FileExpected(src_path)
        except ApiError as e:
            raise errors.ResourceNotFound(src_path, exc=e)
dst_meta = None
try:
dst_meta = self.getinfo(dst_path)
except Exception as e:
pass
        if dst_meta is not None:
            if overwrite:
                self.remove(dst_path)
            else:
                raise errors.DestinationExists(dst_path)
parent_path = self.get_parent(dst_path)
if not self.exists(parent_path):
raise errors.ResourceNotFound(dst_path)
self.dropbox.files_copy_v2(src_path, dst_path)
def get_parent(self, dst_path):
import os
parent_path = os.path.abspath(os.path.join(dst_path, ".."))
return parent_path
def exists(self, path):
path = self.fix_path(path)
try:
self.getinfo(path)
return True
except errors.ResourceNotFound as e:
return False
def move(self, src_path, dst_path, overwrite=False):
_src_path = self.fix_path(src_path)
_dst_path = self.fix_path(dst_path)
if not self.getinfo(_src_path).is_file:
raise errors.FileExpected(src_path)
if not overwrite and self.exists(_dst_path):
raise errors.DestinationExists(dst_path)
if "/" in dst_path and not self.exists(self.get_parent(_dst_path)):
raise errors.ResourceNotFound(src_path)
with self._lock:
try:
if overwrite:
try:
# remove file anyways
self.dropbox.files_delete_v2(_dst_path)
except Exception as e:
pass
self.dropbox.files_move_v2(_src_path, _dst_path)
except ApiError as e:
raise errors.ResourceNotFound(src_path, exc=e)
def apierror_map(self, error):
log.debug(error)
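# Usage sketch: a minimal sketch assuming a valid Dropbox access token (the
# string below is a placeholder); exercises openbin() and listdir() from above.
if __name__ == "__main__":
    dbfs = DropboxFS("ACCESS_TOKEN")
    with dbfs.openbin("/notes.txt", "w") as f:
        f.write(b"hello dropbox")
    print(dbfs.listdir("/"))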
|
11593084
|
import itertools
from PIL import Image
from xml.dom import minidom
from xml.parsers.expat import ExpatError
def clear_logo_space(array, size, filename):
if filename is None:
return
# remove any data where logo is
    try:
        logo = Image.open(filename)
        logo_size = logo.size
        logo_array = logo.load()
        if logo_size == (size, size):
            neighbour4 = [(0, 0), (1, 0), (0, 1), (-1, 0), (0, -1)]
            for x, y in itertools.product(range(size), repeat=2):
                if logo_array[x, y][3] != 0:
                    for offset in neighbour4:
                        nx, ny = x + offset[0], y + offset[1]
                        # clamp to the image bounds so edge pixels don't wrap
                        # around (negative indices) or overflow
                        if 0 <= nx < size and 0 <= ny < size:
                            array[nx, ny] = 255
        else:
            print("Raster logo size mismatch, ignoring")
    except IOError as e:
        print("Error opening raster logo: [%s] Ignoring." % e.strerror)
def get_svg_logo(filename):
if filename is None:
return ''
    try:
        with open(filename) as logo_svg:
            try:
                dom = minidom.parse(logo_svg)
                svg_node = dom.getElementsByTagName('svg')[0]
                ignored_nodes = ['metadata', 'defs', 'sodipodi:namedview']
                logo_xml = "\n".join([n.toxml() for n in svg_node.childNodes
                                      if n.nodeName not in ignored_nodes])
                return logo_xml
            except ExpatError as e:
                print("Error parsing logo svg. [%s] Ignoring logo." % e)
            except IndexError:
                print("Error parsing logo svg: No <svg> node found. "
                      "Ignoring logo.")
    except IOError as e:
        print("Error opening logo: [%s] Ignoring." % e.strerror)
    return ''
|
11593089
|
from reaver.models.sc2.policy import SC2MultiPolicy
from reaver.models.sc2.fully_conv import build_fully_conv
|
11593090
|
import pyglet
from pyglet.gl import *
# from pyglet import clock
from pyglet.window import key
import pyshaders
import win32gui
# Window creation
style = pyglet.window.Window.WINDOW_STYLE_BORDERLESS
window = pyglet.window.Window(width=960, height=540,
style=style, resizable=False)
window.set_size(1920, 1080)
# set behind icons
progman = win32gui.FindWindow("Progman", None)
result = win32gui.SendMessageTimeout(progman, 0x052c, 0, 0, 0x0, 1000)
workerw = 0
def _enum_windows(tophandle, topparamhandle):
    p = win32gui.FindWindowEx(tophandle, 0, "SHELLDLL_DefView", None)
    if p != 0:
        workerw = win32gui.FindWindowEx(0, tophandle, "WorkerW", None)
        pyglet_hwnd = window._hwnd
        # pyglet_hdc = win32gui.GetWindowDC(pyglet_hwnd)
        # reparent only once the WorkerW behind the icons has been found
        win32gui.SetParent(pyglet_hwnd, workerw)
    return True
win32gui.EnumWindows(_enum_windows, 0) # sets window behind icons
# Shader creation
vert = './shader/vert.glsl'
# frag = './shader/frag/7.glsl'
# frag = './shader/frag/9.glsl'
# frag = './shader/frag/10.glsl'
# frag = './shader/frag/11.glsl'
# frag = './shader/frag/pastel-psx.glsl'
# frag = './shader/frag/space2.glsl'
frag = './shader/frag/trees.glsl'
shader = pyshaders.from_files_names(vert, frag)
shader.use()
framerate = 60
timescale = 0.5
def _update_shader_time(dt):
if 'time' in shader.uniforms:
shader.uniforms.time += dt * timescale
pyglet.clock.schedule_interval(_update_shader_time, 1 / framerate)
vert_count = 70000
vert_mode = GL_TRIANGLES
vertex_list = pyglet.graphics.vertex_list(
vert_count, 'v3f', 'c4B', 't2f', 'n3f'
)
if 'vertexCount' in shader.uniforms:
shader.uniforms.vertexCount = vert_count
tris = pyglet.graphics.vertex_list(
6,
('v2f', (-1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1))
)
@window.event
def on_draw():
    # the GL names come from the star import of pyglet.gl above
    glClearColor(0, 0, 0, 0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    tris.draw(GL_TRIANGLES)
@window.event
def on_mouse_motion(x, y, dx, dy):
nx = -(-x + window.width / 2) / (window.width / 2)
ny = -(-y + window.height / 2) / (window.height / 2)
# normalized (-1 to 1)
if 'mouse' in shader.uniforms:
        shader.uniforms.mouse = (nx, ny)
@window.event
def on_key_press(symbol, modifiers):
if 'time' in shader.uniforms:
print(shader.uniforms.time)
if symbol == key.Q:
pyglet.app.exit()
@window.event
def on_resize(width, height):
if 'resolution' in shader.uniforms:
shader.uniforms.resolution = (width, height)
if __name__ == '__main__':
pyglet.app.run()
|
11593121
|
import os
import sys
from PyQt5.QtCore import QUrl, QObject
from PyQt5.QtWidgets import QApplication
from PyQt5.QtQml import QQmlApplicationEngine
from UI import classres
os.environ['QT_QUICK_CONTROLS_STYLE'] = "Imagine"
app = QApplication(sys.argv)
# Create QML engine
engine = QQmlApplicationEngine()
# Load the qml file into the engine
engine.load(QUrl('qrc:/UI/qml/main.qml'))
# Qml file error handling
if not engine.rootObjects():
sys.exit(-1)
# Send QT_QUICK_CONTROLS_STYLE to main qml (only for demonstration)
qtquick2Themes = engine.rootObjects()[0].findChild(
QObject,
'qtquick2Themes'
)
qtquick2Themes.setProperty('text', os.environ['QT_QUICK_CONTROLS_STYLE'])
sys.exit(app.exec_())
|
11593211
|
import click
from graviteeio_cli.exeptions import GraviteeioError
from graviteeio_cli.http_client.apim.api import ApiClient
from graviteeio_cli.services import lint_service
from graviteeio_cli.lint.types.document import DocumentType
from graviteeio_cli.core.config import GraviteeioConfig
@click.command(short_help="Create/Update an API from spec.")
@click.option('--api', 'api_id',
help='API id',
required=False)
@click.option('--spec', '-sf', 'spec_file', type=click.Path(exists=True), required=True,
help="Spec file (Swagger 2.0 / OAS 3.0)")
@click.pass_obj
def apply(obj, api_id, spec_file):
"""
Allow to create/update an API from spec API like Swagger or OpenApiSpec (OAS)
"""
api_client: ApiClient = obj['api_client']
gio_config: GraviteeioConfig = obj['config']
try:
with open(spec_file, 'r') as f:
api_spec = f.read()
except FileNotFoundError:
raise GraviteeioError("Missing values file {}".format(spec_file))
    # lint the spec before applying it
valid = lint_service.validate_from_file(spec_file, api_spec, DocumentType.oas, gio_config.linter_conf)
if not valid:
click.echo(click.style(" oas specification has not been applied", fg="red"))
return
if api_id:
resp = api_client.update_oas(api_id, api_spec)
click.echo("API {} is updated".format(api_id))
else:
click.echo("Start Create")
resp = api_client.create_oas(api_spec)
click.echo("API has been created with id {}".format(resp["id"]))
|
11593258
|
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView
from django.views.generic import UpdateView
from django.views.generic import DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from search_views.search import SearchListView
from drdown.users.models.model_patient import Patient
from drdown.users.models.model_health_team import HealthTeam
from search_views.filters import BaseFilter
from drdown.appointments.models import AppointmentRequest
from drdown.appointments.forms.requests_form import RequestSearchForm, \
RequestForm
class RequestFilter(LoginRequiredMixin, BaseFilter):
search_fields = {
'search_speciality': ['speciality'],
'search_name': ['doctor__user__name', 'patient__user__name'],
}
class RequestListView(LoginRequiredMixin, SearchListView):
model = AppointmentRequest
template_name = 'appointments/request_list.html'
form_class = RequestSearchForm
filter_class = RequestFilter
paginate_by = 10
def prepare_queryset(self, request):
user = request.user
if hasattr(user, 'patient'):
queryset = AppointmentRequest.objects.filter(
patient=user.patient
).order_by('id')
elif hasattr(user, 'responsible'):
queryset = AppointmentRequest.objects.filter(
patient__in=user.responsible.patient_set.all()
).order_by('id')
elif hasattr(user, 'employee'):
queryset = AppointmentRequest.objects.filter(
).order_by('risk', 'id')
else:
queryset = AppointmentRequest.objects.none()
return queryset
def get_queryset(self):
return self.prepare_queryset(self.request)
class RequestCreateView(LoginRequiredMixin, CreateView):
model = AppointmentRequest
template_name = 'appointments/request_form.html'
form_class = RequestForm
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
def form_valid(self, form):
        speciality = form.instance.speciality
        risk = 5
        if speciality == AppointmentRequest.CARDIOLOGY:
            risk = form.instance.patient.risk.priority_cardiology
        elif speciality == AppointmentRequest.NEUROLOGY:
            risk = form.instance.patient.risk.priority_neurology
        elif speciality == AppointmentRequest.PEDIATRICS:
            risk = form.instance.patient.risk.priority_pediatrics
        elif speciality == AppointmentRequest.SPEECH_THERAPHY:
            risk = form.instance.patient.risk.priority_speech_theraphy
        elif speciality == AppointmentRequest.PHYSIOTHERAPY:
            risk = form.instance.patient.risk.priority_physiotherapy
        elif speciality == AppointmentRequest.PSYCHOLOGY:
            risk = form.instance.patient.risk.priority_psychology
        elif speciality == AppointmentRequest.GENERAL_PRACTITIONER:
            risk = form.instance.patient.risk.priority_general_practitioner
        form.instance.risk = risk
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super(RequestCreateView, self).get_context_data(**kwargs)
context['health_team'] = HealthTeam.objects.all()
if hasattr(self.request.user, 'patient'):
context['patients'] = Patient.objects.filter(
user=self.request.user)
elif hasattr(self.request.user, 'responsible'):
context['patients'] = \
self.request.user.responsible.patient_set.all()
return context
def load_doctors(request):
speciality = request.GET.get('speciality')
doctors = HealthTeam.objects.filter(
speciality=speciality
).order_by('user__name')
return render(request,
'appointments/doctors_dropdown_list_options.html',
{'doctors': doctors}
)
class RequestUpdateView(LoginRequiredMixin, UpdateView):
model = AppointmentRequest
template_name = 'appointments/request_form.html'
fields = [
'speciality',
'doctor',
'patient',
'shift',
'day',
'motive',
]
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
class RequestDeleteView(LoginRequiredMixin, DeleteView):
model = AppointmentRequest
template_name = 'appointments/request_confirm_delete.html'
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
class RequestUpdateStatusView(LoginRequiredMixin, UpdateView):
model = AppointmentRequest
template_name = 'appointments/request_confirm_cancel.html'
fields = ['observation']
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
    def form_valid(self, form):
        form.instance.status = AppointmentRequest.DECLINED
        # super().form_valid() saves the form, so no explicit save() is needed here
        return super(RequestUpdateStatusView, self).form_valid(form)
class RequestAfterResultDeleteView(LoginRequiredMixin, DeleteView):
model = AppointmentRequest
template_name = 'appointments/request_after_result_confirm_delete.html'
success_url = reverse_lazy(
viewname='appointments:list_requests',
)
pk_url_kwarg = 'request_pk'
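# URL wiring sketch: an assumption matching the `appointments:list_requests`
# viewname and the `request_pk` kwarg used above; the real patterns live in
# the app's urls.py.
# urlpatterns = [
#     path('requests/', RequestListView.as_view(), name='list_requests'),
#     path('requests/new/', RequestCreateView.as_view(), name='create_request'),
#     path('requests/<int:request_pk>/delete/', RequestDeleteView.as_view(), name='delete_request'),
# ]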
|
11593274
|
import sys
import os
sys.path.append(os.getcwd())
from models.config import Config
from models.resunet_conv8_vocals.modules import *
import torch.utils
import torch.utils.data
import torch.nn.functional as F
from utils.f_helper import FDomainHelper
from torchlibrosa.stft import magphase
import math
import numpy as np
import pytorch_lightning as pl
from torchlibrosa import STFT
from utils.overlapadd import LambdaOverlapAdd
from utils.file_io import *
class L1(nn.Module):
def __init__(self):
super(L1, self).__init__()
self.loss = torch.nn.L1Loss()
def __call__(self, output, target):
return self.loss(output,target)
class L1_Wav_L1_Sp(nn.Module):
def __init__(self):
super(L1_Wav_L1_Sp, self).__init__()
self.f_helper = FDomainHelper()
self.window_size = 2048
hop_size = 441
center = True
pad_mode = "reflect"
window = "hann"
self.l1 = L1()
self.stft = STFT(
n_fft=self.window_size,
hop_length=hop_size,
win_length=self.window_size,
window=window,
center=center,
pad_mode=pad_mode,
freeze_parameters=True,
)
    def __call__(self, output, target, alpha_t=1.0):
        wav_loss = self.l1(output, target)
        if alpha_t < 1:
            sp_loss = self.l1(
                self.f_helper.wav_to_spectrogram(output, eps=1e-8),
                self.f_helper.wav_to_spectrogram(target, eps=1e-8)
            )
            sp_loss /= math.sqrt(self.window_size)
        else:
            sp_loss = 0.0
        return alpha_t * wav_loss + (1 - alpha_t) * sp_loss
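# Loss usage sketch (shape assumption: waveforms shaped (batch, channels,
# samples); alpha_t blends waveform L1 against spectrogram L1 per __call__):
# criterion = L1_Wav_L1_Sp()
# loss = criterion(estimate, target, alpha_t=0.5)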
class UNetResComplex_100Mb(pl.LightningModule):
def __init__(self, channels, target, nsrc=1, subband=4, use_lsd_loss=False,
lr=0.002, gamma=0.9,
batchsize=None, frame_length=None,
sample_rate=None,
warm_up_steps=1000, reduce_lr_steps=15000,
                 # data
                 check_val_every_n_epoch=5,  # within a validation run, how many samples get saved
                 ):
# sub4 52.041G 66.272M
super(UNetResComplex_100Mb, self).__init__()
window_size = 2048
hop_size = 441
        center = True
pad_mode = 'reflect'
window = 'hann'
activation = 'relu'
momentum = 0.01
freeze_parameters = True
self.use_lsd_loss = use_lsd_loss
self.save_hyperparameters()
self.nsrc = nsrc
self.subband = subband
self.channels = channels
self.lr = lr
self.gamma = gamma
self.sample_rate = sample_rate
self.batchsize = batchsize
self.frame_length = frame_length
# self.hparams['channels'] = 2
self.target = target
self.wav_spec_loss = L1_Wav_L1_Sp()
# self.lsd_loss = get_loss_function("lsd")
self.train_step = 0
self.val_step = 0
self.check_val_every_n_epoch = check_val_every_n_epoch
self.val_result_save_dir = None
self.val_result_save_dir_step = None
        self.downsample_ratio = 2 ** 6  # This number equals 2^{#encoder_blocks}
self.f_helper = FDomainHelper(
window_size=window_size,
hop_size=hop_size,
center=center,
pad_mode=pad_mode,
window=window,
freeze_parameters=freeze_parameters,
subband=self.subband if(self.subband != 1) else None,
root=Config.ROOT
)
        if subband == 8:
            self.bn0 = nn.BatchNorm2d(129, momentum=momentum)
        elif subband == 4:
            self.bn0 = nn.BatchNorm2d(257, momentum=momentum)
        elif subband == 2:
            self.bn0 = nn.BatchNorm2d(513, momentum=momentum)
        else:
            self.bn0 = nn.BatchNorm2d(1025, momentum=momentum)
self.encoder_block1 = EncoderBlockRes1(in_channels=channels * nsrc * subband, out_channels=32,
downsample=(2, 2), activation=activation, momentum=momentum)
self.encoder_block2 = EncoderBlockRes1(in_channels=32, out_channels=64,
downsample=(2, 2), activation=activation, momentum=momentum)
self.encoder_block3 = EncoderBlockRes1(in_channels=64, out_channels=128,
downsample=(2, 2), activation=activation, momentum=momentum)
self.encoder_block4 = EncoderBlockRes1(in_channels=128, out_channels=256,
downsample=(2, 2), activation=activation, momentum=momentum)
self.encoder_block5 = EncoderBlockRes1(in_channels=256, out_channels=384,
downsample=(2, 2), activation=activation, momentum=momentum)
self.encoder_block6 = EncoderBlockRes1(in_channels=384, out_channels=384,
downsample=(2, 2), activation=activation, momentum=momentum)
self.conv_block7 = EncoderBlockRes1(in_channels=384, out_channels=384,
downsample=(1,1), activation=activation, momentum=momentum)
self.conv_block8 = EncoderBlockRes1(in_channels=384, out_channels=384,
downsample=(1,1), activation=activation, momentum=momentum)
self.conv_block9 = EncoderBlockRes1(in_channels=384, out_channels=384,
downsample=(1,1), activation=activation, momentum=momentum)
self.conv_block10 = EncoderBlockRes1(in_channels=384, out_channels=384,
downsample=(1,1), activation=activation, momentum=momentum)
self.decoder_block1 = DecoderBlockRes1(in_channels=384, out_channels=384,
stride=(2, 2), activation=activation, momentum=momentum)
self.decoder_block2 = DecoderBlockRes1(in_channels=384, out_channels=384,
stride=(2, 2), activation=activation, momentum=momentum)
self.decoder_block3 = DecoderBlockRes1(in_channels=384, out_channels=256,
stride=(2, 2), activation=activation, momentum=momentum)
self.decoder_block4 = DecoderBlockRes1(in_channels=256, out_channels=128,
stride=(2, 2), activation=activation, momentum=momentum)
self.decoder_block5 = DecoderBlockRes1(in_channels=128, out_channels=64,
stride=(2, 2), activation=activation, momentum=momentum)
self.decoder_block6 = DecoderBlockRes1(in_channels=64, out_channels=32,
stride=(2, 2), activation=activation, momentum=momentum)
self.after_conv_block1 = EncoderBlockRes1(in_channels=32, out_channels=32, downsample=(1,1),
activation=activation, momentum=momentum)
self.after_conv2 = nn.Conv2d(in_channels=32, out_channels=channels * nsrc * 4 * subband,
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
self.init_weights()
self.lr_lambda = lambda step: self.get_lr_lambda(step,
gamma = self.gamma,
warm_up_steps=warm_up_steps,
reduce_lr_steps=reduce_lr_steps)
def get_lr_lambda(self,step, gamma, warm_up_steps, reduce_lr_steps):
r"""Get lr_lambda for LambdaLR. E.g.,
.. code-block: python
lr_lambda = lambda step: get_lr_lambda(step, warm_up_steps=1000, reduce_lr_steps=10000)
from torch.optim.lr_scheduler import LambdaLR
LambdaLR(optimizer, lr_lambda)
"""
if step <= warm_up_steps:
return step / warm_up_steps
else:
return gamma ** (step // reduce_lr_steps)
def init_weights(self):
init_bn(self.bn0)
init_layer(self.after_conv2)
def forward(self, input):
"""
Args:
input: (batch_size, channels_num, segment_samples)
Outputs:
output_dict: {
'wav': (batch_size, channels_num, segment_samples),
'sp': (batch_size, channels_num, time_steps, freq_bins)}
"""
# sp, cos_in, sin_in = self.f_helper.wav_to_spectrogram_phase(input)
sp, cos_in, sin_in = self.f_helper.wav_to_mag_phase_subband_spectrogram(input)
"""(batch_size, channels_num, time_steps, freq_bins)"""
# Batch normalization
x = sp.transpose(1, 3)
"""(batch_size, freq_bins, time_steps, channels_num)"""
x = self.bn0(x) # normalization to freq bins
"""(batch_size, freq_bins, time_steps, channels_num)"""
x = x.transpose(1, 3)
"""(batch_size, chanenls, time_steps, freq_bins)"""
# Pad spectrogram to be evenly divided by downsample ratio.
origin_len = x.shape[2] # time_steps
pad_len = int(np.ceil(x.shape[2] / self.downsample_ratio)) * self.downsample_ratio - origin_len
x = F.pad(x, pad=(0, 0, 0, pad_len))
cos_in = F.pad(cos_in, pad=(0, 0, 0, pad_len))
sin_in = F.pad(sin_in, pad=(0, 0, 0, pad_len))
"""(batch_size, channels, padded_time_steps, freq_bins)"""
# Let frequency bins be evenly divided by 2, e.g., 513 -> 512
x = x[..., 0: x.shape[-1] - 1] # (bs, channels, T, F)
(N_, C_, T_, F_) = x.shape
# UNet
(x1_pool, x1) = self.encoder_block1(x) # x1_pool: (bs, 32, T / 2, F / 2)
(x2_pool, x2) = self.encoder_block2(x1_pool) # x2_pool: (bs, 64, T / 4, F / 4)
(x3_pool, x3) = self.encoder_block3(x2_pool) # x3_pool: (bs, 128, T / 8, F / 8)
(x4_pool, x4) = self.encoder_block4(x3_pool) # x4_pool: (bs, 256, T / 16, F / 16)
(x5_pool, x5) = self.encoder_block5(x4_pool) # x5_pool: (bs, 512, T / 32, F / 32)
(x6_pool, x6) = self.encoder_block6(x5_pool) # x6_pool: (bs, 1024, T / 64, F / 64)
x_center,_ = self.conv_block7(x6_pool) # (bs, 2048, T / 64, F / 64)
x_center,_ = self.conv_block8(x_center) # (bs, 2048, T / 64, F / 64)
x_center,_ = self.conv_block9(x_center) # (bs, 2048, T / 64, F / 64)
x_center,_ = self.conv_block10(x_center) # (bs, 2048, T / 64, F / 64)
x7 = self.decoder_block1(x_center, x6) # (bs, 1024, T / 32, F / 32)
x8 = self.decoder_block2(x7, x5) # (bs, 512, T / 16, F / 16)
x9 = self.decoder_block3(x8, x4) # (bs, 256, T / 8, F / 8)
x10 = self.decoder_block4(x9, x3) # (bs, 128, T / 4, F / 4)
x11 = self.decoder_block5(x10, x2) # (bs, 64, T / 2, F / 2)
x12 = self.decoder_block6(x11, x1) # (bs, 32, T, F)
x,_ = self.after_conv_block1(x12) # (bs, 32, T, F)
x = self.after_conv2(x) # (bs, channels, T, F)
# Recover shape
x = F.pad(x, pad=(0, 1))
x = x[:, :, 0: origin_len, :]
cos_in = cos_in[:, :, 0:origin_len, :]
sin_in = sin_in[:, :, 0:origin_len, :]
        mag, real, imag, residual = [], [], [], []
        cos, sin, cos_out, sin_out = [], [], [], []
        sub_channels = self.subband * self.channels * self.nsrc
        for i in range(sub_channels):
# print(i + sub_channels, i + 1 + sub_channels)
real.append(x[:,i+sub_channels:i+1+sub_channels,:,:])
# print(i + sub_channels*2, i + 1 + sub_channels*2)
imag.append(x[:, i + sub_channels*2:i + 1 + sub_channels*2, :, :])
# print(i + sub_channels * 3, i + 1 + sub_channels * 3)
residual.append(x[:, i + sub_channels*3:i + 1 + sub_channels*3, :, :])
mag.append(torch.relu(torch.sigmoid(x[:, i:i + 1, :, :]) * sp[:, i:i + 1, :, :] + residual[-1]))
(_, sub_cos, sub_sin) = magphase(real[-1],imag[-1])
cos.append(sub_cos)
sin.append(sub_sin)
cos_out.append(cos_in[:,i:i+1,:,:]*sub_cos-sin_in[:,i:i+1,:,:]*sub_sin)
sin_out.append(sin_in[:,i:i+1,:,:]*sub_cos+cos_in[:,i:i+1,:,:]*sub_sin)
length = input.shape[2] // self.subband
        mag = torch.cat(mag, dim=1)
        cos_out = torch.cat(cos_out, dim=1)
        sin_out = torch.cat(sin_out, dim=1)
        wav_out = self.f_helper.mag_phase_subband_spectrogram_to_wav(
            sps=mag, coss=cos_out, sins=sin_out, length=length)
        pad_tail = input.size()[-1] - wav_out.size()[-1]
        wav_out = torch.nn.functional.pad(wav_out, (0, pad_tail))
output_dict = {'wav': wav_out}
return output_dict
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr, amsgrad=True)
# StepLR = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=self.gamma)
scheduler = {
'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer, self.lr_lambda),
'interval': 'step',
'frequency': 1,
}
return [optimizer], [scheduler]
    def preprocess(self, batch, train=False):
        if train:
            vocal = batch['front']
            acc = batch['background']
            vocal, acc = vocal.float().permute(0, 2, 1), acc.float().permute(0, 2, 1)
            mixture = vocal + acc
            return vocal, acc, mixture
        else:  # during test or validation
            vocal = batch[self.target]
            if self.target == "bass": acc = batch['no_bass']
            elif self.target == "other": acc = batch['no_other']
            elif self.target == "drums": acc = batch['no_drums']
            elif self.target == "vocals": acc = batch['acc']
            else: acc = None
            vocal, acc = vocal.float().permute(0, 2, 1), acc.float().permute(0, 2, 1)
            if acc.size()[-1] != vocal.size()[-1]:
                min_length = min(acc.size()[-1], vocal.size()[-1])
                acc = acc[..., :min_length]
                vocal = vocal[..., :min_length]
            mixture = acc + vocal
            return vocal, acc, mixture, batch['fname'][0]  # one sample per batch
def calc_loss(self, output, vocal):
l1 = self.wav_spec_loss(output, vocal)
self.log("loss", l1, on_step=True, on_epoch=True, logger=True, sync_dist=True)
return l1
def training_step(self, batch, batch_nb):
vocal, acc, mixture = self.preprocess(batch, train=True)
output = self(mixture)['wav']
loss = self.calc_loss(output, vocal)
self.train_step += 1
return {"loss": loss}
def validation_step(self, batch, batch_nb):
vocal, acc, mixture, fname = self.preprocess(batch)
continuous_nnet = LambdaOverlapAdd(
nnet=self,
n_src=self.channels * self.nsrc,
window_size=self.sample_rate * 20,
            in_margin=int(self.sample_rate * 1.5),
window="boxcar",
reorder_chunks=False,
enable_grad=False,
device=self.device
)
output = continuous_nnet.forward(mixture) # [bs, samples, channels]
loss = self.calc_loss(output, vocal)
        output = torch.transpose(output, 2, 1)
        os.makedirs(os.path.join(self.val_result_save_dir_step, str(fname)), exist_ok=True)
        save_wave((tensor2numpy(output) * 2 ** 15).astype(np.short),
                  fname=os.path.join(self.val_result_save_dir_step, str(fname), self.target + ".wav"))
        return {'val_loss': loss}
def validation_epoch_end(self, outputs):
# Use the default log function to gather info from gpus
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
self.log("val_loss", avg_loss, on_step=False, on_epoch=True, logger=True, sync_dist=True)
|
11593281
|
from database.main import session
from models.misc.levelup_stats import LevelUpStatsSchema
from models.misc.level_xp_requirement import LevelXpRequirementSchema
from decorators import run_once
from utils.helper import parse_int
@run_once
def load_character_level_stats() -> dict:
    """
    Return a dictionary mapping each level to the amount of stats a character
    should gain upon attaining that level.
    """
# Define these here as well. We can't import from constants because constants imports from here
KEY_LEVELUP_STATS_HEALTH = 'health'
KEY_LEVELUP_STATS_MANA = 'mana'
KEY_LEVELUP_STATS_STRENGTH = 'strength'
KEY_LEVELUP_STATS_ARMOR = 'armor'
KEY_LEVELUP_STATS_AGILITY = 'agility'
level_stats = {}
loaded_stats = session.query(LevelUpStatsSchema).all()
for stat in loaded_stats:
level = stat.level
health = parse_int(stat.health)
mana = parse_int(stat.mana)
strength = parse_int(stat.strength)
agility = parse_int(stat.agility)
armor = parse_int(stat.armor)
level_stats[level] = {
KEY_LEVELUP_STATS_HEALTH: health,
KEY_LEVELUP_STATS_MANA: mana,
KEY_LEVELUP_STATS_AGILITY: agility,
KEY_LEVELUP_STATS_STRENGTH: strength,
KEY_LEVELUP_STATS_ARMOR: armor
}
return level_stats
@run_once
def load_character_xp_requirements() -> dict:
    """
    Load the XP needed to reach each level, as a mapping of
    level -> xp_required.
    """
loaded_xp_reqs = session.query(LevelXpRequirementSchema).all()
return {xp_req.level: xp_req.xp_required for xp_req in loaded_xp_reqs}
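# Illustrative return value (actual numbers come from the database):
#     {1: 0, 2: 400, 3: 900, 4: 1400}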
|
11593291
|
import pytest
from cloudify.models_states import BlueprintUploadState
from cloudify_rest_client.exceptions import CloudifyClientError
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
from packaging.version import parse as parse_version
pytestmark = pytest.mark.group_deployments
class BlueprintUploadAutouploadPluginsTest(AgentlessTestCase):
def test_blueprint_upload_autoupload_plugins(self):
self._upload_and_verify_blueprint(
'bp',
'blueprint_with_plugins_from_catalog.yaml')
plugins = {p.package_name: p.package_version
for p in self.client.plugins.list()}
self.assertEqual(plugins["cloudify-openstack-plugin"], "3.2.16")
self.assertIn("cloudify-utilities-plugin", plugins)
self.assertGreater(parse_version(plugins["cloudify-fabric-plugin"]),
parse_version("2"))
def test_blueprint_upload_autoupload_plugins_bad_version(self):
blueprint_id = 'bp_bad_version'
blueprint_filename = 'blueprint_with_plugins_from_' \
'catalog_bad_version.yaml'
self.assertRaisesRegexp(
CloudifyClientError,
'Couldn\'t find plugin "cloudify-openstack-plugin" with.*=3.1.99',
self.client.blueprints.upload,
resource('dsl/{}'.format(blueprint_filename)),
entity_id=blueprint_id)
blueprint = self.client.blueprints.get(blueprint_id)
self.assertEqual(blueprint['state'], BlueprintUploadState.INVALID)
def test_blueprint_upload_autoupload_plugins_bad_plugin(self):
blueprint_id = 'bp_bad_plugin'
blueprint_filename = 'blueprint_with_plugins_from_' \
'catalog_bad_plugin.yaml'
self.assertRaisesRegexp(
CloudifyClientError,
'Couldn\'t find plugin "my-custom-plugin"',
self.client.blueprints.upload,
resource('dsl/{}'.format(blueprint_filename)),
entity_id=blueprint_id)
blueprint = self.client.blueprints.get(blueprint_id)
self.assertEqual(blueprint['state'], BlueprintUploadState.INVALID)
def test_blueprint_upload_autoupload_plugins_conflicting_versions(self):
blueprint_id = 'bp_two_plugin_versions'
blueprint_filename = 'blueprint_with_plugins_from_' \
'catalog_conflicting_versions.yaml'
self.assertRaisesRegexp(
CloudifyClientError,
'Could not merge \'plugins\' due to conflict on \'openstack\'',
self.client.blueprints.upload,
resource('dsl/{}'.format(blueprint_filename)),
entity_id=blueprint_id)
blueprint = self.client.blueprints.get(blueprint_id)
self.assertEqual(blueprint['state'], BlueprintUploadState.INVALID)
def test_blueprint_upload_autoupload_plugins_newer_version(self):
self._upload_and_verify_blueprint(
'bp1_os2',
'blueprint_with_plugins_from_catalog_os2.yaml')
plugin_versions = sorted(p.package_version
for p in self.client.plugins.list())
self.assertEqual(len(plugin_versions), 1)
self.assertRegex(plugin_versions[0], "2.*")
self._upload_and_verify_blueprint(
'bp1_os3',
'blueprint_with_plugins_from_catalog_os3.yaml')
plugin_versions = sorted(p.package_version for
p in self.client.plugins.list())
self.assertEqual(len(plugin_versions), 2)
self.assertRegex(plugin_versions[0], "2.*")
self.assertGreater(parse_version(plugin_versions[1]),
parse_version("3"))
def test_blueprint_upload_autoupload_plugins_older_version(self):
self._upload_and_verify_blueprint(
'bp1_os3',
'blueprint_with_plugins_from_catalog_os3.yaml')
plugin_versions = sorted(p.package_version
for p in self.client.plugins.list())
self.assertEqual(len(plugin_versions), 1)
self.assertGreater(parse_version(plugin_versions[0]),
parse_version("3"))
self._upload_and_verify_blueprint(
'bp1_os2',
'blueprint_with_plugins_from_catalog_os2.yaml')
plugin_versions = sorted(p.package_version
for p in self.client.plugins.list())
self.assertEqual(len(plugin_versions), 2)
self.assertRegex(plugin_versions[0], "2.*")
self.assertGreater(parse_version(plugin_versions[1]),
parse_version("3"))
def _upload_and_verify_blueprint(self, blueprint_id, blueprint_filename):
blueprint = self.client.blueprints.upload(
resource('dsl/{}'.format(blueprint_filename)),
entity_id=blueprint_id)
self.assertEqual(blueprint['state'], BlueprintUploadState.UPLOADED)
|
11593309
|
def bin_search(A, left, right, k):
    # Iterative binary search: return the index of k in the sorted list A
    # (searching within [left, right]), or -1 if k is not present.
    while left <= right:
        mid = (left + right) // 2
        if A[mid] == k:
            return mid
        elif A[mid] > k:
            right = mid - 1
        else:
            left = mid + 1
    return -1
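# Example: for A = [2, 4, 7, 9], bin_search(A, 0, 3, 7) returns 2 and
# bin_search(A, 0, 3, 5) returns -1.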
n = int(input())
arr = list(map(int, input().strip().split(' ')))
x = int(input())
print(bin_search(arr, 0, n - 1, x))
|
11593324
|
import click
import yaml
import json
from ckan_cloud_operator import logs
from ckan_cloud_operator.providers.ckan.db import migration as db_migration_manager
from ckan_cloud_operator.providers.ckan import manager
from .storage import cli as ckan_storage_cli
from .deployment import cli as ckan_deployment_cli
from .instance import cli as instance_cli
from .env import cli as env_cli
@click.group()
def ckan():
"""Manage CKAN Instances"""
pass
# ckan.add_command(ckan_storage_cli.storage)
ckan.add_command(ckan_deployment_cli.deployment)
ckan.add_command(instance_cli.instance)
ckan.add_command(env_cli.env)
@ckan.command()
@click.option('--environment-name', help='name of the environment to initialize (one of the result of `cco ckan env list`)')
def init(environment_name):
'''
Initialize ckan-cloud-operator for working with environment.
This command gets all the necessary credentials for working
with the given environment. Eg, gets and saves `kubeconfig`
file in `~/.kube/config` directory
Without flags initializes current working environment added with `cco ckan env add`
\b
cco ckan init --environment-name poc
    > POC environment was successfully initialized
'''
############################################################################
## Commands Below are not used for now and are making unnecessary noise ##
## I'm commenting them for now, but not deleting as they might be useful ##
## In future. Will bring them back as needed ##
############################################################################
#
# @ckan.command()
# @click.option('--interactive', is_flag=True)
# def initialize(interactive):
# manager.initialize(interactive=interactive)
# logs.exit_great_success()
#
#
# @ckan.command()
# @click.argument('OLD_SITE_ID')
# @click.argument('NEW_INSTANCE_ID', required=False)
# @click.argument('ROUTER_NAME', required=False)
# @click.option('--skip-gitlab', is_flag=True)
# @click.option('--force', is_flag=True)
# @click.option('--rerun', is_flag=True)
# @click.option('--recreate-dbs', is_flag=True)
# @click.option('--recreate', is_flag=True)
# @click.option('--recreate-instance', is_flag=True)
# @click.option('--skip-routes', is_flag=True)
# @click.option('--skip-solr', is_flag=True)
# @click.option('--skip-deployment', is_flag=True)
# @click.option('--no-db-proxy', is_flag=True)
# def migrate_deis_instance(old_site_id, new_instance_id, router_name, skip_gitlab, force, rerun, recreate_dbs, recreate,
# recreate_instance, skip_routes, skip_solr, skip_deployment, no_db_proxy):
# """Run a full end-to-end migration of an instasnce"""
# manager.migrate_deis_instance(old_site_id, new_instance_id, router_name, skip_gitlab, force, rerun, recreate_dbs,
# recreate, recreate_instance, skip_routes, skip_solr, skip_deployment, no_db_proxy)
# logs.exit_great_success()
#
#
# @ckan.command()
# @click.argument('OLD_SITE_ID')
# @click.option('--db-name')
# @click.option('--datastore-name')
# @click.option('--force', is_flag=True)
# @click.option('--rerun', is_flag=True)
# @click.option('--recreate-dbs', is_flag=True)
# @click.option('--dbs-suffix')
# @click.option('--skip-create-dbs', is_flag=True)
# @click.option('--skip-datastore-import', is_flag=True)
# def migrate_deis_dbs(old_site_id, db_name, datastore_name, force, rerun, recreate_dbs, dbs_suffix, skip_create_dbs,
# skip_datastore_import):
# migration_generator = db_migration_manager.migrate_deis_dbs(
# old_site_id, db_name, datastore_name, force=force, rerun=rerun, recreate_dbs=recreate_dbs, dbs_suffix=dbs_suffix,
# skip_create_dbs=skip_create_dbs, skip_datastore_import=skip_datastore_import
# )
# for event in migration_generator:
# db_migration_manager.print_event_exit_on_complete(
# event,
# f'{old_site_id} -> {db_name}, {datastore_name}'
# )
#
#
# @ckan.command()
# @click.option('--db-name')
# def migrate_list(db_name):
# for item in db_migration_manager.get()['items']:
# data = dict(item.get('spec', {}), **{'resource-name': item.get('metadata', {}).get('name')})
# if data.get('db-name') == db_name or not db_name:
# print(yaml.dump([data], default_flow_style=False))
#
#
# @ckan.command()
# @click.argument('MIGRATION_NAME', nargs=-1)
# @click.option('--delete-dbs', is_flag=True)
# def migrate_delete(migration_name, delete_dbs):
# for name in migration_name:
# db_migration_manager.delete(name, delete_dbs)
# logs.exit_great_success()
#
#
# @ckan.command()
# def get_all_dbs_users():
# dbs, users = manager.get_all_dbs_users()
# print(yaml.dump({
# 'dbs': [' | '.join(map(str, db)) for db in dbs],
# 'users': [' | '.join(map(str, user)) for user in users]
# }, default_flow_style=False))
#
#
# @ckan.command()
# @click.argument('INSTANCE_ID')
# def post_create_checks(instance_id):
# manager.post_create_checks(instance_id)
# logs.exit_great_success()
#
#
# @ckan.command()
# @click.argument('INSTANCE_ID')
# def admin_credentials(instance_id):
# logs.print_yaml_dump(manager.ckan_admin_credentials(instance_id))
#
#
# @ckan.command()
# @click.argument('OLD_SITE_ID')
# @click.option('-r', '--raw', is_flag=True)
# def db_migration_import_urls(old_site_id, raw):
# urls = db_migration_manager.get_db_import_urls(old_site_id)
# if raw:
# print(' '.join(urls))
# else:
# logs.print_yaml_dump(list(urls))
|
11593335
|
import re
from enum import unique
from pycsp3.classes.auxiliary.ptypes import auto, AbstractType, TypeXML
from pycsp3.classes.main.constraints import ConstraintUnmergeable
from pycsp3.classes.main.variables import Variable
from pycsp3.tools.inspector import checkType
from pycsp3.tools.utilities import flatten
@unique
class TypeVarHeuristic(AbstractType):
LEXICO, DOM, DEG, DDEG, WDEG, IMPACT, ACTIVITY = auto(7)
@unique
class TypeValHeuristic(AbstractType):
CONFLICTS, VALUE = auto(2)
@unique
class TypeConsistency(AbstractType):
FC, BC, AC, SAC, FPWC, PC, CDC, FDAC, EDAC, VAC = auto(10)
def __str__(self):
return self.name
@unique
class TypeBranching(AbstractType):
TWO_WAY, D_WAY = auto(2)
def __str__(self):
return self.name.replace("_", "-").replace("TWO", "2").lower()
@unique
class TypeRestart(AbstractType):
LUBY, GEOMETRIC = auto(2)
@unique
class TypeAnnArg(AbstractType):
TYPE = auto()
STATIC, RANDOM, MIN, MAX = auto(4)
LC = auto()
ORDER = auto()
CONSISTENCY, BRANCHING, CUTOFF, FACTOR = auto(4)
START_INDEX, START_ROW_INDEX, START_COL_INDEX = auto(3)
class Annotation(ConstraintUnmergeable):
pass
class AnnotationDecision(Annotation):
def __init__(self, variables):
super().__init__(TypeXML.DECISION)
variables = flatten(variables)
checkType(variables, [Variable])
self.arg(TypeXML.DECISION, variables)
class AnnotationOutput(Annotation):
def __init__(self, variables):
super().__init__(TypeXML.OUTPUT)
variables = flatten(variables)
checkType(variables, [Variable])
self.arg(TypeXML.OUTPUT, variables)
class AnnotationHeuristic(Annotation):
def __init__(self, name):
super().__init__(name)
# To keep the good order
def add_arguments(self, random_part, min_part, max_part):
if random_part:
self.arg(TypeAnnArg.RANDOM, random_part[0] if random_part[0] else [None])
if min_part:
self.arg(TypeAnnArg.MIN, min_part[0] if min_part[0] else [None], attributes=[(TypeAnnArg.TYPE, min_part[1])])
if max_part:
self.arg(TypeAnnArg.MAX, max_part[0] if max_part[0] else [None], attributes=[(TypeAnnArg.TYPE, max_part[1])])
class AnnotationVarHeuristic(AnnotationHeuristic):
def __init__(self, h):
super().__init__(TypeXML.VAR_HEURISTIC)
checkType(h, VarHeuristic)
self.attributes.append((TypeAnnArg.LC, h.lc))
        if h.staticPart:
            self.arg(TypeAnnArg.STATIC, h.staticPart)
        self.add_arguments(h.randomPart, h.minPart, h.maxPart)
class AnnotationValHeuristic(AnnotationHeuristic):
def __init__(self, h):
super().__init__(TypeXML.VAL_HEURISTIC)
checkType(h, ValHeuristic)
        if h.staticPart:
            self.arg(TypeAnnArg.STATIC, h.staticPart[0], attributes=[(TypeAnnArg.ORDER, " ".join(str(ele) for ele in h.staticPart[1]))])
        self.add_arguments(h.randomPart, h.minPart, h.maxPart)
class AnnotationFiltering(Annotation):
def __init__(self, consistency):
super().__init__(TypeXML.FILTERING)
checkType(consistency, TypeConsistency)
self.attributes.append((TypeAnnArg.TYPE, consistency))
class AnnotationPrepro(Annotation):
def __init__(self, consistency):
super().__init__(TypeXML.PREPRO)
checkType(consistency, TypeConsistency)
self.attributes.append((TypeAnnArg.CONSISTENCY, consistency))
class AnnotationSearch(Annotation):
def __init__(self, search):
super().__init__(TypeXML.SEARCH)
checkType(search, Search)
self.attributes = [(TypeAnnArg.CONSISTENCY, search.consistency), (TypeAnnArg.BRANCHING, search.branching)]
class AnnotationRestarts(Annotation):
def __init__(self, restarts):
super().__init__(TypeXML.RESTARTS)
checkType(restarts, Restarts)
self.attributes = [(TypeAnnArg.TYPE, restarts.type), (TypeAnnArg.CUTOFF, restarts.cutoff), (TypeAnnArg.FACTOR, restarts.factor)]
''' Annotations classes '''
class Search:
def __init__(self, *, consistency=None, branching=None):
assert consistency or branching
assert isinstance(consistency, (TypeConsistency, type(None))) and isinstance(branching, (TypeBranching, type(None)))
self.consistency = consistency
self.branching = branching
class Restarts:
def __init__(self, *, type, cutoff, factor=1):
assert isinstance(type, TypeRestart) and isinstance(cutoff, int) and isinstance(factor, float)
self.type = type
self.cutoff = cutoff
self.factor = factor
class VHeuristic:
def __init__(self):
self.staticPart = None
self.randomPart = None
self.minPart = None
self.maxPart = None
def random(self, variables=None):
variables = flatten(variables)
checkType(variables, ([Variable], type(None)))
self.randomPart = (variables,)
return self
def _opt(self, variables, type):
if variables:
variables = flatten(variables)
checkType(variables, [Variable])
types = TypeVarHeuristic if isinstance(self, VarHeuristic) else TypeValHeuristic
assert isinstance(type, str) and all(p in [t.name for t in types] for p in re.split(r'/|\+', type)), "Bad value for " + type
return variables, type
def min(self, variables=None, *, type):
self.minPart = self._opt(variables, type)
return self
def max(self, variables=None, *, type):
self.maxPart = self._opt(variables, type)
return self
class VarHeuristic(VHeuristic):
def __init__(self, *, lc=None):
super().__init__()
self.lc = lc
def static(self, variables):
variables = flatten(variables)
checkType(variables, [Variable])
self.staticPart = variables
return self
class ValHeuristic(VHeuristic):
def __init__(self):
super().__init__()
def static(self, variables, *, order):
variables = flatten(variables)
checkType(variables, [Variable])
order = flatten(order)
checkType(order, [int])
self.staticPart = (variables, order)
return self
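# Illustrative builder usage (a sketch; `x` and `y` are assumed to be lists of
# Variable objects created elsewhere with pycsp3):
#     h = VarHeuristic(lc=2).static(x).min(y, type='DOM/WDEG')
#     ann = AnnotationVarHeuristic(h)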
|
11593349
|
import unittest, random, sys, time, re, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util
import h2o_print as h2p, h2o_gbm, h2o_summ
DO_PLOT = getpass.getuser()=='kevin'
DO_MEDIAN = False
MAX_QBINS = 1000
MULTI_PASS = 1
DO_SCIPY_COMPARE = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=14)
@classmethod
def tearDownClass(cls):
# h2o.sleep(3600)
h2o.tear_down_cloud()
def test_quant_cols(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
('home-0xdiag-datasets', 'airlines/year2013.csv', None, None, 'cE', 300),
]
# h2b.browseTheCloud()
trial = 0
for (bucket, csvPathname, iColCount, oColCount, hex_key, timeoutSecs) in tryList:
xList = []
eList = []
fList = []
# PARSE*******************************************************
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=200, doSummary=False)
csvPathnameFull = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(key=parseResult['destination_key'])
h2o_cmd.infoFromInspect(inspect, csvPathname)
numRows = inspect['numRows']
numCols = inspect['numCols']
            if not iColCount:
                iColCount = 0
if not oColCount:
oColCount = numCols
colCount = iColCount + oColCount
            for i in range(0, numCols):
                print "Column", i, "summary"
                h2o_cmd.runSummary(key=hex_key, max_qbins=1, cols=i)
# print h2o.dump_json(inspect)
levels = h2o.nodes[0].levels(source=hex_key)
# print "levels result:", h2o.dump_json(levels)
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
# error if any col has constant values
if len(constantValuesDict) != 0:
# raise Exception("Probably got a col NA'ed and constant values as a result %s" % constantValuesDict)
print "Probably got a col NA'ed and constant values as a result %s" % constantValuesDict
# start after the last input col
            levels = h2o.nodes[0].levels(source=hex_key)
l = levels['levels']
for column in range(iColCount, iColCount+oColCount):
if l[column]:
print "Skipping", column, "because it's enum (says levels)"
continue
# QUANTILE*******************************************************
quantile = 0.5 if DO_MEDIAN else .999
# first output col. always fed by an exec cut, so 0?
start = time.time()
# file has headers. use col index
q = h2o.nodes[0].quantiles(source_key=hex_key, column=column,
quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=1)
qresult = q['result']
h2p.red_print("result:", q['result'], "quantile", quantile,
"interpolated:", q['interpolated'], "iterations", q['iterations'])
elapsed = time.time() - start
print "quantile end on ", hex_key, 'took', elapsed, 'seconds.'
quantileTime = elapsed
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
# something wrong with mapping to float for some col. maybe don't use_genfromtxt
# talking about airlines
if DO_SCIPY_COMPARE:
h2o_summ.quantile_comparisons(
csvPathnameFull,
skipHeader=True,
col=column, # what col to extract from the csv
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
# h2oSummary2=pctile[5 if DO_MEDIAN else 10],
# h2oQuantilesApprox=qresult_single,
h2oQuantilesExact=qresult,
use_genfromtxt=True,
)
trial += 1
execTime = 0
xList.append(column)
eList.append(execTime)
fList.append(quantileTime)
# remove all keys*******************************************************
# what about hex_key?
if 1==0:
start = time.time()
h2o.nodes[0].remove_all_keys()
elapsed = time.time() - start
print "remove all keys end on took", elapsed, 'seconds.'
#****************************************************************
# PLOTS. look for eplot.jpg and fplot.jpg in local dir?
if DO_PLOT:
xLabel = 'column (0 is first)'
eLabel = 'exec cut time'
fLabel = 'quantile time'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel, server=True)
if __name__ == '__main__':
h2o.unit_main()
|
11593389
|
import base64
import io
import json
import os
import sys
from google.cloud import storage
from google.cloud import vision
from google.cloud.vision import types
from google.cloud import firestore
db = firestore.Client()
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['POST'])
def handlePubSubMessage():
try:
print(request.json['message']['attributes']['eventType'], flush=True)
if request.json['message']['attributes']['eventType'] == 'OBJECT_FINALIZE':
file = decodeBase64Json(request.json['message']['data'])
tags = getImageTags(file['bucket'], file['name'])
writeToFirestore(file['name'], tags)
deleteFile(file['bucket'], file['name'])
    except Exception:
        print(sys.exc_info()[0], flush=True)
return '', 204
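# Illustrative shape of the Pub/Sub push payload handled above (a GCS
# OBJECT_FINALIZE notification; values are examples only):
#     {"message": {"attributes": {"eventType": "OBJECT_FINALIZE"},
#                  "data": "<base64 of JSON with 'bucket' and 'name' fields>"}}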
def decodeBase64Json(data):
return json.loads(base64.b64decode(data))
def getImageTags(bucketName, fileName):
client = vision.ImageAnnotatorClient()
image_uri = "gs://" + bucketName + "/" + fileName
request = {
"image": {"source": {"image_uri": image_uri}},
"features": [
{
"type": vision.enums.Feature.Type.LABEL_DETECTION,
"max_results": 6
},
{
"type": vision.enums.Feature.Type.LANDMARK_DETECTION,
"max_results": 3
}
],
}
response = client.annotate_image(request)
return getTagsFromResponse(response)
def getTagsFromResponse(visionApiResp):
landmarks = [ x.description for x in visionApiResp.landmark_annotations ]
labels = [ x.description for x in visionApiResp.label_annotations ]
return landmarks + labels
def writeToFirestore(fileName, tags):
doc_ref = db.collection('photos').document(fileName)
doc_ref.set({'tags': tags})
def deleteFile(bucketName, fileName):
client = storage.Client()
bucket = client.bucket(bucketName)
file = bucket.blob(fileName)
file.delete()
if __name__ == "__main__":
app.run(
debug=True,
host='0.0.0.0',
port=int(os.environ.get('PORT', 8080))
)
|
11593394
|
import time
from artemis.plotting.live_plotting import LiveStream, LivePlot, LiveCanal
from artemis.plotting.matplotlib_backend import MovingImagePlot, MovingPointPlot, LinePlot, ImagePlot, HistogramPlot
from itertools import count
from six.moves import xrange
__author__ = 'peter'
import numpy as np
def test_streaming(duration = 10):
c = count()
stream = LiveStream(lambda: {
'text': ['Veni', 'Vidi', 'Vici'][next(c) % 3],
'images': {
'bw_image': np.random.randn(20, 20),
'col_image': np.random.randn(20, 20, 3),
'vector_of_bw_images': np.random.randn(11, 20, 20),
'vector_of_colour_images': np.random.randn(11, 20, 20, 3),
'matrix_of_bw_images': np.random.randn(5, 6, 20, 20),
'matrix_of_colour_images': np.random.randn(5, 6, 20, 20, 3),
},
'line': np.random.randn(20),
'lines': np.random.randn(20, 3),
'moving_point': np.random.randn(),
'moving_points': np.random.randn(3),
})
for i in xrange(duration):
if i==1:
start_time = time.time()
elif i>1:
print('Average Frame Rate: %.2f FPS' % (i/(time.time()-start_time), ))
stream.update()
def test_dynamic_rebuild():
def grab_data():
if i < 10:
data = {'bw_image': np.random.randn(20, 20)}
else:
data = {
'bw_image': np.random.randn(20, 20),
'lines': np.random.randn(2)
}
return data
duration = 20
stream = LiveStream(grab_data)
for i in xrange(duration):
if i==1:
start_time = time.time()
elif i>1:
print('Average Frame Rate: %.2f FPS' % (i/(time.time()-start_time), ))
stream.update()
def test_canaling(duration = 10):
n_dims = 4
# Don't be frightened by the double-lambda here - the point is just to get a callback
# that spits out the same data when called in sequence.
cb_constructor_1d = lambda: lambda rng = np.random.RandomState(0): rng.randn(n_dims)
cb_image_data = lambda: lambda rng = np.random.RandomState(1): rng.rand(20, 30)
cb_sinusoid_data = lambda: lambda c=count(): np.sin(next(c)/40.)
canal = LiveCanal({
'histo-mass': LivePlot(plot = HistogramPlot([-2.5, 0, 0.5, 1, 1.5, 2, 2.5], mode = 'mass'), cb = lambda: np.random.randn(np.random.randint(10))),
'histo-density': LivePlot(plot = HistogramPlot([-2.5, 0, 0.5, 1, 1.5, 2, 2.5], mode = 'density'), cb = lambda: np.random.randn(np.random.randint(10))),
'1d-default': cb_constructor_1d(),
'1d-image': LivePlot(plot = MovingImagePlot(buffer_len=20), cb = cb_constructor_1d()),
'1d-seizmic': LivePlot(plot = MovingPointPlot(), cb = cb_constructor_1d()),
'1d-line': LivePlot(plot = LinePlot(), cb = cb_constructor_1d()),
'image-autoscale': LivePlot(ImagePlot(), cb_image_data()),
'image-overexposed': LivePlot(ImagePlot(clims = (0, 0.2)), cb_image_data()),
'image-jet': LivePlot(ImagePlot(cmap='jet'), cb_image_data()),
'trace-default': cb_sinusoid_data(),
'trace-prescaled': LivePlot(MovingPointPlot(y_bounds=(-1, 1)), cb_sinusoid_data()),
})
for i in xrange(duration):
canal.update()
if __name__ == '__main__':
# set_test_mode(True)
test_dynamic_rebuild()
test_streaming(10)
test_canaling(10)
|
11593408
|
import pytest
from djangocities.utils.validation.html.tags import HTML_TAGS
from djangocities.utils.validation.html.exceptions import UnsupportedHtmlTag
class TestHtmlTagClass:
def test_doctype_is_banned(self):
doctype = HTML_TAGS["!doctype"]
with pytest.raises(UnsupportedHtmlTag):
doctype.validate(1)
|
11593410
|
from flask import Blueprint, g, render_template, request
from gitmostwanted.app import app, db
from gitmostwanted.models.user import UserAttitude
from gitmostwanted.models.repo import Repo
repo_rating = Blueprint('repo_rating', __name__)
@repo_rating.route('/', defaults={'page': 1, 'sort_by': 'wanted', 'filter_worth_by': 'solid'})
@repo_rating.route('/top/<sort_by>/', defaults={'page': 1, 'filter_worth_by': 'solid'})
@repo_rating.route('/top/<sort_by>/<filter_worth_by>/', defaults={'page': 1})
@repo_rating.route('/top/<sort_by>/<filter_worth_by>/<int:page>')
def top(page, sort_by, filter_worth_by):
query = Repo.filter_by_args(Repo.query, request.args).filter(Repo.mature.is_(True))
sorts = {
'wanted': [Repo.worth.desc(), Repo.stargazers_count.desc()],
'stars': [Repo.stargazers_count.desc()]
}
if filter_worth_by not in ('rising', 'solid'):
filter_worth_by = 'solid'
query = query.filter(
(Repo.worth >= app.config['REPOSITORY_WORTH_SOLID']) if filter_worth_by == 'solid' else (
(Repo.worth > app.config['REPOSITORY_WORTH_DEFAULT']) &
(Repo.worth < app.config['REPOSITORY_WORTH_SOLID'])
)
)
    for f in sorts.get(sort_by, sorts['wanted']):
query = query.order_by(f)
if not g.user:
query = query.add_columns(db.null())
else:
query = UserAttitude.join_by_user_and_repo(query, g.user.id, Repo.id)\
.add_columns(UserAttitude.attitude)
entries = query.paginate(page if page > 0 else 1, per_page=20, error_out=False)
if entries.pages and entries.pages < entries.page:
return top(entries.pages, sort_by, filter_worth_by)
return render_template(
'repository/top.html', languages=Repo.language_distinct(),
repos=entries, page=page, sort_by=sort_by, filter_worth_by=filter_worth_by
)
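# Example requests handled by the routes above:
#   GET /                   -> page 1, sorted by 'wanted', 'solid' repos only
#   GET /top/stars/         -> page 1, sorted by stargazer count
#   GET /top/stars/rising/3 -> page 3 of 'rising' repos sorted by stars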
|
11593434
|
import logging
from pprint import pformat
from pymongo import MongoClient
from pymongo.errors import OperationFailure
from pymongo import monitoring
from logger import get_logger
from config import get_config
config = get_config()
logger = get_logger(__name__, log_level=("MONGODB", "LOGLEVEL"))
class CommandLogger(monitoring.CommandListener):
def started(self, event):
dbname = event.database_name
request_id = event.request_id
if event.command_name == "find":
collection = event.command["find"]
filter_ = event.command.get("filter", {})
projection = event.command.get("projection", {})
if event.command.get("singleBatch", False):
command = "find_one"
else:
command = "find"
logger.debug(f"[{request_id}] {dbname}.{collection}.{command}({filter_}, {projection})")
def succeeded(self, event):
if event.command_name == "find":
result = pformat(event.reply["cursor"]["firstBatch"])
logger.debug(f"[{event.request_id}] Result:\n{result}")
def failed(self, event):
logger.error("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"failed in {0.duration_micros} "
"microseconds".format(event))
monitoring.register(CommandLogger())
client = MongoClient(
host=config.get("MONGODB", "HOST"),
port=config.getint("MONGODB", "PORT"),
serverSelectionTimeoutMS=5000,
socketTimeoutMS=5000,
)
class Database(object):
def __init__(self, name):
self.name = name
self.auth = (config.get("MONGODB", "USERNAME"),
config.get("MONGODB", "PASSWORD"))
self._database = None
def _get_database(self):
if self._database is not None:
return self._database
db = client.get_database(self.name)
logger.debug("Authenticating database: %s", self.name)
res = db.authenticate(*self.auth)
logger.debug("Database authenticated: %s, output: %s", self.name, res)
self._database = db
return db
def __getitem__(self, collection):
return self._get_database().get_collection(collection)
def __getattribute__(self, attribute):
try:
return super().__getattribute__(attribute)
except AttributeError:
return getattr(self._get_database(), attribute)
configdb = Database(config.get("ANALYZER", "CONFIGDB_NAME"))
metricdb = Database(config.get("ANALYZER", "METRICDB_NAME"))
resultdb = Database(config.get("ANALYZER", "RESULTDB_NAME"))
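# Illustrative usage (collection and field names below are hypothetical):
#     doc = configdb['applications'].find_one({'name': 'my-app'})
#     names = metricdb.list_collection_names()
# Unknown attributes fall through to the underlying pymongo Database via
# __getattribute__, so the wrapper behaves like the database it lazily opens.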
|
11593444
|
import unittest
from jnpr.jsnapy.operator import Operator
from jnpr.jsnapy.notify import Notification
import os
import yaml
from jnpr.jsnapy.check import Comparator
from mock import patch, MagicMock
from nose.plugins.attrib import attr
@attr('unit')
class TestCheck(unittest.TestCase):
def setUp(self):
self.hostname = '10.209.12.121'
@patch('smtplib.SMTP.sendmail')
@patch('logging.Logger.error')
@patch('logging.Logger.debug')
@patch('smtplib.SMTP.quit')
@patch('smtplib.SMTP.login')
@patch('smtplib.SMTP.starttls')
@patch('smtplib.SMTP.ehlo')
@patch('smtplib.SMTP.connect')
@patch('socket.getfqdn')
def test_notify_send_mail(self, mock_fqdn, mock_connect, mock_ehlo, mock_starttls, mock_login, mock_quit, mock_log, mock_error, mock_send):
res = Operator()
mock_send.side_effect = Exception('not able to send mail')
mock_fqdn.return_value = '1.1.1.1'
mock_connect.return_value = (220, "ok")
res.result = 'Passed'
        mfile = os.path.join(os.path.dirname(__file__), 'configs', 'mail.yml')
        with open(mfile, 'r') as mf:
            mail_file = yaml.load(mf, Loader=yaml.FullLoader)
        passwd = mail_file['passwd']
notf = Notification()
notf.notify(mail_file, self.hostname, passwd, res)
mock_log.assert_called()
mock_error.assert_called_with('\x1b[31mERROR!! in sending mail: not able to send mail', extra={'hostname': '10.209.12.121', 'hostame': None})
mock_quit.assert_called()
@patch('logging.Logger.error')
@patch('logging.Logger.debug')
@patch('smtplib.SMTP.login')
@patch('smtplib.SMTP.starttls')
@patch('smtplib.SMTP.ehlo')
@patch('smtplib.SMTP.connect')
@patch('socket.getfqdn')
def test_notify_not_login(self, mock_fqdn, mock_connect, mock_ehlo, mock_starttls, mock_login, mock_log, mock_error):
res = Operator()
mock_fqdn.return_value = '1.1.1.1'
mock_connect.return_value = (220, "ok")
mock_login.side_effect = Exception('could not login')
res.result = 'Passed'
        mfile = os.path.join(os.path.dirname(__file__), 'configs', 'mail.yml')
        with open(mfile, 'r') as mf:
            mail_file = yaml.load(mf, Loader=yaml.FullLoader)
        passwd = mail_file['passwd']
notf = Notification()
notf.notify(mail_file, self.hostname, passwd, res)
mock_log.assert_called()
mock_error.assert_called_with('\x1b[31mERROR occurred: could not login', extra={'hostname': '10.209.12.121', 'hostame': None})
|
11593447
|
import numpy as np
__all__ = ['crossover_1point', 'crossover_2point', 'crossover_2point_bit', 'crossover_pmx', 'crossover_2point_prob']
def crossover_1point(self):
Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
for i in range(0, size_pop, 2):
n = np.random.randint(0, self.len_chrom)
# crossover at the point n
seg1, seg2 = self.Chrom[i, n:].copy(), self.Chrom[i + 1, n:].copy()
self.Chrom[i, n:], self.Chrom[i + 1, n:] = seg2, seg1
return self.Chrom
def crossover_2point(self):
Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
for i in range(0, size_pop, 2):
n1, n2 = np.random.randint(0, self.len_chrom, 2)
if n1 > n2:
n1, n2 = n2, n1
# crossover at the points n1 to n2
seg1, seg2 = self.Chrom[i, n1:n2].copy(), self.Chrom[i + 1, n1:n2].copy()
self.Chrom[i, n1:n2], self.Chrom[i + 1, n1:n2] = seg2, seg1
return self.Chrom
def crossover_2point_bit(self):
    '''
    About 3x faster than `crossover_2point`, but only usable when Chrom is
    a 0/1 (binary) array.
    :param self:
    :return:
    '''
Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
half_size_pop = int(size_pop / 2)
Chrom1, Chrom2 = Chrom[:half_size_pop], Chrom[half_size_pop:]
mask = np.zeros(shape=(half_size_pop, len_chrom), dtype=int)
for i in range(half_size_pop):
n1, n2 = np.random.randint(0, self.len_chrom, 2)
if n1 > n2:
n1, n2 = n2, n1
mask[i, n1:n2] = 1
mask2 = (Chrom1 ^ Chrom2) & mask
Chrom1 ^= mask2
Chrom2 ^= mask2
return self.Chrom
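# Worked example of the bit trick above: with parents C1 = [1, 0, 1, 1],
# C2 = [0, 0, 1, 0] and crossover segment n1=0, n2=2, mask = [1, 1, 0, 0] and
# mask2 = (C1 ^ C2) & mask = [1, 0, 0, 0]. XOR-ing both parents with mask2
# swaps exactly the differing bits inside the segment:
# C1 -> [0, 0, 1, 1], C2 -> [1, 0, 1, 0].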
def crossover_2point_prob(self, crossover_prob):
    '''
    2-point crossover, applied to each pair with probability `crossover_prob`
    '''
Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
for i in range(0, size_pop, 2):
if np.random.rand() < crossover_prob:
n1, n2 = np.random.randint(0, self.len_chrom, 2)
if n1 > n2:
n1, n2 = n2, n1
seg1, seg2 = self.Chrom[i, n1:n2].copy(), self.Chrom[i + 1, n1:n2].copy()
self.Chrom[i, n1:n2], self.Chrom[i + 1, n1:n2] = seg2, seg1
return self.Chrom
# def crossover_rv_3(self):
# Chrom, size_pop = self.Chrom, self.size_pop
# i = np.random.randint(1, self.len_chrom) # crossover at the point i
# Chrom1 = np.concatenate([Chrom[::2, :i], Chrom[1::2, i:]], axis=1)
# Chrom2 = np.concatenate([Chrom[1::2, :i], Chrom[0::2, i:]], axis=1)
# self.Chrom = np.concatenate([Chrom1, Chrom2], axis=0)
# return self.Chrom
def crossover_pmx(self):
'''
Executes a partially matched crossover (PMX) on Chrom.
For more details see [Goldberg1985]_.
:param self:
:return:
.. [Goldberg1985] Goldberg and Lingel, "Alleles, loci, and the traveling
salesman problem", 1985.
'''
Chrom, size_pop, len_chrom = self.Chrom, self.size_pop, self.len_chrom
for i in range(0, size_pop, 2):
Chrom1, Chrom2 = self.Chrom[i], self.Chrom[i + 1]
cxpoint1, cxpoint2 = np.random.randint(0, self.len_chrom - 1, 2)
if cxpoint1 >= cxpoint2:
cxpoint1, cxpoint2 = cxpoint2, cxpoint1 + 1
# crossover at the point cxpoint1 to cxpoint2
pos1_recorder = {value: idx for idx, value in enumerate(Chrom1)}
pos2_recorder = {value: idx for idx, value in enumerate(Chrom2)}
for j in range(cxpoint1, cxpoint2):
value1, value2 = Chrom1[j], Chrom2[j]
pos1, pos2 = pos1_recorder[value2], pos2_recorder[value1]
Chrom1[j], Chrom1[pos1] = Chrom1[pos1], Chrom1[j]
Chrom2[j], Chrom2[pos2] = Chrom2[pos2], Chrom2[j]
pos1_recorder[value1], pos1_recorder[value2] = pos1, j
pos2_recorder[value1], pos2_recorder[value2] = j, pos2
self.Chrom[i], self.Chrom[i + 1] = Chrom1, Chrom2
return self.Chrom
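# Worked example of the in-place PMX variant above: with Chrom1 = [1, 2, 3, 4],
# Chrom2 = [3, 1, 2, 4] and cut points cxpoint1=1, cxpoint2=3, the position
# recorders drive two within-chromosome swaps per segment index, yielding
# Chrom1 = [2, 3, 1, 4] and Chrom2 = [1, 2, 3, 4]; both remain valid
# permutations, which is the invariant PMX is designed to preserve.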
|
11593461
|
from invoke import task, run
@task
def release(version):
"""
Version should be a string like '0.4' or '1.0'
"""
run('git tag -s "{}"'.format(version))
run('python setup.py sdist bdist_wheel')
run('twine upload -s dist/alchimia-{}*'.format(version))
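# Example invocation (assuming this file is a tasks.py discovered by invoke;
# exact CLI syntax for passing `version` depends on the installed invoke
# version):
#     invoke release --version=0.4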
|
11593544
|
import os
import re
import urllib
import uuid
from datetime import datetime
from cgi import FieldStorage
from ofs import get_impl
from pylons import request, response
from pylons.controllers.util import abort, redirect_to
from pylons import config
from paste.fileapp import FileApp
from paste.deploy.converters import asbool
from ckan.lib.base import BaseController, c, request, render, config, h, abort
from ckan.lib.jsonp import jsonpify
import ckan.model as model
import ckan.logic as logic
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
from logging import getLogger
log = getLogger(__name__)
BUCKET = config.get('ckan.storage.bucket', 'default')
key_prefix = config.get('ckan.storage.key_prefix', 'file/')
_eq_re = re.compile(r"^(.*)(=[0-9]*)$")
def fix_stupid_pylons_encoding(data):
"""
Fix an apparent encoding problem when calling request.body
TODO: Investigate whether this is fixed in later versions?
"""
if data.startswith("%") or data.startswith("+"):
data = urllib.unquote_plus(data)
m = _eq_re.match(data)
if m:
data = m.groups()[0]
return data
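# Example of the repair above: a request.body arriving as
# '%7B%22foo%22%3A+1%7D=' is unquoted to '{"foo": 1}=' and the trailing '='
# (introduced by form encoding) is stripped, leaving valid JSON '{"foo": 1}'.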
def create_pairtree_marker(folder):
""" Creates the pairtree marker for tests if it doesn't exist """
    if not folder.endswith('/'):
folder = folder + '/'
directory = os.path.dirname(folder)
if not os.path.exists(directory):
os.makedirs(directory)
target = os.path.join(directory, 'pairtree_version0_1')
if os.path.exists(target):
return
open(target, 'wb').close()
def get_ofs():
"""Return a configured instance of the appropriate OFS driver.
"""
storage_backend = config['ofs.impl']
kw = {}
for k, v in config.items():
if not k.startswith('ofs.') or k == 'ofs.impl':
continue
kw[k[4:]] = v
# Make sure we have created the marker file to avoid pairtree issues
if storage_backend == 'pairtree' and 'storage_dir' in kw:
create_pairtree_marker(kw['storage_dir'])
ofs = get_impl(storage_backend)(**kw)
return ofs
def authorize(method, bucket, key, user, ofs):
"""
Check authz for the user with a given bucket/key combo within a
particular ofs implementation.
"""
    if method not in ['POST', 'GET', 'PUT', 'DELETE']:
abort(400)
if method != 'GET':
# do not allow overwriting
if ofs.exists(bucket, key):
abort(409)
# now check user stuff
context = {'user': c.user,
'model': model}
try:
logic.check_access('file_upload', context, {})
except logic.NotAuthorized:
h.flash_error('Not authorized to upload files.')
abort(401)
class StorageController(BaseController):
'''Upload to storage backend.
'''
_ofs_impl = None
@property
def ofs(self):
if not StorageController._ofs_impl:
StorageController._ofs_impl = get_ofs()
return StorageController._ofs_impl
def upload(self):
label = key_prefix + request.params.get('filepath', str(uuid.uuid4()))
c.data = {
'action': h.url_for('storage_upload_handle', qualified=False),
'fields': [
{
'name': 'key',
'value': label
}
]
}
return render('storage/index.html')
def upload_handle(self):
bucket_id = BUCKET
params = dict(request.params.items())
stream = params.get('file')
label = params.get('key')
authorize('POST', BUCKET, label, c.userobj, self.ofs)
if not label:
abort(400, "No label")
if not isinstance(stream, FieldStorage):
abort(400, "No file stream.")
del params['file']
params['filename-original'] = stream.filename
#params['_owner'] = c.userobj.name if c.userobj else ""
params['uploaded-by'] = c.userobj.name if c.userobj else ""
self.ofs.put_stream(bucket_id, label, stream.file, params)
success_action_redirect = h.url_for(
'storage_upload_success', qualified=True,
bucket=BUCKET, label=label)
# Do not redirect here as it breaks js file uploads (get infinite loop
# in FF and crash in Chrome)
return self.success(label)
def success(self, label=None):
label = request.params.get('label', label)
h.flash_success('Upload successful')
c.file_url = h.url_for('storage_file',
label=label,
qualified=True)
c.upload_url = h.url_for('storage_upload')
return render('storage/success.html')
def success_empty(self, label=None):
# very simple method that just returns 200 OK
return ''
def file(self, label):
exists = self.ofs.exists(BUCKET, label)
if not exists:
# handle erroneous trailing slash by redirecting to url w/o slash
if label.endswith('/'):
label = label[:-1]
# This may be best being cached_url until we have moved it into
# permanent storage
file_url = h.url_for('storage_file', label=label)
h.redirect_to(file_url)
else:
abort(404)
file_url = self.ofs.get_url(BUCKET, label)
if file_url.startswith("file://"):
metadata = self.ofs.get_metadata(BUCKET, label)
filepath = file_url[len("file://"):]
headers = {
# 'Content-Disposition':'attachment; filename="%s"' % label,
'Content-Type': metadata.get('_format', 'text/plain')}
fapp = FileApp(filepath, headers=None, **headers)
return fapp(request.environ, self.start_response)
else:
h.redirect_to(file_url.encode('ascii', 'ignore'))
class StorageAPIController(BaseController):
_ofs_impl = None
@property
def ofs(self):
if not StorageAPIController._ofs_impl:
StorageAPIController._ofs_impl = get_ofs()
return StorageAPIController._ofs_impl
@jsonpify
def index(self):
info = {
'metadata/{label}': {
'description': 'Get or set metadata for this '
'item in storage', },
'auth/request/{label}': {
'description': self.auth_request.__doc__, },
'auth/form/{label}': {
'description': self.auth_form.__doc__, }}
return info
def set_metadata(self, label):
bucket = BUCKET
if not label.startswith("/"):
label = "/" + label
try:
data = fix_stupid_pylons_encoding(request.body)
if data:
metadata = json.loads(data)
else:
metadata = {}
        except Exception:
abort(400)
try:
b = self.ofs._require_bucket(bucket)
        except Exception:
abort(409)
k = self.ofs._get_key(b, label)
if k is None:
k = b.new_key(label)
metadata = metadata.copy()
metadata["_creation_time"] = str(datetime.utcnow())
self.ofs._update_key_metadata(k, metadata)
k.set_contents_from_file(StringIO(''))
elif request.method == "PUT":
old = self.ofs.get_metadata(bucket, label)
to_delete = []
for ok in old.keys():
if ok not in metadata:
to_delete.append(ok)
if to_delete:
self.ofs.del_metadata_keys(bucket, label, to_delete)
self.ofs.update_metadata(bucket, label, metadata)
else:
self.ofs.update_metadata(bucket, label, metadata)
k.make_public()
k.close()
        return self.get_metadata(label)
@jsonpify
def get_metadata(self, label):
bucket = BUCKET
storage_backend = config['ofs.impl']
if storage_backend in ['google', 's3']:
if not label.startswith("/"):
label = "/" + label
url = "https://%s%s" % (
self.ofs.conn.calling_format.build_host(
self.ofs.conn.server_name(), bucket), label)
else:
url = h.url_for('storage_file',
label=label,
qualified=False
)
if url.startswith('/'):
url = config.get('ckan.site_url', '').rstrip('/') + url
if not self.ofs.exists(bucket, label):
abort(404)
metadata = self.ofs.get_metadata(bucket, label)
metadata["_location"] = url
return metadata
@jsonpify
def auth_request(self, label):
'''Provide authentication information for a request so a client can
interact with backend storage directly.
:param label: label.
:param kwargs: sent either via query string for GET or json-encoded
dict for POST). Interpreted as http headers for request plus an
(optional) method parameter (being the HTTP method).
Examples of headers are:
Content-Type
Content-Encoding (optional)
Content-Length
Content-MD5
Expect (should be '100-Continue')
:return: is a json hash containing various attributes including a
headers dictionary containing an Authorization field which is good for
15m.
'''
bucket = BUCKET
if request.POST:
try:
data = fix_stupid_pylons_encoding(request.body)
headers = json.loads(data)
except Exception:
                from traceback import print_exc
                msg = StringIO()
                print_exc(file=msg)
                msg.seek(0)
                log.error(msg.read())
abort(400)
else:
headers = dict(request.params)
if 'method' in headers:
method = headers['method']
del headers['method']
else:
method = 'POST'
authorize(method, bucket, label, c.userobj, self.ofs)
http_request = self.ofs.authenticate_request(method, bucket, label,
headers)
return {
'host': http_request.host,
'method': http_request.method,
'path': http_request.path,
'headers': http_request.headers}
def _get_remote_form_data(self, label):
method = 'POST'
content_length_range = \
int(config.get('ckan.storage.max_content_length', 50000000))
acl = 'public-read'
fields = [{
'name': self.ofs.conn.provider.metadata_prefix + 'uploaded-by',
'value': c.userobj.id}]
conditions = ['{"%s": "%s"}' % (x['name'], x['value']) for x in
fields]
# In FF redirect to this breaks js upload as FF attempts to open file
# (presumably because mimetype = javascript) and this stops js
# success_action_redirect = h.url_for('storage_api_get_metadata',
# qualified=True, label=label)
success_action_redirect = h.url_for('storage_upload_success_empty',
qualified=True,
label=label)
data = self.ofs.conn.build_post_form_args(
BUCKET,
label,
expires_in=72000,
max_content_length=content_length_range,
success_action_redirect=success_action_redirect,
acl=acl,
fields=fields,
conditions=conditions
)
# HACK: fix up some broken stuff from boto
# e.g. should not have content-length-range in list of fields!
storage_backend = config['ofs.impl']
for idx, field in enumerate(data['fields']):
if storage_backend == 'google':
if field['name'] == 'AWSAccessKeyId':
field['name'] = 'GoogleAccessId'
if field['name'] == 'content-length-range':
del data['fields'][idx]
return data
def _get_form_data(self, label):
storage_backend = config['ofs.impl']
if storage_backend in ['google', 's3']:
return self._get_remote_form_data(label)
else:
data = {
'action': h.url_for('storage_upload_handle', qualified=False),
'fields': [
{
'name': 'key',
'value': label
}
]
}
return data
@jsonpify
def auth_form(self, label):
'''Provide fields for a form upload to storage including
authentication.
:param label: label.
:return: json-encoded dictionary with action parameter and fields list.
'''
bucket = BUCKET
if request.POST:
try:
data = fix_stupid_pylons_encoding(request.body)
headers = json.loads(data)
except Exception:
                from traceback import print_exc
                msg = StringIO()
                print_exc(file=msg)
                msg.seek(0)
                log.error(msg.read())
abort(400)
else:
headers = dict(request.params)
method = 'POST'
authorize(method, bucket, label, c.userobj, self.ofs)
data = self._get_form_data(label)
return data
|
11593566
|
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.dates as mdates
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.patheffects as pe
from obspy.geodetics import gps2dist_azimuth
from .stack import get_peak_coordinates
import utm
from datetime import datetime
from . import RTMWarning
def plot_time_slice(S, processed_st, time_slice=None, label_stations=True,
hires=False, dem=None, plot_peak=True, xy_grid=None,
cont_int=5, annot_int=50):
"""
Plot a time slice through :math:`S` to produce a map-view plot. If time is
not specified, then the slice corresponds to the maximum of :math:`S` in
the time direction. Can also plot the peak of the stack function over
time.
Args:
S (:class:`~xarray.DataArray`): The stack function :math:`S`
processed_st (:class:`~obspy.core.stream.Stream`): Pre-processed
Stream; output of :func:`~rtm.waveform.process_waveforms` (This is
needed because Trace metadata from this Stream are used to plot
stations on the map)
time_slice (:class:`~obspy.core.utcdatetime.UTCDateTime`): Time of
desired time slice. The nearest time in :math:`S` to this specified
time will be plotted. If `None`, the time corresponding to
:math:`\max(S)` is used (default: `None`)
label_stations (bool): Toggle labeling stations with network and
station codes (default: `True`)
hires (bool): If `True`, use higher-resolution coastlines, which looks better
but can be slow (default: `False`)
dem (:class:`~xarray.DataArray`): Overlay time slice on a user-supplied
DEM from :class:`~rtm.grid.produce_dem` (default: `None`)
plot_peak (bool): Plot the peak stack function over time as a subplot
(default: `True`)
xy_grid (int, float, or None): If not `None`, transforms UTM
coordinates such that the grid center is at (0, 0) — the plot
extent is then given by (-xy_grid, xy_grid) [meters] for easting
and northing. Only valid for projected grids
cont_int (int): Contour interval [m] for plots with DEM data
annot_int (int): Annotated contour interval [m] for plots with DEM data
(these contours are thicker and labeled)
Returns:
:class:`~matplotlib.figure.Figure`: Output figure
"""
# Don't plot peak of stack function when length of stack is one
if plot_peak and len(S.time) == 1:
plot_peak = False
warnings.warn('Stack time length = 1, not plotting peak', RTMWarning)
st = processed_st.copy()
# Get coordinates of stack maximum in (latitude, longitude)
time_max, y_max, x_max, peaks, props = get_peak_coordinates(S, unproject=S.UTM)
# Gather coordinates of grid center
lon_0, lat_0 = S.grid_center
if S.UTM:
# Don't use cartopy for UTM
proj = None
transform = None
plot_transform = None
lon_0, lat_0, _, _ = utm.from_latlon(S.grid_center[1], S.grid_center[0])
x_max, y_max, _, _ = utm.from_latlon(y_max, x_max)
for tr in st:
tr.stats.longitude, tr.stats.latitude, _, _ = utm.from_latlon(
tr.stats.latitude, tr.stats.longitude)
else:
# This is a good projection to use since it preserves area
proj = ccrs.AlbersEqualArea(central_longitude=lon_0,
central_latitude=lat_0,
standard_parallels=(S.y.values.min(),
S.y.values.max()))
transform = ccrs.PlateCarree()
plot_transform = ccrs.PlateCarree()
if plot_peak:
fig, (ax, ax1) = plt.subplots(figsize=(8, 12), nrows=2,
gridspec_kw={'height_ratios': [3, 1]},
subplot_kw=dict(projection=proj))
        # Axes kludge so the second subplot can use a different projection
ax1.remove()
ax1 = fig.add_subplot(414)
else:
fig, ax = plt.subplots(figsize=(8, 8),
subplot_kw=dict(projection=proj))
# In either case, we convert from UTCDateTime to np.datetime64
if time_slice:
time_to_plot = np.datetime64(time_slice)
else:
time_to_plot = np.datetime64(time_max)
slice = S.sel(time=time_to_plot, method='nearest')
# Convert UTM grid/etc to x/y coordinates with (0,0) as origin
if xy_grid:
# Make sure this is a projected grid
if not S.UTM:
raise ValueError('xy_grid can only be used with projected grids!')
        print(f'Converting to x/y grid, cropping {xy_grid:g} m from center')
# Update dataarrays to x/y coordinates from dem
x0 = slice.x.data.min() + slice.x_radius
y0 = slice.y.data.min() + slice.y_radius
slice = slice.assign_coords(x=(slice.x.data - x0))
slice = slice.assign_coords(y=(slice.y.data - y0))
# In case DEM has different extent than slice
if dem is not None:
x0_dem = dem.x.data.min() + dem.x_radius
y0_dem = dem.y.data.min() + dem.y_radius
dem = dem.assign_coords(x=(dem.x.data - x0_dem))
dem = dem.assign_coords(y=(dem.y.data - y0_dem))
lon_0 = lon_0 - x0
lat_0 = lat_0 - y0
x_max = x_max - x0
y_max = y_max - y0
for tr in st:
tr.stats.longitude = tr.stats.longitude - x0
tr.stats.latitude = tr.stats.latitude - y0
if dem is None:
if not S.UTM:
_plot_geographic_context(ax=ax, hires=hires)
alpha = 0.5
else:
alpha = 1 # Can plot slice as opaque for UTM plots w/o DEM, since nothing beneath slice
slice_plot_kwargs = dict(ax=ax, alpha=alpha, cmap='viridis',
add_colorbar=False, add_labels=False)
else:
# Rounding to nearest cont_int
all_levels = np.arange(np.ceil(dem.min().data / cont_int),
np.floor(dem.max().data / cont_int) + 1) * cont_int
# Rounding to nearest annot_int
annot_levels = np.arange(np.ceil(dem.min().data / annot_int),
np.floor(dem.max().data / annot_int) + 1) * annot_int
# Ensure we don't draw annotated levels twice
cont_levels = []
for level in all_levels:
if level not in annot_levels:
cont_levels.append(level)
dem.plot.contour(ax=ax, colors='k', levels=cont_levels, zorder=-1,
linewidths=0.3)
# Use thicker lines for annotated contours
cs = dem.plot.contour(ax=ax, colors='k', levels=annot_levels,
zorder=-1, linewidths=0.7)
ax.clabel(cs, fontsize=9, fmt='%d', inline=True) # Actually annotate
slice_plot_kwargs = dict(ax=ax, alpha=0.7, cmap='viridis',
add_colorbar=False, add_labels=False)
# Mask areas outside of DEM extent
# Select subset of DEM that slice occupies
dem_slice = dem.sel(x=slice.x, y=slice.y, method='nearest')
slice.data[np.isnan(dem_slice.data)] = np.nan
if S.UTM:
# imshow works well here (no gridlines in translucent plot)
sm = slice.plot.imshow(zorder=0, **slice_plot_kwargs)
plot_transform = ax.transData
# Label axes according to choice of xy_grid or not
if xy_grid:
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
else:
ax.set_xlabel('UTM easting [m]')
ax.set_ylabel('UTM northing [m]')
ax.ticklabel_format(style='plain', useOffset=False)
else:
# imshow performs poorly for Albers equal-area projection - use
# pcolormesh instead (gridlines will show in translucent plot)
sm = slice.plot.pcolormesh(transform=transform, **slice_plot_kwargs)
# Initialize list of handles for legend
h = [None, None, None]
scatter_zorder = 5
# Plot center of grid
h[0] = ax.scatter(lon_0, lat_0, s=50, color='limegreen', edgecolor='black',
label='Grid center', transform=plot_transform,
zorder=scatter_zorder)
# Plot stack maximum
if S.UTM:
# x/y formatting
label = 'Stack max'
else:
# Lat/lon formatting
label = f'Stack max\n({y_max:.4f}, {x_max:.4f})'
h[1] = ax.scatter(x_max, y_max, s=100, color='red', marker='*',
edgecolor='black', label=label,
transform=plot_transform, zorder=scatter_zorder)
# Plot stations
for tr in st:
h[2] = ax.scatter(tr.stats.longitude, tr.stats.latitude, marker='v',
color='orange', edgecolor='black',
label='Station', transform=plot_transform,
zorder=scatter_zorder)
if label_stations:
ax.text(tr.stats.longitude, tr.stats.latitude,
' {}.{}'.format(tr.stats.network, tr.stats.station),
verticalalignment='center_baseline',
horizontalalignment='left', fontsize=10, color='white',
transform=plot_transform, zorder=scatter_zorder,
path_effects=[pe.Stroke(linewidth=2, foreground='black'),
pe.Normal()],
clip_on=True)
ax.legend(h, [handle.get_label() for handle in h], loc='best',
framealpha=1, borderpad=.3, handletextpad=.3)
time_round = np.datetime64(slice.time.values + np.timedelta64(500, 'ms'),
's').astype(datetime) # Nearest second
title = 'Time: {}'.format(time_round)
if hasattr(S, 'celerity'):
title += f'\nCelerity: {S.celerity:g} m/s'
# Label global maximum if applicable
if slice.time.values == time_max:
title = 'GLOBAL MAXIMUM\n\n' + title
ax.set_title(title, pad=20)
# Show x- and y-axes w/ same scale if this is a Cartesian plot
if S.UTM:
ax.set_aspect('equal')
# Crop plot to show just the slice area
if xy_grid:
ax.set_xlim(-xy_grid, xy_grid)
ax.set_ylim(-xy_grid, xy_grid)
ax_pos = ax.get_position()
cloc = [ax_pos.x1+.02, ax_pos.y0, .02, ax_pos.height]
cbaxes = fig.add_axes(cloc)
cbar = fig.colorbar(sm, cax=cbaxes, label='Stack amplitude')
cbar.solids.set_alpha(1)
if plot_peak:
plot_stack_peak(S, plot_max=True, ax=ax1)
fig.show()
return fig
def plot_record_section(st, origin_time, source_location, plot_celerity=None,
label_waveforms=True):
"""
Plot a record section based upon user-provided source location and origin
time. Optionally plot celerity for reference, with two plotting options.
Args:
st (:class:`~obspy.core.stream.Stream`): Any Stream object with
`tr.stats.latitude`, `tr.stats.longitude` attached
origin_time (:class:`~obspy.core.utcdatetime.UTCDateTime`): Origin time
for record section
source_location (tuple): Tuple of (`lat`, `lon`) specifying source
location
        plot_celerity: Can be either `'range'`, a single celerity, or a list
            of celerities. If `'range'`, plots a continuous swath of
            celerities from 220-350 m/s. Otherwise, plots the specific
            celerities given. If `None`, does not plot any celerities
            (default: `None`)
label_waveforms (bool): Toggle labeling waveforms with network and
station codes (default: `True`)
Returns:
:class:`~matplotlib.figure.Figure`: Output figure
"""
st_edit = st.copy()
for tr in st_edit:
tr.stats.distance, _, _ = gps2dist_azimuth(*source_location,
tr.stats.latitude,
tr.stats.longitude)
st_edit.trim(origin_time)
fig = plt.figure(figsize=(12, 8))
st_edit.plot(fig=fig, type='section', orientation='horizontal',
fillcolors=('black', 'black'), linewidth=0)
ax = fig.axes[0]
trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
if label_waveforms:
for tr in st_edit:
ax.text(1.01, tr.stats.distance / 1000,
f'{tr.stats.network}.{tr.stats.station}',
verticalalignment='center', transform=trans, fontsize=10)
pad = 0.1 # Move colorbar to the right to make room for labels
else:
pad = 0.05 # Matplotlib default for vertical colorbars
if plot_celerity:
# Check if user requested a continuous range of celerities
if plot_celerity == 'range':
inc = 0.5 # [m/s]
            celerity_list = np.arange(220, 350 + inc, inc)  # [m/s] All reasonable celerities
zorder = -1
# Otherwise, they provided specific celerities
else:
# Type conversion
            if not isinstance(plot_celerity, list):
                plot_celerity = [plot_celerity]
celerity_list = plot_celerity
celerity_list.sort()
zorder = None
# Create colormap of appropriate length
cmap = plt.cm.get_cmap('rainbow', len(celerity_list))
colors = [cmap(i) for i in range(cmap.N)]
xlim = np.array(ax.get_xlim())
y_max = ax.get_ylim()[1] # Save this for re-scaling axis
for celerity, color in zip(celerity_list, colors):
ax.plot(xlim, xlim * celerity / 1000, label=f'{celerity:g}',
color=color, zorder=zorder)
ax.set_ylim(top=y_max) # Scale y-axis to pre-plotting extent
# If plotting a continuous range, add a colorbar
if plot_celerity == 'range':
mapper = plt.cm.ScalarMappable(cmap=cmap)
mapper.set_array(celerity_list)
cbar = fig.colorbar(mapper, label='Celerity (m/s)', pad=pad,
aspect=30)
cbar.ax.minorticks_on()
# If plotting discrete celerities, just add a legend
else:
ax.legend(title='Celerity (m/s)', loc='lower right', framealpha=1,
edgecolor='inherit')
ax.set_ylim(bottom=0) # Show all the way to zero offset
time_round = np.datetime64(origin_time + 0.5, 's').astype(datetime) # Nearest second
ax.set_xlabel('Time (s) from {}'.format(time_round))
ax.set_ylabel('Distance (km) from '
'({:.4f}, {:.4f})'.format(*source_location))
fig.tight_layout()
fig.show()
return fig
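# A minimal usage sketch for plot_record_section (hypothetical event values;
# assumes tr.stats.latitude and tr.stats.longitude were attached to every
# trace beforehand):
#
#   from obspy import UTCDateTime
#   fig = plot_record_section(st, origin_time=UTCDateTime('2016-05-22T07:45:00'),
#                             source_location=(61.2989, -152.2539),
#                             plot_celerity='range')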
def plot_st(st, filt, equal_scale=False, remove_response=False,
label_waveforms=True):
"""
Plot Stream waveforms in a publication-quality figure. Multiple plotting
options, including filtering.
Args:
st (:class:`~obspy.core.stream.Stream`): Any Stream object
filt (list): A two-element list of lower and upper corner frequencies
for filtering. Specify `None` if no filtering is desired.
equal_scale (bool): Set equal scale for all waveforms (default:
`False`)
remove_response (bool): Remove response by applying sensitivity
label_waveforms (bool): Toggle labeling waveforms with network and
station codes (default: `True`)
Returns:
:class:`~matplotlib.figure.Figure`: Output figure
"""
st_plot = st.copy()
ntra = len(st)
tvec = st_plot[0].times('matplotlib')
if remove_response:
print('Applying sensitivity')
st_plot.remove_sensitivity()
if filt:
        print(f'Filtering between {filt[0]:.1f}-{filt[1]:.1f} Hz')
st_plot.detrend(type='linear')
st_plot.taper(max_percentage=.01)
st_plot.filter("bandpass", freqmin=filt[0], freqmax=filt[1], corners=2,
zerophase=True)
if equal_scale:
ym = np.max(st_plot.max())
    fig, ax = plt.subplots(figsize=(8, 6), nrows=ntra, sharex=True,
                           squeeze=False)
    ax = ax.flatten()  # Ensure ax indexes like a 1-D array even for one trace
for i, tr in enumerate(st_plot):
ax[i].plot(tvec, tr.data, 'k-')
ax[i].set_xlim(tvec[0], tvec[-1])
if equal_scale:
ax[i].set_ylim(-ym, ym)
else:
ax[i].set_ylim(-tr.data.max(), tr.data.max())
plt.locator_params(axis='y', nbins=4)
ax[i].tick_params(axis='y', labelsize=8)
ax[i].ticklabel_format(useOffset=False, style='plain')
if tr.stats.channel[1] == 'D':
ax[i].set_ylabel('Pressure [Pa]', fontsize=8)
else:
ax[i].set_ylabel('Velocity [m/s]', fontsize=8)
if label_waveforms:
ax[i].text(.85, .9,
f'{tr.stats.network}.{tr.stats.station}.{tr.stats.channel}',
verticalalignment='center', transform=ax[i].transAxes)
# Tick locating and formatting
locator = mdates.AutoDateLocator()
ax[-1].xaxis.set_major_locator(locator)
ax[-1].xaxis.set_major_formatter(_UTCDateFormatter(locator))
fig.autofmt_xdate()
fig.tight_layout()
plt.subplots_adjust(hspace=.12)
fig.show()
return fig
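# A minimal usage sketch for plot_st (illustrative corner frequencies):
#
#   fig = plot_st(st, filt=[0.5, 5], equal_scale=False, remove_response=True)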
def plot_stack_peak(S, plot_max=False, ax=None):
"""
Plot the stack function (at the spatial stack max) as a function of time.
Args:
S: :class:`~xarray.DataArray` containing the stack function :math:`S`
plot_max (bool): Plot maximum value with red circle (default: `False`)
ax (:class:`~matplotlib.axes.Axes`): Pre-existing axes to plot into
Returns:
:class:`~matplotlib.figure.Figure`: Output figure
"""
s_peak = S.max(axis=(1, 2)).data
if not ax:
fig, ax = plt.subplots(figsize=(8, 4))
else:
fig = ax.get_figure() # Get figure to which provided axis belongs
ax.plot(S.time, s_peak, 'k-')
if plot_max:
stack_maximum = S.where(S == S.max(), drop=True).squeeze()
marker_kwargs = dict(marker='*', color='red', edgecolor='black', s=150,
zorder=5, clip_on=False)
if stack_maximum.size > 1:
max_indices = np.argwhere(~np.isnan(stack_maximum.data))
ax.scatter(stack_maximum[tuple(max_indices[0])].time.data,
stack_maximum[tuple(max_indices[0])].data,
**marker_kwargs)
            warnings.warn(f'Multiple global maxima ({len(max_indices)}) '
                          'present in S!', RTMWarning)
else:
ax.scatter(stack_maximum.time.data, stack_maximum.data,
**marker_kwargs)
ax.set_xlim(S.time[0].data, S.time[-1].data)
ax.set_ylim(bottom=0) # Never can go below zero
ax.set_ylabel('Max stack amplitude')
# Tick locating and formatting
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(_UTCDateFormatter(locator))
plt.setp(ax.xaxis.get_majorticklabels(), rotation=30, ha='right')
return fig
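# A minimal usage sketch for plot_stack_peak, where S is the stack
# :class:`~xarray.DataArray` used throughout this module:
#
#   fig = plot_stack_peak(S, plot_max=True)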
def _plot_geographic_context(ax, hires=False):
"""
Plot geographic basemap information on a map axis. Plots simple coastlines for
unprojected plots.
Args:
ax (:class:`~cartopy.mpl.geoaxes.GeoAxes`): Existing axis to plot into
hires (bool): If `True`, use higher-resolution coastlines (default: `False`)
"""
# Since unprojected grids have regional/global extent, just show the
# coastlines and borders
if hires:
gshhs_scale = 'intermediate'
lake_scale = '10m'
else:
gshhs_scale = 'low'
lake_scale = '50m'
ax.add_feature(
cfeature.GSHHSFeature(scale=gshhs_scale),
facecolor=cfeature.COLORS['land'], zorder=0,
)
ax.background_patch.set_facecolor(cfeature.COLORS['water'])
ax.add_feature(
cfeature.LAKES.with_scale(lake_scale),
facecolor=cfeature.COLORS['water'],
edgecolor='black',
zorder=0,
)
# Add states and provinces borders
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
ax.add_feature(states_provinces, edgecolor='gray')
ax.add_feature(cfeature.BORDERS, edgecolor='gray')
# Add gridlines and labels
ax.gridlines(draw_labels=["x", "y", "left", "bottom"], linewidth=1,
color='gray', alpha=0.5, linestyle='--')
# Subclass ConciseDateFormatter (modifies __init__() and set_axis() methods)
class _UTCDateFormatter(mdates.ConciseDateFormatter):
def __init__(self, locator, tz=None):
super().__init__(locator, tz=tz, show_offset=True)
# Re-format datetimes
self.formats[5] = '%H:%M:%S.%f'
self.zero_formats = self.formats
self.offset_formats = [
'UTC time',
'UTC time in %Y',
'UTC time in %B %Y',
'UTC time on %Y-%m-%d',
'UTC time on %Y-%m-%d',
'UTC time on %Y-%m-%d',
]
def set_axis(self, axis):
self.axis = axis
# If this is an x-axis (usually is!) then center the offset text
if self.axis.axis_name == 'x':
offset = self.axis.get_offset_text()
offset.set_horizontalalignment('center')
offset.set_x(0.5)
|
11593627
|
import os
os.environ['APP_DEBUG'] = 'true'
import unittest
from tests.settings import SAMPLES_LIST_DIR, SAMPLES_DETAIL_DIR
from tests.test_base import TestBase
from gerapy_auto_extractor.classifiers.detail import is_detail, probability_of_detail
class TestClassifyDetail(TestBase):
def test_china_news1(self):
html = self.html('china_news1.html', file_dir=SAMPLES_DETAIL_DIR)
result = is_detail(html)
self.assertEqual(result, False)
def test_netease_news1(self):
html = self.html('netease_news1.html', file_dir=SAMPLES_DETAIL_DIR)
result = is_detail(html)
self.assertEqual(result, False)
def test_netease_news2(self):
html = self.html('netease_news1.html', file_dir=SAMPLES_DETAIL_DIR)
result = probability_of_detail(html)
self.assertGreater(result, 0.5)
if __name__ == '__main__':
unittest.main()
|
11593679
|
from abc import ABC
from abc import abstractmethod
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Union
import numpy as np
import pandas as pd
from etna.core.mixins import BaseMixin
if TYPE_CHECKING:
from etna.datasets import TSDataset
class BaseLogger(ABC, BaseMixin):
"""Abstract class for implementing loggers."""
def __init__(self):
"""Create logger instance."""
pass
@abstractmethod
def log(self, msg: Union[str, Dict[str, Any]], **kwargs):
"""
Log any event.
e.g. "Fitted segment segment_name"
Parameters
----------
msg:
Message or dict to log
kwargs:
Parameters for changing additional info in log message
"""
pass
@abstractmethod
def log_backtest_metrics(
self, ts: "TSDataset", metrics_df: pd.DataFrame, forecast_df: pd.DataFrame, fold_info_df: pd.DataFrame
):
"""
Write metrics to logger.
Parameters
----------
ts:
            TSDataset with backtest data
metrics_df:
Dataframe produced with TimeSeriesCrossValidation.get_metrics(aggregate_metrics=False)
        forecast_df:
Forecast from backtest
fold_info_df:
Fold information from backtest
"""
pass
def start_experiment(self, *args, **kwargs):
"""Start experiment(logger post init or reinit next experiment with the same name)."""
pass
def finish_experiment(self, *args, **kwargs):
"""Finish experiment."""
pass
def log_backtest_run(self, metrics: pd.DataFrame, forecast: pd.DataFrame, test: pd.DataFrame):
"""
        Log backtest metrics from one fold.
Parameters
----------
metrics:
Dataframe with metrics from backtest fold
forecast:
Dataframe with forecast
test:
Dataframe with ground truth
"""
pass
class _Logger(BaseLogger):
"""Composite for loggers."""
def __init__(self):
"""Create instance for composite of loggers."""
super().__init__()
self.loggers = []
def add(self, logger: BaseLogger) -> int:
"""
Add new logger.
Parameters
----------
logger:
logger to be added
Returns
-------
result: int
identifier of added logger
"""
self.loggers.append(logger)
return len(self.loggers) - 1
def remove(self, idx: int):
"""
Remove logger by identifier.
Parameters
----------
idx:
identifier of added logger
"""
self.loggers.pop(idx)
def log(self, msg: Union[str, Dict[str, Any]], **kwargs):
"""Log any event."""
for logger in self.loggers:
logger.log(msg, **kwargs)
def log_backtest_metrics(
self, ts: "TSDataset", metrics_df: pd.DataFrame, forecast_df: pd.DataFrame, fold_info_df: pd.DataFrame
):
"""
Write metrics to logger.
Parameters
----------
ts:
            TSDataset with backtest data
metrics_df:
Dataframe produced with Pipeline._get_backtest_metrics()
forecast_df:
Forecast from backtest
fold_info_df:
Fold information from backtest
"""
for logger in self.loggers:
logger.log_backtest_metrics(ts, metrics_df, forecast_df, fold_info_df)
def log_backtest_run(self, metrics: pd.DataFrame, forecast: pd.DataFrame, test: pd.DataFrame):
"""
        Log backtest metrics from one fold.
Parameters
----------
metrics:
Dataframe with metrics from backtest fold
forecast:
Dataframe with forecast
test:
Dataframe with ground truth
"""
for logger in self.loggers:
logger.log_backtest_run(metrics, forecast, test)
def start_experiment(self, *args, **kwargs):
"""Start experiment(logger post init or reinit next experiment with the same name)."""
for logger in self.loggers:
logger.start_experiment(*args, **kwargs)
    def finish_experiment(self, *args, **kwargs):
        """Finish experiment."""
        for logger in self.loggers:
            logger.finish_experiment(*args, **kwargs)
@property
def pl_loggers(self):
"""Pytorch lightning loggers."""
return [logger.pl_logger for logger in self.loggers if "_pl_logger" in vars(logger)]
@contextmanager
def disable(self):
"""Context manager for local logging disabling."""
temp_loggers = self.loggers
self.loggers = []
yield
self.loggers = temp_loggers
def percentile(n: int):
"""Percentile for pandas agg."""
def percentile_(x):
return np.percentile(x.values, n)
percentile_.__name__ = "percentile_%s" % n
return percentile_
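# Illustrative usage: ``metrics_df.agg(["mean", percentile(95)])`` adds a
# "percentile_95" row alongside the built-in aggregators.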
def aggregate_metrics_df(metrics_df: pd.DataFrame) -> Dict[str, float]:
"""Aggregate metrics in `log_backtest_metrics` method.
Parameters
----------
    metrics_df:
        Dataframe produced with Pipeline._get_backtest_metrics()
    Returns
    -------
    result: Dict[str, float]
        Aggregated metrics in the flat ``{metric}_{statistic}`` form
    """
# case for aggregate_metrics=False
if "fold_number" in metrics_df.columns:
metrics_dict = (
metrics_df.groupby("segment")
.mean()
.reset_index()
.drop(["segment", "fold_number"], axis=1)
.apply(["median", "mean", "std", percentile(5), percentile(25), percentile(75), percentile(95)])
.to_dict()
)
# case for aggregate_metrics=True
else:
metrics_dict = (
metrics_df.drop(["segment"], axis=1)
.apply(["median", "mean", "std", percentile(5), percentile(25), percentile(75), percentile(95)])
.to_dict()
)
metrics_dict_wide = {
f"{metrics_key}_{statistics_key}": value
for metrics_key, values in metrics_dict.items()
for statistics_key, value in values.items()
}
return metrics_dict_wide
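if __name__ == "__main__":
    # Minimal smoke test with illustrative data only: two segments, two folds,
    # one metric column, mimicking the aggregate_metrics=False layout.
    _df = pd.DataFrame(
        {
            "segment": ["a", "a", "b", "b"],
            "fold_number": [0, 1, 0, 1],
            "MAE": [1.0, 2.0, 3.0, 4.0],
        }
    )
    # Keys come out flat, e.g. "MAE_mean", "MAE_percentile_5", ...
    print(aggregate_metrics_df(_df))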
|
11593680
|
import pytz
from django.forms.widgets import Widget
from leaflet.forms.widgets import LeafletWidget as StockLeafletWidget
class TimezoneWidget(Widget):
template_name = 'osmcal/partials/event_form_timezone.html'
def get_context(self, *args, **kwargs):
ctx = super().get_context(*args, **kwargs)
ctx['all_timezones'] = pytz.common_timezones
return ctx
class LeafletWidget(StockLeafletWidget):
template_name = 'osmcal/partials/leaflet_widget.html'
|
11593682
|
from __future__ import with_statement
import unittest
from flask import url_for
from .fixtures import app, feature_setup, FEATURE_NAME, FEATURE_IS_ON, NullFlagHandler, AlwaysOnFlagHandler, AlwaysOffFlagHandler, FLAG_CONFIG
class TestAddRemoveHandlers(unittest.TestCase):
def setUp(self):
app.config[FLAG_CONFIG] = {FEATURE_NAME: True}
app.config['TESTING'] = True
self.app = app
self.test_client = app.test_client()
def test_can_clear_handlers(self):
feature_setup.clear_handlers()
assert len(feature_setup.handlers) == 0
def test_can_add_handlers(self):
feature_setup.clear_handlers()
feature_setup.add_handler(NullFlagHandler)
feature_setup.add_handler(AlwaysOnFlagHandler)
assert len(feature_setup.handlers) == 2
def test_can_remove_handlers(self):
feature_setup.clear_handlers()
feature_setup.add_handler(NullFlagHandler)
feature_setup.add_handler(AlwaysOnFlagHandler)
assert len(feature_setup.handlers) == 2
feature_setup.remove_handler(NullFlagHandler)
assert len(feature_setup.handlers) == 1
feature_setup.remove_handler(AlwaysOnFlagHandler)
assert len(feature_setup.handlers) == 0
def test_removing_a_handler_that_wasnt_added_is_a_noop(self):
feature_setup.clear_handlers()
feature_setup.add_handler(NullFlagHandler)
feature_setup.remove_handler(AlwaysOffFlagHandler)
assert len(feature_setup.handlers) == 1
class TestDefaultHandlers(unittest.TestCase):
def setUp(self):
app.config[FLAG_CONFIG] = {FEATURE_NAME: True}
app.config['TESTING'] = True
self.app = app
self.test_client = app.test_client()
def test_null_handler_returns_false(self):
feature_setup.clear_handlers()
feature_setup.add_handler(NullFlagHandler)
with self.app.test_request_context('/'):
url = url_for('feature_decorator')
response = self.test_client.get(url)
assert response.status_code == 404, u'Unexpected status code'
assert FEATURE_IS_ON not in response.data.decode(u'utf-8')
def test_always_false_handler_returns_false(self):
feature_setup.clear_handlers()
feature_setup.add_handler(AlwaysOffFlagHandler)
with self.app.test_request_context('/'):
url = url_for('feature_decorator')
response = self.test_client.get(url)
assert response.status_code == 404, u'Unexpected status code'
assert FEATURE_IS_ON not in response.data.decode(u'utf-8')
def test_always_on_handler_returns_true(self):
feature_setup.clear_handlers()
feature_setup.add_handler(AlwaysOnFlagHandler)
with self.app.test_request_context('/'):
url = url_for('feature_decorator')
response = self.test_client.get(url)
assert response.status_code == 200, u'Unexpected status code'
assert FEATURE_IS_ON in response.data.decode(u'utf-8')
|
11593695
|
from activepapers.storage import ActivePaper
import numpy as np
paper = ActivePaper("internal_files.ap", "w")
script = paper.create_calclet("write",
"""
from activepapers.contents import open
with open('numbers', 'w') as f:
for i in range(10):
f.write(str(i)+'\\n')
""")
script.run()
script = paper.create_calclet("read1",
"""
from activepapers.contents import open
f = open('numbers')
for i in range(10):
assert f.readline().strip() == str(i)
f.close()
""")
script.run()
script = paper.create_calclet("read2",
"""
from activepapers.contents import open
f = open('numbers')
data = [int(line.strip()) for line in f]
f.close()
assert data == list(range(10))
""")
script.run()
script = paper.create_calclet("convert_to_binary",
"""
from activepapers.contents import open
import struct
with open('numbers') as f:
data = [int(line.strip()) for line in f]
f = open('binary_numbers', 'wb')
f.write(struct.pack(len(data)*'h', *data))
f.close()
""")
script.run()
script = paper.create_calclet("read_binary",
"""
from activepapers.contents import open
import struct
f = open('binary_numbers', 'rb')
assert struct.unpack(10*'h', f.read()) == tuple(range(10))
f.close()
""")
script.run()
paper.close()
|
11593706
|
import os
import yaml
from .config import TEMP_DIR
from .models import AcousticModel, G2PModel, IvectorExtractor, LanguageModel
from .exceptions import ArgumentError
def get_available_acoustic_languages():
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'acoustic')
os.makedirs(pretrained_dir, exist_ok=True)
languages = []
for f in os.listdir(pretrained_dir):
if f.endswith(AcousticModel.extension):
languages.append(os.path.splitext(f)[0])
return languages
def get_available_g2p_languages():
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'g2p')
os.makedirs(pretrained_dir, exist_ok=True)
languages = []
for f in os.listdir(pretrained_dir):
if f.endswith(G2PModel.extension):
languages.append(os.path.splitext(f)[0])
return languages
def get_available_ivector_languages():
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'ivector')
os.makedirs(pretrained_dir, exist_ok=True)
languages = []
for f in os.listdir(pretrained_dir):
if f.endswith(IvectorExtractor.extension):
languages.append(os.path.splitext(f)[0])
return languages
def get_available_lm_languages():
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'language_model')
os.makedirs(pretrained_dir, exist_ok=True)
languages = []
for f in os.listdir(pretrained_dir):
if f.endswith(LanguageModel.extension):
languages.append(os.path.splitext(f)[0])
return languages
def get_available_dict_languages():
extension = '.dict'
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'dictionary')
os.makedirs(pretrained_dir, exist_ok=True)
languages = []
for f in os.listdir(pretrained_dir):
if f.endswith(extension):
languages.append(os.path.splitext(f)[0])
return languages
def get_pretrained_acoustic_path(language):
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'acoustic')
os.makedirs(pretrained_dir, exist_ok=True)
return os.path.join(pretrained_dir, language + AcousticModel.extension)
def get_pretrained_ivector_path(language):
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'ivector')
os.makedirs(pretrained_dir, exist_ok=True)
return os.path.join(pretrained_dir, language + IvectorExtractor.extension)
def get_pretrained_language_model_path(language):
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'language_model')
os.makedirs(pretrained_dir, exist_ok=True)
return os.path.join(pretrained_dir, language + LanguageModel.extension)
def get_pretrained_g2p_path(language):
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'g2p')
os.makedirs(pretrained_dir, exist_ok=True)
return os.path.join(pretrained_dir, language + G2PModel.extension)
def get_dictionary_path(language):
pretrained_dir = os.path.join(TEMP_DIR, 'pretrained_models', 'dictionary')
os.makedirs(pretrained_dir, exist_ok=True)
return os.path.join(pretrained_dir, language + '.dict')
def validate_dictionary_arg(dictionary_path, download_dictionaries):
if dictionary_path.lower().endswith('.yaml'):
with open(dictionary_path, 'r', encoding='utf8') as f:
data = yaml.safe_load(f)
found_default = False
for speaker, path in data.items():
if speaker == 'default':
found_default = True
if path.lower() in download_dictionaries:
path = get_dictionary_path(path.lower())
if not os.path.exists(path):
raise ArgumentError('Could not find the dictionary file {} for speaker {}'.format(path, speaker))
if not os.path.isfile(path):
raise ArgumentError('The specified dictionary path ({} for speaker {}) is not a text file.'.format(path, speaker))
if not found_default:
raise ArgumentError('No "default" dictionary was found.')
else:
if dictionary_path.lower() in download_dictionaries:
dictionary_path = get_dictionary_path(dictionary_path.lower())
if not os.path.exists(dictionary_path):
raise ArgumentError('Could not find the dictionary file {}'.format(dictionary_path))
if not os.path.isfile(dictionary_path):
raise ArgumentError('The specified dictionary path ({}) is not a text file.'.format(dictionary_path))
return dictionary_path
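# The multispeaker YAML accepted by validate_dictionary_arg maps speaker names
# to dictionary paths (or downloadable dictionary names) and must contain a
# 'default' entry, e.g. (illustrative paths):
#
#   default: /data/dictionaries/english.dict
#   speaker_one: /data/dictionaries/speaker_one.dict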
|
11593728
|
class ToolStripArrowRenderEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.ToolStripRenderer.RenderArrow event.
ToolStripArrowRenderEventArgs(g: Graphics,toolStripItem: ToolStripItem,arrowRectangle: Rectangle,arrowColor: Color,arrowDirection: ArrowDirection)
"""
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
@staticmethod
def __new__(self,g,toolStripItem,arrowRectangle,arrowColor,arrowDirection):
""" __new__(cls: type,g: Graphics,toolStripItem: ToolStripItem,arrowRectangle: Rectangle,arrowColor: Color,arrowDirection: ArrowDirection) """
pass
ArrowColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the color of the System.Windows.Forms.ToolStrip arrow.
Get: ArrowColor(self: ToolStripArrowRenderEventArgs) -> Color
Set: ArrowColor(self: ToolStripArrowRenderEventArgs)=value
"""
ArrowRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the bounding area of the System.Windows.Forms.ToolStrip arrow.
Get: ArrowRectangle(self: ToolStripArrowRenderEventArgs) -> Rectangle
Set: ArrowRectangle(self: ToolStripArrowRenderEventArgs)=value
"""
Direction=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the direction in which the System.Windows.Forms.ToolStrip arrow points.
Get: Direction(self: ToolStripArrowRenderEventArgs) -> ArrowDirection
Set: Direction(self: ToolStripArrowRenderEventArgs)=value
"""
Graphics=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the graphics used to paint the System.Windows.Forms.ToolStrip arrow.
Get: Graphics(self: ToolStripArrowRenderEventArgs) -> Graphics
"""
Item=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Forms.ToolStripItem on which to paint the arrow.
Get: Item(self: ToolStripArrowRenderEventArgs) -> ToolStripItem
"""
|
11593784
|
import copy
import logging
import os
import subprocess
import tempfile
from .. import util
from .common import Endpoint
class SSHEndpoint(Endpoint):
def __init__(self, hostname, port=None, username=None, ssh_opts=None,
ssh_sudo=False, **kwargs):
super(SSHEndpoint, self).__init__(**kwargs)
self.hostname = hostname
self.port = port
self.username = username
self.ssh_opts = ssh_opts or []
self.sshfs_opts = copy.deepcopy(self.ssh_opts)
self.sshfs_opts += ["auto_unmount", "reconnect", "cache=no"]
self.ssh_sudo = ssh_sudo
if self.source:
self.source = os.path.normpath(self.source)
if not self.path.startswith("/"):
self.path = os.path.join(self.source, self.path)
self.path = os.path.normpath(self.path)
self.sshfs = None
def __repr__(self):
return "(SSH) {}{}".format(
self._build_connect_string(with_port=True), self.path)
def get_id(self):
s = self.hostname
if self.username:
s = "{}@{}".format(self.username, s)
if self.port:
s = "{}:{}".format(s, self.port)
return "ssh://{}{}".format(s, self.path)
def _prepare(self):
# check whether ssh is available
logging.debug("Checking for ssh ...")
cmd = ["ssh"]
try:
util.exec_subprocess(cmd, method="call", stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except FileNotFoundError as e:
logging.debug(" -> got exception: {}".format(e))
logging.info("ssh command is not available")
raise util.AbortError()
else:
logging.debug(" -> ssh is available")
# sshfs is useful for listing directories and reading/writing locks
tempdir = tempfile.mkdtemp()
logging.debug("Created tempdir: {}".format(tempdir))
mountpoint = os.path.join(tempdir, "mnt")
os.makedirs(mountpoint)
logging.debug("Created directory: {}".format(mountpoint))
logging.debug("Mounting sshfs ...")
cmd = ["sshfs"]
if self.port:
cmd += ["-p", str(self.port)]
for opt in self.sshfs_opts:
cmd += ["-o", opt]
cmd += ["{}:/".format(self._build_connect_string()), mountpoint]
try:
util.exec_subprocess(cmd, method="check_call",
stdout=subprocess.DEVNULL)
except FileNotFoundError as e:
logging.debug(" -> got exception: {}".format(e))
if self.source:
# we need that for the locks
logging.info(" The sshfs command is not available but it is "
"mandatory for sourcing from SSH.")
raise util.AbortError()
else:
self.sshfs = mountpoint
logging.debug(" -> sshfs is available")
# create directories, if needed
dirs = []
if self.source is not None:
dirs.append(self.source)
dirs.append(self.path)
if self.sshfs:
for d in dirs:
if not os.path.isdir(self._path2sshfs(d)):
logging.info("Creating directory: {}".format(d))
try:
os.makedirs(self._path2sshfs(d))
except OSError as e:
logging.error("Error creating new location {}: "
"{}".format(d, e))
raise util.AbortError()
else:
cmd = ["mkdir", "-p"] + dirs
self._exec_cmd(cmd)
def _collapse_cmds(self, cmds, abort_on_failure=True):
"""Concatenates all given commands, ';' is inserted as separator."""
collapsed = []
for i, cmd in enumerate(cmds):
if isinstance(cmd, (list, tuple)):
collapsed.extend(cmd)
if len(cmds) > i + 1:
collapsed.append("&&" if abort_on_failure else ";")
return [collapsed]
def _exec_cmd(self, orig_cmd, **kwargs):
"""Executes the command at the remote host."""
cmd = ["ssh"]
if self.port:
cmd += ["-p", str(self.port)]
for opt in self.ssh_opts:
cmd += ["-o", opt]
cmd += [self._build_connect_string()]
if self.ssh_sudo:
cmd += ["sudo"]
cmd.extend(orig_cmd)
return util.exec_subprocess(cmd, **kwargs)
def _listdir(self, location):
"""Operates remotely via 'ls -1A'."""
if self.sshfs:
items = os.listdir(self._path2sshfs(location))
else:
cmd = ["ls", "-1A", location]
output = self._exec_cmd(cmd, universal_newlines=True)
items = output.splitlines()
return items
def _get_lock_file_path(self):
return self._path2sshfs(super(SSHEndpoint, self)._get_lock_file_path())
########## Custom methods
def _build_connect_string(self, with_port=False):
s = self.hostname
if self.username:
s = "{}@{}".format(self.username, s)
if with_port and self.port:
s = "{}:{}".format(s, self.port)
return s
def _path2sshfs(self, path):
"""Joins the given ``path`` with the sshfs mountpoint."""
if not self.sshfs:
raise ValueError("sshfs not mounted")
if path.startswith("/"):
path = path[1:]
return os.path.join(self.sshfs, path)
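# A minimal usage sketch (hypothetical values; assumes the base Endpoint
# accepts the source=/path= keyword arguments referenced above):
#
#   ep = SSHEndpoint("backup.example.com", port=2222, username="btrfs",
#                    ssh_opts=["Compression=yes"], path="/mnt/backups")
#   ep._prepare()  # checks for ssh, mounts sshfs, creates missing directories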
|
11593802
|
import os, re, sys
from cupcake.io import GFF
from Bio import SeqIO
from collections import defaultdict
from bx.intervals.cluster import ClusterTree
from Cogent import BioReaders
"""
# BLASTN 2.6.0+
# Query: Domino_testis_i0_HQ_sampled07afe|c338324/f2p10/426
# Database: /pbi/dept/secondary/siv/gconcepcion/db/ncbi/nt
# Fields: query acc.ver, subject acc.ver, subject title, % identity, alignment length, mismatches, gap opens, q. start, q. end, s. start, s. end, evalue, bit score
# 1 hits found
Domino_testis_i0_HQ_sampled07afe|c338324/f2p10/426 NR_146154.1 Homo sapiens RNA, 28S ribosomal (LOC109910382), ribosomal RNA 99.750 400 1 0 1
400 2557 2956 0.0 734
"""
rex = re.compile(r'#\sFields:[\S\s]+')
def read_blastn(filename, qlen_dict):
"""
    Read a BLASTn tabular output file and return, per query, the (e-value, subject title) of its best hit.
"""
f = open(filename)
best_of = defaultdict(lambda: (100, 'NA'))
f.readline()
assert f.readline().strip().split()[1]=='Query:'
assert f.readline().strip().split()[1]=='Database:'
line = f.readline().strip()
if line.find('0 hits found') > 0: return best_of
fields = line.strip().split(',')
    try:
        i = fields.index(' subject title')
        j = fields.index(' evalue')
        k = fields.index(' q. start')
        l = fields.index(' q. end')
    except ValueError:
        print("Unable to find fields 'evalue', 'subject title', 'q. start', and 'q. end' in {0}. Abort!".format(filename), file=sys.stderr)
        sys.exit(-1)
f.readline()
for line in f:
if line.startswith('#'): continue
raw = line.strip().split('\t')
seqid, e, name, qstart, qend = raw[0], float(raw[j]), raw[i], int(raw[k]), int(raw[l])
if qend-qstart >= .8*qlen_dict[seqid]: #BLASTn result have to cover 80% of the sequence length
            if e < best_of[seqid][0]: best_of[seqid] = (e, name)
return best_of
def read_cogent2_aligned_to_genome_gff(filename):
"""
Read cogent2 mapped to a genome.
Return: dict of {cogent path} --> list of gmapRecord; set of mapped genome contigs
NOTE: (gmap was run with -n 0 so if multiple must be chimeric)
"""
d = defaultdict(lambda: [])
contigs_seen = set()
if not os.path.exists(filename):
return {}, set()
try:
for r in GFF.gmapGFFReader(filename):
d[r.seqid].append(r)
contigs_seen.add(r.chr)
except IndexError:
pass
return dict(d), contigs_seen
def read_cogent2_aligned_to_genome_sam(input, filename):
"""
Read cogent2 mapped to a genome.
Return: dict of {cogent path} --> list of SAM Record; set of mapped genome contigs
NOTE: (minimap2 was run with --secondary=no so if multiple must be chimeric)
"""
d = defaultdict(lambda: [])
contigs_seen = set()
if not os.path.exists(filename):
return {}, set()
try:
for r in BioReaders.GMAPSAMReader(filename, True, query_len_dict=dict((r.id, len(r.seq)) for r in SeqIO.parse(open(input),'fasta'))):
if r.sID == '*': continue # unmapped
d[r.qID].append(r)
contigs_seen.add(r.sID)
except IndexError:
pass
return dict(d), contigs_seen
def is_true_gmap_chimeric(records):
"""
    Given a list of gmap records of a single input, it is truly chimeric if:
    (a) at least two of the records overlap (same chromosome, overlapping location) by 100 bp
    OR
    (b) at least two of the records are on different chromosomes
In other words, if all the records are on the same chromosome and do not overlap, they are
NOT truly chimeric.
"""
by_chr = defaultdict(lambda: [])
for r in records: by_chr[r.chr].append(r)
if len(by_chr) > 1: return True # on multiple chromosomes, really chimeric
else: # all on same chromosome
flag = False
records.sort(key=lambda r: r.start)
for i in range(len(records)-1):
if records[i].end - records[i+1].start >= 100: # overlap by more than 100 bp, true chimeric
flag = True
break
return flag
def is_true_minimap2_chimeric(records):
"""
    Given a list of minimap2 records of a single input, it is truly chimeric if:
    (a) at least two of the records overlap (same chromosome, overlapping location) by 100 bp
    OR
    (b) at least two of the records are on different chromosomes
In other words, if all the records are on the same chromosome and do not overlap, they are
NOT truly chimeric.
"""
by_chr = defaultdict(lambda: [])
for r in records: by_chr[r.sID].append(r)
if len(by_chr) > 1: return True # on multiple chromosomes, really chimeric
else: # all on same chromosome
flag = False
records.sort(key=lambda r: r.sStart)
for i in range(len(records)-1):
if records[i].sEnd - records[i+1].sStart >= 100: # overlap by more than 100 bp, true chimeric
flag = True
break
return flag
def calculate_cov_acc(d):
"""
Given dict of {cogent path} --> list of minimap2 record
(minimap2 was run with --secondary=no so if multiple must be chimeric)
If a Cogent contig was mapped chimerically (even if it's on the same contig)
possible with "switchbacks", then it's considered bad and we want to report it.
But we also want to rule out any bad mapping. So, check that for any multimapping,
that at least two of the mapped loci overlap. Otherwise it is considered "OK".
"""
worst_cov, worst_acc, has_chimeric = 100, 100, False
if len(d) == 0: return 0, 0, False
for v in d.values():
c = ClusterTree(0,0)
for x in v:
qlen = x.qLen
c.insert(x.qStart, x.qEnd, -1)
cov = sum(_e-_s for _s,_e,_junk in c.getregions())*100./qlen
acc = sum(x.identity*x.qCoverage for x in v)*100./sum(x.qCoverage for x in v)
if len(v) > 1 and is_true_minimap2_chimeric(v): # is truly chimeric
has_chimeric = True
if cov < worst_cov:
worst_cov, worst_acc = cov, acc
return worst_cov, worst_acc, has_chimeric
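# Note on the ClusterTree idiom above (bx-python): ClusterTree(0, 0) clusters
# intervals that touch or overlap; after c.insert(qStart, qEnd, -1) for each
# alignment, c.getregions() yields merged (start, end, ids) tuples, so summing
# their lengths gives the number of query bases covered at least once.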
def tally_for_a_Cogent_dir(dirname, writer1, writer2, genome1, genome2=None, blastn_filename=None):
"""
1. read input mapped to cogent2 (in.trimmed.fa.cogent2.gff)
2. read cogent2 mapped to genome1
3. read cogent2 mapped to genome2 (if genome2 does not exist, just repeat genome1)
"""
if not os.path.exists(os.path.join(dirname, 'COGENT.DONE')):
return
seq_info = defaultdict(lambda: [])
contigs_seen = set()
# input mapped to Cogent contigs
filename = os.path.join(dirname, 'in.trimmed.fa.cogent2.sam')
reader = BioReaders.GMAPSAMReader(filename, True, \
query_len_dict=dict((r.id, len(r.seq)) for r in SeqIO.parse(open(os.path.join(dirname, 'in.trimmed.fa')), 'fasta')))
for r in reader:
seq_info[r.qID].append(r)
contigs_seen.add(r.sID)
# sanity check that all sequences in in.fa are mapped to cogent2.fa
for r in SeqIO.parse(open(os.path.join(dirname, 'in.fa')), 'fasta'):
assert r.id in seq_info
d_genome1, contig_genome1 = read_cogent2_aligned_to_genome_sam(os.path.join(dirname, 'cogent2.fa'), os.path.join(dirname,'cogent2.fa.'+genome1+'.sam'))
if genome2 is not None:
d_genome2, contig_genome2 = read_cogent2_aligned_to_genome_sam(os.path.join(dirname, 'cogent2.fa'), os.path.join(dirname,'cogent2.fa.'+genome2+'.sam'))
if blastn_filename is not None:
qlen_dict = dict((r.id, len(r.seq)) for r in SeqIO.parse(open(os.path.join(dirname, 'in.trimmed.fa')),'fasta'))
best_of = read_blastn(os.path.join(dirname, blastn_filename), qlen_dict)
# write:
# dirname, # of input, # of cogent contig, # of pacbio_contig, total pacbio cov, pacbio iden
cov1, acc1, has_chimeric1 = calculate_cov_acc(d_genome1)
rec1 = {'gene_family': dirname,
'input_size': len(seq_info),
'num_Cogent_contigs': len(contigs_seen),
'num_genome_contig': len(contig_genome1),
'genome_cov': "{0:.2f}".format(cov1),
'genome_acc': "{0:.2f}".format(acc1),
'genome_chimeric': has_chimeric1,
'genome_contigs': ",".join(contig_genome1)}
# (for genome2), # of contig, total worst cov, iden, is_chimeric, comma-separated list of contigs
if genome2 is not None:
cov2, acc2, has_chimeric2 = calculate_cov_acc(d_genome2)
rec1['num_genome2_contig'] = len(contig_genome2)
rec1['genome2_cov'] = "{0:.2f}".format(cov2)
rec1['genome2_acc'] = "{0:.2f}".format(acc2)
rec1['genome2_chimeric'] = has_chimeric2
rec1['genome2_contigs'] = ",".join(contig_genome2)
# (for blastn, optional) best name with best e-value
if blastn_filename is not None:
if len(best_of) == 0:
rec1['num_blastn'] = 0
rec1['blastn_best'] = 'NA'
else:
stuff = list(best_of.values()) # list of (e-value, name)
stuff.sort()
rec1['num_blastn'] = sum(_n!='NA' for _e,_n in list(best_of.values()))
rec1['blastn_best'] = '"' + stuff[0][1] + '"'
writer1.writerow(rec1)
in_aligned_to_genome1 = os.path.join(dirname, 'in.trimmed.fa.'+genome1+'.sam')
if os.path.exists(in_aligned_to_genome1):
d3, junk = read_cogent2_aligned_to_genome_sam(os.path.join(dirname, 'in.trimmed.fa'), in_aligned_to_genome1)
else:
d3 = {}
for seqid, v in seq_info.items():
contigs = [x.sID for x in v]
acc = sum(x.identity*x.qCoverage for x in v)/sum(x.qCoverage for x in v)
rec2 = {'seqid': seqid,
'gene_family': dirname,
'Cogent_contig': ",".join(contigs),
'Cogent_contig_acc': acc}
        if seqid not in d3:
rec2['scaffold'] = 'NA'
rec2['num_scaffold'] = 0
rec2['scaffold_coverage'] = 'NA'
rec2['scaffold_acc'] = 'NA'
if blastn_filename is not None:
rec2['blastn_best'] = 'NA'
else:
scaffolds = [x.sID for x in d3[seqid]]
# calculate cov and acc
c = ClusterTree(0,0)
for x in d3[seqid]:
qlen = x.qLen
c.insert(x.qStart, x.qEnd, -1)
cov = sum(_e-_s for _s,_e,_junk in c.getregions())*100./qlen
acc = sum(x.identity*x.qCoverage for x in d3[seqid])*1./sum(x.qCoverage for x in d3[seqid])
rec2['scaffold'] = ",".join(scaffolds)
rec2['num_scaffold'] = len(scaffolds)
rec2['scaffold_coverage'] = cov
rec2['scaffold_acc'] = acc
if blastn_filename is not None:
rec2['blastn_best'] = best_of[seqid][1]
writer2.writerow(rec2)
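# Callers are expected to supply csv.DictWriter instances for writer1/writer2
# (hypothetical setup; the field names mirror the rec1 keys written above):
#
#   import csv
#   fields1 = ['gene_family', 'input_size', 'num_Cogent_contigs',
#              'num_genome_contig', 'genome_cov', 'genome_acc',
#              'genome_chimeric', 'genome_contigs']
#   writer1 = csv.DictWriter(open('summary.txt', 'w'), fields1, delimiter='\t')
#   writer1.writeheader()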
|
11593806
|
from loggers import Actions
from stopping_decision_makers.base_decision_maker import BaseDecisionMaker
class FixedDepthDecisionMaker(BaseDecisionMaker):
"""
A concrete implementation of a decision maker.
    Returns Actions.SNIPPET while the user's depth in the SERP is less than a predetermined value, and Actions.QUERY otherwise.
"""
def __init__(self, search_context, logger, depth):
super(FixedDepthDecisionMaker, self).__init__(search_context, logger)
self.__depth = depth
def decide(self):
"""
If the user's current position in the current SERP is < the maximum depth, look at the next snippet in the SERP.
Otherwise, a new query should be issued.
"""
if self._search_context.get_current_serp_position() < self.__depth:
return Actions.SNIPPET
return Actions.QUERY
|
11593842
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import IO
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def delete_thing_shadow(self, thingName: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_thing_shadow(self, thingName: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def publish(self, topic: str, qos: int = None, payload: Union[bytes, IO] = None):
pass
def update_thing_shadow(self, thingName: str, payload: Union[bytes, IO]) -> Dict:
pass
|
11593882
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
from wtforms.validators import DataRequired, InputRequired
class ReminderForm(FlaskForm):
title = StringField('title', validators=[DataRequired()])
body = TextAreaField('body', validators=[DataRequired()])
tag = SelectField(u'Tag', coerce=int, validators=[InputRequired()])
submit = SubmitField('Write')
|
11593931
|
from operator import attrgetter
class Solver:
def checkvalidpuzzle(self, arr):
subsquarestartingpoints = [[0, 0], [0, 3], [0, 6], [3, 0], [3, 3], [3, 6], [6, 0], [6, 3], [6, 6]]
# Checking row validity of every row
for row in range(9):
has = set()
for col in range(9):
if arr[row][col] == 0:
continue
if arr[row][col] in has:
return False
has.add(arr[row][col])
# Checking column validity of every column
for col in range(9):
has = set()
for row in range(9):
if arr[row][col] == 0:
continue
if arr[row][col] in has:
return False
has.add(arr[row][col])
# Checking box validity
for pointrow, pointcol in subsquarestartingpoints:
has = set()
for row in range(3):
for col in range(3):
if arr[pointrow+row][pointcol+col] == 0:
continue
if arr[pointrow+row][pointcol+col] in has:
return False
has.add(arr[pointrow+row][pointcol+col])
return True
def print_board(self, arr):
for i in range(9):
for j in range(9):
if arr[i][j]==0:
print("_", end=" ")
else:
print(arr[i][j], end=" ")
print("")
@staticmethod
def solve_sudoku(arr):
"""
Create a binary matrix to convert to an exact cover problem.
Choices: 729
Each cell can have any value from 1 to 9.
Constraints: 324
1. Each row must have all the values from 1 to 9, total: 81
2. Each column must have all the values from 1 to 9, total: 81
3. Each block must have all the values from 1 to 9, total: 81
4. Each cell must be filled, total: 81
Choices are ordered by row > col > value
Constraints are ordered as above.
"""
        # Represent the binary matrix as sparse matrix (has < 729 * 4 ones in a matrix of 729 * 324)
positions = []
def add_position(ch, r, c, x):
positions.append([ch, [
9 * r + x, # Row constraint
81 + 9 * c + x, # Col constraint
162 + 9 * ((r // 3) * 3 + (c // 3)) + x, # Block constraint
243 + 9 * r + c # Cell constraint
]])
choice_row = 0
for i in range(9): # Row
for j in range(9): # Column
if arr[i][j] == 0:
for k in range(9): # Value
add_position(choice_row, i, j, k)
choice_row += 1
else:
k = arr[i][j] - 1
add_position(choice_row + k, i, j, k)
choice_row += 9
alg_x = AlgorithmX(324, positions)
if not alg_x.solve():
return False
rows = alg_x.solution
if len(rows) != 81:
return False
for row in rows:
i, row = divmod(row, 81)
j, value = divmod(row, 9)
arr[i][j] = value + 1 # value is 0-8
return True
class AlgorithmXNode:
def __init__(self, value=0):
"""
Create a node with self links.
:param value: Serves multiple purposes:
- nothing for root node
- the number of cells in column for all header nodes
- the row id in all other nodes
"""
self.value = value
self.left = self.right = self.up = self.down = self.top = self
def insert_h(self):
"""
Insert this node in the row, using left and right links.
"""
self.left.right = self.right.left = self
def insert_v(self, update_top=True):
"""
Insert this node in the column.
:param update_top: If true, update the counter in the header.
"""
self.up.down = self.down.up = self
if update_top:
self.top.value += 1
def insert_above(self, node):
"""
Insert this node above the given node, in the column, updating the top.
"""
self.top = node.top
self.up = node.up
self.down = node
self.insert_v()
def insert_after(self, node):
"""
        Insert this node to the right of the given node.
"""
self.right = node.right
self.left = node
self.insert_h()
def remove_h(self):
"""
Remove this node from the row. Inverse of insert_h.
"""
self.left.right = self.right
self.right.left = self.left
def remove_v(self, update_top=True):
"""
Remove this node from the column. Inverse of insert_v.
:param update_top: If true, update the counter in the header.
"""
self.up.down = self.down
self.down.up = self.up
if update_top:
self.top.value -= 1
def cover(self):
self.top.remove_h()
for row in self.top.loop('down'):
for node in row.loop('right'):
node.remove_v()
def uncover(self):
for row in self.top.loop('up'):
for node in row.loop('left'):
node.insert_v()
self.top.insert_h()
def loop(self, direction):
"""
Yield each node from self to self, following the direction, excluding self.
:param direction: One of 'left', 'right', 'up', 'down'.
:return: Nodes from self to self (both exclusive), one at a time.
"""
if direction not in {'left', 'right', 'up', 'down'}:
raise ValueError(f"Direction must be one of 'left', 'right', 'up', 'down', got {direction}")
next_node = attrgetter(direction)
node = next_node(self)
while node != self:
yield node
node = next_node(node)
class AlgorithmX:
"""
Use Algorithm X with dancing links to solve a constraint satisfaction problem
represented in the form of Exact Cover.
Refer to https://en.wikipedia.org/wiki/Dancing_Links and
https://en.wikipedia.org/wiki/Algorithm_X for the algorithm.
"""
def __init__(self, constraint_count, matrix):
matrix.sort()
headers = [AlgorithmXNode() for _ in range(constraint_count)]
for row, cols in matrix:
first = None # first node in row
for col in cols:
node = AlgorithmXNode(row)
# Insert in column
node.insert_above(headers[col])
# Insert in row
if first is None:
first = node
else:
node.insert_after(first)
# Header row
self.root = AlgorithmXNode()
last = self.root
for header in headers:
header.insert_after(last)
last = header
self.solution = []
def solve(self):
if self.root.right == self.root:
# All constraints have been satisfied
return True
# Find column with least number of nodes
header = min(self.root.loop('right'), key=attrgetter('value'))
if header.value == 0:
# No valid solution exists
return False
header.cover()
for row in header.loop('down'):
for node in row.loop('right'):
node.cover()
if self.solve():
# Add row to solution
self.solution.append(row.value)
return True
# Try a different value
for node in row.loop('left'):
node.uncover()
header.uncover()
# Backtrack
return False
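if __name__ == '__main__':
    # Smoke test using the classic example puzzle from the Wikipedia article
    # on Sudoku (illustrative input; 0 marks an empty cell).
    board = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]
    solver = Solver()
    assert solver.checkvalidpuzzle(board)
    if Solver.solve_sudoku(board):
        solver.print_board(board)
    else:
        print('No solution found')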
|
11593940
|
import math
import copy
import numpy as np
import basis.robot_math as rm
import robot_sim._kinematics.jlchain_mesh as jlm
import robot_sim._kinematics.jlchain_ik as jlik
class JLChain(object):
"""
Joint Link Chain, no branches allowed
Usage:
    1. Inherit this class and overwrite self._init_jlchain()/self.tgtjnts to define new joint links
    2. Define multiple instances of this class to compose a complicated structure
    Notes:
    The joint types include "revolute", "prismatic", "end"; one JLChain object always has two "end" joints
"""
def __init__(self,
pos=np.zeros(3),
rotmat=np.eye(3),
homeconf=np.zeros(6),
name='jlchain',
cdprimitive_type='box',
cdmesh_type='triangles'):
"""
initialize a manipulator
naming rules
allvalues -- all values: values at all joints including the fixed ones at the base and the end (both are 0)
conf -- configuration: target joint values
:param pos:
:param rotmat:
        :param homeconf: 1 x ndof ndarray of home joint values (its length defines the dof)
:param name:
        :param cdprimitive_type: 'aabb', 'obb', 'convex_hull', 'triangulation'
        :param cdmesh_type:
"""
self.name = name
self.pos = pos
self.rotmat = rotmat
self.ndof = homeconf.shape[0]
self._zeroconf = np.zeros(self.ndof)
self._homeconf = homeconf.astype('float64')
# initialize joints and links
self.lnks, self.jnts = self._init_jlchain()
self._tgtjnts = range(1, self.ndof + 1)
self.goto_homeconf()
# default tcp
self.tcp_jntid = -1
self.tcp_loc_pos = np.zeros(3)
self.tcp_loc_rotmat = np.eye(3)
# collision primitives
# mesh generator
self.cdprimitive_type = cdprimitive_type
self.cdmesh_type = cdmesh_type
self._mt = jlm.JLChainMesh(self, cdprimitive_type=cdprimitive_type, cdmesh_type=cdmesh_type) # t = tool
self._ikt = jlik.JLChainIK(self) # t = tool
def _init_jlchain(self):
"""
init joints and links chains
there are two lists of dictionaries where the first one is joints, the second one is links
links: a list of dictionaries with each dictionary holding the properties of a link
joints: a list of dictionaries with each dictionary holding the properties of a joint
njoints is assumed to be equal to nlinks+1
joint i connects link i-1 and link i
:return:
author: weiwei
date: 20161202tsukuba, 20190328toyonaka, 20200330toyonaka
"""
lnks = [dict() for i in range(self.ndof + 1)]
jnts = [dict() for i in range(self.ndof + 2)]
for id in range(self.ndof + 1):
lnks[id]['name'] = 'link0'
lnks[id]['loc_pos'] = np.array([0, 0, 0])
lnks[id]['loc_rotmat'] = rm.rotmat_from_euler(0, 0, 0)
lnks[id]['com'] = np.zeros(3)
lnks[id]['inertia'] = np.eye(3)
            lnks[id]['mass'] = 0  # the visual adjustment is ignored for simplicity
lnks[id]['meshfile'] = None
lnks[id]['collisionmodel'] = None
lnks[id]['cdprimit_childid'] = -1 # id of the CollisionChecker.np.Child
lnks[id]['scale'] = [1, 1, 1] # 3 list
lnks[id]['rgba'] = [.7, .7, .7, 1] # 4 list
for id in range(self.ndof + 2):
jnts[id]['type'] = 'revolute'
jnts[id]['parent'] = id - 1
jnts[id]['child'] = id + 1
jnts[id]['loc_pos'] = np.array([0, .1, 0]) if id > 0 else np.array([0, 0, 0])
jnts[id]['loc_rotmat'] = np.eye(3)
jnts[id]['loc_motionax'] = np.array([0, 0, 1]) # rot ax for rev joint, linear ax for pris joint
jnts[id]['gl_pos0'] = jnts[id]['loc_pos'] # to be updated by self._update_fk
jnts[id]['gl_rotmat0'] = jnts[id]['loc_rotmat'] # to be updated by self._update_fk
jnts[id]['gl_motionax'] = jnts[id]['loc_motionax'] # to be updated by self._update_fk
jnts[id]['gl_posq'] = jnts[id]['gl_pos0'] # to be updated by self._update_fk
jnts[id]['gl_rotmatq'] = jnts[id]['gl_rotmat0'] # to be updated by self._update_fk
jnts[id]['motion_rng'] = [-math.pi, math.pi] # min, max
jnts[id]['motion_val'] = 0
        jnts[0]['gl_pos0'] = self.pos  # not strictly necessary; kept for readability
jnts[0]['gl_rotmat0'] = self.rotmat
jnts[0]['type'] = 'end'
jnts[self.ndof + 1]['loc_pos'] = np.array([0, 0, 0])
jnts[self.ndof + 1]['child'] = -1
jnts[self.ndof + 1]['type'] = 'end'
return lnks, jnts
def _update_fk(self):
"""
Update the kinematics
Note that this function should not be called explicitly
It is called automatically by functions like movexxx
:return: updated links and joints
author: weiwei
date: 20161202, 20201009osaka
"""
id = 0
while id != -1:
# update joint values
pjid = self.jnts[id]['parent']
if pjid == -1:
self.jnts[id]['gl_pos0'] = self.pos
self.jnts[id]['gl_rotmat0'] = self.rotmat
else:
self.jnts[id]['gl_pos0'] = self.jnts[pjid]['gl_posq'] + np.dot(self.jnts[pjid]['gl_rotmatq'],
self.jnts[id]['loc_pos'])
self.jnts[id]['gl_rotmat0'] = np.dot(self.jnts[pjid]['gl_rotmatq'], self.jnts[id]['loc_rotmat'])
self.jnts[id]['gl_motionax'] = np.dot(self.jnts[id]['gl_rotmat0'], self.jnts[id]['loc_motionax'])
if self.jnts[id]['type'] == "end":
self.jnts[id]['gl_rotmatq'] = self.jnts[id]['gl_rotmat0']
self.jnts[id]['gl_posq'] = self.jnts[id]['gl_pos0']
elif self.jnts[id]['type'] == "revolute":
self.jnts[id]['gl_rotmatq'] = np.dot(self.jnts[id]['gl_rotmat0'],
rm.rotmat_from_axangle(self.jnts[id]['loc_motionax'],
self.jnts[id]['motion_val']))
self.jnts[id]['gl_posq'] = self.jnts[id]['gl_pos0']
elif self.jnts[id]['type'] == "prismatic":
self.jnts[id]['gl_rotmatq'] = self.jnts[id]['gl_rotmat0']
tmp_translation = np.dot(self.jnts[id]['gl_rotmatq'],
self.jnts[id]['loc_motionax'] * self.jnts[id]['motion_val'])
self.jnts[id]['gl_posq'] = self.jnts[id]['gl_pos0'] + tmp_translation
# update link values, child link id = id
if id < self.ndof + 1:
self.lnks[id]['gl_pos'] = np.dot(self.jnts[id]['gl_rotmatq'], self.lnks[id]['loc_pos']) + \
self.jnts[id]['gl_posq']
self.lnks[id]['gl_rotmat'] = np.dot(self.jnts[id]['gl_rotmatq'], self.lnks[id]['loc_rotmat'])
# self.lnks[id]['cdprimit_cache'][0] = True
id = self.jnts[id]['child']
return self.lnks, self.jnts
@property
def homeconf(self):
return np.array([self._homeconf[i - 1] for i in self.tgtjnts])
@property
def zeroconf(self):
return np.array([self._zeroconf[i - 1] for i in self.tgtjnts])
@property
def tgtjnts(self):
return self._tgtjnts
@tgtjnts.setter
def tgtjnts(self, values):
self._tgtjnts = values
self._ikt = jlik.JLChainIK(self)
def fix_to(self, pos, rotmat, jnt_values=None):
# fix the connecting end of the jlchain to the given pos and rotmat
self.pos = pos
self.rotmat = rotmat
self.fk(jnt_values=jnt_values)
def set_homeconf(self, jnt_values=None):
"""
:param jnt_values:
:return:
"""
if jnt_values is None:
jnt_values = np.zeros(self.ndof)
if len(jnt_values) == self.ndof:
self._homeconf = jnt_values
else:
            raise ValueError('The given jnt_values must have {} dof!'.format(self.ndof))
def reinitialize(self, cdprimitive_type=None, cdmesh_type=None):
"""
reinitialize jntlinks by updating fk and reconstructing jntlnkmesh
:return:
author: weiwei
date: 20201126
"""
self.goto_homeconf()
if cdprimitive_type is None: # use previously set values if none
cdprimitive_type = self.cdprimitive_type
if cdmesh_type is None:
cdmesh_type = self.cdmesh_type
        self._mt = jlm.JLChainMesh(self, cdprimitive_type=cdprimitive_type, cdmesh_type=cdmesh_type)
self._ikt = jlik.JLChainIK(self)
def set_tcp(self, tcp_jntid=None, tcp_loc_pos=None, tcp_loc_rotmat=None):
if tcp_jntid is not None:
self.tcp_jntid = tcp_jntid
if tcp_loc_pos is not None:
self.tcp_loc_pos = tcp_loc_pos
if tcp_loc_rotmat is not None:
self.tcp_loc_rotmat = tcp_loc_rotmat
def get_gl_tcp(self,
tcp_jnt_id=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None):
"""
tcp_jntid, tcp_loc_pos, tcp_loc_rotmat are the tool center pose parameters. They are
used for temporary computation, the self.tcp_xxx parameters will not be changed
in case None is provided, the self.tcp_jntid, self.tcp_loc_pos, self.tcp_loc_rotmat will be used
:param tcp_jnt_id:
:param tcp_loc_pos:
:param tcp_loc_rotmat:
:return:
"""
return self._ikt.get_gl_tcp(tcp_jnt_id, tcp_loc_pos, tcp_loc_rotmat)
def get_jnt_ranges(self):
"""
        get joint ranges
:return: [[jnt0min, jnt0max], [jnt1min, jnt1max], ...]
date: 20180602, 20200704osaka
author: weiwei
"""
jnt_limits = []
for id in self.tgtjnts:
jnt_limits.append([self.jnts[id]['motion_rng'][0], self.jnts[id]['motion_rng'][1]])
return jnt_limits
def fk(self, jnt_values=None):
"""
move the joints using forward kinematics
:param jnt_values: a 1xn ndarray where each element indicates the value of a joint (in radian or meter)
        :return:
author: weiwei
date: 20161205, 20201009osaka
"""
if jnt_values is not None:
counter = 0
for id in self.tgtjnts:
self.jnts[id]['motion_val'] = jnt_values[counter]
counter += 1
self._update_fk()
def goto_homeconf(self):
"""
move the robot_s to initial pose
:return: null
author: weiwei
date: 20161211osaka
"""
self.fk(jnt_values=self.homeconf)
def goto_zeroconf(self):
"""
        move the robot_s to the zero pose
:return: null
author: weiwei
date: 20161211osaka
"""
self.fk(jnt_values=self.zeroconf)
def get_jnt_values(self):
"""
get the current joint values
:return: jnt_values: a 1xn ndarray
author: weiwei
date: 20161205tsukuba
"""
jnt_values = np.zeros(len(self.tgtjnts))
counter = 0
for id in self.tgtjnts:
jnt_values[counter] = self.jnts[id]['motion_val']
counter += 1
return jnt_values
def rand_conf(self):
"""
generate a random configuration
author: weiwei
date: 20200326
"""
jnt_values = np.zeros(len(self.tgtjnts))
counter = 0
for i in self.tgtjnts:
jnt_values[counter] = np.random.uniform(self.jnts[i]['motion_rng'][0], self.jnts[i]['motion_rng'][1])
counter += 1
return jnt_values
def ik(self,
tgt_pos,
tgt_rotmat,
seed_jnt_values=None,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None,
max_niter=100,
local_minima="accept",
toggle_debug=False):
"""
Numerical IK
NOTE1: in the numik function of rotjntlinksik,
        tcp_jntid, tcp_loc_pos, and tcp_loc_rotmat are the tool center pose parameters. They are
        used for temporary computation only; the self.tcp_xxx attributes will not be changed.
        If None is provided, self.tcp_jntid, self.tcp_loc_pos, and self.tcp_loc_rotmat are used instead.
NOTE2: if list, len(tgtpos)=len(tgtrot) < len(tcp_jntid)=len(tcp_loc_pos)=len(tcp_loc_rotmat)
:param tgt_pos: 1x3 nparray, single value or list
:param tgt_rotmat: 3x3 nparray, single value or list
:param seed_jnt_values: the starting configuration used in the numerical iteration
:param tcp_jntid: a joint ID in the self.tgtjnts
        :param tcp_loc_pos: 1x3 nparray, described in the local frame of self.jnts[tcp_jntid], single value or list
        :param tcp_loc_rotmat: 3x3 nparray, described in the local frame of self.jnts[tcp_jntid], single value or list
        :param max_niter: maximum number of iterations for the numerical solver
:param local_minima: what to do at local minima: "accept", "randomrestart", "end"
:return:
"""
return self._ikt.num_ik(tgt_pos=tgt_pos,
tgt_rot=tgt_rotmat,
seed_jnt_values=seed_jnt_values,
max_niter=max_niter,
tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
local_minima=local_minima,
toggle_debug=toggle_debug)
def manipulability(self):
return self._ikt.manipulability()
def manipulability_axmat(self, type="translational"):
return self._ikt.manipulability_axmat(type=type)
def jacobian(self):
return self._ikt.jacobian(tcp_jntid=self.tcp_jntid)
def cvt_loc_tcp_to_gl(self,
loc_pos=np.zeros(3),
loc_rotmat=np.eye(3),
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None):
"""
given a relative pos and relative rot with respective to the ith jntlnk,
get the world pos and world rot
:param loc_pos: nparray 1x3
        :param loc_rotmat: nparray 3x3
:return:
author: weiwei
date: 20190312, 20210609
"""
if tcp_jntid is None:
tcp_jntid = self.tcp_jntid
if tcp_loc_pos is None:
tcp_loc_pos = self.tcp_loc_pos
if tcp_loc_rotmat is None:
tcp_loc_rotmat = self.tcp_loc_rotmat
tcp_gl_pos = self.jnts[tcp_jntid]['gl_posq'] + self.jnts[tcp_jntid]['gl_rotmatq'].dot(tcp_loc_pos)
tcp_gl_rotmat = self.jnts[tcp_jntid]['gl_rotmatq'].dot(tcp_loc_rotmat)
gl_pos = tcp_gl_pos + tcp_gl_rotmat.dot(loc_pos)
gl_rot = tcp_gl_rotmat.dot(loc_rotmat)
return [gl_pos, gl_rot]
def cvt_gl_to_loc_tcp(self,
gl_pos,
gl_rotmat,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None):
"""
given a world pos and world rot
get the relative pos and relative rot with respective to the ith jntlnk
:param gl_pos: 1x3 nparray
:param gl_rotmat: 3x3 nparray
:param tcp_jntid: id of the joint in which the tool center point is defined
:param tcp_loc_pos: 1x3 nparray, local pose of the tool center point in the frame of the given tcp_jntid
:param tcp_loc_rotmat: 3x3 nparray, local rotmat of the tool center point
:return:
author: weiwei
date: 20190312
"""
if tcp_jntid is None:
tcp_jntid = self.tcp_jntid
if tcp_loc_pos is None:
tcp_loc_pos = self.tcp_loc_pos
if tcp_loc_rotmat is None:
tcp_loc_rotmat = self.tcp_loc_rotmat
tcp_gloc_pos = self.jnts[tcp_jntid]['gl_posq'] + self.jnts[tcp_jntid]['gl_rotmatq'].dot(tcp_loc_pos)
tcp_gloc_rotmat = self.jnts[tcp_jntid]['gl_rotmatq'].dot(tcp_loc_rotmat)
        loc_pos, loc_rotmat = rm.rel_pose(tcp_gloc_pos, tcp_gloc_rotmat, gl_pos, gl_rotmat)
        return [loc_pos, loc_rotmat]
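    # Note: cvt_gl_to_loc_tcp is the inverse of cvt_loc_tcp_to_gl. A round-trip sketch
    # (hypothetical chain instance jlc, after fk has been updated):
    #   gl_pos, gl_rotmat = jlc.cvt_loc_tcp_to_gl(loc_pos=np.array([.1, 0, 0]))
    #   loc_pos, loc_rotmat = jlc.cvt_gl_to_loc_tcp(gl_pos, gl_rotmat)  # recovers [.1, 0, 0], eye(3)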
def gen_meshmodel(self,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=True,
toggle_jntscs=False,
rgba=None,
name='jlcmesh'):
return self._mt.gen_meshmodel(tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
name=name, rgba=rgba)
def gen_stickmodel(self,
rgba=np.array([.5, 0, 0, 1]),
thickness=.01,
joint_ratio=1.62,
link_ratio=.62,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=True,
toggle_jntscs=False,
toggle_connjnt=False,
name='jlcstick'):
return self._mt.gen_stickmodel(rgba=rgba,
thickness=thickness,
joint_ratio=joint_ratio,
link_ratio=link_ratio,
tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt,
name=name)
def gen_endsphere(self):
return self._mt.gen_endsphere()
def copy(self):
return copy.deepcopy(self)
if __name__ == "__main__":
import time
import visualization.panda.world as wd
import robot_sim._kinematics.jlchain_mesh as jlm
import modeling.geometric_model as gm
base = wd.World(cam_pos=[3, 0, 3], lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
jlinstance = JLChain(homeconf=np.array([0, 0, 0, 0, 0, 0, 0, 0]))
# rjlinstance.settcp(tcp_jntid=rjlinstance.tgtjnts[-3], tcp_loc_pos=np.array([0,0,30]))
# jlinstance.jnts[4]['type'] = 'prismatic'
# jlinstance.jnts[4]['loc_motionax'] = np.array([1, 0, 0])
# jlinstance.jnts[4]['motion_val'] = .2
# jlinstance.jnts[4]['rngmax'] = 1
# jlinstance.jnts[4]['rngmin'] = -1
jlinstance.fk()
jlinstance.gen_stickmodel().attach_to(base)
    # base.run()  # uncomment to stop here; it would block and skip the IK demo below
tgt_pos0 = np.array([.45, 0, 0])
tgt_rotmat0 = np.eye(3)
tgt_pos1 = np.array([.1, 0, 0])
tgt_rotmat1 = np.eye(3)
tgt_pos_list = [tgt_pos0, tgt_pos1]
tgt_rotmat_list = [tgt_rotmat0, tgt_rotmat1]
gm.gen_mycframe(pos=tgt_pos0, rotmat=tgt_rotmat0, length=.15, thickness=.01).attach_to(base)
gm.gen_mycframe(pos=tgt_pos1, rotmat=tgt_rotmat1, length=.15, thickness=.01).attach_to(base)
tcp_jntidlist = [jlinstance.tgtjnts[-1], jlinstance.tgtjnts[-6]]
tcp_loc_poslist = [np.array([.03, 0, .0]), np.array([.03, 0, .0])]
tcp_loc_rotmatlist = [np.eye(3), np.eye(3)]
# tgt_pos_list = tgt_pos_list[0]
# tgt_rotmat_list = tgt_rotmat_list[0]
# tcp_jntidlist = tcp_jntidlist[0]
# tcp_loc_poslist = tcp_loc_poslist[0]
# tcp_loc_rotmatlist = tcp_loc_rotmatlist[0]
tic = time.time()
jnt_values = jlinstance.ik(tgt_pos_list,
tgt_rotmat_list,
seed_jnt_values=None,
tcp_jntid=tcp_jntidlist,
tcp_loc_pos=tcp_loc_poslist,
tcp_loc_rotmat=tcp_loc_rotmatlist,
local_minima="accept",
toggle_debug=True)
toc = time.time()
print('ik cost: ', toc - tic, jnt_values)
jlinstance.fk(jnt_values=jnt_values)
jlinstance.gen_stickmodel(tcp_jntid=tcp_jntidlist,
tcp_loc_pos=tcp_loc_poslist,
tcp_loc_rotmat=tcp_loc_rotmatlist,
toggle_jntscs=True).attach_to(base)
jlinstance2 = jlinstance.copy()
jlinstance2.fix_to(pos=np.array([1, 1, 0]), rotmat=rm.rotmat_from_axangle([0, 0, 1], math.pi / 2))
jlinstance2.gen_stickmodel(tcp_jntid=tcp_jntidlist,
tcp_loc_pos=tcp_loc_poslist,
tcp_loc_rotmat=tcp_loc_rotmatlist,
toggle_jntscs=True).attach_to(base)
base.run()
|
11593943
|
import sys
import time
import json
import yaml
import psycopg2
import psycopg2.extras
from pprint import pprint
from datetime import datetime
from .helper import *
date_format = '%Y-%m-%dT%H:%M:%SZ'
class APIListener:
    """
    Listens to a Twitter API endpoint and writes the returned tweets to a
    PostgreSQL database and a newline-delimited JSON file.
    """
def __init__(self,
event,
query_type,
config_f,
append,
verbose,
update_interval):
self.event = event
self.query_type = query_type
self.verbose = verbose
self.stop = False
self.pause = False
self.temp_unavail = False
self.update_interval_mins = update_interval
self.update_interval_secs = update_interval * 60
# Config
with open(config_f) as fin:
config = yaml.load(fin, Loader=yaml.Loader)
self.config = config
# API keys
self.api_key = config['keys']['twitter']['api_key']
self.secret_key = config['keys']['twitter']['api_secret_key']
self.bearer_token = config['keys']['twitter']['bearer_token']
self.headers = {"Authorization": f"Bearer {self.bearer_token}"}
# JSON output
self.out_json_dir = config['output']['json']['twitter'][query_type]
self.out_json_fname = f"{self.out_json_dir}/{event}.json"
        if append:
            self.write_mode = 'a+'
        else:
            self.write_mode = 'w+'
        self.out_json_f = open(self.out_json_fname, self.write_mode)  # file handle used by write()
# Database output
schema = config['output']['psql']['twitter']['schema']
tables = config['output']['psql']['twitter']['tables']
self.tables = dict()
self.tables['tweets'] = f"{schema}.{tables['tweets']}"
self.tables['users'] = f"{schema}.{tables['users']}"
self.tables['media'] = f"{schema}.{tables['media']}"
self.tables['places'] = f"{schema}.{tables['places']}"
# Database connection
self.conn = psycopg2.connect(host=config['psql']['host'],
port=config['psql']['port'],
user=config['psql']['user'],
database=config['psql']['database'],
password=config['psql']['password'])
self.conn.autocommit = True
self.cur = self.conn.cursor()
self.cur.execute("SET TIME ZONE 'UTC';")
# Fields
request_fields = config['request_fields']['twitter']
self.request_fields = {"tweet.fields": ",".join(request_fields['tweets']),
"user.fields": ",".join(request_fields['users']),
"media.fields": ",".join(request_fields['media']),
"place.fields": ",".join(request_fields['places']),
"expansions": ",".join(config['expansions'])}
        # Insert commands
self.templates = dict()
self.insert_cmds = dict()
self.insert_fields = dict()
for insert_type in ['tweets', 'users', 'media', 'places']:
try:
update_fields = config['update_fields']['twitter'][insert_type]
except KeyError:
update_fields = None
insert_fields = set(config['insert_fields']['twitter'][insert_type].keys())
self.insert_fields[insert_type] = insert_fields
table = self.tables[insert_type]
update_cmd = get_update_cmd(update_fields, self.query_type, insert_type)
insert_cmd,template = get_insert_cmd(insert_fields, table, update_cmd)
self.insert_cmds[insert_type] = insert_cmd
self.templates[insert_type] = template
if insert_type == 'tweets':
ref_insert_cmd,_ = get_insert_cmd(insert_fields, table)
self.templates['ref'] = template
self.insert_cmds['ref'] = ref_insert_cmd
self.insert_fields['ref'] = insert_fields
# Params of request
self.params = dict()
# Tweet counters
now = time.time()
self.n_tweets_total = 0
self.n_tweets_last_15mins = 0
self.n_tweets_since_update = 0
self.prev_15min_time_mark = now
self.prev_update_time_mark = now
self.rate_limit = None
self.n_calls_last_15mins = None
if self.verbose:
now = datetime.now().strftime("%Y-%m-%d %I:%M%p")
print(f"Starting listener at {now}")
print(f"Event: {self.event}")
def exit_handler(self, signum, frame):
"""
Helper function for handling CTRL+C exit, used with signal.SIGINT
"""
self.stop = True
if self.verbose:
print('\nStopping...')
def check_response_exception(self, response):
"""
Checks to see if the status code returned by a response is valid. If
not, then it raises an exception with a given message
Parameters
----------
response: obj
A response object from a request made via the requests library
"""
if not response.ok:
if response.status_code == 503:
self.temp_unavail = True
if self.verbose:
print('\nAPI service is temporarily unavailable')
elif response.status_code == 429:
self.pause = True
if self.verbose:
print('\nAPI says you are over the rate limit')
else:
status_text = response.text
status_code = response.status_code
raise Exception(f"Error in search (HTTP {status_code}): {status_text}")
def check_rate_limit(self):
"""
Checks if the query has exceeded the rate limit or the service is
temporarily unavailable. If so, pauses search. Also handles printing
out regular updates
"""
update_reset = False
unavail_reset = False
rate_limit_reset = False
# Check if listener needs to pause or print update
now = time.time()
secs_since_prev_15mins = now - self.prev_15min_time_mark
secs_since_last_update = now - self.prev_update_time_mark
if (self.n_calls_last_15mins >= self.rate_limit) or self.pause:
n_sleep_secs = 900 - secs_since_prev_15mins + 15 # add a little extra
if self.verbose:
print('Stopping for {} mins'.format(round(n_sleep_secs/60)))
print(f"Previous 15 min mark: {self.prev_15min_time_mark}")
print(f"Seconds since last 15 min mark: {secs_since_prev_15mins}")
print(f"Calls since last 15 min mark: {self.n_calls_last_15mins}")
time.sleep(n_sleep_secs)
rate_limit_reset = True
update_reset = True
elif 900 - secs_since_prev_15mins < 0:
rate_limit_reset = True
update_reset = True
elif self.temp_unavail:
if self.verbose:
print('Stopping for 30 seconds')
time.sleep(30)
unavail_reset = True
elif secs_since_last_update > self.update_interval_secs:
update_reset = True
# Reset status to keep listener running, and print update
if rate_limit_reset or update_reset:
if rate_limit_reset:
n_tweets = self.n_tweets_last_15mins
n_mins = 15
self.n_calls_last_15mins = 0
self.n_tweets_last_15mins = 0
self.prev_15min_time_mark = now
self.pause = False
if update_reset:
if update_reset and not rate_limit_reset:
n_tweets = self.n_tweets_since_update
n_mins = round(secs_since_last_update/60)
self.n_tweets_since_update = 0
self.prev_update_time_mark = now
if self.verbose:
self.print_update(n_tweets, n_mins)
if unavail_reset:
self.temp_unavail = False
def limit_rate(self):
"""
Makes listener sleep based on how much time is left in the 15 minute
interval and how many calls have been made
Time to sleep = # secs remaining / # calls remaining to be made
"""
now = time.time()
secs_since_prev_15mins = now - self.prev_15min_time_mark
n_secs_remaining = 900 - secs_since_prev_15mins
n_calls_remaining = self.rate_limit - self.n_calls_last_15mins
if n_secs_remaining > 0 and n_calls_remaining > 0:
n_sleep_secs = n_secs_remaining / n_calls_remaining
if self.query_type != 'stream':
# Full archive search has minimum 1 request / sec limit too
n_sleep_secs = max(1, n_sleep_secs)
time.sleep(n_sleep_secs)
elif n_secs_remaining > 0 and n_calls_remaining <= 0:
self.pause = True
def manage_writing(self, response_json):
"""
Coordinates the writing of data, namely handling exceptions and updating
the count of data returned from the API
Parameters
----------
response_json: dict
JSON from an API response produced via the response library
"""
try:
if self.query_type == 'stream':
tweets = [response_json['data']]
else:
tweets = response_json['data']
includes = response_json['includes']
self.write(tweets, includes)
self.n_tweets_total += len(tweets)
self.n_tweets_since_update += len(tweets)
self.n_tweets_last_15mins += len(tweets)
except KeyError as err:
            # an empty result set (meta.result_count == 0) is expected; anything
            # else is a genuine error worth surfacing
            if response_json.get('meta', {}).get('result_count') != 0:
                pprint(response_json)
                raise err
def write(self, tweets, includes):
"""
Writes data to a PostgreSQL database and a newline-delimited JSON file.
All raw data is written to the JSON file. Insertion data is retrieved
for all tweets, referenced tweets, users, media, and places and inserted
into the database. The insertion subsets to the fields specified by the
config file (`insert_fields.platform`)
Parameters
-----------
tweets: list of dicts
List of dictionary tweet objects. The return of the `data` field
from the API. Note: for the stream, we manually wrap it in a list
because only a single tweet is returned
includes: dict of dicts
Dictionary of different referenced objects that were included. The
return of the `includes` field from the API
"""
all_inserts = get_all_inserts(tweets, includes, self.event, self.query_type)
# Write to database
insert_types = ['tweets', 'ref', 'users', 'media', 'places']
for insert_type,inserts in zip(insert_types, all_inserts):
# Insert
template = self.templates[insert_type]
insert_cmd = self.insert_cmds[insert_type]
try:
psycopg2.extras.execute_values(self.cur,
sql=insert_cmd,
argslist=inserts,
template=template)
except Exception as e:
print(f"Failed insert: {insert_type}\n")
pprint(inserts)
print()
print(f"Insert command\n{insert_cmd}\n")
print(f"Template\n{template}\n")
print(f"{insert_type}\n")
print()
raise e
# Write to JSON
for tweet in tweets:
out_str = json.dumps(tweet)
self.out_json_f.write(f"{out_str}\n")
def print_update(self, n_tweets, n_mins):
"""
Prints out the number of tweets that have been retrieved from the API
since the last update
Note: this undercounts the number of tweets that are inserted because
it does not count referenced tweets that have been inserted
Parameters
----------
n_tweets: int
The number of tweets since the last update
n_mins:
The number of minutes since the last update
"""
print('\n\t'+datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
out_str = f"\t{n_tweets:,} tweets in the last {n_mins} mins"
out_str = f"{out_str} | {self.n_tweets_total:,} tweets total"
print(out_str)
|
11594023
|
from torch.optim.lr_scheduler import _LRScheduler
class WarmupScheduler(_LRScheduler):
def __init__(self, optimizer, iters, last_epoch=-1):
self.iters = iters
super(WarmupScheduler, self).__init__(optimizer, last_epoch)
def get_lr(self):
        # linear warmup: scale each base LR by last_epoch / iters
        # (note: the scaling factor keeps growing past 1.0 once last_epoch exceeds iters)
        if self.iters > 0:
            lr = [base_lr * self.last_epoch / self.iters for base_lr in self.base_lrs]
else:
lr = self.base_lrs
return lr
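# A minimal usage sketch (hypothetical model and optimizer; warmup over 500 steps):
#
#   import torch
#   model = torch.nn.Linear(10, 2)
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   scheduler = WarmupScheduler(optimizer, iters=500)
#   for step in range(500):
#       optimizer.step()
#       scheduler.step()  # lr ramps linearly from 0 toward the base lr of 0.1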
|
11594042
|
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
from random import randint, choice
import kivent_core
from kivent_core.gameworld import GameWorld
from kivent_core.systems.position_systems import PositionSystem2D
from kivent_core.systems.renderers import Renderer
from kivent_core.managers.resource_managers import texture_manager
from kivy.properties import StringProperty
from os.path import dirname, join, abspath
texture_manager.load_atlas(join(dirname(dirname(abspath(__file__))), 'assets',
'background_objects.atlas'))
class TestGame(Widget):
def on_kv_post(self, *args):
self.gameworld.init_gameworld(
['renderer', 'position'],
callback=self.init_game)
def init_game(self):
self.setup_states()
self.set_state()
self.load_models()
self.draw_some_stuff()
def load_models(self):
model_manager = self.gameworld.model_manager
model_manager.load_textured_rectangle('vertex_format_4f', 7., 7.,
'star1', 'star1-4')
model_manager.load_textured_rectangle('vertex_format_4f', 10., 10.,
'star1', 'star1-4-2')
def draw_some_stuff(self):
init_entity = self.gameworld.init_entity
for x in range(2000):
pos = randint(0, Window.width), randint(0, Window.height)
model_key = choice(['star1-4', 'star1-4-2'])
create_dict = {
'position': pos,
'renderer': {'texture': 'star1',
'model_key': model_key},
}
ent = init_entity(create_dict, ['position', 'renderer'])
#If you do not set Renderer.force_update to True, call update_trigger
#self.ids.renderer.update_trigger()
def setup_states(self):
self.gameworld.add_state(state_name='main',
systems_added=['renderer'],
systems_removed=[], systems_paused=[],
systems_unpaused=['renderer'],
screenmanager_screen='main')
def set_state(self):
self.gameworld.state = 'main'
class DebugPanel(Widget):
fps = StringProperty(None)
def __init__(self, **kwargs):
super(DebugPanel, self).__init__(**kwargs)
Clock.schedule_once(self.update_fps)
def update_fps(self,dt):
self.fps = str(int(Clock.get_fps()))
Clock.schedule_once(self.update_fps, .05)
class YourAppNameApp(App):
def build(self):
Window.clearcolor = (0, 0, 0, 1.)
if __name__ == '__main__':
YourAppNameApp().run()
|
11594053
|
from flask import Blueprint, jsonify, request, redirect, url_for
from critiquebrainz.db import users as db_users
from critiquebrainz.db.user import User
from critiquebrainz.decorators import crossdomain
from critiquebrainz.ws.exceptions import NotFound
from critiquebrainz.ws.oauth import oauth
from critiquebrainz.ws.parser import Parser
user_bp = Blueprint('ws_user', __name__)
@user_bp.route('/me', methods=["GET", "OPTIONS"])
@crossdomain(headers="Authorization, Content-Type")
@oauth.require_auth()
def user_me_handler(user):
"""Get your profile information.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/user/me" \\
-X GET \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"user": {
"display_name": "your_display_name",
"created": "Fri, 02 Dec 2016 19:02:47 GMT",
"show_gravatar": true,
"user_type": "Noob",
"email": "your_email_id",
"karma": 0,
"musicbrainz_username": "username/id associated with musicbrainz",
"id": "your-unique-user-id",
"avatar": "https://gravatar.com/your-gravatar-link"
}
}
:query inc: includes
:resheader Content-Type: *application/json*
"""
inc = Parser.list('uri', 'inc', User.allowed_includes, optional=True) or []
return jsonify(user=user.to_dict(inc, confidential=True))
@user_bp.route('/me/reviews')
@oauth.require_auth()
@crossdomain(headers="Authorization, Content-Type")
def user_reviews_handler(user):
"""Get your reviews.
:resheader Content-Type: *application/json*
"""
return redirect(url_for('review.list', user_id=user.id, **request.args))
@user_bp.route('/me/applications')
@oauth.require_auth()
@crossdomain(headers="Authorization, Content-Type")
def user_applications_handler(user):
"""Get your applications.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/user/me/applications" \\
-X GET \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"applications": [
{
"website": "https://your-website.com",
"user_id": "your-unique-user-id",
"name": "<NAME> your Application",
"redirect_uri": "https://your-call-back.com/uri",
"client_id": "your Oauth client ID",
"client_secret": "your super-secret Oauth client secret",
"desc": "Application description set by you."
}
]
}
:resheader Content-Type: *application/json*
"""
return jsonify(applications=[c.to_dict() for c in user.clients])
@user_bp.route('/me/tokens')
@oauth.require_auth()
@crossdomain(headers="Authorization, Content-Type")
def user_tokens_handler(user):
"""Get your OAuth tokens.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/user/me/tokens" \\
-X GET \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"tokens": [
{
"scopes": "user",
"client": {
"website": "https://your-website.com",
"user_id": "your-unique-user-id",
"name": "<NAME> your Application",
"redirect_uri": "https://your-call-back.com/uri",
"client_id": "your Oauth client ID",
"client_secret": "your super-secret Oauth client secret",
"desc": "Application description set by you."
},
"refresh_token": "refresh token generated for your Oauth authorization code"
}
]
}
:resheader Content-Type: *application/json*
"""
return jsonify(tokens=[t.to_dict() for t in user.tokens])
# we don't need to add OPTIONS here because it's already added
# for this endpoint in user_me_handler
@user_bp.route('/me', methods=['POST'])
@oauth.require_auth('user')
@crossdomain(headers="Authorization, Content-Type")
def user_modify_handler(user):
"""Modify your profile.
**OAuth scope:** user
:reqheader Content-Type: *application/json*
:json string display_name: Display name **(optional)**
:json string email: Email address **(optional)**
:json boolean show_gravatar: Show gravatar **(optional)**
:resheader Content-Type: *application/json*
"""
def fetch_params():
display_name = Parser.string('json', 'display_name', optional=True)
email = Parser.email('json', 'email', optional=True)
show_gravatar = Parser.bool('json', 'show_gravatar', optional=True)
return display_name, email, show_gravatar
display_name, email, show_gravatar = fetch_params()
user.update(display_name, email, show_gravatar)
return jsonify(message='Request processed successfully')
# we don't need to add OPTIONS here because it's already added
# for this endpoint in user_me_handler
@user_bp.route('/me', methods=['DELETE'])
@oauth.require_auth('user')
@crossdomain(headers="Authorization, Content-Type")
def user_delete_handler(user):
"""Delete your profile.
**OAuth scope:** user
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/user/me" \\
-X DELETE \\
-H "Authorization: Bearer <access token>"
**Response Example:**
.. code-block:: json
{
"message": "Request processed successfully"
}
:resheader Content-Type: *application/json*
"""
user.delete()
return jsonify(message='Request processed successfully')
@user_bp.route('/<uuid:user_id>', methods=['GET', 'OPTIONS'])
@crossdomain(headers="Authorization, Content-Type")
def user_entity_handler(user_id):
"""Get profile of a user with a specified UUID.
**Request Example:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/user/ae5a003f-292c-497e-afbd-8076e9626f2e \\
-X GET
**Response Example:**
.. code-block:: json
{
"user": {
"created": "Wed, 07 May 2014 14:47:03 GMT",
"display_name": "User's Name comes here",
"id": "ae5a003f-292c-497e-afbd-8076e9626f2e",
"karma": 0,
"user_type": "Noob"
}
}
:resheader Content-Type: *application/json*
"""
user = db_users.get_by_id(str(user_id))
if not user:
raise NotFound("Can't find a user with ID: {user_id}".format(user_id=user_id))
inc = Parser.list('uri', 'inc', User.allowed_includes, optional=True) or []
return jsonify(user=User(user).to_dict(inc))
@user_bp.route('/', methods=['GET', 'OPTIONS'])
@crossdomain(headers="Authorization, Content-Type")
def review_list_handler():
"""Get list of users.
**Request Example:**
.. code-block:: bash
$ curl "https://critiquebrainz.org/ws/1/user/?offset=10&limit=3" \\
-X GET
**Response Example:**
.. code-block:: json
{
"count": 925,
"limit": 3,
"offset": 10,
"users": [
{
"created": "Wed, 07 May 2014 14:46:58 GMT",
"display_name": "Display Name",
"id": "b291a99b-7bb0-4531-ba45-f6cfb4d944de",
"karma": 0,
"user_type": "Noob"
},
{
"created": "Wed, 07 May 2014 14:46:59 GMT",
"display_name": "<NAME>",
"id": "a52e1629-a516-43c2-855f-bb195aeb2a33",
"karma": 3,
"user_type": "Noob"
},
{
"created": "Wed, 07 May 2014 14:47:00 GMT",
"display_name": "<NAME>",
"id": "1fb36917-d4d3-411b-82c4-901d949e17b8",
"karma": 0,
"user_type": "Noob"
}
]
}
    :query limit: results limit, min is 1, max is 50, default is 50 **(optional)**
:query offset: result offset, default is 0 **(optional)**
:resheader Content-Type: *application/json*
"""
def fetch_params():
limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50
offset = Parser.int('uri', 'offset', optional=True) or 0
return limit, offset
limit, offset = fetch_params()
users = db_users.list_users(limit, offset)
users = [User(user) for user in users]
return jsonify(limit=limit, offset=offset, count=len(users),
users=[p.to_dict() for p in users])
|
11594056
|
from bge import logic
from link_scripts.PlayerConstants import PlayerState
from link_scripts.states.Attack import start_basicSwordAttack1State
from link_scripts.states.Ledge import start_climbLedgeState
from link_scripts.states.Door import start_openDoorState
from link_scripts.states.Hits import start_hitState
from link_scripts.states.Interaction import start_interactionState
from link_scripts.states.PickThrow import start_pickObjectState, start_throwObjectState
from link_scripts.states.Push import start_waitPushState
from link_scripts.states.Chest import start_openChestState
from link_scripts.StarterState import start_firstLookView, start_ladderState
JUST_ACTIVATED = logic.KX_INPUT_JUST_ACTIVATED
def idleState(self):
# stop movement
self.stopMovement()
# play wait animation
if (self.heartContainer.isLow):
self.rig.playHeavyWait()
else:
self.rig.playWait()
# get forward force
forward_force = self.getForwardForce()
# If use object
if (self.objectManager.useObject()):
return
# If detect enemy damage
if (self.tester.detectEnemyDamage()):
start_hitState(self)
return
    # If the state time has elapsed, actions are allowed
if (self.playStateTime(1.0)):
# If detect chest
if ( self.tester.detectChest() ):
if ( self.gamepad.isActionPressed() ):
                start_openChestState(self)
                return
# If detect bloc
if (self.tester.detectBloc()):
if (self.gamepad.isActionPressed()):
start_waitPushState(self)
return
# If detect ledge ground from ground
if (self.tester.detectLedgeGroundFromGround()):
if (self.gamepad.isActionPressed()):
# go to climb
start_climbLedgeState(self)
return
        # If detect a pickable object
if ( self.tester.detectObjectToPickUp() and self.pickManager.active == False ):
if (self.gamepad.isActionPressed() ):
start_pickObjectState(self)
return
# test if detect placard
if (self.tester.detectInteractivePlacard()):
if ( self.gamepad.isActionPressed(JUST_ACTIVATED) ):
start_interactionState(self)
return
if (self.pickManager.active == True):
# if action pressed
if ( self.gamepad.isAttackPressed(JUST_ACTIVATED) ):
start_throwObjectState(self)
return
# test if can target a object
if ( self.targetManager.zTarget() ):#canTargetObject()
self.switchState(PlayerState.IDLE_TARGET_STATE)
return
# if move go to walk animation
if (forward_force != 0):
if (self.targetManager.active):
self.switchState(PlayerState.STRAFE_STATE)
else:
self.switchState(PlayerState.WALK_STATE)
# other action
else:
# if detect key for look
if ( self.gamepad.isLookPressed() ):
# go to look state
start_firstLookView(self)
# detect ladder
elif (self.tester.detectLadder()):
# change hud action text
#self.playerHUD().changeActionText('Monter')
if ( self.gamepad.isActionPressed() ):
# go to ladder state
start_ladderState(self)
# detect a door
elif (self.tester.detectDoor()):
if ( self.gamepad.isActionPressed() ):
                # go to open door state
start_openDoorState(self)
# go to get armed
elif (self.gamepad.isAttackPressed() and self.fightManager.canUseSword()):
if (self.fightManager.isUnsheated() ):
start_basicSwordAttack1State(self)
else:
self.unsheat(True)
    # sheathe sword and shield
if ( self.fightManager.isUnsheated() ):
if ( self.gamepad.isActionPressed() ):
self.unsheat(False)
|
11594076
|
import os
import shutil
import datetime
import yaml
import pyccl
import cosmosis
import sacc
from ._version import __version__
def write_metadata(analysis_id, output_dir, config_file):
"""Write run metadata to an output path.
Parameters
----------
analysis_id : str
A unique id for this analysis.
output_dir : str
The directory in which to write metadata
config_file : str
The path to the config file.
"""
metadata = {
'analysis_id': analysis_id,
'timestamp': datetime.datetime.utcnow().isoformat(),
'firecrown_version': __version__,
'pyccl_version': pyccl.__version__,
'cosmosis-standalone_version': cosmosis.__version__,
'sacc_version': sacc.__version__,
}
# Copy configuration file into output
shutil.copyfile(config_file, os.path.join(output_dir, 'config.yaml'))
# Save any metadata
with open(os.path.join(output_dir, 'metadata.yaml'), 'w') as fp:
yaml.dump(metadata, fp, default_flow_style=False)
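# A minimal usage sketch (hypothetical id and paths):
#
#   write_metadata('run-001', './output/run-001', './config.yaml')
#
# copies config.yaml into ./output/run-001 and writes metadata.yaml next to it.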
|
11594081
|
from datetime import datetime, timezone
import uuid
import humanize
import requests
import spdx
from modelforge.environment import collect_environment_without_packages
LICENSES = {l["id"]: l for l in spdx.licenses()}
LICENSES["Proprietary"] = {"sources": [""]}
def check_license(license: str):
"""
Ensure that the license identifier is SPDX-compliant (or is "Proprietary").
:param license: License identifier.
:return: None
"""
if license not in LICENSES:
raise ValueError("license must be an SPDX-compliant identifier or \"Proprietary\"")
def generate_new_meta(name: str, description: str, vendor: str, license: str) -> dict:
"""
Create the metadata tree for the given model name and the list of dependencies.
:param name: Name of the model.
:param description: Description of the model.
:param vendor: Name of the party which is responsible for support of the model.
:param license: License identifier.
:return: dict with the metadata.
"""
check_license(license)
return {
"code": None,
"created_at": get_datetime_now(),
"datasets": [],
"dependencies": [],
"description": description,
"vendor": vendor,
"environment": collect_environment_without_packages(),
"extra": None,
"license": license,
"metrics": {},
"model": name,
"parent": None,
"references": [],
"series": None,
"tags": [],
"uuid": str(uuid.uuid4()),
"version": [1, 0, 0],
}
def get_datetime_now() -> datetime:
"""
Return the current UTC date and time.
"""
return datetime.now(timezone.utc)
def format_datetime(dt: datetime) -> str:
"""
Format a datetime object as string.
:param dt: Date and time to format.
:return: String representation.
"""
return dt.strftime("%Y-%m-%d %H:%M:%S%z")
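# e.g. format_datetime(get_datetime_now()) -> "2021-06-01 12:34:56+0000" (illustrative timestamp)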
def extract_model_meta(base_meta: dict, extra_meta: dict, model_url: str) -> dict:
"""
Merge the metadata from the backend and the extra metadata into a dict which is suitable for \
`index.json`.
:param base_meta: tree["meta"] :class:`dict` containing data from the backend.
:param extra_meta: dict containing data from the user, similar to `meta.json`.
:param model_url: public URL of the model.
:return: converted dict.
"""
meta = {"default": {"default": base_meta["uuid"],
"description": base_meta["description"],
"code": extra_meta["code"]}}
del base_meta["model"]
del base_meta["uuid"]
meta["model"] = base_meta
meta["model"].update({k: extra_meta[k] for k in ("code", "datasets", "references", "tags",
"extra")})
response = requests.get(model_url, stream=True)
meta["model"]["size"] = humanize.naturalsize(int(response.headers["content-length"]))
meta["model"]["url"] = model_url
meta["model"]["created_at"] = format_datetime(meta["model"]["created_at"])
return meta
|
11594087
|
import torch
import argparse
import cv2
import numpy as np
import os
from skimage import transform as trans
import torch.multiprocessing as mp
def parse_args():
parser = argparse.ArgumentParser(
description="fake add headband img generation tool")
parser.add_argument('--input', dest='input_file',
help='path of the input image list', type=str,
required=True)
parser.add_argument('--output_dir', dest='out_dir',
help='dir of saved fake_glass img', type=str,
required=True)
parser.add_argument('--key_point_list', dest='key_point_list',
help='key point list', type=str,
required=True)
parsed_args = parser.parse_args()
return parsed_args
###
mean = np.asarray([0.485, 0.456, 0.406])
std = np.asarray([0.229, 0.224, 0.225])
""" if cuda is available use gpu
"""
if torch.cuda.is_available():
def map_location(storage, loc): return storage.cuda()
else:
map_location = 'cpu'
def get_face(detector, img_queue, box_queue):
"""
Get face from image queue. This function is used for multiprocessing
"""
while True:
image = img_queue.get()
box = detector.extract_cnn_facebox(image)
box_queue.put(box)
def create_path(path):
"""
create not exists dir
"""
if not os.path.exists(path):
os.makedirs(path)
def crop_transform68(rimg, landmark, image_size, src):
"""
crop headband image with landmark
"""
assert landmark.shape[0] == 68 or landmark.shape[0] == 5
assert landmark.shape[1] == 2
tform = trans.SimilarityTransform()
tform.estimate(landmark, src)
M = tform.params[0:2, :]
img = cv2.warpAffine(
rimg, M, (image_size[1], image_size[0]), borderValue=0.0)
return img
def add_headband(image_path_list, headband_mats,
headband_landmarks, out_dir, landmark_list):
"""
add headband with image
"""
image_size = [256, 256]
for jj, imgname in enumerate(image_path_list):
imgname_dir = imgname.split('/')[-2]
file_dir = os.path.join(out_dir, imgname_dir)
create_path(file_dir)
img = cv2.imread(imgname)
landmark_index = landmark_list[jj, :]
landmark = landmark_index.reshape(-1, 2)
src_landmark = landmark
        # index of the headband template to use (fixed choice)
        rn = 8
mat_headband = headband_mats[rn]
landmark_headband = headband_landmarks[rn]
mat_headband = crop_transform68(
mat_headband,
landmark_headband,
image_size,
src_landmark)
gray_headband = cv2.cvtColor(mat_headband, cv2.COLOR_BGR2GRAY)
        ret, headband_mask = cv2.threshold(
            gray_headband, 230, 255, cv2.THRESH_BINARY_INV)
img1_bg = cv2.bitwise_and(
img.copy(),
img.copy(),
mask=cv2.bitwise_not(headband_mask))
img2_fg = cv2.bitwise_and(
mat_headband,
mat_headband,
mask=(headband_mask))
img = cv2.add(img1_bg, img2_fg)
cv2.imwrite(os.path.join(file_dir, os.path.basename(imgname)), img)
def add_headband_worker(img_paths, shards, i, out_dir):
"""
process every list
"""
headband_mats = []
headband_landmarks = []
    with open('./headband_list', 'r') as f:
        for pic in f:
            pic = pic.strip('\n')
            pic_path = os.path.join('./headband', pic)
            pts_path = os.path.join('./headband_test_pts', pic)
            with open(pts_path, 'r') as pts_f:
                landmark = np.loadtxt(pts_f)
headband_mat = cv2.imread(pic_path)
headband_landmarks.append(landmark)
headband_mats.append(headband_mat)
begin = shards[i]
end = shards[i + 1]
add_headband(img_paths[begin: end],
headband_mats,
headband_landmarks,
out_dir,
landmark_total[begin: end,
:])
def nice_shards(total_num, n):
"""
split list
"""
size = total_num // n + 1
shards = [0]
for i in range(n):
shards.append(min(total_num, shards[i] + size))
return shards
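# e.g. nice_shards(10, 3) -> [0, 4, 8, 10], i.e. three contiguous index ranges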
def add_headband_main():
"""
add headband main with multi process
"""
args = parse_args()
input_file = args.input_file
out_dir = args.out_dir
key_point_list = args.key_point_list
img_paths = []
with open(input_file, 'r')as f:
for line in f:
if '\t' in line:
line = line.strip('\n').split('\t')[0]
elif ' ' in line:
line = line.strip('\n').split(' ')[0]
else:
line = line.strip('\n')
img_paths.append(line)
print('total process pic is {}'.format(len(img_paths)))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
"""
num of process
"""
p_num = 5
shards = nice_shards(len(img_paths), p_num)
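    # landmark_total is read by the worker processes; with fork-based
    # multiprocessing (the default start method on Linux) children inherit it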
global landmark_total
landmark_total = np.loadtxt(key_point_list)
results = []
for i in range(p_num):
        p = mp.Process(
            target=add_headband_worker, args=(
                img_paths, shards, i, out_dir,))
p.start()
results.append(p)
for p in results:
p.join()
print("All worker done")
if __name__ == '__main__':
""" start process"""
add_headband_main()
|
11594111
|
import os
import sys
from abc import abstractproperty
from functools import lru_cache
from importlib import import_module
from detect_secrets.plugins.base import BasePlugin
from detect_secrets.util import get_root_directory
@lru_cache(maxsize=1)
def get_mapping_from_secret_type_to_class_name(plugin_filenames=None):
"""Returns secret_type => plugin classname"""
return {
plugin.secret_type: name
for name, plugin in import_plugins(plugin_filenames=plugin_filenames).items()
}
@lru_cache(maxsize=1)
def import_plugins(plugin_filenames=None):
"""
:type plugin_filenames: tuple
:param plugin_filenames: the plugin filenames.
:rtype: Dict[str, Type[TypeVar('Plugin', bound=BasePlugin)]]
"""
modules = []
for root, _, files in os.walk(
os.path.join(get_root_directory(), 'detect_secrets/plugins'),
):
for filename in files:
if not filename.startswith('_'):
modules.append(os.path.splitext(filename)[0])
# Only want to import top level files
break
plugins = {}
for module_name in modules:
# If plugin_filenames is None, all of the plugins will get imported.
# Normal runs of this will have plugin_filenames set.
# plugin_filenames will be None if we are testing a method and don't pass it in.
if plugin_filenames is None or module_name in plugin_filenames:
try:
module = import_module('detect_secrets.plugins.{}'.format(module_name))
except ModuleNotFoundError as err: # pragma: no cover
if hasattr(err, 'msg'):
message = err.msg
if "No module named 'ibm_db'" not in message:
raise err
else:
yellow = '\033[93m'
end_yellow = '\033[0m'
print(
yellow,
'Warning: ibm_db is not installed, the DB2 plugin will not run.\n',
'To enable the optional DB2 plugin, install ibm_db with:\n',
'pip install ibm_db\n',
'and run detect secrets with',
'detect-secrets scan --update .secrets.baseline --db2-scan',
end_yellow,
file=sys.stderr,
flush=True,
)
continue
for name in filter(lambda x: not x.startswith('_'), dir(module)):
plugin = getattr(module, name)
try:
if not issubclass(plugin, BasePlugin):
continue
except TypeError:
# Occurs when plugin is not a class type.
continue
# Use this as a heuristic to determine abstract classes
if isinstance(plugin.secret_type, abstractproperty):
continue
plugins[name] = plugin
return plugins
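# A minimal usage sketch (assuming detect_secrets and its plugins are importable):
#
#   plugins = import_plugins()  # e.g. {'AWSKeyDetector': <class ...>, ...}
#   mapping = get_mapping_from_secret_type_to_class_name()
#   # maps each plugin's secret_type string to its class name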
|
11594113
|
import sublime, sublime_plugin
from ..libs import util
class JavascriptEnhancementsSurroundWithCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
selections = view.sel()
case = args.get("case")
if case == "if_else_statement" :
if len(selections) != 2 :
return
sel_1 = util.trim_region(view, selections[0])
prev_line_is_empty = util.prev_line_is_empty(view, sel_1)
space_1 = util.get_whitespace_from_line_begin(view, sel_1)
space_before = (space_1 + "\n" + space_1 if not prev_line_is_empty else "")
new_text = util.replace_with_tab(view, sel_1, space_before + "if (bool) {\n" + space_1, "\n" + space_1 + "} ")
view.replace(edit, sel_1, new_text)
sel_2 = util.trim_region(view, selections[1])
next_line_is_empty = util.next_line_is_empty(view, sel_2)
space_2 = util.get_whitespace_from_line_begin(view, sel_2)
space_after = ("\n" + space_2 if not next_line_is_empty else "")
new_text = util.replace_with_tab(view, sel_2, " else {\n" + space_2, "\n" + space_2 + "}" + space_after)
view.replace(edit, sel_2, new_text)
new_selection = sublime.Region(sel_1.begin() + len(space_before+"if ("), sel_1.begin() + len(space_before+"if (bool"))
view.sel().clear()
view.sel().add(new_selection)
else :
for selection in selections :
selection = util.trim_region(view, selection)
if view.substr(selection).strip() == "" :
continue
prev_line_is_empty = util.prev_line_is_empty(view, selection)
next_line_is_empty = util.next_line_is_empty(view, selection)
space = util.get_whitespace_from_line_begin(view, selection)
space_before = (space + "\n" + space if not prev_line_is_empty else "")
space_after = ("\n" + space if not next_line_is_empty else "")
new_text = ""
new_selection = None
if case == "if_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"if (bool) {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"if ("), selection.begin() + len(space_before+"if (bool"))
elif case == "while_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"while (bool) {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"while ("), selection.begin() + len(space_before+"while (bool"))
elif case == "do_while_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"do {\n"+space, "\n"+space+"} while (bool)" + space_after)
new_selection = sublime.Region(selection.begin() + len(new_text) - len("ool)"), selection.begin() + len(new_text))
elif case == "for_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"for ( ; bool ; ) {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"for ( ; "), selection.begin() + len(space_before+"for ( ; bool"))
elif case == "try_catch_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"try {\n"+space, "\n"+space+"} catch (e) {\n"+space+"\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(new_text) - len(") {\n"+space+"\n"+space+"}" + space_after), selection.begin() + len(new_text) - len(" {\n"+space+"\n"+space+"}" + space_after))
elif case == "try_finally_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"try {\n"+space, "\n"+space+"} finally {\n"+space+"\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"try {"), selection.begin() + len(space_before+"try {"))
elif case == "try_catch_finally_statement" :
new_text = util.replace_with_tab(view, selection, space_before+"try {\n"+space, "\n"+space+"} catch (e) {\n"+space+"\n"+space+"} finally {\n"+space+"\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(new_text) - len(") {\n"+space+"\n"+space+"} finally {\n"+space+"\n"+space+"}" + space_after + space_after), selection.begin() + len(new_text) - len(" {\n"+space+"\n"+space+"} finally {\n"+space+"\n"+space+"}" + space_after + space_after))
elif case == "function" :
new_text = util.replace_with_tab(view, selection, space_before+"function func_name () {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"function "), selection.begin() + len(space_before+"function func_name"))
elif case == "anonymous_function" :
new_text = util.replace_with_tab(view, selection, space_before+"function () {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"function () {"), selection.begin() + len(space_before+"function () {"))
elif case == "arrow_function" :
new_text = util.replace_with_tab(view, selection, space_before+"() => {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"() => {"), selection.begin() + len(space_before+"() => {"))
elif case == "async_function" :
new_text = util.replace_with_tab(view, selection, space_before+"async function func_name () {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"async function "), selection.begin() + len(space_before+"async function func_name"))
elif case == "iife_function" :
new_text = util.replace_with_tab(view, selection, space_before+"(function () {\n"+space, "\n"+space+"})()" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"(function () {"), selection.begin() + len(space_before+"(function () {"))
elif case == "generator_function" :
new_text = util.replace_with_tab(view, selection, space_before+"function* func_name () {\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"function* "), selection.begin() + len(space_before+"function* func_name"))
elif case == "block" :
new_text = util.replace_with_tab(view, selection, space_before+"{\n"+space, "\n"+space+"}" + space_after)
new_selection = sublime.Region(selection.begin() + len(space_before+"{"), selection.begin() + len(space_before+"{"))
view.erase(edit, selection)
view.insert(edit, selection.begin(), new_text)
view.sel().clear()
view.sel().add(new_selection)
def is_enabled(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selections = view.sel()
for selection in selections :
if view.substr(selection).strip() != "" :
return True
return False
def is_visible(self, **args) :
view = self.view
if not util.selection_in_js_scope(view) :
return False
selections = view.sel()
for selection in selections :
if view.substr(selection).strip() != "" :
return True
return False
|
11594128
|
from model import SampleRNN, Predictor, Generator
import torch
import re
import sys
import numpy as np
import argparse
from librosa.output import write_wav
import os
from interpolate import interpolation
import random
default_params = {
# model parameters
'n_rnn': 1,
'dim': 1024,
'learn_h0': True,
'ulaw': True,
'q_levels': 256,
'weight_norm': False,
'seq_len': 1040,
'batch_size': 128,
'look_ahead': False,
'qrnn': False,
'val_frac': 0.1,
'test_frac': 0.1,
    'cond_dim': 43,  # conditioners of size 43 = 40 MFCC + 1 LF0 + 1 FV + 1 U/V
'norm_ind': False, # If true, normalization is done independent by speaker. If false, normalization is joint
'static_spk': False, # If true, training is only done with one speaker
# training parameters
'sample_rate': 16000,
'n_samples': 1,
'sample_length': 80000,
'seed': 77977,
'cond': 0,
# generator parameters
'datasets_path': '/veu/tfgveu7/project/tcstar/',
'cond_set': 'cond/'
}
def init_random_seed(seed, cuda):
print('Seed:', seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
def as_type(var, target_type):
    case = str(target_type).split("'")[1]
if case == 'bool':
return var[0] == 'T'
elif case == 'int':
return int(var)
elif case == 'float':
return float(var)
elif case == 'list':
return list(map(int, var.split(',')))
else:
return var
def load_model(checkpoint_path):
model_pattern = '.*ep{}-it{}'
checkpoint_name = os.path.basename(checkpoint_path)
match = re.match(
model_pattern.format(r'(\d+)', r'(\d+)'),
checkpoint_name
)
if match:
epoch = int(match.group(1))
iteration = int(match.group(2))
else:
epoch, iteration = (0, 0)
return torch.load(checkpoint_path), epoch, iteration
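# e.g. load_model('results/exp/best-ep12-it34000') would return
# (torch.load(...), 12, 34000) for that (illustrative) checkpoint name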
class RunGenerator:
def __init__(self, model, sample_rate, cuda, epoch, cond, spk_list, speaker,
checkpoints_path, original_name):
self.generate = Generator(model, cuda)
self.sample_rate = sample_rate
self.cuda = cuda
self.epoch = epoch
self.cond = cond
self.speaker = speaker
self.original_name = original_name
path_split = checkpoints_path.split('/')
self.filename = '/'.join(path_split[:2]) + '/samples/' + path_split[-1] + '_file-' + \
self.original_name + '_spk-' + spk_list[self.speaker] + '.wav'
print('Generating file', self.filename)
def __call__(self, n_samples, sample_length, cond, speaker):
print('Generate', n_samples, 'of length', sample_length)
samples = self.generate(n_samples, sample_length, cond, speaker).cpu().numpy()
for i in range(n_samples):
print(self.filename)
write_wav(
self.filename,
samples[i, :], sr=self.sample_rate
)
def main(frame_sizes, **params):
use_cuda = torch.cuda.is_available()
params = dict(
default_params,
frame_sizes=frame_sizes,
**params
)
# Redefine parameters listed in the experiment directory and separated with '~'
for i in params['model'].split('/')[1].split('~'):
param = i.split(':')
if param[0] in params:
params[param[0]] = as_type(param[1], type(params[param[0]]))
# Define npy file names with maximum and minimum values of de-normalized conditioners
npy_name_min_max_cond = 'npy_datasets/min_max' + params['norm_ind'] * '_ind' + (not params['norm_ind']) * '_joint' \
+ params['static_spk'] * '_static' + '.npy'
# Define npy file name with array of unique speakers in dataset
npy_name_spk_id = 'npy_datasets/spk_id.npy'
# Get file names from partition's list
file_names = open(str(params['datasets_path']) +
'generate_cond_gina.list', 'r').read().splitlines()
spk_names = open(str(params['datasets_path']) +
'generate_spk_gina.list', 'r').read().splitlines()
datasets_path = os.path.join(params['datasets_path'], params['cond_set'])
spk = np.load(npy_name_spk_id)
if len(spk_names) != len(file_names):
        print('Length of the speaker file does not match the length of the conditioner file.')
quit()
print('Generating', len(file_names), 'audio files')
for i in range(len(file_names)):
print('Generating Audio', i)
print('Generating...', file_names[i])
# Load CC conditioner
c = np.loadtxt(datasets_path + file_names[i] + '.cc')
# Load LF0 conditioner
f0file = np.loadtxt(datasets_path + file_names[i] + '.lf0')
f0, _ = interpolation(f0file, -10000000000)
f0 = f0.reshape(f0.shape[0], 1)
# Load FV conditioner
fvfile = np.loadtxt(datasets_path + file_names[i] + '.gv')
fv, uv = interpolation(fvfile, 1e3)
num_fv = fv.shape[0]
uv = uv.reshape(num_fv, 1)
fv = fv.reshape(num_fv, 1)
# Load speaker conditioner
speaker = np.where(spk == spk_names[i])[0][0]
cond = np.concatenate((c, f0), axis=1)
cond = np.concatenate((cond, fv), axis=1)
cond = np.concatenate((cond, uv), axis=1)
# Load maximum and minimum of de-normalized conditioners
min_cond = np.load(npy_name_min_max_cond)[0]
max_cond = np.load(npy_name_min_max_cond)[1]
# Normalize conditioners with absolute maximum and minimum for each speaker of training partition
if params['norm_ind']:
print('Normalizing conditioners for each speaker of training dataset')
cond = (cond - min_cond[speaker]) / (max_cond[speaker] - min_cond[speaker])
else:
print('Normalizing conditioners jointly')
cond = (cond - min_cond) / (max_cond - min_cond)
print('Shape cond', cond.shape)
if params['look_ahead']:
delayed = np.copy(cond)
delayed[:-1, :] = delayed[1:, :]
cond = np.concatenate((cond, delayed), axis=1)
print('Shape cond after look ahead', cond.shape)
print(cond.shape)
seed = params.get('seed')
init_random_seed(seed, use_cuda)
spk_dim = len([i for i in os.listdir(os.path.join(params['datasets_path'], params['cond_set']))
if os.path.islink(os.path.join(params['datasets_path'], params['cond_set']) + '/' + i)])
print('Start Generate SampleRNN')
model = SampleRNN(
frame_sizes=params['frame_sizes'],
n_rnn=params['n_rnn'],
dim=params['dim'],
learn_h0=params['learn_h0'],
q_levels=params['q_levels'],
ulaw=params['ulaw'],
weight_norm=params['weight_norm'],
cond_dim=params['cond_dim']*(1+params['look_ahead']),
spk_dim=spk_dim,
qrnn=params['qrnn']
)
print(model)
if use_cuda:
model = model.cuda()
predictor = Predictor(model).cuda()
else:
predictor = Predictor(model)
f_name = params['model']
model_data = load_model(f_name)
if model_data is None:
sys.exit('ERROR: Model not found in' + str(f_name))
(state_dict, epoch_index, iteration) = model_data
print('OK: Read model', f_name, '(epoch:', epoch_index, ')')
print(state_dict)
predictor.load_state_dict(state_dict)
original_name = file_names[i].split('/')[1]
if original_name == "..":
original_name = file_names[i].split('/')[3]
generator = RunGenerator(
model=model,
sample_rate=params['sample_rate'],
cuda=use_cuda,
epoch=epoch_index,
cond=cond,
spk_list=spk,
speaker=speaker,
checkpoints_path=f_name,
original_name=original_name
)
generator(params['n_samples'], params['sample_length'], cond, speaker)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
argument_default=argparse.SUPPRESS
)
def parse_bool(arg):
arg = arg.lower()
if 'true'.startswith(arg):
return True
elif 'false'.startswith(arg):
return False
else:
raise ValueError()
parser.add_argument(
'--frame_sizes', nargs='+', type=int, required=True,
help='frame sizes in terms of the number of lower tier frames, \
starting from the lowest RNN tier'
)
parser.add_argument(
'--model', required=True,
help='model (including path)'
)
parser.add_argument(
'--n_rnn', type=int, help='number of RNN layers in each tier'
)
parser.add_argument(
'--dim', type=int, help='number of neurons in every RNN and MLP layer'
)
parser.add_argument(
'--learn_h0', type=parse_bool,
help='whether to learn the initial states of RNNs'
)
parser.add_argument(
'--ulaw', type=parse_bool,
help='use u-law quantization'
)
parser.add_argument(
'--q_levels', type=int,
help='number of bins in quantization of audio samples'
)
parser.add_argument(
'--seq_len', type=int,
help='how many samples to include in each truncated BPTT pass'
)
parser.add_argument('--batch_size', type=int, help='batch size')
parser.add_argument(
'--datasets_path', help='path to the directory to find the conditioning'
)
parser.add_argument(
'--cond_set',
help='cond_set name - name of a directory in the conditioning sets path \
(settable by --datasets_path)'
)
parser.add_argument(
'--sample_rate', type=int,
help='sample rate of the training data and generated sound'
)
parser.add_argument(
'--n_samples', type=int,
help='number of samples to generate in each epoch'
)
parser.add_argument(
'--sample_length', type=int,
help='length of each generated sample (in samples)'
)
parser.add_argument(
'--norm_ind', type=parse_bool,
help='Apply conditioner normalization independently by speaker or jointly if false'
)
parser.add_argument(
        '--look_ahead', type=parse_bool,
help='Take conditioners from current and next frame'
)
parser.add_argument(
'--static_spk', type=parse_bool,
help='Only train with one speaker'
)
parser.add_argument(
'--seed', type=int,
help='seed initialization of random generator'
)
parser.add_argument(
'--weight_norm', type=parse_bool,
help='Apply weight normalization'
)
parser.set_defaults(**default_params)
main(**vars(parser.parse_args()))
|
11594159
|
from stdc.app import runThermoCalculator
from stdc.flaskapp import create_app
import pytest
import os
import json
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DIR = os.path.join(THIS_DIR,'refData')
@pytest.fixture
def client():
app = create_app({'TESTING': True})
with app.test_client() as client:
yield client
@pytest.mark.parametrize("inp_file, regenerateResult", \
[
('Si_inp.json', False),
('CO2_inp.json', False),
('SiH4_inp.json', False)
]
)
def test_thermocalc(inp_file,regenerateResult,regenerateAllResults=False):
print('========================================================')
print('TEST INPUT FILE: ', inp_file)
print()
print()
inp_file = os.path.join(TEST_DIR,inp_file)
out_file = inp_file.replace('inp.json', 'out.json')
# read the inputs from a file
with open(inp_file, 'r') as f:
inputs = json.load(f)
thermoData = runThermoCalculator(inputs)
# dump the output to a file. This makes it easier
# to compare the results in case the test fails
with open(out_file, 'w') as f:
json.dump(obj=thermoData, fp=f)
    # regenerate the test results if requested
if regenerateResult or regenerateAllResults:
with open(out_file+'_ref', 'w') as f:
json.dump(thermoData, f)
# read the ref results
with open(out_file+'_ref', 'r') as f:
ref_ThermoData = json.load(f)
assert thermoData == ref_ThermoData
@pytest.mark.parametrize("ref_file, ocIRI, osIRI, regenerateResult", \
[
( 'CO2_out_web.json_ref', \
'http://www.theworldavatar.com/kb/ontocompchem/G09_testID-111-111-111', \
'http://www.theworldavatar.com/kb/ontospecies/s00009360.owl/Species_7258652483811000', \
False
),
(
'C9H20_out_web.json_ref', \
'http://www.theworldavatar.com/kb/ontocompchem/G09_34486bee-f786-4bd2-ba1b-f7d82cadb88a', \
'http://www.theworldavatar.com/kb/ontospecies/s00008729.owl/Species_7258559357316100', \
False
)
]
)
def test_webapp(ref_file, ocIRI, osIRI, regenerateResult, client, regenerateAllResults=False):
print('========================================================')
ref_file = os.path.join(TEST_DIR,ref_file)
out_file = ref_file.replace('_ref', '')
route = f"/api/thermoagent/calculate?ontocompchem_IRI={ocIRI}&ontospecies_IRI={osIRI}"
response = client.get(route)
thermoData = response.json['result']
# dump the output to a file. This makes it easier
# to compare the results in case the test fails
with open(out_file, 'w') as f:
json.dump(obj=thermoData, fp=f)
    # regenerate the reference results if requested
if regenerateResult or regenerateAllResults:
with open(out_file+'_ref', 'w') as f:
json.dump(thermoData, f)
# read the ref results
with open(out_file+'_ref', 'r') as f:
ref_ThermoData = json.load(f)
assert thermoData == ref_ThermoData
|
11594180
|
from django.core.management.base import BaseCommand
from django.core.management import call_command
class Command(BaseCommand):
help = "Initial migration."
def handle(self, *args, **options):
call_command("migrate", "contenttypes")
call_command("migrate", "auth")
call_command("migrate", "--fake-initial", "accounts", "0001_initial")
call_command("migrate", "accounts")
call_command("migrate", "admin")
call_command("migrate", "--fake", "greencheck", "0001_initial")
call_command("migrate", "sessions")
call_command("migrate")
self.stdout.write(self.style.SUCCESS("Initial migration completed!"))
|
11594203
|
import re
import numpy as np
def get_timesteps_per_episode(env):
"""Returns a best guess as to the maximum number of steps allowed in a given Gym environment"""
if hasattr(env, "_max_episode_steps"):
return env._max_episode_steps
if hasattr(env, "spec"):
return env.spec.tags.get("wrapper_config.TimeLimit.max_episode_steps")
if hasattr(env, "env"):
return get_timesteps_per_episode(env.env)
return None
def slugify(value):
"""
    Normalizes a string: converts to lowercase, removes characters that aren't
    alphanumerics, underscores, hyphens, or whitespace, and converts spaces to hyphens.
"""
value = str(value)
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
return value
def corrcoef(dist_a, dist_b):
"""Returns a scalar between 1.0 and -1.0. 0.0 is no correlation. 1.0 is perfect correlation"""
dist_a = np.copy(dist_a) # Prevent np.corrcoef from blowing up on data with 0 variance
dist_b = np.copy(dist_b)
dist_a[0] += 1e-12
dist_b[0] += 1e-12
return np.corrcoef(dist_a, dist_b)[0, 1]
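# Quick illustration of why corrcoef() nudges the first element of each copy: on
# constant input np.corrcoef would divide by a zero standard deviation and return
# nan, while the 1e-12 perturbation keeps the result finite. (Standalone sketch,
# not part of the original module.)
if __name__ == '__main__':
    flat = np.zeros(10)
    noisy = np.arange(10, dtype=float)
    print(corrcoef(flat, flat))     # ~1.0 instead of nan
    print(corrcoef(noisy, noisy))   # ~1.0, perfect correlation
    print(corrcoef(noisy, -noisy))  # ~-1.0, perfect anti-correlation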
|
11594240
|
import numpy as np
import pandas as pd
import scipy
from numereval.scores import *
def neutralize(
df, columns, extra_neutralizers=None, proportion=1.0, normalize=True, era_col="era"
):
    # default extra_neutralizers to an empty list here; a mutable [] default argument would be flagged by linters
if extra_neutralizers is None:
extra_neutralizers = []
unique_eras = df[era_col].unique()
computed = []
for u in unique_eras:
print(u, end="\r")
df_era = df[df[era_col] == u]
scores = df_era[columns].values
if normalize:
scores2 = []
for x in scores.T:
x = (pd.Series(x).rank(method="first").values - 0.5) / len(x)
scores2.append(x)
scores = np.array(scores2).T
extra = df_era[extra_neutralizers].values
exposures = np.concatenate([extra], axis=1)
else:
exposures = df_era[extra_neutralizers].values
scores -= proportion * exposures.dot(
np.linalg.pinv(exposures.astype(np.float32)).dot(scores.astype(np.float32))
)
scores /= scores.std()
computed.append(scores)
return pd.DataFrame(np.concatenate(computed), columns=columns, index=df.index)
# to neutralize any series by any other series
def neutralize_series(series, by, proportion=1.0):
scores = series.values.reshape(-1, 1)
exposures = by.values.reshape(-1, 1)
    # append a constant column so the series is centered and gets exactly zero correlation with the exposures
exposures = np.hstack(
(exposures, np.array([np.mean(series)] * len(exposures)).reshape(-1, 1))
)
correction = proportion * (
exposures.dot(np.linalg.lstsq(exposures, scores, rcond=None)[0])
)
corrected_scores = scores - correction
neutralized = pd.Series(corrected_scores.ravel(), index=series.index)
return neutralized
def unif(df):
x = (df.rank(method="first") - 0.5) / len(df)
return pd.Series(x, index=df.index)
def get_feature_neutral_mean(df):
feature_cols = [c for c in df.columns if c.startswith("feature")]
df.loc[:, "neutral_sub"] = neutralize(df, [PREDICTION_NAME], feature_cols)[
PREDICTION_NAME
]
scores = (
df.groupby("era")
.apply(lambda x: correlation(x["neutral_sub"], x[TARGET_NAME]))
.mean()
)
return np.mean(scores)
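# A small worked example of neutralize_series (a sketch, not part of the library):
# after neutralization, the residual series should have essentially zero correlation
# with the exposure it was neutralized against, since least-squares residuals are
# orthogonal to the regressors.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    exposure = pd.Series(rng.normal(size=1000))
    series = 0.7 * exposure + 0.3 * pd.Series(rng.normal(size=1000))
    residual = neutralize_series(series, exposure, proportion=1.0)
    print(np.corrcoef(series, exposure)[0, 1])    # strongly correlated
    print(np.corrcoef(residual, exposure)[0, 1])  # ~0 after neutralization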
|
11594242
|
from CommonServerPython import *
# IMPORTS
import requests
import dns.resolver
import re
from typing import List, Dict, Callable, Tuple, Any
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# CONSTANTS
GOOGLE_BASE_DNS = "_cloud-netblocks.googleusercontent.com"
def fetch_cidr(dns_address: str) -> List[Dict]:
"""Recursively builds a CIDR dictionary with the relevant ip and type
Args:
dns_address: the dns address to lookup
Returns:
CIDR list
"""
cidr_arr = []
regex_dns = r"(include:.*? )"
regex_cidr = r"(ip.*?:.*? )"
try:
query_response_str = str(dns.resolver.query(dns_address, "TXT").response.answer[0][0])
except IndexError:
query_response_str = ''
dns_matches = re.finditer(regex_dns, query_response_str)
for match in dns_matches:
m = match.group()
address = m[8:len(m) - 1]
cidr_arr += fetch_cidr(address)
cidr_matches = re.finditer(regex_cidr, query_response_str)
for match in cidr_matches:
m = match.group()
        cidr_type = FeedIndicatorType.CIDR if m[0:3] == "ip4" else FeedIndicatorType.IPv6CIDR
cidr_ip = m[4:len(m) - 1]
cidr_arr.append({"type": cidr_type, "ip": cidr_ip})
return cidr_arr
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def build_iterator(self):
"""Retrieves all entries from the feed.
Returns:
A list of objects, containing the data.
"""
return fetch_cidr(self._base_url)
def test_module(client: Client, *_) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
"""Builds the iterator to check that the feed is accessible.
Args:
client: Client object.
Returns:
Outputs.
"""
client.build_iterator()
return 'ok', {}, {}
def get_indicators(client: Client, params: Dict[str, str], args: Dict[str, str]) -> Tuple[str, Dict[Any, Any], Dict[Any, Any]]:
"""Wrapper for retrieving indicators from the feed to the war-room.
Args:
client: Client object with request
params: demisto.params()
args: demisto.args()
Returns:
Outputs.
"""
limit = int(args.get('limit', '10'))
indicators = fetch_indicators(client, params)[:limit]
human_readable = tableToMarkdown('Indicators from GCP Whitelist Feed:', indicators,
headers=['value', 'type'], removeNull=True)
return human_readable, {}, {'raw_response': indicators}
def fetch_indicators(client: Client, params: Dict[str, str]) -> List[Dict]:
"""Retrieves indicators from the feed
Args:
client (Client): Client object with request
params: demisto.params() to retrieve tags
Returns:
Indicators.
"""
feed_tags = argToList(params.get('feedTags', ''))
tlp_color = params.get('tlp_color')
iterator = client.build_iterator()
indicators = []
for indicator in iterator:
indicator_obj = {
'value': indicator["ip"],
'type': indicator["type"],
'rawJSON': {
'value': indicator["ip"],
'type': indicator["type"],
},
'fields': {}
}
if feed_tags:
indicator_obj['fields']['tags'] = feed_tags
if tlp_color:
indicator_obj['fields']['trafficlightprotocol'] = tlp_color
indicators.append(indicator_obj)
return indicators
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
command = demisto.command()
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=GOOGLE_BASE_DNS,
verify=verify_certificate,
proxy=proxy)
commands: Dict[
str, Callable[[Client, Dict[str, str], Dict[str, str]], Tuple[str, Dict[Any, Any], Dict[Any, Any]]]
] = {
'test-module': test_module,
'gcp-whitelist-get-indicators': get_indicators
}
if command in commands:
return_outputs(*commands[command](client, demisto.params(), demisto.args()))
elif demisto.command() == 'fetch-indicators':
indicators = fetch_indicators(client, demisto.params())
for single_batch in batch(indicators, batch_size=2000):
demisto.createIndicators(single_batch)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
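# For reference, a sketch of the SPF-style TXT record format that fetch_cidr()
# parses (the record below is illustrative, not live data). regex_dns extracts
# nested "include:" domains to recurse into, while regex_cidr extracts "ip4:"/"ip6:"
# blocks that become CIDR indicators. This demo function is not called anywhere.
def _demo_parse_spf():
    txt = '"v=spf1 include:_cloud-netblocks1.googleusercontent.com ip4:8.8.4.0/24 ip6:2001:4860::/32 ?all"'
    print(re.findall(r"(include:.*? )", txt))  # ['include:_cloud-netblocks1.googleusercontent.com ']
    print(re.findall(r"(ip.*?:.*? )", txt))    # ['ip4:8.8.4.0/24 ', 'ip6:2001:4860::/32 ']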
|
11594299
|
import os
from skimage.data import chelsea
from skimage.io import imsave
here = os.path.dirname(__file__)
img = chelsea()
imsave(os.path.join(here, '..', '_static', 'generated_images', 'chelsea.png'),
img)
|
11594322
|
NUM_STAGES = 4
def get_stages():
stages = []
for i in range(1, NUM_STAGES + 1):
stages.append("stage_" + str(i))
return stages
def get_init_args():
return {
"kwargs": {
"project": "custom_stages",
"stages": get_stages(),
}
}
def get_jobs():
jobs = []
for i in range(1, NUM_STAGES + 1):
job = {
"kwargs": {
"command": "sleep 2",
"ci-stage": "stage_" + str(i),
"description": str(i),
"pipeline": "siesta",
}
}
jobs.append(job)
return jobs
def get_run_build_args():
return {}
def check_run(run):
asserts = []
stages = get_stages()
stages_run = run["stages"]
# Assert that the expected stages are
# equal to the stages stored in the run
asserts.append(stages == stages_run)
pipe_stages = run["pipelines"][0]["ci_stages"]
for pipe_stage in pipe_stages:
# Assert that any stage in the pipeline is one of the expected stages
asserts.append(pipe_stage["name"] in stages)
for job in pipe_stage["jobs"]:
# Assert that the stage of a job corresponds to the pipeline stage
asserts.append(job["wrapper_arguments"]["ci_stage"] == pipe_stage["name"])
# Assert that the description of a job corresponds to the expected one
asserts.append("stage_" + job["wrapper_arguments"]["description"] == pipe_stage["name"])
# Check that the number of asserts is the expected one
num_asserts = 3 * NUM_STAGES + 1
return num_asserts == len(asserts) and all(asserts)
|
11594345
|
import numpy as np
import os,sys,time
import torch
import options,util
import model_pretrain
print(util.yellow("======================================================="))
print(util.yellow("main_pretrain.py (pretraining with AtlasNet reimplementation)"))
print(util.yellow("======================================================="))
print(util.magenta("setting configurations..."))
opt = options.set()
with torch.cuda.device(opt.gpu):
trainer = model_pretrain.Model(opt)
trainer.load_dataset(opt)
trainer.build_network(opt)
trainer.setup_optimizer(opt)
trainer.restore_checkpoint(opt)
trainer.setup_visualizer(opt)
print(util.yellow("======= TRAINING START ======="))
trainer.time_start(opt)
for ep in range(opt.from_epoch,opt.to_epoch):
trainer.train_epoch(opt,ep)
if (ep+1)%10==0: trainer.evaluate(opt,ep)
if (ep+1)%50==0: trainer.save_checkpoint(opt,ep)
print(util.yellow("======= TRAINING DONE ======="))
|
11594364
|
from unittest import TestCase
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from robustnessgym.active import ais
# Get labels and probabilities from trained sklearn model
def get_labels_and_probs(X_test, clf, rank_prob=False):
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
if rank_prob:
temp = prob_pos.argsort()
ranks = np.empty_like(temp).astype(float)
ranks[temp] = np.linspace(0.0, 0.99999, len(prob_pos))
prob_pos = ranks
y_pred = clf.predict(X_test)
return y_pred, prob_pos
# Generate synthetic data to test model validation
def make_data(n_samples=100000, weights=(0.9, 0.1)):
X, y = datasets.make_classification(
n_samples=n_samples,
n_features=20,
n_informative=2,
n_redundant=2,
weights=weights,
)
pos_ind = np.where(y == 1)[0]
neg_ind = np.where(y == 0)[0]
train_samples = 100 # Samples used for training the models
# Randomly choose train_samples positive and negative rows
pos_samples = np.random.choice(len(pos_ind), size=train_samples, replace=False)
neg_samples = np.random.choice(len(neg_ind), size=train_samples, replace=False)
train_indices = np.concatenate([pos_ind[pos_samples], neg_ind[neg_samples]])
X_train = X[train_indices, :]
X_test = np.delete(X, train_indices, axis=0)
y_train = y[train_indices]
y_test = np.delete(y, train_indices)
return X_train, X_test, y_train, y_test
# Train a model on synthetic data, extract labels and probabilities
# from the trained model, then run AIS to validate
class TestAIS(TestCase):
def test_endtoend(self):
X_train, X_test, y_train, y_test = make_data(weights=(0.99, 0.01))
clf = LogisticRegression()
clf.fit(X_train, y_train)
y_pred, probs = get_labels_and_probs(X_test, clf, rank_prob=True)
prf1, stds, budget = ais.ais_fullalgorithm(y_pred, y_test, probs, 6000)
self.assertTrue(budget <= 6000)
self.assertTrue(np.nansum(prf1 > 1) + np.nansum(prf1 < 0) == 0)
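# Sketch of the rank_prob transform used in get_labels_and_probs above: raw scores
# are replaced by their normalized ranks, giving an approximately uniform
# distribution on [0, 1) while preserving ordering. (Illustrative only.)
if __name__ == '__main__':
    scores = np.array([0.9, 0.1, 0.5, 0.7])
    order = scores.argsort()
    ranks = np.empty_like(order).astype(float)
    ranks[order] = np.linspace(0.0, 0.99999, len(scores))
    print(ranks)  # [0.99999 0.      0.33333 0.66666]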
|
11594484
|
def main():
    price = float(input("price($): "))
    tip_rate = float(input("tip rate(%): "))
    # validate the inputs before computing anything
    if price < 0 and tip_rate < 0:
        print("price and tip rate can't be negative numbers")
    elif price < 0:
        print("price can't be a negative number")
    elif tip_rate < 0:
        print("tip rate can't be a negative number")
    else:
        tip = round(price * tip_rate / 100, 2)
        total = price + tip
        print("\nprice:", "$", price)
        print("tip rate:", tip_rate, "%")
        print("tip:", "$", tip)
        print("total:", "$", total)
main()
|
11594504
|
import unittest
from app import app
class TestConfig(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_config(self):
config = app.config['USER_CONFIG']
self.assertTrue(isinstance(config, dict))
self.assertTrue('ceph_config' in config)
if __name__ == '__main__':
unittest.main()
|
11594505
|
import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoProgressBar import *
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.pushButtonStart.clicked.connect(self.updateBar)
self.show()
    def updateBar(self):
        # count up in small float steps; setValue() expects an int,
        # so pass the integer part of the counter
        x = 0.0
        while x < 100:
            x += 0.0001
            self.ui.progressBar.setValue(int(x))
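# The busy-wait in updateBar() blocks the Qt event loop while it runs. A more
# idiomatic variant (a sketch, assuming the same Ui_Dialog from demoProgressBar)
# drives the bar from a QTimer so the GUI stays responsive:
from PyQt5.QtCore import QTimer

class MyTimerForm(QDialog):
    def __init__(self):
        super().__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.step)
        # tick every 50 ms once the start button is pressed
        self.ui.pushButtonStart.clicked.connect(lambda: self.timer.start(50))
        self.show()

    def step(self):
        value = self.ui.progressBar.value() + 1
        self.ui.progressBar.setValue(value)
        if value >= 100:
            self.timer.stop()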
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
|
11594569
|
import time
import pytest
from django.conf import settings
from pymongo import MongoClient
from node.blockchain.utils.lock import create_lock, lock
from node.core.database import get_database
from node.core.exceptions import BlockchainIsNotLockedError, BlockchainLockingError, BlockchainUnlockingError
def has_lock(name):
client_settings = settings.DATABASES['default']['CLIENT']
client = MongoClient(**client_settings)
return bool(client[settings.DATABASES['default']['NAME']].lock.find_one({'_id': name}))
@pytest.mark.django_db
def test_setting_lock():
assert not has_lock('mylock')
@lock('mylock')
def locked_function():
assert has_lock('mylock')
locked_function()
assert not has_lock('mylock')
@pytest.mark.django_db
def test_setting_lock_if_already_locked():
assert not has_lock('mylock')
@lock('mylock')
def locked_function():
assert has_lock('mylock')
@lock('mylock')
def lock_and_call():
assert has_lock('mylock')
with pytest.raises(BlockchainLockingError, match='Blockchain locking timeout for lock'):
locked_function()
raise BlockchainLockingError # prevent exception swallowing by pytest.raises()
with pytest.raises(BlockchainLockingError):
lock_and_call()
assert has_lock('mylock')
@pytest.mark.django_db
def test_unlocking_if_already_unlocked():
assert not has_lock('mylock')
@lock('mylock')
def locked_function():
assert has_lock('mylock')
get_database().lock.delete_one({'_id': 'mylock'})
with pytest.raises(BlockchainUnlockingError):
locked_function()
@pytest.mark.django_db
def test_ensure_locked():
assert not has_lock('mylock')
@lock('mylock')
def expect_locked_function():
assert has_lock('mylock')
@lock('mylock')
def locked_function():
assert has_lock('mylock')
expect_locked_function(expect_locked=True)
locked_function()
assert not has_lock('mylock')
@pytest.mark.django_db
def test_ensure_locked_if_not_locked():
assert not has_lock('mylock')
@lock('mylock')
def expect_locked_function():
assert has_lock('mylock')
with pytest.raises(BlockchainIsNotLockedError):
expect_locked_function(expect_locked=True)
assert not has_lock('mylock')
@pytest.mark.django_db
def test_cannot_create_lock_twice():
create_lock('mylock')
with pytest.raises(BlockchainLockingError, match='Lock could not be acquired'):
create_lock('mylock')
@pytest.mark.django_db
def test_cannot_create_lock_twice_with_longer_timeout():
create_lock('mylock')
start = time.time()
with pytest.raises(BlockchainLockingError, match='Blockchain locking timeout for lock'):
create_lock('mylock', timeout_seconds=0.1)
end = time.time()
assert 0.1 <= end - start <= 0.2
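# The lock in these tests is backed by MongoDB's unique _id constraint. A minimal
# sketch of how create_lock could be built on top of that (the real implementation
# lives in node.blockchain.utils.lock and may differ; create_lock_sketch is a
# hypothetical name):
from pymongo.errors import DuplicateKeyError

def create_lock_sketch(name, timeout_seconds=None):
    if timeout_seconds is None:
        # no timeout: fail immediately if the lock document already exists
        try:
            get_database().lock.insert_one({'_id': name})
            return
        except DuplicateKeyError:
            raise BlockchainLockingError(f'Lock could not be acquired: {name}')
    # with a timeout: poll until the deadline, then give up
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            get_database().lock.insert_one({'_id': name})
            return
        except DuplicateKeyError:
            time.sleep(0.01)
    raise BlockchainLockingError(f'Blockchain locking timeout for lock {name}')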
|
11594574
|
from django.core.handlers.base import BaseHandler
from freezegun import freeze_time
from ..jwt import (
JWT_REFRESH_TOKEN_COOKIE_NAME,
JWT_REFRESH_TYPE,
create_refresh_token,
jwt_encode,
jwt_user_payload,
)
@freeze_time("2020-03-18 12:00:00")
def test_jwt_refresh_token_middleware(rf, customer_user, settings):
refresh_token = create_refresh_token(customer_user)
settings.MIDDLEWARE = [
"saleor.core.middleware.jwt_refresh_token_middleware",
]
request = rf.request()
request.refresh_token = refresh_token
handler = BaseHandler()
handler.load_middleware()
response = handler.get_response(request)
cookie = response.cookies.get(JWT_REFRESH_TOKEN_COOKIE_NAME)
assert cookie.value == refresh_token
@freeze_time("2020-03-18 12:00:00")
def test_jwt_refresh_token_middleware_token_without_expire(rf, customer_user, settings):
settings.JWT_EXPIRE = True
payload = jwt_user_payload(
customer_user,
JWT_REFRESH_TYPE,
settings.JWT_TTL_REFRESH,
)
del payload["exp"]
refresh_token = jwt_encode(payload)
settings.MIDDLEWARE = [
"saleor.core.middleware.jwt_refresh_token_middleware",
]
request = rf.request()
request.refresh_token = refresh_token
handler = BaseHandler()
handler.load_middleware()
response = handler.get_response(request)
cookie = response.cookies.get(JWT_REFRESH_TOKEN_COOKIE_NAME)
assert cookie.value == refresh_token
@freeze_time("2020-03-18 12:00:00")
def test_jwt_refresh_token_middleware_samesite_debug_mode(rf, customer_user, settings):
refresh_token = create_refresh_token(customer_user)
settings.MIDDLEWARE = [
"saleor.core.middleware.jwt_refresh_token_middleware",
]
settings.DEBUG = True
request = rf.request()
request.refresh_token = refresh_token
handler = BaseHandler()
handler.load_middleware()
response = handler.get_response(request)
cookie = response.cookies.get(JWT_REFRESH_TOKEN_COOKIE_NAME)
assert cookie["samesite"] == "Lax"
@freeze_time("2020-03-18 12:00:00")
def test_jwt_refresh_token_middleware_samesite_none(rf, customer_user, settings):
refresh_token = create_refresh_token(customer_user)
settings.MIDDLEWARE = [
"saleor.core.middleware.jwt_refresh_token_middleware",
]
settings.DEBUG = False
request = rf.request()
request.refresh_token = refresh_token
handler = BaseHandler()
handler.load_middleware()
response = handler.get_response(request)
cookie = response.cookies.get(JWT_REFRESH_TOKEN_COOKIE_NAME)
assert cookie["samesite"] == "None"
|
11594575
|
from pathlib import Path
import pytest
from tempfile import TemporaryDirectory
import numpy as np
import pandas as pd
from deepnog.data import ProteinIterableDataset
from deepnog.learning import fit, predict
from deepnog.utils import create_df, get_config
DEEPNOG_ROOT = Path(__file__).parent.parent.parent.absolute()
TRAINING_FASTA = DEEPNOG_ROOT/"tests/data/test_training_dummy.faa"
TRAINING_CSV = DEEPNOG_ROOT/"tests/data/test_training_dummy.faa.csv"
DEEPNOG_CONFIG = DEEPNOG_ROOT/"config/deepnog_custom_config.yml"
@pytest.mark.parametrize('architecture', ['deepnog', ])
def test_fit_model_and_predict(architecture):
""" Fit each DeepNOG model on the dummy data, and assert inference
on the same training data gives perfect predictions.
"""
with TemporaryDirectory(prefix='deepnog_pytest_') as d:
config = get_config(DEEPNOG_CONFIG)
module = config['architecture'][architecture]['module']
cls = config['architecture'][architecture]['class']
result = fit(architecture=architecture,
module=module,
cls=cls,
training_sequences=TRAINING_FASTA,
validation_sequences=TRAINING_FASTA,
training_labels=TRAINING_CSV,
validation_labels=TRAINING_CSV,
n_epochs=2,
shuffle=True,
tensorboard_dir=None,
random_seed=123,
config_file=DEEPNOG_CONFIG,
verbose=0,
out_dir=Path(d),
)
dataset = ProteinIterableDataset(TRAINING_FASTA, TRAINING_CSV, )
preds, confs, ids, indices = predict(result.model,
dataset,
num_workers=0,
verbose=0)
df_pred = create_df(dataset.label_encoder.classes_,
preds, confs, ids, indices,
threshold=1e-15)
df_true = pd.read_csv(TRAINING_CSV)
df = df_true.merge(df_pred,
left_on="protein_id",
right_on="sequence_id")
np.testing.assert_array_equal(df.prediction, df.eggnog_id)
|
11594578
|
import copy
import logging
import os
from rest_framework import status
from rest_framework.test import APITestCase
from human_lambdas.user_handler.models import Organization
from human_lambdas.workflow_handler.models import Task, Workflow
from human_lambdas.workflow_handler.tests.constants import (
REGISTRATION_DATA,
WORKFLOW_DATA_3,
)
from . import DATA_PATH
logger = logging.getLogger(__name__)
class TestTaskCount(APITestCase):
def setUp(self):
self.file_path = os.path.join(DATA_PATH, "test.csv")
self.total_rows = 3
_ = self.client.post("/v1/users/register", REGISTRATION_DATA)
self.org_id = Organization.objects.get(user__email="<EMAIL>").pk
response = self.client.post(
"/v1/users/token", {"email": "<EMAIL>", "password": "<PASSWORD>"}
)
self.access_token = response.data["access"]
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + self.access_token)
response = self.client.get(
"/v1/users/api-token",
)
self.token = response.data["token"]
response = self.client.post(
"/v1/orgs/{}/workflows/create".format(self.org_id),
WORKFLOW_DATA_3,
format="json",
)
self.workflow_id = response.data["id"]
with open(self.file_path) as f:
data = {"file": f}
response = self.client.post(
"/v1/orgs/{0}/workflows/{1}/upload".format(
self.org_id, self.workflow_id
),
data=data,
)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
def test_total_count(self):
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, self.total_rows)
def test_assigning_task(self):
n_tasks = self.total_rows
for i in range(self.total_rows):
response = self.client.get(
"/v1/orgs/{}/workflows/{}/tasks/next".format(
self.org_id, self.workflow_id
)
)
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
for idata in response.data["data"]:
if idata["id"] == "foo":
idata[idata["type"]]["value"] = "bajs"
data = {"data": response.data["data"]}
_ = self.client.patch(
"/v1/orgs/{}/workflows/{}/tasks/{}".format(
self.org_id, self.workflow_id, response.data["id"]
),
data=data,
format="json",
)
n_tasks -= 1
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
def test_task_creation(self):
self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token)
n_tasks = self.total_rows
task_data = {
"data": {"Alpha": "data1", "Beta": "data2", "Gamma": "data3"},
}
for i in range(5):
response = self.client.post(
"/orgs/{}/workflows/{}/tasks/create".format(
self.org_id, self.workflow_id
),
task_data,
format="json",
)
n_tasks += 1
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + self.access_token)
response = self.client.get(
"/v1/orgs/{}/workflows/{}/tasks/{}".format(
self.org_id, self.workflow_id, response.data["id"]
)
)
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
data = copy.deepcopy(response.data["data"])
for idata in data:
if idata["id"] == "foo":
idata["single_selection"]["value"] = "bar2"
response_data = {"data": data}
_ = self.client.patch(
"/v1/orgs/{}/workflows/{}/tasks/{}".format(
self.org_id, self.workflow_id, response.data["id"]
),
data=response_data,
format="json",
)
n_tasks -= 1
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
def test_unassigning_task(self):
n_tasks = self.total_rows
response = self.client.get(
"/v1/orgs/{}/workflows/{}/tasks/next".format(self.org_id, self.workflow_id)
)
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
_ = self.client.post(
"/v1/orgs/{}/workflows/{}/tasks/{}/assign".format(
self.org_id,
self.workflow_id,
response.data["id"],
),
data={"assigned_to": None},
format="json",
)
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
task = Task.objects.get(pk=response.data["id"])
self.assertEqual(task.status, "open")
self.assertEqual(task.assigned_at, None)
def test_active_users(self):
n_tasks = self.total_rows
response = self.client.get(
"/v1/orgs/{}/workflows/{}/tasks/next".format(self.org_id, self.workflow_id)
)
workflow = Workflow.objects.get(pk=self.workflow_id)
self.assertEqual(workflow.n_tasks, n_tasks)
response = self.client.get("/v1/orgs/{}/workflows".format(self.org_id))
self.assertEqual(response.data[0]["active_users"], ["foo"])
|
11594602
|
import os
import unittest
import shlex
from click.testing import CliRunner
from cloudsplaining.command.expand_policy import expand_policy
class ExpandPolicyClickTest(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def test_click_expand_policy_wildcards(self):
"""cloudsplaining.command.expand_policy: expand_policy with wildcards example should return exit code 0"""
examples_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "examples"))
input_file = os.path.join(examples_directory, "policies", "wildcards.json")
command = f"--input-file {input_file}"
args = shlex.split(command)
response = self.runner.invoke(cli=expand_policy, args=args)
print(response.output)
self.assertTrue(response.exit_code == 0)
def test_click_expand_policy_explicit_actions(self):
"""cloudsplaining.command.expand_policy: expand_policy with explicit actions example should return exit code 0"""
examples_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "examples"))
input_file = os.path.join(examples_directory, "policies", "explicit-actions.json")
command = f"--input-file {input_file}"
args = shlex.split(command)
response = self.runner.invoke(cli=expand_policy, args=args)
print(response.output)
self.assertTrue(response.exit_code == 0)
|
11594710
|
from pyradioconfig.calculator_model_framework.interfaces.icalculator import ICalculator
from pyradioconfig.parts.lynx.calculators.calc_demodulator import CALC_Demodulator_lynx
from pyradioconfig.calculator_model_framework.Utils.CustomExceptions import CalculationException
from enum import Enum
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
from math import *
from py_2_and_3_compatibility import *
from pyradioconfig.parts.ocelot.calculators.calc_shaping import CALC_Shaping_ocelot
from pyradioconfig.calculator_model_framework.Utils.LogMgr import LogMgr
import numpy as np
import numpy.matlib
from scipy import signal as sp
#This file contains calculations related to the digital signal path, including ADC clocking, decimators, SRCs, channel filter, datafilter, digital mixer, and baud rate
class CALC_Demodulator_ocelot(ICalculator):
SRC2DENUM = 16384.0
chf_required_clks_per_sample = 4
def buildVariables(self, model):
#TODO: Clean this up and consolidate model variables
        #A lot of code here for now, as we changed the CalcManager to not run Common code in Ocelot (use only inheritance)
#Build variables from Lynx
calc_demod_lynx_obj = CALC_Demodulator_lynx()
calc_demod_lynx_obj.buildVariables(model)
#New variables
self._addModelVariable(model, 'adc_clock_mode', Enum, ModelVariableFormat.DECIMAL)
model.vars.adc_clock_mode.var_enum = CreateModelVariableEnum(
enum_name = 'AdcClockModeEnum',
enum_desc = 'Defines how the ADC clock is derived',
member_data = [
['HFXOMULT',0, 'Multiply HFXO for ADC Clock'],
['VCODIV', 1, 'Divide VCO for ADC Clock'],
])
self._addModelActual(model, 'adc_clock_mode', Enum, ModelVariableFormat.DECIMAL)
model.vars.adc_clock_mode_actual.var_enum = model.vars.adc_clock_mode.var_enum
self._addModelVariable(model, 'adc_rate_mode', Enum, ModelVariableFormat.DECIMAL)
model.vars.adc_rate_mode.var_enum = CreateModelVariableEnum(
enum_name='AdcRateModeEnum',
enum_desc='ADC Clock Rate Mode',
member_data=[
['FULLRATE', 0, 'Full rate mode'],
['HALFRATE', 1, 'Half rate mode'],
['EIGHTHRATE',2,'Eighth rate mode']
])
self._addModelVariable(model, 'adc_xo_mult', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'adc_freq', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'datafilter_taps', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'enable_high_mod_trecs', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'adc_xo_mult', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'lo_target_freq', long, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'adc_target_freq', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'adc_vco_div', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'adc_vco_div', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'a_divider', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'b_divider', int, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'c_divider', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'adc_freq_error', float, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'digmixfreq', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'src2_ratio', float, ModelVariableFormat.DECIMAL)
self._addModelActual(model, 'timing_detection_threshold_gain', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'rx_deviation_scaled', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'demod_select', Enum, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'trecs_enabled', bool, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'max_dec2', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'min_dec2', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'bitrate_gross', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'digmixfreq', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'chflatency_actual', int, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'preamble_detection_length', int, ModelVariableFormat.DECIMAL,
desc='Number of preamble bits to use for timing detection')
model.vars.demod_select.var_enum = CreateModelVariableEnum(
enum_name='DemodSelectEnum',
enum_desc='Demod Selection',
member_data=[
['LEGACY', 0, 'Legacy Demod'],
['COHERENT', 1, 'Coherent Demod'],
['TRECS_VITERBI', 2, 'TRecS + Viterbi Demod'],
['TRECS_SLICER', 3, 'TRecS + HD Demod'],
['BCR', 4, 'PRO2 BCR Demod'],
['LONGRANGE', 5, 'BLE Long Range Demod']
])
self._addModelVariable(model, 'min_bwsel', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'max_bwsel', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'min_src2', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'max_src2', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'bandwidth_tol', float, ModelVariableFormat.DECIMAL)
self._addModelVariable(model, 'phscale_derate_factor', int, ModelVariableFormat.DECIMAL)
self._add_demod_rate_variable(model)
def _add_demod_rate_variable(self, model):
self._addModelActual(model, 'demod_rate', int, ModelVariableFormat.DECIMAL)
def return_solution(self, model, demod_select):
# Check if we have a solution for OSR, DEC0, and DEC1
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_osr_dec0_dec1(model, demod_select)
# If we have selected TRECS but did not find a solution with the above line try to find a solution
# with relaxed SRC2 limits (SRC2 > 0.55 instead of SRC2 > 0.8)
# FIXME: once we are comfortable with the limit at 0.55 we might want to make this the general limit and remove this call
is_trecs = demod_select == model.vars.demod_select.var_enum.TRECS_SLICER or demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI
# is_vcodiv_high_bw widens the src2 limits for PHYs that would be affected by IPMCUSRW_876
# The issue occurs when the filter chain is in a VCODIV + dec=4,1 configuration. We'll want to constrain
# the filter to go to the next decimation factor (likely 3,2) and use fractional interpolation on the SRC2.
# We can't use dec0_actual, dec1_actual because those are the variables we are solving for
# instead, base the decision on if the bandwidth is in the range of what would use dec=4,1.
# the final check is handled by _channel_filter_clocks_valid
bandwidth_hz_threshold = model.vars.adc_freq_actual.value / (8 * 4 * 1) * 0.2
is_vcodiv_high_bw = model.vars.adc_clock_mode.value == model.vars.adc_clock_mode.var_enum.VCODIV and \
model.vars.bandwidth_hz.value > bandwidth_hz_threshold
no_solution = target_osr == 0 or target_osr > max_osr
if (is_trecs or is_vcodiv_high_bw) and no_solution:
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_osr_dec0_dec1(model, demod_select, relaxsrc2=True)
# If in TRECS SLICER mode we have one more chance to find a working solution this time with the remodulation
# path enabled.
if demod_select == model.vars.demod_select.var_enum.TRECS_SLICER and (
target_osr == 0 or target_osr > max_osr):
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_osr_dec0_dec1(model, demod_select, withremod=True)
# return solution if we have found one
return target_osr, dec0, dec1, min_osr, max_osr
def calc_demod_sel(self, model):
modtype = model.vars.modulation_type.value
tol = model.vars.baudrate_tol_ppm.value
mi = model.vars.modulation_index.value
antdivmode = model.vars.antdivmode.value
if hasattr(model.profiles, 'Long_Range'):
is_long_range = model.profile == model.profiles.Long_Range
else:
is_long_range = False
if model.vars.demod_select._value_forced != None:
demod_select = model.vars.demod_select._value_forced
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_solution(model, demod_select)
else:
# choose demod_select based on modulation and demod priority
if (modtype == model.vars.modulation_type.var_enum.OOK) or \
(modtype==model.vars.modulation_type.var_enum.ASK):
demod_select = model.vars.demod_select.var_enum.BCR
[target_osr,dec0,dec1,min_osr,max_osr] = self.return_osr_dec0_dec1(model,demod_select)
# TODO: Is there a case where osr < 7
elif (modtype == model.vars.modulation_type.var_enum.OQPSK):
if is_long_range:
demod_select = model.vars.demod_select.var_enum.COHERENT
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_solution(model, demod_select)
else:
demod_select = model.vars.demod_select.var_enum.LEGACY
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_solution(model, demod_select)
elif (modtype == model.vars.modulation_type.var_enum.BPSK) or \
(modtype == model.vars.modulation_type.var_enum.DBPSK):
demod_select = model.vars.demod_select.var_enum.LEGACY
[target_osr,dec0,dec1,min_osr,max_osr] = self.return_osr_dec0_dec1(model, demod_select)
elif (modtype == model.vars.modulation_type.var_enum.FSK4):
demod_select = model.vars.demod_select.var_enum.LEGACY
[target_osr,dec0,dec1,min_osr,max_osr] = self.return_osr_dec0_dec1(model, demod_select)
elif (modtype == model.vars.modulation_type.var_enum.FSK2 or \
modtype == model.vars.modulation_type.var_enum.MSK):
                # : for these antdivmode values, only the legacy or coherent demod can be used
if antdivmode == model.vars.antdivmode.var_enum.ANTSELFIRST or \
antdivmode == model.vars.antdivmode.var_enum.ANTSELCORR or \
antdivmode == model.vars.antdivmode.var_enum.ANTSELRSSI:
demod_select = model.vars.demod_select.var_enum.LEGACY
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_osr_dec0_dec1(model, demod_select)
else:
if tol > 10000:
demod_select = model.vars.demod_select.var_enum.BCR
[target_osr,dec0,dec1,min_osr,max_osr] = self.return_osr_dec0_dec1(model, demod_select)
else:
if mi < 1.0:
if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV:
                                # : don't use legacy demod for this antenna diversity mode
demod_select_list = [model.vars.demod_select.var_enum.TRECS_VITERBI,
model.vars.demod_select.var_enum.BCR]
else:
demod_select_list = [model.vars.demod_select.var_enum.TRECS_VITERBI,
model.vars.demod_select.var_enum.BCR,
model.vars.demod_select.var_enum.LEGACY]
else:
if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV:
                                # : don't use legacy demod for this antenna diversity mode
demod_select_list = [model.vars.demod_select.var_enum.TRECS_SLICER,
model.vars.demod_select.var_enum.BCR]
else:
demod_select_list = [model.vars.demod_select.var_enum.TRECS_SLICER,
model.vars.demod_select.var_enum.BCR,
model.vars.demod_select.var_enum.LEGACY]
# loop over demod list and see if we can find a solution
for demod_select in demod_select_list:
[target_osr, dec0, dec1, min_osr, max_osr] = self.return_solution( model, demod_select)
# stop at first solution
if target_osr != 0:
break
if target_osr == 0:
raise CalculationException('WARNING: target_osr=0 in calc_choose_demod()')
model.vars.demod_select.value = demod_select
model.vars.target_osr.value = int(target_osr)
model.vars.targetmin_osr.value = int(min_osr)
model.vars.targetmax_osr.value = int(max_osr)
model.vars.dec0.value = int(dec0)
model.vars.dec1.value = int(dec1)
def calc_trecs_enabled(self, model):
demod_select = model.vars.demod_select.value
if demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI or demod_select == model.vars.demod_select.var_enum.TRECS_SLICER:
trecs_enabled = True
else:
trecs_enabled = False
model.vars.trecs_enabled.value = trecs_enabled
def calc_osr_actual(self,model):
#This function calculates the actual OSR based on the ADC rate and decimator/SRC values
#Load model variables into local variables
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
dec2_actual = model.vars.dec2_actual.value
baudrate_actual = model.vars.rx_baud_rate_actual.value
src2_actual = model.vars.src2_ratio_actual.value
bcr_remod_dec = 2 ** model.vars.MODEM_BCRDEMODOOK_RAWNDEC.value
osr_actual = adc_freq_actual * src2_actual / (dec0_actual * dec1_actual * 8 * dec2_actual * bcr_remod_dec * baudrate_actual)
#Load local variables back into model variables
model.vars.oversampling_rate_actual.value = osr_actual
def calc_trecsosr_reg(self, model):
#This function writes the TRECSOSR register
#Load model variables into local variables
demod_select = model.vars.demod_select.value
osr_actual = model.vars.oversampling_rate_actual.value
remoddwn = model.vars.MODEM_PHDMODCTRL_REMODDWN.value + 1
trecs_enabled = model.vars.trecs_enabled.value
if trecs_enabled:
trecsosr_reg = osr_actual / remoddwn
else:
trecsosr_reg = 0
#Write the register
self._reg_write(model.vars.MODEM_TRECSCFG_TRECSOSR, int(round(trecsosr_reg)))
def return_dec0_from_reg(self, reg):
"""convert register value to decimation value
Args:
reg (int) : register value to decimation value
"""
if reg == 0:
dec0 = 3
elif reg == 1 or reg == 2:
dec0 = 4
elif reg == 3 or reg == 4:
dec0 = 8
        elif reg == 5:
            dec0 = 5
        else:
            raise CalculationException('ERROR: invalid DEC0 register value in return_dec0_from_reg()')
        return dec0
def calc_dec0_reg(self,model):
#This function writes the register for dec0
#Load model variables into local variables
dec0_value = model.vars.dec0.value
#Define a constant list for the (register data, value pairs)
dec0_list = [(0, 3), (2, 4), (4, 8)]
# Search for the value in the list
for dec0_pair in dec0_list:
if (dec0_pair[1]==dec0_value):
dec0_reg = dec0_pair[0]
#Write the registers
self._reg_write(model.vars.MODEM_CF_DEC0, dec0_reg)
def calc_dec1_reg(self, model):
#This function writes the register for dec1
#Load model variables into local variables
dec1_value = model.vars.dec1.value
#Dec1 register is simply one less than the value
dec1_reg = dec1_value - 1
#Write the registers
self._reg_write(model.vars.MODEM_CF_DEC1, dec1_reg)
def calc_dec0_actual(self,model):
#This function calculates the actual dec0 based on the register value
#Load model variables into local variables
dec0_reg = model.vars.MODEM_CF_DEC0.value
#Define a constant list for the (register data, value pairs)
dec0_list = [(0, 3), (1, 4), (2, 4), (3, 8), (4, 8)]
#Search for the value in the list
for dec0_pair in dec0_list:
if (dec0_pair[0]==dec0_reg):
dec0_value = dec0_pair[1]
#Load local variables back into model variables
model.vars.dec0_actual.value = dec0_value
def calc_dec1_actual(self, model):
#This function calculates the actual dec1 based on the register value
#Load model variables into local variables
dec1_reg = model.vars.MODEM_CF_DEC1.value
#Dec1 value is simply one more than the register setting
dec1_value = dec1_reg + 1
#Load local variables back into model variables
model.vars.dec1_actual.value = dec1_value
def calc_src2_dec2(self,model):
#This function calculates dec2 and src2
        # FIXME: need to have an option for TRecS where DEC2 is bypassed (DEC2=1)
        # unless the remod is enabled
#Load model variables into local variables
adc_freq = model.vars.adc_freq_actual.value
dec0 = model.vars.dec0_actual.value
dec1 = model.vars.dec1_actual.value
baudrate = model.vars.baudrate.value #We don't know the actual baudrate yet
target_osr = model.vars.target_osr.value #We don't know the actual OSR value yet
demod_sel = model.vars.demod_select.value
max_dec2 = model.vars.max_dec2.value
min_dec2 = model.vars.min_dec2.value
min_src2 = model.vars.min_src2.value # min value for SRC2
max_src2 = model.vars.max_src2.value # max value for SRC2
if (demod_sel==model.vars.demod_select.var_enum.BCR):
# BCR demod, dec2 and src2 not enabled
best_dec2 = 1
best_src2 = 1.0
else:
# Legacy, Coherent, Trecs/Viterbi Demods
#Calculate the OSR at the input to SRC2
osr_src2_min = float(adc_freq) / (8 * dec0 * dec1 * baudrate) * min_src2
osr_src2_max = float(adc_freq) / (8 * dec0 * dec1 * baudrate) * max_src2
#Calculate dec2 to achieve close to the target OSR
dec2_min = max(int( ceil(osr_src2_min / target_osr)),min_dec2)
dec2_max = min(int(floor(osr_src2_max / target_osr)),max_dec2)
target_src2 = 1.0
best_error = 999
# default values
best_dec2 = 1
best_src2 = (8 * dec0 * dec1 * baudrate) * target_osr / float(adc_freq)
for dec2 in range(dec2_min, dec2_max + 1):
src2 = dec2 * (8 * dec0 * dec1 * baudrate) * target_osr / float(adc_freq)
error = abs(target_src2 - src2)
if best_error > error:
best_error = error
best_src2 = src2
best_dec2 = dec2
#Load local variables back into model variables
model.vars.dec2.value = best_dec2
model.vars.src2_ratio.value = best_src2
def calc_src2_reg(self,model):
#This function calculates the src2 register writes
# Load model variables into local variables
src2_value = model.vars.src2_ratio.value
min_src2 = model.vars.min_src2.value # min value for SRC2
max_src2 = model.vars.max_src2.value # max value for SRC2
        if (src2_value >= min_src2) and (src2_value <= max_src2):
src2_reg = int(round(16384/src2_value))
else:
raise CalculationException('WARNING: src2 value out of range in calc_src2_reg()')
if (src2_reg != 16384):
src2_en = 1
else:
src2_en = 0
#Write to registers
self._reg_write(model.vars.MODEM_SRCCHF_SRCRATIO2, src2_reg)
self._reg_write(model.vars.MODEM_SRCCHF_SRCENABLE2, src2_en)
def calc_dec2_reg(self,model):
#This function calculates the dec2 register value
#Load model variables into local variables
dec2_value = model.vars.dec2.value
#The dec2 register is one less than the decimation value
dec2_reg = dec2_value - 1
#Write to register
self._reg_write(model.vars.MODEM_CF_DEC2, dec2_reg)
def calc_src2_actual(self,model):
#This function calculates the actual SRC2 ratio from the register value
#Load model variables into local variables
src2_reg = model.vars.MODEM_SRCCHF_SRCRATIO2.value
src2_en_reg = model.vars.MODEM_SRCCHF_SRCENABLE2.value
if src2_en_reg:
#The src2 ratio is simply 16384 divided by the register value
src2_ratio_actual = 16384.0 / src2_reg
else:
src2_ratio_actual = 1.0
#Load local variables back into model variables
model.vars.src2_ratio_actual.value = src2_ratio_actual
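    # Illustrative helper (not part of the original calculator): shows the SRC2
    # quantization round trip used by calc_src2_reg()/calc_src2_actual() above.
    # The ratio is stored as round(16384 / ratio), so the realized ratio is
    # 16384 / reg and carries a small quantization error.
    @staticmethod
    def _example_src2_round_trip(src2_ratio=0.873):
        src2_reg = int(round(16384 / src2_ratio))
        src2_actual = 16384.0 / src2_reg
        error_ppm = 1e6 * (src2_actual - src2_ratio) / src2_ratio
        return src2_reg, src2_actual, error_ppm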
def calc_dec2_actual(self,model):
#This function calculates the actual dec2 ratio from the register value
#Load model variables into local variables
dec2_reg = model.vars.MODEM_CF_DEC2.value
#The actual dec2 value is the dec2 register plus one
dec2_actual = dec2_reg + 1
#Load local variables back into model variables
model.vars.dec2_actual.value = dec2_actual
def calc_rxbr(self,model):
#This function calculates the receive baudrate settings
# based on actual dec0,dec1,dec2,src2, and desired baudrate
# then baudrate_actual will be calculated from rxbrfrac_actual
#Load model variables into local variables
target_osr = model.vars.target_osr.value #We don't know the actual OSR yet, because that has to be based on the final baudrate
targetmax_osr = model.vars.targetmax_osr.value
targetmin_osr = model.vars.targetmin_osr.value
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
dec2_actual = model.vars.dec2_actual.value
src2_actual = model.vars.src2_ratio_actual.value
baudrate = model.vars.baudrate.value
demod_select = model.vars.demod_select.value
bcr_demod_en = model.vars.bcr_demod_en.value
if demod_select == model.vars.demod_select.var_enum.BCR:
# FIXME: do we need to use the pro2 calc values here?
            # BCR uses its own registers defined in the pro2 calculator
model.vars.rxbrint.value = 0
model.vars.rxbrnum.value = 1
model.vars.rxbrden.value = 2
return
else:
denlist = range(2, 31)
error_limit = 0.5
# not using target_osr, because in some cases (e.g. BCR with fractional OSR)
            # the OSR w.r.t. desired baudrate and actual decimators varies from the target
# allowing 0.01% variation from targetmin_osr and targetmax_osr for range check
# because this osr calculation uses src2_actual, which has some small quantization noise
osr = float(adc_freq_actual * src2_actual) / float(dec0_actual * dec1_actual * 8 * dec2_actual * baudrate)
osr_limit_min = targetmin_osr * (1 - 0.0001)
osr_limit_max = targetmax_osr * (1 + 0.0001)
if (osr >= osr_limit_min) and (osr <= osr_limit_max):
#search for best fraction
rxbrint = int(floor(osr/2))
frac = (osr/2) - float(rxbrint)
numlist = range(0,31)
min_error = 100
for den in denlist:
for num in numlist:
frac_error = abs(float(num)/float(den) - frac)
if (frac_error<min_error):
min_error = frac_error
best_den = den
best_num = num
# calculate error in percent of baudrate, and require < 0.5% error
# matlab simulation sweeping osr with 0.01% step size, showed the max osr relative error = 0.4%
# using num=0:31, den=2:31
error_percent = 100 * abs( 2 * ( rxbrint + float(best_num) / float(best_den) ) - osr ) / osr
if error_percent < error_limit:
rxbrnum = best_num
rxbrden = best_den
if (rxbrnum==rxbrden):
rxbrden=2
rxbrnum=0
rxbrint=rxbrint+1
elif rxbrnum>rxbrden:
raise CalculationException('ERROR: num > den in calc_rxbr()')
else:
#print("adc_freq = %f" % adc_freq_actual)
#print("baudrate = %f" % baudrate)
#print("target_osr = %f" % target_osr)
#print("adjust_osr = %f" % osr)
#print("rxbrint = %d" % rxbrint)
#print("best_num = %d" % best_num)
#print("best_den = %d" % best_den)
#print(model.vars.demod_select.value)
raise CalculationException('ERROR: baudrate error > 0.5% in calc_rxbr()')
elif bcr_demod_en:
rxbrint = 3
rxbrnum = 1
rxbrden = 2
else:
#print("adc_freq = %f" % adc_freq_actual)
#print("baudrate = %f" % baudrate)
#print("target_osr = %f" % target_osr)
#print("adjust_osr = %f" % osr)
#print("targetmin_osr = %f" % targetmin_osr)
#print("targetmax_osr = %f" % targetmax_osr)
#print(str(model.vars.demod_select.value).split(".")[-1])
raise CalculationException('ERROR: OSR out of range in calc_rxbr()')
#Load local variables back into model variables
model.vars.rxbrint.value = rxbrint
model.vars.rxbrnum.value = rxbrnum
model.vars.rxbrden.value = rxbrden
def calc_rxbr_reg(self,model):
#This function writes the rxbr registers
#Load model variables into local variables
rxbrint = model.vars.rxbrint.value
rxbrnum = model.vars.rxbrnum.value
rxbrden = model.vars.rxbrden.value
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
dec2_actual = model.vars.dec2_actual.value
baudrate = model.vars.baudrate.value
src2_actual = model.vars.src2_ratio_actual.value
trecs_enabled = model.vars.trecs_enabled.value
osr = adc_freq_actual * src2_actual / (dec0_actual * dec1_actual * 8 * dec2_actual * baudrate)
if trecs_enabled and osr >= 8:
rxbrint = 0
rxbrden = 2
rxbrnum = 1
#Write registers
self._reg_sat_write(model.vars.MODEM_RXBR_RXBRINT, rxbrint)
self._reg_sat_write(model.vars.MODEM_RXBR_RXBRNUM, rxbrnum)
self._reg_sat_write(model.vars.MODEM_RXBR_RXBRDEN, rxbrden)
def calc_rxbr_actual(self,model):
#This function shows the actual rxbr values contained in the registers
#Load model variables into local variables
rxbrint_actual = model.vars.MODEM_RXBR_RXBRINT.value
rxbrnum_actual = model.vars.MODEM_RXBR_RXBRNUM.value
rxbrden_actual = model.vars.MODEM_RXBR_RXBRDEN.value
#Calculate the rxbr fraction
rxbrfrac_actual = float(rxbrint_actual + float(rxbrnum_actual) / rxbrden_actual)
#Load local variables back into model variables
model.vars.rxbrint_actual.value = rxbrint_actual
model.vars.rxbrnum_actual.value = rxbrnum_actual
model.vars.rxbrden_actual.value = rxbrden_actual
model.vars.rxbrfrac_actual.value = rxbrfrac_actual
    # FIXME: why do we need a special symbol encoding for mbus? we should combine this with the default symbol encoding
def calc_mbus_symbol_encoding(self,model):
#This function calculates the default value for mbus_symbol_encoding
#Set defaults
mbus_symbol_encoding = model.vars.mbus_symbol_encoding.var_enum.NRZ
# Load local variables back into model variables
model.vars.mbus_symbol_encoding.value = mbus_symbol_encoding
model.vars.symbol_encoding.value = model.vars.symbol_encoding.var_enum.NRZ # mbus_symbol_encoding
def calc_bitrate_gross(self, model):
#This function calculates the gross bitrate (bitrate including redundant coding bits)
#Note that this gross bitrate excludes DSSS, because in RX the DSSS chips never make it
#through the demod path (they are only used for correlation)
#Read from model variables
bitrate = model.vars.bitrate.value
encoding = model.vars.symbol_encoding.value
mbus_encoding = model.vars.mbus_symbol_encoding.value
fec_enabled = model.vars.fec_enabled.value
#Start by assuming the gross bitrate is equal to the net bitrate
bitrate_gross = bitrate
#Calculate the encoded bitrate based on the encoding parameters
if (encoding == model.vars.symbol_encoding.var_enum.Manchester or encoding == model.vars.symbol_encoding.var_enum.Inv_Manchester):
bitrate_gross *= 2
if (mbus_encoding == model.vars.mbus_symbol_encoding.var_enum.MBUS_3OF6):
bitrate_gross *= 1.5
if fec_enabled:
bitrate_gross *= 2
#Write the model variable
model.vars.bitrate_gross.value = int(round(bitrate_gross))
def calc_baudrate(self,model):
#This function calculates baudrate based on the input bitrate and modulation/encoding settings
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
bitrate_gross = model.vars.bitrate_gross.value
encoding = model.vars.symbol_encoding.value
spreading_factor = model.vars.dsss_spreading_factor.value
#Based on modulation type calculate baudrate from bitrate
if(mod_type == model.vars.modulation_type.var_enum.OQPSK) or \
(mod_type == model.vars.modulation_type.var_enum.OOK) or \
(mod_type == model.vars.modulation_type.var_enum.ASK) or \
(mod_type == model.vars.modulation_type.var_enum.FSK2) or \
(mod_type == model.vars.modulation_type.var_enum.MSK) or \
(mod_type == model.vars.modulation_type.var_enum.BPSK) or \
(mod_type == model.vars.modulation_type.var_enum.DBPSK):
baudrate = bitrate_gross
elif(mod_type == model.vars.modulation_type.var_enum.FSK4):
baudrate = bitrate_gross / 2
else:
raise CalculationException('ERROR: modulation type not supported in calc_baudrate()')
#Account for the DSSS spreading factor
if (encoding == model.vars.symbol_encoding.var_enum.DSSS):
baudrate *= spreading_factor
#Load local variables back into model variables
model.vars.baudrate.value = int(round(baudrate))
def calc_baudrate_actual(self,model,disable_subfrac_divider=False):
#This function calculates the actual baudrate based on register settings
#Load model variables into local variables
adc_freq = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
dec2_actual = model.vars.dec2_actual.value
src2ratio_actual = model.vars.src2_ratio_actual.value
subfrac_actual = model.vars.subfrac_actual.value
rxbrfrac_actual = model.vars.rxbrfrac_actual.value
dec = model.vars.MODEM_BCRDEMODOOK_RAWNDEC.value
bcr_demod_en_forced = (model.vars.bcr_demod_en.value_forced is not None) # This is currently only done for conc PHYs
agc_subperiod_actual = model.vars.AGC_CTRL7_SUBPERIOD.value
if (subfrac_actual > 0) and (disable_subfrac_divider == False):
frac = subfrac_actual * pow(2, dec)
else:
frac = rxbrfrac_actual
#Calculate actual baudrate once the ADC, decimator, SRC, and rxbr settings are known
if (bcr_demod_en_forced and agc_subperiod_actual == 1):
n_update = pow(2, dec)
baudrate_actual = (adc_freq * src2ratio_actual) / (dec0_actual * dec1_actual * n_update * 8 * frac)
else:
baudrate_actual = (adc_freq * src2ratio_actual) / (dec0_actual * dec1_actual * dec2_actual * 8 * 2 * frac)
#Load local variables back into model variables
model.vars.rx_baud_rate_actual.value = baudrate_actual
def calc_bwsel(self,model, softmodem_narrowing=False):
#This function calculates the bwsel ratio that sets the channel bandwidth
#Load model variables into local variables
adc_freq = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
afc_run_mode = model.vars.afc_run_mode.value
bandwidth = model.vars.bandwidth_hz.value #We don't know the actual channel bandwidth yet
        lock_bandwidth = model.vars.lock_bandwidth_hz.value # maybe this can be reduced further based on residual freq offset
min_bwsel = model.vars.min_bwsel.value
#Calculate the required BWSEL from the adc rate, decimators, and required bandwidth
bwsel = float(bandwidth * 8 * dec0_actual * dec1_actual) / adc_freq
lock_bwsel = float(lock_bandwidth * 8 * dec0_actual * dec1_actual) / adc_freq
if (lock_bwsel < min_bwsel) and ((afc_run_mode == model.vars.afc_run_mode.var_enum.ONE_SHOT) or softmodem_narrowing):
lock_bwsel = min_bwsel
#Load local variables back into model variables
model.vars.bwsel.value = bwsel
model.vars.lock_bwsel.value = lock_bwsel
def calc_chfilt_reg(self,model):
#This function calculates the channel filter registers
#Load model variables into local variables
bwsel = model.vars.bwsel.value
coeffs = self.return_coeffs(bwsel)
bit_widths = [10,10,10,11,11,11,12,12,12,14,14,14,16,16,16]
# replace negative numbers with 2s complement
for i in range(15):
if coeffs[i] < 0:
coeffs[i] = coeffs[i] + 2**bit_widths[i]
# Write registers
self._reg_write(model.vars.MODEM_CHFCOE00_SET0COEFF0, coeffs[0])
self._reg_write(model.vars.MODEM_CHFCOE00_SET0COEFF1, coeffs[1])
self._reg_write(model.vars.MODEM_CHFCOE00_SET0COEFF2, coeffs[2])
self._reg_write(model.vars.MODEM_CHFCOE01_SET0COEFF3, coeffs[3])
self._reg_write(model.vars.MODEM_CHFCOE01_SET0COEFF4, coeffs[4])
self._reg_write(model.vars.MODEM_CHFCOE02_SET0COEFF5, coeffs[5])
self._reg_write(model.vars.MODEM_CHFCOE02_SET0COEFF6, coeffs[6])
self._reg_write(model.vars.MODEM_CHFCOE03_SET0COEFF7, coeffs[7])
self._reg_write(model.vars.MODEM_CHFCOE03_SET0COEFF8, coeffs[8])
self._reg_write(model.vars.MODEM_CHFCOE04_SET0COEFF9, coeffs[9])
self._reg_write(model.vars.MODEM_CHFCOE04_SET0COEFF10, coeffs[10])
self._reg_write(model.vars.MODEM_CHFCOE05_SET0COEFF11, coeffs[11])
self._reg_write(model.vars.MODEM_CHFCOE05_SET0COEFF12, coeffs[12])
self._reg_write(model.vars.MODEM_CHFCOE06_SET0COEFF13, coeffs[13])
self._reg_write(model.vars.MODEM_CHFCOE06_SET0COEFF14, coeffs[14])
# Load model variables into local variables
bwsel = model.vars.lock_bwsel.value
coeffs = self.return_coeffs(bwsel)
# replace negative numbers with 2s complement
for i in range(15):
if coeffs[i] < 0:
coeffs[i] = coeffs[i] + 2**bit_widths[i]
# TODO: calculate the second set separately
self._reg_write(model.vars.MODEM_CHFCOE10_SET1COEFF0, coeffs[0])
self._reg_write(model.vars.MODEM_CHFCOE10_SET1COEFF1, coeffs[1])
self._reg_write(model.vars.MODEM_CHFCOE10_SET1COEFF2, coeffs[2])
self._reg_write(model.vars.MODEM_CHFCOE11_SET1COEFF3, coeffs[3])
self._reg_write(model.vars.MODEM_CHFCOE11_SET1COEFF4, coeffs[4])
self._reg_write(model.vars.MODEM_CHFCOE12_SET1COEFF5, coeffs[5])
self._reg_write(model.vars.MODEM_CHFCOE12_SET1COEFF6, coeffs[6])
self._reg_write(model.vars.MODEM_CHFCOE13_SET1COEFF7, coeffs[7])
self._reg_write(model.vars.MODEM_CHFCOE13_SET1COEFF8, coeffs[8])
self._reg_write(model.vars.MODEM_CHFCOE14_SET1COEFF9, coeffs[9])
self._reg_write(model.vars.MODEM_CHFCOE14_SET1COEFF10, coeffs[10])
self._reg_write(model.vars.MODEM_CHFCOE15_SET1COEFF11, coeffs[11])
self._reg_write(model.vars.MODEM_CHFCOE15_SET1COEFF12, coeffs[12])
self._reg_write(model.vars.MODEM_CHFCOE16_SET1COEFF13, coeffs[13])
self._reg_write(model.vars.MODEM_CHFCOE16_SET1COEFF14, coeffs[14])
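    # Worked example for the two's complement conversion used above
    # (illustrative value, using bit_widths[1] = 10):
    #   >>> coeff, width = -11, 10
    #   >>> coeff + 2**width
    #   1013
    # so a -11 tap is written to a 10-bit register field as 1013 (0x3F5).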
def return_coeffs(self, bwsel):
# this table is generated with srw_model/models/channel_filters/gen_channel_filter_coeffs.m
if bwsel < 0.155:
coeffs = [6, -11, -71, -199, -398, -622, -771, -700, -268, 602, 1869, 3351, 4757, 5769, 6138]
elif bwsel < 0.165:
coeffs = [16, 19, -12, -117, -318, -588, -828, -871, -530, 321, 1668, 3318, 4929, 6108, 6540]
elif bwsel < 0.175:
coeffs = [22, 45, 48, -18, -200, -495, -814, -976, -750, 44, 1433, 3224, 5025, 6365, 6861]
elif bwsel < 0.185:
coeffs = [26, 66, 102, 77, -75, -379, -765, -1043, -942, -222, 1189, 3104, 5087, 6586, 7145]
elif bwsel < 0.195:
coeffs = [30, 87, 155, 169, 46, -268, -720, -1112, -1134, -488, 947, 2988, 5154, 6813, 7436]
elif bwsel < 0.205:
coeffs = [30, 100, 200, 268, 201, -88, -581, -1088, -1264, -744, 660, 2792, 5128, 6951, 7639]
elif bwsel < 0.215:
coeffs = [29, 107, 239, 372, 391, 176, -304, -887, -1219, -877, 400, 2502, 4894, 6799, 7525]
elif bwsel < 0.225:
coeffs = [12, 77, 206, 373, 464, 325, -126, -775, -1256, -1075, 127, 2297, 4874, 6971, 7778]
elif bwsel < 0.235:
coeffs = [-1, 42, 160, 348, 510, 467, 72, -619, -1250, -1255, -159, 2066, 4836, 7146, 8045]
elif bwsel < 0.245:
coeffs = [-14, -5, 74, 257, 485, 569, 290, -397, -1178, -1416, -479, 1791, 4803, 7395, 8417]
elif bwsel < 0.255:
coeffs = [-22, -38, 8, 179, 448, 631, 455, -209, -1093, -1515, -718, 1570, 4764, 7576, 8697]
elif bwsel < 0.265:
coeffs = [-29, -72, -69, 73, 372, 658, 611, 3, -968, -1585, -954, 1337, 4722, 7779, 9010]
elif bwsel < 0.275:
coeffs = [-34, -104, -155, -65, 237, 623, 733, 229, -797, -1614, -1178, 1099, 4690, 8026, 9385]
elif bwsel < 0.285:
coeffs = [-39, -138, -255, -244, 35, 510, 800, 440, -610, -1628, -1403, 865, 4710, 8388, 9905]
elif bwsel < 0.295:
coeffs = [-30, -141, -307, -386, -170, 352, 801, 593, -478, -1716, -1722, 569, 4774, 8920, 10651]
elif bwsel < 0.305:
coeffs = [-14, -107, -294, -452, -329, 190, 787, 783, -223, -1644, -1940, 240, 4662, 9179, 11093]
elif bwsel < 0.315:
coeffs = [2, -61, -243, -466, -461, 1, 707, 925, 41, -1517, -2112, -87, 4513, 9395, 11493]
elif bwsel < 0.325:
coeffs = [17, -8, -163, -434, -556, -194, 576, 1013, 297, -1351, -2240, -407, 4338, 9570, 11851]
elif bwsel < 0.335:
coeffs = [30, 50, -55, -340, -586, -370, 398, 1031, 526, -1149, -2313, -707, 4128, 9676, 12132]
elif bwsel < 0.345:
coeffs = [41, 107, 69, -209, -573, -536, 183, 997, 733, -930, -2363, -1012, 3898, 9772, 12412]
elif bwsel < 0.355:
coeffs = [50, 163, 211, -17, -468, -629, -28, 904, 882, -723, -2397, -1316, 3631, 9808, 12627]
elif bwsel < 0.365:
coeffs = [45, 189, 329, 211, -245, -578, -161, 812, 1047, -385, -2220, -1498, 3232, 9442, 12324]
elif bwsel < 0.375:
coeffs = [29, 163, 345, 312, -126, -588, -327, 684, 1153, -145, -2161, -1728, 2996, 9496, 12562]
elif bwsel < 0.385:
coeffs = [11, 120, 335, 408, 33, -549, -498, 501, 1231, 133, -2059, -1974, 2716, 9556, 12843]
elif bwsel < 0.395:
coeffs = [-7, 65, 286, 460, 189, -462, -623, 306, 1259, 386, -1936, -2188, 2455, 9627, 13136]
elif bwsel < 0.405:
coeffs = [-26, -7, 191, 461, 340, -323, -704, 94, 1243, 637, -1784, -2395, 2186, 9727, 13490]
elif bwsel < 0.415:
coeffs = [-43, -80, 70, 410, 459, -156, -727, -99, 1197, 860, -1620, -2573, 1931, 9832, 13847]
elif bwsel < 0.425:
coeffs = [-61, -173, -113, 268, 526, 43, -691, -284, 1131, 1112, -1394, -2734, 1658, 9961, 14268]
elif bwsel < 0.435:
coeffs = [-68, -249, -307, 50, 473, 131, -735, -535, 1017, 1322, -1264, -3025, 1435, 10497, 15288]
elif bwsel < 0.445:
coeffs = [-50, -239, -383, -89, 458, 306, -645, -729, 838, 1505, -1001, -3166, 1111, 10603, 15732]
elif bwsel < 0.455:
coeffs = [-29, -203, -420, -229, 390, 451, -508, -877, 629, 1637, -729, -3264, 795, 10676, 16131]
elif bwsel < 0.465:
coeffs = [-2, -137, -413, -369, 263, 567, -322, -988, 378, 1731, -421, -3331, 448, 10717, 16524]
elif bwsel < 0.475:
coeffs = [25, -49, -341, -454, 109, 609, -136, -1028, 141, 1761, -141, -3344, 140, 10696, 16796]
elif bwsel < 0.485:
coeffs = [52, 61, -210, -493, -81, 587, 55, -1027, -114, 1747, 156, -3322, -188, 10638, 17045]
elif bwsel < 0.495:
coeffs = [83, 196, -7, -446, -254, 508, 211, -1017, -391, 1677, 430, -3295, -526, 10568, 17294]
else:
coeffs = [85, 274, 199, -265, -276, 473, 408, -853, -554, 1550, 686, -3080, -779, 10093, 16843]
        # Confirm the sum of the coefficient magnitudes is in spec so the
        # filter accumulator cannot overflow
        if sum(abs(c) for c in coeffs) >= 2**16:
            raise CalculationException('ERROR: Channel Filter Coefficients Sum of Magnitudes >= 2^16')
return coeffs
def calc_bw_carson(self,model):
#This function calculates the Carson bandwidth (minimum bandwidth)
#Load model variables into local variables
baudrate = model.vars.baudrate.value
deviation = model.vars.deviation.value
mod_type = model.vars.modulation_type.value
#Calculate the Carson bandwidth
if (mod_type == model.vars.modulation_type.var_enum.FSK4):
#Assumes deviation = inner symbol deviation
bw_carson = baudrate + 6*deviation
else:
bw_carson = baudrate + 2*deviation
#Load local variables back into model variables
model.vars.bandwidth_carson_hz.value = int(bw_carson)
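    # Worked example of the Carson rule above (illustrative numbers): a 2FSK
    # PHY at 38.4 kBaud with 20 kHz deviation needs
    #   >>> 38400 + 2 * 20000
    #   78400
    # Hz of bandwidth; the same deviation as the inner-symbol deviation of a
    # 4FSK PHY would instead need 38400 + 6 * 20000 = 158400 Hz.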
def calc_rx_tx_ppm(self,model):
#This function calculates the default RX and TX HFXO tolerance in PPM
#Set defaults
rx_ppm = 0
tx_ppm = 0
#Load local variables back into model variables
model.vars.rx_xtal_error_ppm.value = rx_ppm
model.vars.tx_xtal_error_ppm.value = tx_ppm
def get_alpha(self, model):
# Bandwidth adjustment based on mi and bt
        # the thresholds were derived by simulating the bandwidth of the modulated signal containing 98% of the energy
mi = model.vars.modulation_index.value
sf = model.vars.shaping_filter.value
if sf == model.vars.shaping_filter.var_enum.NONE.value:
if mi < 0.75:
alpha = 0.1
elif mi < 0.85:
alpha = 0
elif mi < 1.5:
alpha = -0.1
else:
alpha = -0.2
elif sf == model.vars.shaping_filter.var_enum.Gaussian.value:
bt = model.vars.shaping_filter_param.value # BT might not be defined if not Gaussian shaping so read it here
if bt < 0.75:
if mi < 0.95:
alpha = 0.2
elif mi < 1.5:
alpha = 0.1
elif mi < 6.5:
alpha = 0
else:
alpha = -0.1
elif bt < 1.5:
if mi < 0.85:
alpha = 0.1
elif mi < 1.5:
alpha = 0
else:
alpha = -0.1
elif bt < 2.5:
if mi < 0.75:
alpha = 0.1
elif mi < 0.85:
alpha = 0
else:
alpha = -0.1
else:
# for non Gaussian shaping keeping the original alpha calculation
if (mi < 1.0):
alpha = 0.2
elif (mi == 1.0):
alpha = 0.1
else:
alpha = 0
return alpha
def calc_target_bandwidth(self, model):
#This function calculates the target bandwidth in case the user didn't enter one
#This is the acquisition bandwidth
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
bw_carson = model.vars.bandwidth_carson_hz.value
baudrate = model.vars.baudrate.value
freq_offset_hz = model.vars.freq_offset_hz.value
#Calculate bw_demod and bw_acq
#bw_demod is the target demod bandwidth before adding frequency shift
#bw_acq combines bw_demod and frequency shift
if (mod_type == model.vars.modulation_type.var_enum.FSK2) or \
(mod_type == model.vars.modulation_type.var_enum.MSK):
alpha = self.get_alpha(model)
bw_acq = bw_carson + 2 * max( 0.0, freq_offset_hz - alpha * bw_carson )
elif (mod_type == model.vars.modulation_type.var_enum.FSK4):
bw_acq = bw_carson + 2.0 * freq_offset_hz
else:
#Default values for other modulation types
if (mod_type == model.vars.modulation_type.var_enum.OOK) or \
(mod_type == model.vars.modulation_type.var_enum.ASK):
bw_modulation = baudrate * 5.0
if (model.vars.bandwidth_hz._value_forced == None):
print(" WARNING: OOKASK bandwidth_hz has not been optimized")
elif (mod_type == model.vars.modulation_type.var_enum.OQPSK):
bw_modulation = baudrate * 1.25
else:
bw_modulation = baudrate * 1.0
bw_acq = bw_modulation + 2.0 * freq_offset_hz
#Set max limit on bandwidth_hz
bw_acq = min(bw_acq, 2.5e6)
if model.vars.bandwidth_hz.value_forced:
if model.vars.bandwidth_hz.value > 1.2 * bw_acq:
LogMgr.Warning("WARNING: Programmed acquisition channel bandwidth is much higher than calculated")
#Load local variables back into model variables
model.vars.bandwidth_hz.value = int(bw_acq)
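    # Worked example of the acquisition bandwidth above (illustrative numbers,
    # assuming get_alpha() returned 0.1): for 2FSK with bw_carson = 78400 Hz
    # and freq_offset_hz = 20000 Hz,
    #   >>> 78400 + 2 * max(0.0, 20000 - 0.1 * 78400)
    #   102720.0
    # so bandwidth_hz would be set to 102720 (well under the 2.5 MHz cap).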
def calc_lock_bandwidth(self, model, softmodem_narrowing=False):
#Load model variables into local variables
bw_acq = model.vars.bandwidth_hz.value
bw_demod = model.vars.demod_bandwidth_hz.value
afc_run_mode = model.vars.afc_run_mode.value
rtschmode = model.vars.MODEM_REALTIMCFE_RTSCHMODE.value
antdivmode = model.vars.antdivmode.value
if (model.vars.demod_bandwidth_hz._value_forced != None):
# Prioritize forced value
lock_bandwidth_hz = bw_demod
elif (afc_run_mode == model.vars.afc_run_mode.var_enum.ONE_SHOT) or softmodem_narrowing:
# for calculated bw_demod, upper limit: lock_bandwidth_hz <= bandwidth_hz
lock_bandwidth_hz = min(bw_demod, bw_acq)
elif (afc_run_mode == model.vars.afc_run_mode.var_enum.CONTINUOUS) and rtschmode == 1:
if antdivmode != model.vars.antdivmode.var_enum.DISABLE:
lock_bandwidth_hz = min(bw_demod, bw_acq)
else:
lock_bandwidth_hz = bw_acq
else:
# for calculated bw_demod, if AFC is disabled, set lock_bandwidth_hz = bandwidth_hz
lock_bandwidth_hz = bw_acq
model.vars.lock_bandwidth_hz.value = int(lock_bandwidth_hz)
def calc_bandwidth_actual(self,model):
#This function calculates the actual channel bandwidth based on adc rate, decimator, and bwsel settings
#Load model variables into local variables
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
bwsel = model.vars.bwsel.value
#Calculate the actual channel bandwidth
bandwidth_actual = int(adc_freq_actual * bwsel / dec0_actual / dec1_actual / 8)
#Load local variables back into model variables
model.vars.bandwidth_actual.value = bandwidth_actual
def calc_datafilter(self,model):
#This function calculates the number of datafilter taps
#Load model variables into local variables
osr = model.vars.oversampling_rate_actual.value
demod_sel = model.vars.demod_select.value
modformat = model.vars.modulation_type.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
remoddwn = model.vars.MODEM_PHDMODCTRL_REMODDWN.value + 1
trecs_enabled = model.vars.trecs_enabled.value
cplx_corr_enabled = model.vars.MODEM_CTRL6_CPLXCORREN.value == 1
if demod_sel==model.vars.demod_select.var_enum.COHERENT and \
modformat == model.vars.modulation_type.var_enum.OQPSK:
            # : For Coherent demod, set data filter taps to 9
# : TODO for complex correlation enabled, set datafilter taps to 6
if cplx_corr_enabled:
datafilter_taps = 6
else:
datafilter_taps = 9
# no data filter in path when TRecS is enabled
elif demod_sel==model.vars.demod_select.var_enum.BCR or \
modformat == model.vars.modulation_type.var_enum.OQPSK or \
(trecs_enabled and not remoden) or remoddwn > 1:
datafilter_taps = 2 # 2 here translates to datafilter_reg = 0 meaning disabled datafilter
        #Calculate datafilter based on OSR
elif (osr > 1) and (osr < 10):
datafilter_taps = int(round(osr))
else:
raise CalculationException('ERROR: OSR out of range in calc_datafilter()')
#Load local variables back into model variables
model.vars.datafilter_taps.value = datafilter_taps
def calc_datafilter_reg(self,model):
#This function writes the datafilter register
#Load model variables into local variables
datafilter_taps = model.vars.datafilter_taps.value
#The datafilter register setting is 2 less than the number of taps
datafilter_reg = datafilter_taps - 2
if datafilter_reg < 0:
datafilter_reg = 0
# Write register
self._reg_write(model.vars.MODEM_CTRL2_DATAFILTER, datafilter_reg)
def calc_datafilter_actual(self,model):
#This function calculates the actual datafilter taps from the register value
#Load model variables into local variables
datafilter_reg = model.vars.MODEM_CTRL2_DATAFILTER.value
        #The number of taps is the register value plus 2
        datafilter_taps_actual = datafilter_reg + 2
        #Load local variables back into model variables
        model.vars.datafilter_taps_actual.value = datafilter_taps_actual
def calc_digmix_res_actual(self,model):
#This function calculates the digital mixer register
#Load model variables into local variables
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
# digital mixer frequency resolution, Hz/mixer ticks
digmix_res = adc_freq_actual/((2**20) * 8.0 * dec0_actual)
model.vars.digmix_res_actual.value = digmix_res
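    # Worked example of the mixer resolution above (illustrative numbers):
    # with adc_freq_actual = 40 MHz and dec0_actual = 4,
    #   >>> 40e6 / ((2**20) * 8.0 * 4)
    #   1.1920928955078125
    # so each digital mixer tick moves the IF by roughly 1.19 Hz.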
def calc_digmixfreq_val(self,model):
digmix_res = model.vars.digmix_res_actual.value
fif = model.vars.if_frequency_hz_actual.value # IF frequency based on the actual SYNTH settings
model.vars.digmixfreq.value = int(round(fif/digmix_res))
def calc_digmixfreq_reg(self,model):
#This function calculates the digital mixer register
digmixfreq_reg = model.vars.digmixfreq.value
# Write register
self._reg_write(model.vars.MODEM_DIGMIXCTRL_DIGMIXFREQ, digmixfreq_reg)
def calc_digmixfreq_actual(self,model):
#This function calculates the actual digital mixer frequency based on the register
        #Load model variables into local variables
        digmixfreq_reg = model.vars.MODEM_DIGMIXCTRL_DIGMIXFREQ.value
        digmix_res_actual = model.vars.digmix_res_actual.value
        #Calculate the actual mixer frequency
        digmixfreq_actual = int(digmixfreq_reg * digmix_res_actual)
#Load local variables back into model variables
model.vars.digmixfreq_actual.value = digmixfreq_actual
def calc_devoffcomp_reg(self,model):
#This function calculates the register value of devoffcomp
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
if(mod_type==model.vars.modulation_type.var_enum.FSK4):
devoffcomp=1
else:
devoffcomp=0
#Write register
self._reg_write(model.vars.MODEM_CTRL4_DEVOFFCOMP, devoffcomp)
def calc_demod_rate_actual(self,model):
#This function calculates the actual sample rate at the demod
# Load model variables into local variables
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
dec2_actual = model.vars.dec2_actual.value
src2_actual = model.vars.src2_ratio_actual.value
demod_rate_actual = int(adc_freq_actual * src2_actual / (8 * dec0_actual * dec1_actual * dec2_actual))
#Load local variables back into model variables
model.vars.demod_rate_actual.value = demod_rate_actual
def calc_rx_deviation_scaled(self,model):
#This function calculates the scaled RX deviation
#Load model variables into local variables
deviation = model.vars.deviation.value
freq_gain_actual = model.vars.freq_gain_actual.value
demod_rate_actual = model.vars.demod_rate_actual.value
rx_deviation_scaled = float(256*deviation*freq_gain_actual/demod_rate_actual)
#Load local variables back into model variables
model.vars.rx_deviation_scaled.value = rx_deviation_scaled
def calc_devweightdis_reg(self,model):
#This function calculates the register value of devweightdis
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
rx_deviation_scaled = model.vars.rx_deviation_scaled.value
if(mod_type == model.vars.modulation_type.var_enum.FSK2) or \
(mod_type == model.vars.modulation_type.var_enum.MSK):
if(abs(rx_deviation_scaled-64) > 6):
devweightdis = 1
else:
devweightdis = 0
else:
devweightdis = 0
#Write register
self._reg_write(model.vars.MODEM_CTRL2_DEVWEIGHTDIS, devweightdis)
def calc_freq_gain_target(self,model):
#This function calculates the target frequency gain value
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
demod_rate_actual = model.vars.demod_rate_actual.value
deviation = model.vars.deviation.value
freq_offset_hz = model.vars.freq_offset_hz.value
large_tol = (freq_offset_hz > deviation)
afconeshot = model.vars.MODEM_AFC_AFCONESHOT.value
trecs_remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
if (mod_type == model.vars.modulation_type.var_enum.FSK2 or \
mod_type == model.vars.modulation_type.var_enum.MSK) and deviation > 0:
if large_tol and (not afconeshot) and trecs_remoden:
freq_gain_target = demod_rate_actual / (4.0 * (deviation + freq_offset_hz*0.75) / 2.0)
else:
freq_gain_target = demod_rate_actual / (4.0 * (deviation + freq_offset_hz) / 2.0)
elif (mod_type == model.vars.modulation_type.var_enum.FSK4) and deviation > 0:
freq_gain_target = demod_rate_actual / (4.0 * (3.0 * deviation + freq_offset_hz) / 2.0)
else:
freq_gain_target = 0.0
#Load local variables back into model variables
model.vars.freq_gain.value = freq_gain_target
def calc_freq_gain_reg(self,model):
#This function calculates the frequency gain registers
#Load model variables into local variables
freq_gain_target = model.vars.freq_gain.value
best_error = 1e9
bestM=0
bestE=0
for M in range(1,8):
for E in range(0,8):
calculated_gain = M*2**(2-E)
error = abs(freq_gain_target - calculated_gain)
if error < best_error:
best_error = error
bestM = M
bestE = E
#Write registers
self._reg_write(model.vars.MODEM_MODINDEX_FREQGAINM, bestM)
self._reg_write(model.vars.MODEM_MODINDEX_FREQGAINE, bestE)
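    # Worked example of the M * 2**(2 - E) search above (illustrative target):
    # for freq_gain_target = 0.8 the loop lands on M = 3, E = 4, since
    #   >>> 3 * 2**(2 - 4)
    #   0.75
    # is the closest realizable gain (error 0.05, first hit in M-major order).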
def calc_freq_gain_actual(self,model):
#This function calculates the actual frequency gain from the register values
#Load model variables into local variables
M_actual = model.vars.MODEM_MODINDEX_FREQGAINM.value
E_actual = model.vars.MODEM_MODINDEX_FREQGAINE.value
freq_gain_actual = M_actual * float(2**(2-E_actual))
#Load local variables back into model variables
model.vars.freq_gain_actual.value = freq_gain_actual
###Since we are not inheriting the CALC_Demodulator_Ocelot class from prior parts, the functions below are copied in order to maintain functionality###
#TODO: Go through these and decide if they still apply for Ocelot
def calc_interpolation_gain_actual(self, model):
#This function calculates the actual interpolation gain
#Load model variables into local variables
txbrnum = model.vars.MODEM_TXBR_TXBRNUM.value
modformat = model.vars.modulation_type.value
if txbrnum < 256:
interpolation_gain = txbrnum / 1.0
elif modformat == model.vars.modulation_type.var_enum.BPSK or \
modformat == model.vars.modulation_type.var_enum.DBPSK:
interpolation_gain = 16 * txbrnum * 2 ** (3-floor(log(txbrnum, 2)))
elif txbrnum < 512:
interpolation_gain = txbrnum / 2.0
elif txbrnum < 1024:
interpolation_gain = txbrnum / 4.0
elif txbrnum < 2048:
interpolation_gain = txbrnum / 8.0
elif txbrnum < 4096:
interpolation_gain = txbrnum / 16.0
elif txbrnum < 8192:
interpolation_gain = txbrnum / 32.0
elif txbrnum < 16384:
interpolation_gain = txbrnum / 64.0
else:
interpolation_gain = txbrnum / 128.0
# calculate phase interpolation gain for OQPSK cases
if modformat == model.vars.modulation_type.var_enum.OQPSK:
interpolation_gain = 2 ** (ceil(log(interpolation_gain, 2)))
#Load local variables back into model variables
model.vars.interpolation_gain_actual.value = float(interpolation_gain)
def calc_mod_type_actual(self, model):
#This function calculates the actual modulation type based on the register value
#Load model variables into local variables
mod = model.vars.MODEM_CTRL0_MODFORMAT.value
if mod == 0:
modformat = '2-FSK'
elif mod == 1:
modformat = '4-FSK'
elif mod == 2:
modformat = 'BPSK'
elif mod == 3:
modformat = 'DBPSK'
elif mod == 4:
modformat = 'OQPSK'
elif mod == 5:
modformat = 'MSK'
elif mod == 6:
modformat = 'OOKASK'
#Load local variables back into model variables
model.vars.mod_format_actual.value = modformat
def calc_mod_type_reg(self, model):
#This function writes the modulation type register
#Load model variables into local variables
modformat = model.vars.modulation_type.value
if modformat == model.vars.modulation_type.var_enum.FSK2 or \
modformat == model.vars.modulation_type.var_enum.MSK:
mod = 0
elif modformat == model.vars.modulation_type.var_enum.FSK4:
mod = 1
elif modformat == model.vars.modulation_type.var_enum.BPSK:
mod = 2
elif modformat == model.vars.modulation_type.var_enum.DBPSK:
mod = 3
elif modformat == model.vars.modulation_type.var_enum.OQPSK:
mod = 4
elif modformat == model.vars.modulation_type.var_enum.OOK or \
modformat == model.vars.modulation_type.var_enum.ASK:
mod = 6
else:
raise CalculationException('ERROR: modulation method in input file not recognized')
#Write register
self._reg_write(model.vars.MODEM_CTRL0_MODFORMAT, mod)
def calc_resyncper_brcal_val(self, model):
#This function calculates the resynchronization and baud rate calibration values
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
osr = model.vars.oversampling_rate_actual.value
symbols_in_timing_window = model.vars.symbols_in_timing_window.value
baudrate_tol_ppm = model.vars.baudrate_tol_ppm.value
syncword_length = model.vars.syncword_length.value
if symbols_in_timing_window > 0:
timing_wind_size = symbols_in_timing_window
else:
timing_wind_size = syncword_length
#Estimate the baudrate tol with resyncper=2
estimated_baudrate_tol_ppm = int(1.0/(2*timing_wind_size*osr)*1e6/2) #Divide by 2 is to be conservative
#Use a resynchronization period of 2 if we don't need much baudrate tolerance, otherwise use 1
if estimated_baudrate_tol_ppm >= baudrate_tol_ppm:
resyncper = 2
else:
resyncper = 1
#Baudrate calibration does not work well with the Legacy demod, so disable
brcalavg = 0
brcalen = 0
#Load local variables back into model variables
model.vars.brcalavg.value = brcalavg
model.vars.brcalen.value = brcalen
model.vars.timing_resync_period.value = resyncper
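    # Worked example of the tolerance estimate above (illustrative numbers):
    # with a 32-symbol timing window and an actual OSR of 5,
    #   >>> int(1.0 / (2 * 32 * 5) * 1e6 / 2)
    #   1562
    # so resyncper = 2 is kept whenever the PHY needs 1562 ppm or less.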
def calc_brcalmode_reg(self, model):
#This function writes the brcal model register
#Write register
self._reg_write(model.vars.MODEM_CTRL5_BRCALMODE, 0)
def calc_brcal_reg(self, model):
#This function writes the brcal average and enable registers
#Load model variables into local variables
brcalavg = model.vars.brcalavg.value
brcalen = model.vars.brcalen.value
#Write registers
self._reg_write(model.vars.MODEM_CTRL5_BRCALAVG, brcalavg)
self._reg_write(model.vars.MODEM_CTRL5_BRCALEN, brcalen)
def calc_resyncbaudtrans_reg(self, model):
#This function writes the resyncbaudtrans register
demod_select = model.vars.demod_select.value
# : for coherent demod, disable otherwise the measured baudrate tolerance is effectively 0
if demod_select == model.vars.demod_select.var_enum.COHERENT:
self._reg_write(model.vars.MODEM_CTRL5_RESYNCBAUDTRANS, 0)
else:
#Based on Series 1 findings, always set RESYNCBAUDTRANS for all other demods
self._reg_write(model.vars.MODEM_CTRL5_RESYNCBAUDTRANS, 1)
def calc_rsyncper_reg(self, model):
#This function writes the resyncper register
#Load model variables into local variables
timing_resync_period = model.vars.timing_resync_period.value
#Write register
self._reg_write(model.vars.MODEM_CTRL1_RESYNCPER, timing_resync_period)
def calc_resyncper_actual(self, model):
        #This function calculates the actual resynchronization period based on the register value
#Load model variables into local variables
resyncper_actual = float(model.vars.MODEM_CTRL1_RESYNCPER.value)
#Load local variables back into model variables
model.vars.resyncper_actual.value = resyncper_actual
def calc_phasedemod_reg(self, model):
#This function writes the phase demod register
#Load model variables into local variables
length = model.vars.dsss_len.value
modulation = model.vars.modulation_type.value
demod_sel = model.vars.demod_select.value
if modulation == model.vars.modulation_type.var_enum.OQPSK:
if demod_sel == model.vars.demod_select.var_enum.COHERENT:
phasedemod = 2
else:
phasedemod = 1
elif modulation == model.vars.modulation_type.var_enum.BPSK or \
modulation == model.vars.modulation_type.var_enum.DBPSK:
if length > 0:
phasedemod = 2
else:
phasedemod = 1
else:
phasedemod = 0
#Load local variables back into model variables
self._reg_write(model.vars.MODEM_CTRL1_PHASEDEMOD, phasedemod)
def calc_dsa_enable(self,model):
#This function sets a value for dsa_enable
dsa_enable = False
#Write the model variable
model.vars.dsa_enable.value = dsa_enable
# limit accumulated baudrate offset over timing window to 30000 ppm or 3%
# used in calc_resynper_brcal_val and calc_baudrate_tol_ppm_actual
# TODO: might need to tweak this number based on PHY performance
max_accumulated_tolerance_ppm = 30000.0
def calc_intosr_reg(self, model):
# This function sets INTOSR register field
osr = model.vars.oversampling_rate_actual.value
if abs(round(osr) - osr) < 0.001:
intosr = 1
else:
intosr = 0
self._reg_write(model.vars.MODEM_SRCCHF_INTOSR, intosr)
def calc_isicomp_reg(self, model):
# This function calculates the ISICOMP register field
# Read in global variables
modulation = model.vars.modulation_type.value
shaping_filter = model.vars.shaping_filter.value
#Calculate the ISICOMP value based on filter type and BT
if modulation == model.vars.modulation_type.var_enum.FSK4:
if shaping_filter == model.vars.shaping_filter.var_enum.Gaussian:
# Currently we only consider Gaussian shaping, support for other filter types with 4FSK and ISICOMP is TBD
# Read in shaping filter param here as some PHYs do not have shaping filter defined if filter is NONE
shaping_filter_param = model.vars.shaping_filter_param.value
if shaping_filter_param >= 0.75:
isicomp = 5
elif shaping_filter_param >= 0.6:
isicomp = 8
else:
#This is the default BT=0.5 case
isicomp = 10
else:
#Not gaussian filtering
isicomp = 8
else:
#Do not use ISI compensation for other modulation types
isicomp = 0
#Write the register
self._reg_write(model.vars.MODEM_CTRL4_ISICOMP, isicomp)
def calc_offsetphasemasking_reg(self, model):
# This function calculates OFFSETPHASEMASKING
modulation = model.vars.modulation_type.value
if modulation == model.vars.modulation_type.var_enum.BPSK or \
modulation == model.vars.modulation_type.var_enum.DBPSK:
self._reg_write(model.vars.MODEM_CTRL4_OFFSETPHASEMASKING, 1)
else:
self._reg_write(model.vars.MODEM_CTRL4_OFFSETPHASEMASKING, 0)
def calc_dec1gain_value(self, model):
"""calculate additional gain we want in the DEC1 decimator for very low bandwidth
PHY settings.
see register definition of DEC1GAIN in EFR32 Reference Manual (internal.pdf)
Args:
model (ModelRoot) : Data model to read and write variables from
"""
bw = model.vars.bandwidth_actual.value
if bw < 500:
dec1gain = 12
elif bw < 2000:
dec1gain = 6
else:
dec1gain = 0
model.vars.dec1gain.value = dec1gain
def calc_dec1gain_reg(self, model):
"""set DEC1GAIN register based on calculated value
Args:
model (ModelRoot) : Data model to read and write variables from
"""
val = model.vars.dec1gain.value
if val == 12:
reg = 2
elif val == 6:
reg = 1
else:
reg = 0
self._reg_write(model.vars.MODEM_CF_DEC1GAIN, reg)
def calc_syncacqwin_actual(self, model):
""" set syc word acquisition window for TRECS basd on register value
Args:
model (ModelRoot) : Data model to read and write variables from
"""
model.vars.syncacqwin_actual.value = 4 * (model.vars.MODEM_REALTIMCFE_SYNCACQWIN.value + 1)
def calc_phscale_reg(self, model):
#Load model variables into local variables
mi = model.vars.modulation_index.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
demod_sel = model.vars.demod_select.value
osr = model.vars.oversampling_rate_actual.value
phscale_derate_factor = model.vars.phscale_derate_factor.value
if remoden:
# if remodulation path is enabled freqgain block is handling the scaling
phscale_reg = 0
elif mi > 0.0:
if demod_sel == model.vars.demod_select.var_enum.BCR:
# phscale_reg = int(floor(log(8 * 4 * mi / osr, 2)))
bcr_phscale_list = [0,1,2,3]
bcrksi3_list = []
diff_from_opt_bcrksi3_list = []
for bcr_phscale_val in bcr_phscale_list:
bcr_phscale_val_actual = float(2 ** (bcr_phscale_val))
ksi1_val = self.return_ksi1_calc(model, bcr_phscale_val_actual)
ksi2_val, ksi3_val, ksi3wb_val = self.return_ksi2_ksi3_calc(model, ksi1_val)
bcrksi3_list.append(ksi3wb_val)
diff_from_opt_bcrksi3_list.append(40 - ksi3wb_val)
                # : Determine the lowest phscale value that keeps the wideband bcrksi3 at or below the target of 40
phscale_reg = -1
for diff_index in range(len(diff_from_opt_bcrksi3_list)):
if diff_from_opt_bcrksi3_list[diff_index] >= 0:
phscale_reg = bcr_phscale_list[diff_index]
break
# : If fail, calculate following est osr disable case
if phscale_reg == -1:
phscale_reg = int(floor(log(8 * 4 * mi / osr, 2)))
else:
# this scaling will bring the nominal soft decision as close to 64 as possible with a power of 2 scaling
phscale_reg = int(round(log(2 * mi, 2)))
else:
phscale_reg = 0
        #Derate phscale per phscale_derate_factor (used to accommodate large freq offset tol)
phscale_reg += int(round(log2(phscale_derate_factor)))
# limit phscale_reg from 0 to 3
phscale_reg = max(min(phscale_reg, 3), 0)
self._reg_write(model.vars.MODEM_TRECPMDET_PHSCALE, phscale_reg)
def calc_phscale_actual(self,model):
phscale_reg = model.vars.MODEM_TRECPMDET_PHSCALE.value
model.vars.phscale_actual.value = float(2 ** (phscale_reg))
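    # Worked example of the nominal phscale choice above (illustrative mi,
    # non-BCR path, remod disabled, phscale_derate_factor = 1): for mi = 2.0,
    #   >>> int(round(log(2 * 2.0, 2)))
    #   2
    # so PHSCALE is written as 2 and phscale_actual becomes 2**2 = 4.0.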
def return_ksi1_calc(self, model, phscale):
# Load model variables into local variables
demod_sel = model.vars.demod_select.value
modtype = model.vars.modulation_type.value
trecs_enabled = model.vars.trecs_enabled.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
freq_gain_actual = model.vars.freq_gain_actual.value
osr = model.vars.oversampling_rate_actual.value
baudrate = model.vars.baudrate.value
freq_dev_max = model.vars.freq_dev_max.value
freq_dev_min = model.vars.freq_dev_min.value
# when remod is enabled scaling is controlled by freqgain and phscale is currently set to 1
if remoden:
gain = freq_gain_actual / phscale / osr
elif demod_sel == model.vars.demod_select.var_enum.BCR:
gain = 8 / (phscale * osr)
else:
gain = 1 / phscale
#Calculate minimum and maximum possible modulation indices
mi_min = 2.0*freq_dev_min/baudrate
mi_max = 2.0*freq_dev_max/baudrate
#Determine which modulation index to use for the purposes of KSI calculation
mi_to_use = mi_min + (mi_max - mi_min) * 0.5
# calculate ksi values for Viterbi demod only
# if the gain is set correctly this should give us nominal soft decisions of 64 for regular case
# in case of remod we actually use the legacy demod's gain which sets the deviation + freq offset to 128
if ((trecs_enabled or demod_sel == model.vars.demod_select.var_enum.BCR) and
(modtype == model.vars.modulation_type.var_enum.FSK2 or
modtype == model.vars.modulation_type.var_enum.MSK)):
if demod_sel == model.vars.demod_select.var_enum.BCR:
saturation_value = 63
else:
saturation_value = 127
ksi1 = int(round(saturation_value * mi_to_use * gain))
else:
ksi1 = 0
return ksi1
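    # Worked example of the ksi1 scaling above (illustrative numbers): for a
    # TRECS 2FSK PHY (saturation_value = 127) with mi_min = mi_max = 1.0,
    # phscale = 2 and neither BCR nor remod active, gain = 1 / 2 and
    #   >>> int(round(127 * 1.0 * 0.5))
    #   64
    # which matches the nominal soft decision of 64 mentioned above.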
def calc_ksi1(self, model):
#This function writes the ksi1 model variable that is used to program both
#hardmodem and softmodem ksi1 regs
# Read in model vars
phscale_actual = model.vars.phscale_actual.value
# Call the calculation routine for ksi1 based on actual selected phscale
model.vars.ksi1.value = self.return_ksi1_calc(model, phscale_actual)
def calc_ksi1_reg(self, model):
#Read in model vars
ksi1 = model.vars.ksi1.value
#Write the reg
self._reg_sat_write(model.vars.MODEM_VITERBIDEMOD_VITERBIKSI1, ksi1)
def calc_syncbits_actual(self, model):
model.vars.syncbits_actual.value = model.vars.MODEM_CTRL1_SYNCBITS.value + 1
def calc_chflatency_actual(self, model):
chflatency = model.vars.MODEM_CHFLATENCYCTRL_CHFLATENCY.value
model.vars.chflatency_actual.value = chflatency
def calc_datapath_delays(self, model):
dec0 = model.vars.dec0_actual.value
dec1 = model.vars.dec1_actual.value
dec2 = model.vars.dec2_actual.value
datafilter_taps = model.vars.datafilter_taps.value
chflatency = model.vars.chflatency_actual.value
src2_actual = model.vars.src2_ratio_actual.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
remoddwn = model.vars.MODEM_PHDMODCTRL_REMODDWN.value
trecs_enabled = model.vars.trecs_enabled.value
oversampling_rate = model.vars.oversampling_rate_actual.value
# need to flush out the entire delay line so delay is not group delay but number of taps
# DEC8 delay: 22 taps
del_dec8 = 22
# DEC0 delay: 27 or 40 taps depending on decimation
del_dec0 = 27.0 if dec0 == 3 or dec0 == 4 else 40
# DC cancel filter group delay of 1, IRCAL delay of 1, no delay in dig mixer
del_dc_ircal_digmix = 2
# DEC1 delay: 4 additional taps per decimation as this is 4th order CIC
del_dec1 = (dec1 - 1) * 4.0 + 1
# CHFLT delay: 29 taps minus the 6 taps for each increment in latency reduction field
del_chflt = 29.0 - chflatency * 6.0
# SRC delay: can be up to 2 samples
del_src2 = 2
# Digital gain and CORDIC do not introduce any delays
del_digigain = 0
del_cordic = 0
# Differentiation delay of 1, frequency gain has no delay
del_diff = 1
# DEC2 delay: 1st or CIC so number of taps is the same as decimation
del_dec2 = dec2
# DATAFILT delay: number of taps
del_data = datafilter_taps
# remod operation delay
# FIXME: verify the delay in this block
del_remod = remoddwn
del_adc_to_diff = (((del_dec8 / 8 + del_dec0) / dec0 + del_dc_ircal_digmix + del_dec1) / dec1 + del_chflt + del_src2 ) / src2_actual + \
del_digigain + del_cordic + del_diff
grpdel_mixer_to_diff = ( (del_dec1+1)/2 / dec1 + (del_chflt+1)/2 + del_src2) / src2_actual + del_digigain + del_cordic + del_diff
if trecs_enabled:
if remoden == 1 and remoddwn == 0: # demod at DEC2 output
delay_adc_to_demod = (del_adc_to_diff + del_dec2) / dec2 # delay at dec2 output in samples at that point
delay_adc_to_demod_symbols = (delay_adc_to_demod + del_data) / oversampling_rate / dec2
grpdelay_to_demod = (grpdel_mixer_to_diff + (del_dec2+1)/2) / dec2 # delay at dec2 output in samples at that point
delay_agc = delay_adc_to_demod * dec2 * src2_actual
elif remoden == 1 and remoddwn > 1:
delay_adc_to_demod = ((del_adc_to_diff + del_dec2) / dec2 + del_data + del_remod) / remoddwn
delay_adc_to_demod_symbols = delay_adc_to_demod / oversampling_rate / dec2
grpdelay_to_demod = ((grpdel_mixer_to_diff + (del_dec2+1)/2) / dec2 + (del_data+1)/2 + (del_remod+1)/2) / remoddwn
delay_agc = delay_adc_to_demod * dec2 * src2_actual * remoddwn
else:
delay_adc_to_demod = del_adc_to_diff
delay_adc_to_demod_symbols = delay_adc_to_demod / oversampling_rate
grpdelay_to_demod = grpdel_mixer_to_diff
delay_agc = del_adc_to_diff * src2_actual
else:
delay_adc_to_demod = (del_adc_to_diff + del_dec2) / dec2 + del_data
delay_adc_to_demod_symbols = delay_adc_to_demod / oversampling_rate / dec2
grpdelay_to_demod = (grpdel_mixer_to_diff + (del_dec2+1)/2) / dec2 + (del_data+1)/2
delay_agc = delay_adc_to_demod * dec2 * src2_actual
model.vars.grpdelay_to_demod.value = int(ceil(grpdelay_to_demod))
model.vars.agc_settling_delay.value = int(ceil(delay_agc))
model.vars.delay_adc_to_demod_symbols.value = int(ceil(delay_adc_to_demod_symbols))
def calc_src2_denominator(self, model):
#Needed by RAIL
# Load model variables into local variables
osr = model.vars.oversampling_rate_actual.value
datarate = model.vars.baudrate.value
dec0 = model.vars.dec0_actual.value
dec1 = model.vars.dec1_actual.value
dec2 = model.vars.dec2_actual.value
adc_clock_mode = model.vars.adc_clock_mode.value
if (model.vars.adc_clock_mode.var_enum.HFXOMULT == adc_clock_mode):
src2_calcDenominator = 0
else:
src2_calcDenominator = datarate * dec0 * dec1 * dec2 * 8 * osr
# Load local variables back into model variables
model.vars.src2_calcDenominator.value = int(src2_calcDenominator)
def calc_dccomp_misc_reg(self, model):
# always enable both DC offset estimation and compensation blocks
self._reg_write(model.vars.MODEM_DCCOMP_DCCOMPEN, 1)
self._reg_write(model.vars.MODEM_DCCOMP_DCESTIEN, 1)
# don't reset at every packet
self._reg_write(model.vars.MODEM_DCCOMP_DCRSTEN, 0)
# always enable gear shifting option
self._reg_write(model.vars.MODEM_DCCOMP_DCGAINGEAREN, 1)
# when AGC gain change happens set the gear to fastest
self._reg_write(model.vars.MODEM_DCCOMP_DCGAINGEAR, 7)
# final gear setting after settling
self._reg_write(model.vars.MODEM_DCCOMP_DCCOMPGEAR, 3)
# limit max DC to 1V
self._reg_write(model.vars.MODEM_DCCOMP_DCLIMIT, 0)
# don't freeze state of DC comp filters
self._reg_write(model.vars.MODEM_DCCOMP_DCCOMPFREEZE, 0)
# time between gear shifts - set to fixed value for now
self._reg_write(model.vars.MODEM_DCCOMP_DCGAINGEARSMPS, 40)
def calc_forceoff_reg(self, model):
demod_sel = model.vars.demod_select.value
trecs_enabled = model.vars.trecs_enabled.value
if demod_sel == model.vars.demod_select.var_enum.BCR or trecs_enabled:
clock_gate_off_reg = 0xfdff
else:
clock_gate_off_reg = 0x00
self._reg_write(model.vars.MODEM_CGCLKSTOP_FORCEOFF, clock_gate_off_reg)
def get_limits(self, demod_select, withremod, relaxsrc2, model):
#Load model variables into local variables
bandwidth = model.vars.bandwidth_hz.value #from calc_target_bandwidth
        baudrate = model.vars.baudrate.value #We don't know the actual baudrate yet
modtype = model.vars.modulation_type.value
mi = model.vars.modulation_index.value
min_chfilt_osr = None
max_chfilt_osr = None
osr_list = None
# Define constraints for osr, src2, dec2
if demod_select == model.vars.demod_select.var_enum.BCR:
# FIXME: osr_list and resulting target osr are really chfilt_osr, pro2 calculator defines target_osr
# This doesn't cause an error but is confusing.
osr_est = int(ceil(2 * float(bandwidth) / baudrate))
min_osr = 8
max_osr = 127
min_chfilt_osr = 8
if (modtype == model.vars.modulation_type.var_enum.OOK) or \
(modtype == model.vars.modulation_type.var_enum.ASK):
max_chfilt_osr = 16256 #127*max_bcr_dec = 127*128
osr_list = range(12, max_chfilt_osr)
else:
max_chfilt_osr = 127
osr_list = [osr_est]
min_src2 = 1.0
max_src2 = 1.0
min_dec2 = 1
max_dec2 = 1
min_bwsel = 0.2
target_bwsel = 0.4
max_bwsel = 0.4
elif demod_select == model.vars.demod_select.var_enum.LEGACY:
if (modtype == model.vars.modulation_type.var_enum.FSK2 or \
modtype == model.vars.modulation_type.var_enum.FSK4 or \
modtype == model.vars.modulation_type.var_enum.MSK) and (mi<1):
# >=7 is better for sensitivity and frequency offset
# cost (sens degrade) increases with decreasing osr 6,5,4
osr_list = [7, 8, 9, 6, 5, 4]
min_osr = 4
else:
osr_list = [5, 7, 6, 4, 8, 9]
min_osr = 4
max_osr = 9
min_src2 = 0.8
max_src2 = 1.65 if relaxsrc2 else 1.2
min_dec2 = 1
max_dec2 = 64
min_bwsel = 0.2
target_bwsel = 0.4
max_bwsel = 0.4
elif demod_select == model.vars.demod_select.var_enum.COHERENT:
osr_list = [5]
min_osr = 5
max_osr = 5
min_src2 = 0.8
max_src2 = 1.65 if relaxsrc2 else 1.2
min_dec2 = 1
max_dec2 = 1
min_bwsel = 0.2
target_bwsel = 0.4
max_bwsel = 0.4
elif demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI or demod_select == model.vars.demod_select.var_enum.TRECS_SLICER:
if relaxsrc2 == True:
min_src2 = 0.55
max_src2 = 1.3
else:
min_src2 = 0.8
max_src2 = 1.0
min_bwsel = 0.2
target_bwsel = 0.4
max_bwsel = 0.4
if withremod == True:
min_dec2 = 1
max_dec2 = 64
min_osr = 4
max_osr = 32
osr_list = [4, 5, 6, 7]
elif mi > 2.5: #FIXME: arbitrary threshold here - for zwave 9.6kbps with mi=2.1 we prefer not to use int/diff path but at some point we will have to
min_dec2 = 1
max_dec2 = 64
min_osr = 4
max_osr = 7
osr_list = [4, 5, 6, 7]
else:
# Standard TRECs, no DEC2 or remod path
min_dec2 = 1
max_dec2 = 1
min_osr = 4
max_osr = 7
osr_list = [4, 5, 6, 7]
elif demod_select == model.vars.demod_select.var_enum.LONGRANGE:
min_dec2 = 1
max_dec2 = 1
min_osr = 4
max_osr = 4
osr_list = [4]
min_src2 = 0.8
max_src2 = 1.2
min_bwsel = 0.2
target_bwsel = 0.3
max_bwsel = 0.3
else:
            raise CalculationException('ERROR: invalid demod_select in get_limits()')
# save to use in other functions
model.vars.min_bwsel.value = min_bwsel # min value for normalized channel filter bandwidth
model.vars.max_bwsel.value = max_bwsel # max value for normalized channel filter bandwidth
model.vars.min_src2.value = min_src2 # min value for SRC2
model.vars.max_src2.value = max_src2 # max value for SRC2
model.vars.max_dec2.value = max_dec2
model.vars.min_dec2.value = min_dec2
return min_bwsel, max_bwsel, min_chfilt_osr, max_chfilt_osr, min_src2, max_src2, min_dec2, max_dec2, min_osr, max_osr, target_bwsel, osr_list
def calc_bandwdith_tol(self, model):
model.vars.bandwidth_tol.value = 0.0
def return_osr_dec0_dec1(self, model, demod_select, withremod=False, relaxsrc2=False, quitatfirstvalid=True):
# Load model variables into local variables
bandwidth = model.vars.bandwidth_hz.value # from calc_target_bandwidth
adc_freq = model.vars.adc_freq_actual.value
        baudrate = model.vars.baudrate.value # We don't know the actual baudrate yet
modtype = model.vars.modulation_type.value
mi = model.vars.modulation_index.value
if_frequency_hz = model.vars.if_frequency_hz.value
etsi_cat1_compatability = model.vars.etsi_cat1_compatible.value
bw_var = model.vars.bandwidth_tol.value
xtal_frequency_hz = model.vars.xtal_frequency_hz.value
# set limits based on selected demod
[min_bwsel, max_bwsel, min_chfilt_osr, max_chfilt_osr, min_src2, max_src2, min_dec2, \
max_dec2, min_osr, max_osr, target_bwsel, osr_list] = self.get_limits(demod_select, withremod, relaxsrc2, model)
# initialize output
best_bwsel_error = 1e9
best_osr = 0
best_dec0 = 0
best_dec1 = 0
# Setup for osr loop
# osr_list is a prioritized list, where first value with valid config will be returned
if (model.vars.target_osr._value_forced != None):
osr_forced = model.vars.target_osr._value_forced
osr_list = [osr_forced]
# Setup for dec0 loop
# dec0_list is a prioritized list, where ties in best bwsel go to earlier value in list
dec0_list = self.return_dec0_list(if_frequency_hz,adc_freq)
# Search values of osr, dec0, dec1 to find solution
# Exit on first osr with valid dec0 and dec1
for osr in osr_list:
for dec0 in dec0_list:
# define integer range for dec1
min_dec1 = int(max(1, ceil(float(adc_freq) * min_bwsel / (8 * dec0 * bandwidth*(1+bw_var)))))
max_dec1 = int(min(11500, floor(float(adc_freq) * max_bwsel / (8 * dec0 * bandwidth*(1-bw_var)))))
if min_dec1 <= max_dec1:
# Order list from highest to lowest, bwsel from highest to lowest
dec1_list = range(max_dec1,min_dec1-1,-1)
else:
# No solution
continue
for dec1 in dec1_list:
                    # check that the configuration does not trigger the IPMCUSRW-876 channel filter issue,
                    # which occurs when the input sample rate is too fast relative to the processing clock cycles needed
if not self._channel_filter_clocks_valid(model, dec0, dec1):
continue
# calculated dec2 range
if demod_select == model.vars.demod_select.var_enum.BCR:
calc_min_dec2 = 1
calc_max_dec2 = 1
chfilt_osr_actual = float(adc_freq) / (8 * dec0 * dec1 * baudrate)
if (modtype == model.vars.modulation_type.var_enum.OOK) or \
(modtype == model.vars.modulation_type.var_enum.ASK):
if chfilt_osr_actual < osr or chfilt_osr_actual > osr + 1.0:
continue
else:
if (chfilt_osr_actual < min_chfilt_osr) or (chfilt_osr_actual > max_chfilt_osr):
# not a solution, next value of dec1 loop
continue
elif demod_select == model.vars.demod_select.var_enum.TRECS_SLICER or \
demod_select == model.vars.demod_select.var_enum.TRECS_VITERBI:
# forcing integer osr condition, which applies to TRECS
# check for TRECS minimum clk cycle requirements
calc_min_dec2 = ceil(min_src2 * float(adc_freq) / (osr * dec0 * dec1 * 8 * baudrate))
calc_max_dec2 = floor(max_src2 * float(adc_freq) / (osr * dec0 * dec1 * 8 * baudrate))
trecs_src_interp_okay = self._check_trecs_required_clk_cycles(adc_freq, baudrate, osr, dec0,
dec1, xtal_frequency_hz, relaxsrc2, model)
if not trecs_src_interp_okay:
# not a solution due to trecs clocking constraints, continue
continue
else:
# forcing integer osr condition, which applies to LEGACY, COHERENT
calc_min_dec2 = ceil(min_src2 * float(adc_freq) / (osr * dec0 * dec1 * 8 * baudrate))
calc_max_dec2 = floor(max_src2 * float(adc_freq) / (osr * dec0 * dec1 * 8 * baudrate))
if (calc_min_dec2 <= calc_max_dec2) and (calc_min_dec2 <= max_dec2) and \
(calc_max_dec2 >= min_dec2):
# calculation of dec1 has constrained bwsel to range bwsel_min to bwsel_max
bwsel = bandwidth * (8 * dec0 * dec1) / float(adc_freq)
bwsel_error = abs(bwsel - target_bwsel)
                        # Select the bwsel closest to target_bwsel as the best result (ties go to the larger bwsel since dec1_list runs high to low)
if (bwsel_error < best_bwsel_error):
best_bwsel_error = bwsel_error
best_osr = osr
best_dec0 = dec0
best_dec1 = dec1
best_bwsel = bwsel
if best_osr > 0 and quitatfirstvalid:
# break out of the osr loop on first successful configuration
break
return best_osr, best_dec0, best_dec1, min_osr, max_osr
def return_dec0_list(self,if_frequency_hz,adc_freq):
# The purpose of this function is determine the prioritized dec0 list from decimation options 3,4,8
# Rules:
# 1) DEC0=8 was only designed for adc_freq <= 40MHz
# 2) DEC0 anti-aliasing rejection >60dB for DEC0=8 and 4
first_null_d8 = float(adc_freq) / (8 * 8)
ratio_d8 = float(if_frequency_hz) / first_null_d8
first_null_d4 = float(adc_freq) / (8 * 4)
ratio_d4 = float(if_frequency_hz) / first_null_d4
if (ratio_d8 < 0.248) and (adc_freq <= 40e6):
# 0.248 = (.125-.094)/.125 corresponds to >60dB attenuation on d0=8 response
dec0_priority_list = [8,4,3]
elif ratio_d4 < 0.27:
# 0.27 = (.25-0.1825)/.25 corresponds to >60dB attenuation on d0=4 response
dec0_priority_list = [4,3]
else:
dec0_priority_list = [3,4]
return dec0_priority_list
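    # Worked example of the dec0 prioritization above (illustrative numbers):
    # with adc_freq = 40 MHz and if_frequency_hz = 150 kHz,
    #   >>> 150e3 / (40e6 / 64)   # ratio_d8
    #   0.24
    # which is below 0.248 with adc_freq <= 40 MHz, so the list is [8, 4, 3];
    # at a 600 kHz IF, ratio_d8 = 0.96 and ratio_d4 = 0.48, giving [3, 4].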
def calc_remoden_reg(self, model):
osr = model.vars.oversampling_rate_actual.value
dec2_actual = model.vars.dec2_actual.value
trecs_enabled = model.vars.trecs_enabled.value
# Current assumption is that we are going to use the REMOD path only for Viterbi/TRecS
if trecs_enabled and (osr > 7 or dec2_actual > 1):
reg = 1
else:
reg = 0
self._reg_write(model.vars.MODEM_PHDMODCTRL_REMODEN, reg)
def calc_remoddwn_reg(self, model):
osr = model.vars.oversampling_rate_actual.value
#trecs_enabled = model.vars.trecs_enabled.value
# if trecs_enabled and osr > 7:
# remoddwn = int(osr/4) - 1 # we know osr is a multiple of 4 if we're here
# else:
        # We prefer to not use the slice and remod path so this should always be 0
remoddwn = 0
self._reg_write(model.vars.MODEM_PHDMODCTRL_REMODDWN, remoddwn)
def calc_remodosr_reg(self, model):
osr = model.vars.oversampling_rate_actual.value
trecs_enabled = model.vars.trecs_enabled.value
if trecs_enabled:
remodosr = int(round(osr)) - 1
else:
remodosr = 0
self._reg_write(model.vars.MODEM_PHDMODCTRL_REMODOSR, remodosr)
def calc_target_demod_bandwidth(self, model):
# Calculate target demod bandwidth
#Load model variables into local variables
mod_type = model.vars.modulation_type.value
bw_carson = model.vars.bandwidth_carson_hz.value
harddecision = model.vars.MODEM_VITERBIDEMOD_HARDDECISION.value
baudrate = model.vars.baudrate.value
trecs_enabled = model.vars.trecs_enabled.value
if (mod_type == model.vars.modulation_type.var_enum.FSK2) or \
(mod_type == model.vars.modulation_type.var_enum.MSK):
if trecs_enabled and (harddecision == 0):
bw_demod = baudrate * 1.1
else:
bw_demod = bw_carson
elif (mod_type == model.vars.modulation_type.var_enum.FSK4):
bw_demod = bw_carson
else:
#Default values for other modulation types
if (mod_type == model.vars.modulation_type.var_enum.OOK) or \
(mod_type == model.vars.modulation_type.var_enum.ASK):
bw_demod = baudrate * 5.0
if (model.vars.bandwidth_hz._value_forced == None):
print(" WARNING: OOKASK bandwidth_hz has not been optimized")
elif (mod_type == model.vars.modulation_type.var_enum.OQPSK):
bw_demod = baudrate * 1.25
else:
bw_demod = baudrate * 1.0
#Load local variables back into model variables
model.vars.demod_bandwidth_hz.value = int(bw_demod)
def calc_lock_bandwidth_actual(self, model):
# This function calculates the actual channel bandwidth based on adc rate, decimator, and bwsel settings
# Load model variables into local variables
adc_freq_actual = model.vars.adc_freq_actual.value
dec0_actual = model.vars.dec0_actual.value
dec1_actual = model.vars.dec1_actual.value
lock_bwsel = model.vars.lock_bwsel.value
min_bwsel = model.vars.min_bwsel.value
# Lower limit - calc filter coeffs limits lock_bwsel to min_bwsel
lock_bwsel_actual = max(lock_bwsel,min_bwsel)
# Calculate the actual channel bandwidth
lock_bandwidth_actual = int(adc_freq_actual * lock_bwsel_actual / dec0_actual / dec1_actual / 8)
# Load local variables back into model variables
model.vars.lock_bandwidth_actual.value = lock_bandwidth_actual
def get_data_filter(self, datafilter):
if datafilter == 0:
coef = [ 1 ]
elif datafilter == 1:
coef = [1/4, 2/4, 1/4]
elif datafilter == 2:
coef = [1/4, 1/4, 1/4, 1/4]
elif datafilter == 3:
coef = [1/8, 2/8, 2/8, 2/8, 1/8]
elif datafilter == 4:
coef = [1/8, 1/8, 2/8, 2/8, 1/8, 1/8]
elif datafilter == 5:
coef = [1/8, 1/8, 1/8, 2/8, 1/8, 1/8, 1/8]
elif datafilter == 6:
coef = [1/8, 1/8, 1/8, 1/8, 1/8, 1/8, 1/8, 1/8]
elif datafilter == 7:
coef = [1/16, 2/16, 2/16, 2/16, 2/16, 2/16, 2/16, 2/16, 1/16]
else:
raise CalculationException('ERROR: Invalid setting for datafilter in get_datafilter in calc_demodulator.py')
return coef
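    # Sketch of how a datafilter register value maps to taps (illustrative,
    # per calc_datafilter_reg above where reg = taps - 2): DATAFILTER = 2
    # selects the 4-tap moving average
    #   >>> sum([1/4, 1/4, 1/4, 1/4])
    #   1.0
    # i.e. unity DC gain, which holds for every row of the table above.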
def get_ma1_filter(self, rawgain):
if rawgain == 0:
df = [4]
elif rawgain == 1:
df = [2, 2]
elif rawgain == 2:
df = [1, 2, 1]
else:
df = [1/2, 3/2, 3/2, 1/2]
return df
def get_ma2_filter(self, rawfltsel):
if rawfltsel == 0:
df = [1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4]
elif rawfltsel == 1:
df = [1/2, 1/2, 1/2, 1/2]
else:
df = [1]
return df
def gen_frequency_signal(self, x, sf, cf, sfosr, model):
# get parameters
deviation = model.vars.deviation.value
baudrate = model.vars.baudrate.value
demodosr = round(model.vars.oversampling_rate_actual.value)
src2 = model.vars.MODEM_SRCCHF_SRCRATIO2.value
datafilter = model.vars.MODEM_CTRL2_DATAFILTER.value
remoden = model.vars.MODEM_PHDMODCTRL_REMODEN.value
remodoutsel = model.vars.MODEM_PHDMODCTRL_REMODOUTSEL.value
demod_select = model.vars.demod_select.value
dec2 = model.vars.dec2_actual.value
remodpath = True if remoden or demod_select == model.vars.demod_select.var_enum.BCR else False
if demod_select == model.vars.demod_select.var_enum.BCR:
rawndec = model.vars.MODEM_BCRDEMODOOK_RAWNDEC.value #Moved inside BCR statement to allow inheritance
dec2 = 2 ** rawndec
# scale shaping filter to desired amplitude OSR = 8
sf = sf / np.sum(sf) * sfosr
# pulse shape OSR = 8
y = sp.lfilter(sf, 1, x)
# apply deviation OSR = 8
z = y * deviation
# integrate to get phase after scaling by sampling rate at TX OSR = 8
t = np.cumsum(z / (baudrate * sfosr))
# modulate at baseband OSR = 8
u = np.exp(1j * 2 * pi * t)
# resample at channel filter rate (e.g. sfosr -> osr) OSR = chflt_osr * src2
# FIXME: handle other remod paths here if we end up using them
if remodpath:
osr = demodosr * dec2
else:
osr = demodosr
u2 = sp.resample_poly(u,osr*src2, sfosr*16384)
# channel filter OSR = chflt_osr * src2
v = sp.lfilter(cf, 1, u2)
# src2 - resample to target OSR rate OSR = target_osr * dec2
v2 = sp.resample_poly(v, 16384, src2)
# CORDIC OSR = target_osr * dec2
a = np.unwrap(np.angle(v2))
# downsample by dec2 to get to target_osr if remod enabled
if remodpath: #and remodoutsel == 1:
# differentiate phase to frequency OSR = target_osr * dec2
f1 = a[1:] - a[0:-1]
#f = sp.resample_poly(f1, 1, dec2)
# when downsampling pick the best phase that results in max eye opening as we are going to feed the samples
# from here to the datafilter. Low value samples will bring the average soft decision to a lower value.
best_min = 0
for phase in range(dec2):
f2 = sp.resample_poly(f1[round(len(f1)/4)+phase:], 1, dec2)
min_val = min(abs(f2[3:-3]))
if min_val >= best_min:
best_min = min_val
f = f2
else:
# differentiate phase to frequency OSR = target_osr * dec2
f = a[osr:] - a[0:-osr]
# optional decimation and filtering for remod paths
if demod_select == model.vars.demod_select.var_enum.BCR:
rawgain = model.vars.MODEM_BCRDEMODOOK_RAWGAIN.value #Moved inside BCR statement to allow inheritance
rawfltsel = model.vars.MODEM_BCRDEMODCTRL_RAWFLTSEL.value
ma1 = self.get_ma1_filter(rawgain)
g1 = sp.lfilter(ma1, 1, f)
ma2 = self.get_ma2_filter(rawfltsel)
g = sp.lfilter(ma2, 1, g1)
elif remoden and (remodoutsel == 0 or remodoutsel == 1):
df = self.get_data_filter(datafilter)
g = sp.lfilter(df, 1, f)
else:
g = f
# return frequency signal
return g
def return_ksi2_ksi3_calc(self, model, ksi1):
# get parameters
lock_bwsel = model.vars.lock_bwsel.value # use the lock bw
bwsel = model.vars.bwsel.value # use the lock bw
osr = int(round(model.vars.oversampling_rate_actual.value))
# calculate only if needed - ksi1 would be already calculated if that is the case
if (ksi1 == 0):
best_ksi2 = 0
best_ksi3 = 0
best_ksi3wb = 0
else:
            # get shaping filter and its oversampling rate with respect to baudrate
sf = CALC_Shaping_ocelot().get_shaping_filter(model)/1.0
sfosr = 8 # shaping filter coeffs are sampled at 8x
            # get channel filter and expand the symmetric part
cfh = np.asarray(self.return_coeffs(lock_bwsel))
cf = np.block([cfh, cfh[-2::-1]])/1.0
cfh = np.asarray(self.return_coeffs(bwsel))
cfwb = np.block([cfh, cfh[-2::-1]])/1.0
# base sequences for +1 and -1
a = np.array([ 1.0, 0, 0, 0, 0, 0, 0, 0])
b = np.array([-1.0, 0, 0, 0, 0, 0, 0, 0])
# generate frequency signal for periodic 1 1 1 0 0 0 sequence for ksi1
x1 = np.matlib.repmat(np.append(np.matlib.repmat(a, 1, 3),np.matlib.repmat(b, 1, 3)), 1, 4)
f1 = self.gen_frequency_signal( x1[0], sf, cf, sfosr, model)
# generate frequency signal for periodic 1 1 0 0 1 1 sequence for ksi2
x2 = np.matlib.repmat(np.append(np.matlib.repmat(a, 1, 2), np.matlib.repmat(b, 1, 2)), 1, 6)
f2 = self.gen_frequency_signal( x2[0], sf, cf, sfosr, model)
# generate frequency signal for periodic 1 0 1 0 1 0 sequence for ksi3
x3 = np.matlib.repmat(np.append(np.matlib.repmat(a, 1, 1), np.matlib.repmat(b, 1, 1)), 1, 12)
f3 = self.gen_frequency_signal( x3[0], sf, cf, sfosr, model)
            # generate frequency signal for periodic 1 0 1 0 1 0 sequence for ksi3 but with the acquisition channel filter
f3wb = self.gen_frequency_signal( x3[0], sf, cfwb, sfosr, model)
# find scaling needed to get f1 to the desired ksi1 value and apply it to f2 and f3
ind = osr - 1
scaler = ksi1 / np.max(np.abs(f1[ind + 8 * osr - 1: - 2 * osr: osr]))
f2 = scaler * f2
f3 = scaler * f3
f3wb = scaler * f3wb
# from matplotlib import pyplot as plt
# plt.plot(f1*scaler,'x-')
# plt.show()
# plt.plot(f2,'x-')
# plt.plot(f3,'x-')
# plt.plot(f3wb,'x-')
# search for best phase to sample to get ksi3 value.
# best phase is the phase that gives largest eye opening
best_ksi3 = 0
for ph in range(osr):
ksi3 = np.max(np.round(np.abs(f3[ - 6 * osr + ph: - 2 * osr: osr])))
if ksi3 > best_ksi3:
best_ksi3 = ksi3
best_ksi3wb = 0
for ph in range(osr):
ksi3wb = np.max(np.round(np.abs(f3wb[ - 6 * osr + ph: - 2 * osr: osr])))
if ksi3wb > best_ksi3wb:
best_ksi3wb = ksi3wb
            # ksi2 is tricky: if we sampled perfectly (symmetrically around a
            # pulse) we would see the same value for the 1 1 0 and 0 1 1
            # sequences, but most of the time we cannot sample perfectly since
            # oversampling can go as low as 4x for Viterbi PHYs. In that case
            # we get 2 ksi values, which we average to obtain ksi2.
best_cost = 1e9
for ph in range(osr):
x = np.round(np.abs(f2[- 6 * osr + ph: - 2 * osr: osr]))
cost = np.sum(np.abs(x - np.mean(x)))
if cost < best_cost:
best_cost = cost
best_ksi2 = np.round(np.mean(x))
# ensure that ksi1 >= ksi2 >= ksi3
# this code should only be needed in the extreme case when ksi1 = ksi2 = ksi3 and
# small variation can cause one to be larger than the other
best_ksi2 = ksi1 if best_ksi2 > ksi1 else best_ksi2
best_ksi3 = best_ksi2 if best_ksi3 > best_ksi2 else best_ksi3
best_ksi3wb = best_ksi2 if best_ksi3wb > best_ksi2 else best_ksi3wb
return best_ksi2, best_ksi3, best_ksi3wb
def calc_ksi2_ksi3(self, model):
# This function writes the ksi2,3 model variables that are used to program both
# hardmodem and softmodem ksi regs
#Read in model vars
ksi1 = model.vars.ksi1.value
# Call the calculation routine for ksi2 and ksi3
ksi2, ksi3, ksi3wb = self.return_ksi2_ksi3_calc(model, ksi1)
#Write the model vars
model.vars.ksi2.value = int(ksi2)
model.vars.ksi3.value = int(ksi3)
model.vars.ksi3wb.value = int(ksi3wb)
def calc_ksi2_ksi3_reg(self, model):
#Read in model vars
ksi2 = model.vars.ksi2.value
ksi3 = model.vars.ksi3.value
ksi3wb = model.vars.ksi3wb.value
#Write the reg fields
self._reg_write(model.vars.MODEM_VITERBIDEMOD_VITERBIKSI2, int(ksi2))
self._reg_write(model.vars.MODEM_VITERBIDEMOD_VITERBIKSI3, int(ksi3))
self._reg_write(model.vars.MODEM_VTCORRCFG1_VITERBIKSI3WB, int(ksi3wb))
def calc_prefiltcoeff_reg(self, model):
dsss0 = model.vars.MODEM_DSSS0_DSSS0.value
modtype = model.vars.modulation_type.value
demod_select = model.vars.demod_select.value
if modtype == model.vars.modulation_type.var_enum.OQPSK and dsss0 != 0:
dsss0_rotated = ((dsss0 << 1) | (dsss0 >> 31)) & 0xFFFFFFFF
dsss0_rotated_conj = dsss0_rotated ^ 0x55555555
prefilt = 2**32 + (dsss0 ^ ~dsss0_rotated_conj)
elif demod_select == model.vars.demod_select.var_enum.LONGRANGE:
prefilt = 0x3C3C3C3C
else:
prefilt = 0
self._reg_write(model.vars.MODEM_PREFILTCOEFF_PREFILTCOEFF, prefilt)
def calc_prefiltercoff_len(self, model):
demod_select = model.vars.demod_select.value
cplxcorr_enabled = model.vars.MODEM_CTRL6_CPLXCORREN.value
dsss_len = model.vars.dsss_len_actual.value
# : For coherent demod, set prefilter length to 4 symbols
if demod_select == model.vars.demod_select.var_enum.COHERENT:
prefilter_len_actual = dsss_len * 4
# If complex correlation is enabled, max length is 64 (prefilter_len_reg = 1)
if cplxcorr_enabled == 1:
if prefilter_len_actual > 64:
prefilter_len_actual = 64
else: # : default value for all other demods
prefilter_len_actual = 64
# : convert actual length to register values
prefilter_len_reg = int(round(prefilter_len_actual / 32.0 - 1.0))
self._reg_write(model.vars.MODEM_LONGRANGE1_PREFILTLEN, prefilter_len_reg)
def calc_demod_misc(self, model):
#Now that we always use the digital mixer, the CFOSR reg field is never used
self._reg_do_not_care(model.vars.MODEM_CF_CFOSR)
def _check_trecs_required_clk_cycles(self, adc_freq, baudrate, osr, dec0, dec1, xtal_frequency_hz, relaxsrc2, model):
# Returns True if the filter chain configuration meets the requirement for trecs
# minimum clock cycles between samples. Returns False if the configuration is invalid
#
# IPMCUSRW-668 - TRECS requires minimum of 4 clk between samples. SRC interpolation on ocelot
# has a fixed 3 clk separation and cannot be used with TRECS. Limiting max_src2_ratio is sufficient
# for ocelot, but this function is used by inherited classes which are able to adjust the
# interpolated sample clk delay
        # calculate the src_ratio here: this function is called while osr, dec0
        # and dec1 are still being evaluated, so src_ratio_actual is not yet available
dec1_freq = adc_freq / (8 * dec0 * dec1)
src_freq = baudrate * osr
src_ratio = src_freq / dec1_freq
TRECS_REQUIRED_CLKS_PER_SAMPLE = 4
bandwidth_hz = model.vars.bandwidth_hz.value
is_vcodiv = model.vars.adc_clock_mode.value == model.vars.adc_clock_mode.var_enum.VCODIV
if src_ratio > 1:
# ocelot has fixed clk delay of 3
            # IPMCUSRW-668, when it occurs, causes slightly slower waterfall curves and minor (< 1%) PER bumps
# if a PHY suffers from IPMCUSRW-876 (channel filter clocks), it is preferable to solve the channel
# filter issue by allowing the PHY workaround of a lower f_dec1 and interpolation on SRC2
bandwidth_threshold = 38e6 / 4 * 0.2 # minimum hfxo / chf_clks_per_sample * min_bwsel
return relaxsrc2 and is_vcodiv and bandwidth_hz > bandwidth_threshold
else:
cycles_per_sample = floor(xtal_frequency_hz / src_freq)
meets_clk_cycle_requirement = cycles_per_sample >= TRECS_REQUIRED_CLKS_PER_SAMPLE
return meets_clk_cycle_requirement
def calc_rx_restart_reg(self, model):
"""
Calculate collision restart control registers.
Args:
model:
Returns:
"""
antdivmode = model.vars.antdivmode.value
fltrsten = 0
antswrstfltdis = 1
rxrestartb4predet = 0
rxrestartmatap = 1
rxrestartmalatchsel = 0
rxrestartmacompensel = 2
rxrestartmathreshold = 6
rxrestartuponmarssi = 0
#The following need to be set the same regardless of antdiv enable
self._reg_write(model.vars.MODEM_RXRESTART_FLTRSTEN, fltrsten)
self._reg_write(model.vars.MODEM_RXRESTART_ANTSWRSTFLTTDIS, antswrstfltdis)
if antdivmode == model.vars.antdivmode.var_enum.DISABLE or \
antdivmode == model.vars.antdivmode.var_enum.ANTENNA1:
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTB4PREDET)
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTMATAP)
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTMALATCHSEL)
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTMACOMPENSEL)
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTMATHRESHOLD)
self._reg_do_not_care(model.vars.MODEM_RXRESTART_RXRESTARTUPONMARSSI)
else:
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTB4PREDET, rxrestartb4predet)
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTMATAP, rxrestartmatap)
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTMALATCHSEL, rxrestartmalatchsel)
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTMACOMPENSEL, rxrestartmacompensel)
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTMATHRESHOLD, rxrestartmathreshold)
self._reg_write(model.vars.MODEM_RXRESTART_RXRESTARTUPONMARSSI, rxrestartuponmarssi)
def calc_preamble_detection_length(self, model):
        #This method calculates a default value for preamble_detection_length
preamble_length = model.vars.preamble_length.value
#Set the preamble detection length to the preamble length (TX) by default
model.vars.preamble_detection_length.value = preamble_length
def calc_detdis_reg(self, model):
#This method calculates the MODEM_CTRL0_DETDIS field
#For Ocelot always set to 0
self._reg_write(model.vars.MODEM_CTRL0_DETDIS, 0)
def calc_dec1gain_actual(self, model):
"""given register settings return actual DEC1GAIN used
Args:
model (ModelRoot) : Data model to read and write variables from
"""
reg = model.vars.MODEM_CF_DEC1GAIN.value
if reg == 0:
val = 0
elif reg == 1:
val = 6
else:
val = 12
model.vars.dec1gain_actual.value = val
def calc_rssi_dig_adjust_db(self, model):
#These variables are passed to RAIL so that RSSI corrections can be made to more accurately measure power
#Read in model vars
dec0gain = model.vars.MODEM_DIGIGAINCTRL_DEC0GAIN.value
dec1_actual = model.vars.dec1_actual.value
dec1gain_actual = model.vars.dec1gain_actual.value
digigainen = model.vars.MODEM_DIGIGAINCTRL_DIGIGAINEN.value
digigainsel = model.vars.MODEM_DIGIGAINCTRL_DIGIGAINSEL.value
digigaindouble = model.vars.MODEM_DIGIGAINCTRL_DIGIGAINDOUBLE.value
digigainhalf = model.vars.MODEM_DIGIGAINCTRL_DIGIGAINHALF.value
#Calculate gains
dec0_gain_db = 6.0*dec0gain
dec1_gain_linear = (dec1_actual**4) * (2**(-1*math.floor(4*math.log2(dec1_actual)-4)))
dec1_gain_db = 20*math.log10(dec1_gain_linear/16) + dec1gain_actual #Normalize so that dec1=0 gives gain=16
if digigainen:
digigain_db = -3+(digigainsel*0.25)
else:
digigain_db = 0
digigain_db += 6*digigaindouble-6*digigainhalf
# For consistency / simplicity, let's treat the rssi_adjust_db output from the calculator like RAIL handles
# EFR32_FEATURE_SW_CORRECTED_RSSI_OFFSET in that the value is thought to be added to the RSSI
# So to compensate for the digital gain, the value should be the negative of the excess gain
# Note that RSSISHIFT is actually subtracted from the RSSI, but EFR32_FEATURE_SW_CORRECTED_RSSI_OFFSET is
# subtracted from the default RSSISHIFT so that the proper sign is maintained
rssi_dig_adjust_db = -(dec0_gain_db + dec1_gain_db + digigain_db)
#Write the vars
model.vars.rssi_dig_adjust_db.value = rssi_dig_adjust_db
def calc_rssi_rf_adjust_db(self, model):
#Read in model vars
rf_band = model.vars.rf_band.value
#Calculate rf adjustment based on band
if rf_band == model.vars.rf_band.var_enum.BAND_169:
rssi_rf_adjust_db = -15.5
elif rf_band == model.vars.rf_band.var_enum.BAND_315:
rssi_rf_adjust_db = -16.4
elif rf_band == model.vars.rf_band.var_enum.BAND_434:
rssi_rf_adjust_db = -14.3
elif rf_band == model.vars.rf_band.var_enum.BAND_490:
rssi_rf_adjust_db = -14.3
elif rf_band == model.vars.rf_band.var_enum.BAND_868 or \
rf_band == model.vars.rf_band.var_enum.BAND_915:
rssi_rf_adjust_db = -10.4
else:
LogMgr.Warning("Warning: No RSSI adjustment available for this band")
rssi_rf_adjust_db = 0.0
#Write the model var
model.vars.rssi_rf_adjust_db.value = rssi_rf_adjust_db
def calc_rssi_adjust_db(self, model):
#Read in model vars
rssi_dig_adjust_db = model.vars.rssi_dig_adjust_db.value
rssi_rf_adjust_db = model.vars.rssi_rf_adjust_db.value
#Add digital and RF adjustments
rssi_adjust_db = rssi_dig_adjust_db + rssi_rf_adjust_db
#Write the model var
model.vars.rssi_adjust_db.value = rssi_adjust_db
def _channel_filter_clocks_valid(self, model, dec0, dec1):
# returns if the requested configuration is safe to not trigger ipmcusrw-876
# to avoid the channel filter sampling issue, clks_per_sample >= 4
# helper function for return_osr_dec0_dec1
# no margin on the first check. hfxomult clocking at exactly 4 clks/sample will not trigger this issue
safe_clks_per_sample = self.chf_required_clks_per_sample
xtal_frequency_hz = model.vars.xtal_frequency_hz.value
adc_freq = model.vars.adc_freq_actual.value
adc_clock_mode_actual = model.vars.adc_clock_mode_actual.value
base_frequency_hz = model.vars.base_frequency_hz.value
f_dec1 = adc_freq / (8 * dec0 * dec1)
clks_per_sample = xtal_frequency_hz / f_dec1
base_config_valid = clks_per_sample >= safe_clks_per_sample
# for lodiv based clocking, sample rate varies with RF. VCODIV PHYs are only used in the 2.4G band
# maximum ppm change can be determined by the min, max of the FCC band of 2400-2483.5 MHz
        # for current 2.4G LODIV products: if it's LODIV and sub-GHz, the channel plan
        # doesn't span wide enough for this to be a problem
in_2p4G_band = base_frequency_hz >= 2400e6 and base_frequency_hz <= 2500e6
if adc_clock_mode_actual == model.vars.adc_clock_mode.var_enum.VCODIV and in_2p4G_band:
max_rf_frequency = 2480e6
max_ppm = (max_rf_frequency - base_frequency_hz) / base_frequency_hz
            # (1 - max_ppm) because adc_freq is in the denominator
clks_per_sample_highest_channel = clks_per_sample * (1 - max_ppm)
highest_channel_valid = clks_per_sample_highest_channel >= self.chf_required_clks_per_sample
valid = base_config_valid and highest_channel_valid
else:
valid = base_config_valid
return valid
def calc_phscale_derate_factor(self, model):
#This function calculates the derating factor for PHSCALE for TRECS PHYs with large freq offset tol
#Always set to 1 on Ocelot for now
phscale_derate_factor = 1
#Write the model var
model.vars.phscale_derate_factor.value = phscale_derate_factor
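# --- Illustrative sketch (not part of the calculator class above) ---
# The ksi3 search in return_ksi2_ksi3_calc tries every sampling phase of the
# oversampled filter response and keeps the phase with the largest eye
# opening. A minimal standalone version of that idea, for a hypothetical
# 1-D oversampled response f (reuses this module's existing numpy import):
def _best_eye_opening_sketch(f, osr):
    best = 0
    for ph in range(osr):
        # take one sample per symbol at this phase and keep the peak magnitude
        opening = np.max(np.round(np.abs(f[-6 * osr + ph:-2 * osr:osr])))
        best = max(best, opening)
    return best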
|
11594751
|
from datetime import datetime, timedelta, timezone
import logging
import random
from django.core.exceptions import ValidationError
from app_ccf.models import VoucherCode
LOGGER = logging.getLogger(__name__)
def import_voucher_codes(filename, batch):
LOGGER.info('Uploading codes...')
num_valid_codes = 0
num_invalid_codes = 0
with open(filename, 'r') as f:
voucher_codes_to_write = []
for line in f:
voucher_code_str = line.strip()
voucher_code = VoucherCode(
code=voucher_code_str,
added_amount=0,
batch=batch,
is_active=True,
)
            try:
                voucher_code.full_clean()  # Validate code format
except ValidationError as e:
LOGGER.error('Error importing code \'%s\': %s' % (
voucher_code_str, str(e)))
num_invalid_codes += 1
else:
voucher_codes_to_write.append(voucher_code)
LOGGER.info('Imported code \'%s\'.' % voucher_code_str)
num_valid_codes += 1
LOGGER.info('Writing %d codes to database (%d invalid)...' %
(num_valid_codes, num_invalid_codes))
VoucherCode.objects.bulk_create(voucher_codes_to_write)
LOGGER.info('Done.')
LOGGER.debug('Current codes: %s' % VoucherCode.objects.filter())
def generate_voucher_codes(num_codes, code_length, alphabet):
"""Returns a list of new unique codes not already in the database.
Args:
num_codes: The number of codes to generate.
code_length: The number of characters in each code.
alphabet: A string to choose characters from for each code.
"""
# Highly unlikely to happen, but we pull up the existing code set to check
# against in case we generate a duplicate
code_set = set(VoucherCode.objects.filter().values_list('code', flat=True))
new_codes = []
while len(new_codes) < num_codes:
new_code = ''.join([random.choice(alphabet)
for j in range(code_length)])
if new_code not in code_set:
new_codes.append(new_code)
code_set.add(new_code)
return new_codes
def invalidate_voucher_codes_with_campaign(affiliate, campaign):
"""Invalidates all codes under the given campaign name."""
voucher_codes = VoucherCode.objects.filter(batch__affiliate=affiliate,
batch__campaign=campaign)
LOGGER.info('Found %d codes with affiliate \'%s\', '
'and campaign \'%s\'. Invalidating...' %
(len(voucher_codes), affiliate, campaign))
voucher_codes.update(is_active=False)
LOGGER.info('Done.')
def invalidate_voucher_codes_with_code_list(codes):
"""Invalidates the codes given in a list. Unknown codes are ignored."""
voucher_codes = VoucherCode.objects.filter(code__in=codes)
LOGGER.info('Found %d out of %d provided codes. Invalidating...' %
(len(voucher_codes), len(codes)))
voucher_codes.update(is_active=False)
LOGGER.info('Done.')
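# --- Illustrative usage sketch (hypothetical; requires a configured Django
# environment with migrations applied and an existing batch object) ---
#
# alphabet = 'ABCDEFGHJKLMNPQRSTUVWXYZ23456789'
# codes = generate_voucher_codes(num_codes=100, code_length=8, alphabet=alphabet)
# with open('codes.txt', 'w') as f:
#     f.write('\n'.join(codes))
# import_voucher_codes('codes.txt', batch)  # `batch` is an existing batch instance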
|
11594783
|
import random
def problem2_5():
    """Simulates rolling a die 10 times and prints each roll."""
    lst = []
    # Setting the seed makes the random numbers always the same.
    # This is to make the auto-grader's job easier.
    random.seed(171)  # don't remove when you submit for grading
    for i in range(10):
        ran = random.randint(1, 6)
        lst.append(ran)
    for item in lst:
        print(item)
|
11594801
|
from fence.errors import AuthError, InternalError
class JWTError(AuthError):
def __init__(self, message, code=401):
super(JWTError, self).__init__(message)
self.message = str(message)
self.code = code
class JWTPurposeError(JWTError):
pass
class JWTSizeError(InternalError):
"""
    JWT exceeded 4096 bytes, after which browsers may clip cookies.
    See RFC 2109 §6.3.
"""
def __init__(self, message):
super(JWTSizeError, self).__init__(message)
self.message = str(message)
|
11594834
|
import cv2
import numpy as np
from scipy.spatial.transform import Rotation as R
# Dictionary that maps from joint names to keypoint indices.
KEYPOINT_DICT = {
'nose': 0,
'left_eye': 1,
'right_eye': 2,
'left_ear': 3,
'right_ear': 4,
'left_shoulder': 5,
'right_shoulder': 6,
'left_elbow': 7,
'right_elbow': 8,
'left_wrist': 9,
'right_wrist': 10,
'left_hip': 11,
'right_hip': 12,
'left_knee': 13,
'right_knee': 14,
'left_ankle': 15,
'right_ankle': 16
}
EDGES = [
(0, 1),
(0, 2),
(1, 3),
(2, 4),
(0, 5),
(0, 6),
(5, 7),
(7, 9),
(6, 8),
(8, 10),
(5, 6),
(5, 11),
(6, 12),
(11, 12),
(11, 13),
(13, 15),
(12, 14),
(14, 16)
]
skeleton3d = ((0,1),(1,2),(5,4),(4,3),(2,6),(3,6),(6,16),(16,7),(7,8),(8,9),(7,12),(7,13),(10,11),(11,12),(15,14),(14,13)) #head is 9, one hand is 10, other is 15
def draw_pose(frame,pose,size):
pose = pose*size
for sk in EDGES:
cv2.line(frame,(int(pose[sk[0],1]),int(pose[sk[0],0])),(int(pose[sk[1],1]),int(pose[sk[1],0])),(0,255,0),3)
def mediapipeTo3dpose(lms):
#convert landmarks returned by mediapipe to skeleton that I use.
#lms = results.pose_world_landmarks.landmark
pose = np.zeros((29,3))
pose[0]=[lms[28].x,lms[28].y,lms[28].z]
pose[1]=[lms[26].x,lms[26].y,lms[26].z]
pose[2]=[lms[24].x,lms[24].y,lms[24].z]
pose[3]=[lms[23].x,lms[23].y,lms[23].z]
pose[4]=[lms[25].x,lms[25].y,lms[25].z]
pose[5]=[lms[27].x,lms[27].y,lms[27].z]
pose[6]=[0,0,0]
    #some keypoints in mediapipe are missing, so we calculate them as the average of two keypoints
pose[7]=[lms[12].x/2+lms[11].x/2,lms[12].y/2+lms[11].y/2,lms[12].z/2+lms[11].z/2]
pose[8]=[lms[10].x/2+lms[9].x/2,lms[10].y/2+lms[9].y/2,lms[10].z/2+lms[9].z/2]
pose[9]=[lms[0].x,lms[0].y,lms[0].z]
pose[10]=[lms[15].x,lms[15].y,lms[15].z]
pose[11]=[lms[13].x,lms[13].y,lms[13].z]
pose[12]=[lms[11].x,lms[11].y,lms[11].z]
pose[13]=[lms[12].x,lms[12].y,lms[12].z]
pose[14]=[lms[14].x,lms[14].y,lms[14].z]
pose[15]=[lms[16].x,lms[16].y,lms[16].z]
pose[16]=[pose[6][0]/2+pose[7][0]/2,pose[6][1]/2+pose[7][1]/2,pose[6][2]/2+pose[7][2]/2]
#right foot
pose[17] = [lms[31].x,lms[31].y,lms[31].z] #forward
pose[18] = [lms[29].x,lms[29].y,lms[29].z] #back
pose[19] = [lms[25].x,lms[25].y,lms[25].z] #up
#left foot
pose[20] = [lms[32].x,lms[32].y,lms[32].z] #forward
pose[21] = [lms[30].x,lms[30].y,lms[30].z] #back
pose[22] = [lms[26].x,lms[26].y,lms[26].z] #up
#right hand
pose[23] = [lms[17].x,lms[17].y,lms[17].z] #forward
pose[24] = [lms[15].x,lms[15].y,lms[15].z] #back
pose[25] = [lms[19].x,lms[19].y,lms[19].z] #up
#left hand
pose[26] = [lms[18].x,lms[18].y,lms[18].z] #forward
pose[27] = [lms[16].x,lms[16].y,lms[16].z] #back
pose[28] = [lms[20].x,lms[20].y,lms[20].z] #up
return pose
def keypoints_to_original(scale,center,points):
scores = points[:,2]
points -= 0.5
#print(scale,center)
#print(points)
points *= scale
#print(points)
points[:,0] += center[0]
points[:,1] += center[1]
#print(points)
points[:,2] = scores
return points
def normalize_screen_coordinates(X, w, h):
assert X.shape[-1] == 2
# Normalize so that [0, w] is mapped to [-1, 1], while preserving the aspect ratio
return X / w * 2 - [1, h / w]
def get_rot_hands(pose3d):
hand_r_f = pose3d[26]
hand_r_b = pose3d[27]
hand_r_u = pose3d[28]
hand_l_f = pose3d[23]
hand_l_b = pose3d[24]
hand_l_u = pose3d[25]
# left hand
x = hand_l_f - hand_l_b
w = hand_l_u - hand_l_b
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
l_hand_rot = np.vstack((z, y, -x)).T
# right hand
x = hand_r_f - hand_r_b
w = hand_r_u - hand_r_b
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
r_hand_rot = np.vstack((z, y, -x)).T
r_hand_rot = R.from_matrix(r_hand_rot).as_quat()
l_hand_rot = R.from_matrix(l_hand_rot).as_quat()
return l_hand_rot, r_hand_rot
def get_rot_mediapipe(pose3d):
hip_left = pose3d[2]
hip_right = pose3d[3]
hip_up = pose3d[16]
foot_r_f = pose3d[20]
foot_r_b = pose3d[21]
foot_r_u = pose3d[22]
foot_l_f = pose3d[17]
foot_l_b = pose3d[18]
foot_l_u = pose3d[19]
# hip
x = hip_right - hip_left
w = hip_up - hip_left
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
hip_rot = np.vstack((x, y, z)).T
# left foot
x = foot_l_f - foot_l_b
w = foot_l_u - foot_l_b
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
l_foot_rot = np.vstack((x, y, z)).T
# right foot
x = foot_r_f - foot_r_b
w = foot_r_u - foot_r_b
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
r_foot_rot = np.vstack((x, y, z)).T
hip_rot = R.from_matrix(hip_rot).as_quat()
r_foot_rot = R.from_matrix(r_foot_rot).as_quat()
l_foot_rot = R.from_matrix(l_foot_rot).as_quat()
return hip_rot, l_foot_rot, r_foot_rot
def get_rot(pose3d):
## guesses
hip_left = 2
hip_right = 3
hip_up = 16
knee_left = 1
knee_right = 4
ankle_left = 0
ankle_right = 5
# hip
x = pose3d[hip_right] - pose3d[hip_left]
w = pose3d[hip_up] - pose3d[hip_left]
z = np.cross(x, w)
y = np.cross(z, x)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
hip_rot = np.vstack((x, y, z)).T
# right leg
y = pose3d[knee_right] - pose3d[ankle_right]
w = pose3d[hip_right] - pose3d[ankle_right]
z = np.cross(w, y)
if np.sqrt(sum(z**2)) < 1e-6:
w = pose3d[hip_left] - pose3d[ankle_left]
z = np.cross(w, y)
x = np.cross(y,z)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
leg_r_rot = np.vstack((x, y, z)).T
# left leg
y = pose3d[knee_left] - pose3d[ankle_left]
w = pose3d[hip_left] - pose3d[ankle_left]
z = np.cross(w, y)
if np.sqrt(sum(z**2)) < 1e-6:
w = pose3d[hip_right] - pose3d[ankle_left]
z = np.cross(w, y)
x = np.cross(y,z)
x = x/np.sqrt(sum(x**2))
y = y/np.sqrt(sum(y**2))
z = z/np.sqrt(sum(z**2))
leg_l_rot = np.vstack((x, y, z)).T
rot_hip = R.from_matrix(hip_rot).as_quat()
rot_leg_r = R.from_matrix(leg_r_rot).as_quat()
rot_leg_l = R.from_matrix(leg_l_rot).as_quat()
return rot_hip, rot_leg_l, rot_leg_r
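# --- Illustrative sketch (not part of the original module) ---
# Every get_rot* helper above builds an orthonormal frame the same way:
# one axis from two keypoints, a normal via a cross product with a rough
# "up" direction, then a third axis that completes a right-handed frame.
def _frame_from_points(origin, forward_pt, up_pt):
    x = forward_pt - origin          # primary axis
    w = up_pt - origin               # rough secondary direction
    z = np.cross(x, w)               # normal to the plane of x and w
    y = np.cross(z, x)               # re-orthogonalized second axis
    x, y, z = (v / np.linalg.norm(v) for v in (x, y, z))
    return np.vstack((x, y, z)).T    # columns are the frame axes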
|
11594901
|
import requests
def test_load():
response = requests.post(
"http://127.0.0.1:5000/data/load",
json={"File": "/mols_data/test_100.smi"}
)
assert response.status_code == 200
assert response.json() == {'status': True, 'msg': "Successfully loaded data!"}
def test_progress():
response = requests.get("http://127.0.0.1:5000/progress")
assert response.status_code == 200
assert response.json() == "current: 100, total: 100"
def test_count():
response = requests.post("http://127.0.0.1:5000/data/count")
assert response.status_code == 200
assert response.json() == 100
def test_search():
response = requests.post(
"http://127.0.0.1:5000/data/search",
json={"Mol": "Cc1ccc(cc1)S(=O)(=O)N"}
)
assert response.status_code == 200
assert len(response.json()) == 10
def test_drop():
response = requests.post("http://127.0.0.1:5000/data/drop")
assert response.status_code == 200
|
11594922
|
from pymtl import *
from lizard.util.rtl.interface import Interface, UseInterface
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.types import canonicalize_type
from lizard.bitutil import clog2, clog2nz
class OverlapCheckerInterface(Interface):
def __init__(s, base_width, max_size):
s.Base = canonicalize_type(base_width)
s.Size = canonicalize_type(clog2nz(max_size + 1))
super(OverlapCheckerInterface, s).__init__([
MethodSpec(
'check',
args={
'base_a': s.Base,
'size_a': s.Size,
'base_b': s.Base,
'size_b': s.Size,
},
rets={
'disjoint': Bits(1),
},
call=False,
rdy=False,
),
])
class OverlapChecker(Model):
def __init__(s, interface):
UseInterface(s, interface)
s.end_a = Wire(s.interface.Base)
s.end_b = Wire(s.interface.Base)
s.size_a_zext = Wire(s.interface.Base)
s.size_b_zext = Wire(s.interface.Base)
# Have to zext otherwise verilator warning
# E %Warning-WIDTH: OverlapChecker_0x3861fd4332620aca.v:38: Operator ADD expects 64 bits on the RHS, but RHS's VARREF 'check_size_a' generates 3 bits.
# E %Warning-WIDTH: Use "/* verilator lint_off WIDTH */" and lint_on around source to disable this message.
# E %Warning-WIDTH: OverlapChecker_0x3861fd4332620aca.v:39: Operator ADD expects 64 bits on the RHS, but RHS's VARREF 'check_size_b' generates 3 bits.
# E %Error: Exiting due to 2 warning(s)
@s.combinational
def compute_zext_sizes():
s.size_a_zext.v = zext(s.check_size_a, s.interface.Base.nbits)
s.size_b_zext.v = zext(s.check_size_b, s.interface.Base.nbits)
@s.combinational
def compute_ends():
# End exclusive
s.end_a.v = s.check_base_a + s.size_a_zext
s.end_b.v = s.check_base_b + s.size_b_zext
s.base_l = Wire(s.interface.Base)
s.end_s = Wire(s.interface.Base)
# @s.combinational
# def compute_smaller():
# if s.check_base_a < s.check_base_b:
# s.base_l.v = s.check_base_b
# s.end_s.v = s.end_a
# else:
# s.base_l.v = s.check_base_a
# s.end_s.v = s.end_b
@s.combinational
def compute_disjoint():
# Since end is exclusive, if it equals start we are still OK
s.check_disjoint.v = not (((s.check_base_a >= s.check_base_b) and
(s.check_base_a < s.end_b)) or
((s.check_base_b >= s.check_base_a) and
(s.check_base_b < s.end_a)))
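# --- Reference sketch (plain Python, not part of the RTL above) ---
# compute_disjoint checks two half-open intervals [base, base + size) for
# overlap; the same predicate in software (ignoring the modular wraparound
# a fixed-width hardware adder would have):
def _disjoint_sketch(base_a, size_a, base_b, size_b):
    end_a, end_b = base_a + size_a, base_b + size_b
    return not ((base_b <= base_a < end_b) or (base_a <= base_b < end_a))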
|
11594974
|
from typing import Dict, Any, List
import attr
import cassis
from cassis import load_cas_from_xmi, load_typesystem
# Types
JsonDict = Dict[str, Any]
# Data classes
@attr.s
class PredictionRequest:
cas: cassis.Cas = attr.ib()
layer: str = attr.ib()
feature: str = attr.ib()
project_id: str = attr.ib()
document_id: str = attr.ib()
user_id: str = attr.ib()
@attr.s
class TrainingRequest:
layer: str = attr.ib()
feature: str = attr.ib()
project_id: str = attr.ib()
_typesystem_xml: str = attr.ib()
_documents_json: List[Dict[str, str]] = attr.ib()
@property
def user_id(self) -> str:
return self._documents_json[0]["userId"]
@property
def documents(self) -> List["TrainingDocument"]:
        # We parse this lazily because, when training is already running, we may not need to parse it at all.
typesystem = load_typesystem(self._typesystem_xml)
training_documents = []
for document in self._documents_json:
cas = load_cas_from_xmi(document["xmi"], typesystem)
document_id = document["documentId"]
user_id = document["userId"]
training_documents.append(TrainingDocument(cas, document_id, user_id))
return training_documents
@attr.s
class TrainingDocument:
cas: cassis.Cas = attr.ib()
document_id: str = attr.ib()
user_id: str = attr.ib()
def parse_prediction_request(json_object: JsonDict) -> PredictionRequest:
metadata = json_object["metadata"]
document = json_object["document"]
layer = metadata["layer"]
feature = metadata["feature"]
project_id = metadata["projectId"]
typesystem = load_typesystem(json_object["typeSystem"])
cas = load_cas_from_xmi(document["xmi"], typesystem)
document_id = document["documentId"]
user_id = document["userId"]
return PredictionRequest(cas, layer, feature, project_id, document_id, user_id)
def parse_training_request(json_object: JsonDict) -> TrainingRequest:
metadata = json_object["metadata"]
layer = metadata["layer"]
feature = metadata["feature"]
project_id = metadata["projectId"]
typesystem_xml = json_object["typeSystem"]
documents_json = json_object["documents"]
return TrainingRequest(layer, feature, project_id, typesystem_xml, documents_json)
|
11594985
|
from yacs.config import CfgNode
def get_cfg() -> CfgNode:
from .default import _C
return _C.clone()
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
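# --- Illustrative usage sketch (hypothetical config path and override key) ---
#
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('--cfg', default='configs/base.yaml')  # hypothetical file
# parser.add_argument('opts', nargs=argparse.REMAINDER)
# args = parser.parse_args(['--cfg', 'configs/base.yaml', 'TRAIN.LR', '0.01'])
# cfg = update_config(get_cfg(), args)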
|
11595001
|
from boto.auth_handler import AuthHandler
from boto.auth_handler import NotReadyToAuthenticate
import oauth2_client
import oauth2_helper
class OAuth2Auth(AuthHandler):
capability = ['google-oauth2', 's3']
def __init__(self, path, config, provider):
if (provider.name == 'google'
and config.has_option('Credentials', 'gs_oauth2_refresh_token')):
self.oauth2_client = oauth2_helper.OAuth2ClientFromBotoConfig(config)
self.refresh_token = oauth2_client.RefreshToken(
self.oauth2_client,
config.get('Credentials', 'gs_oauth2_refresh_token'))
else:
raise NotReadyToAuthenticate()
def add_auth(self, http_request):
http_request.headers['Authorization'] = \
self.refresh_token.GetAuthorizationHeader()
|
11595037
|
from setuptools import setup, find_packages
setup(
name='FogLAMP',
python_requires='~=3.5',
version='0.1',
description='FogLAMP',
url='http://github.com/foglamp/FogLAMP',
author='OSIsoft, LLC',
author_email='<EMAIL>',
license='Apache 2.0',
# TODO: list of excludes (tests)
packages=find_packages(),
entry_points={
'console_scripts': [],
},
zip_safe=False
)
|
11595055
|
import numpy as np
# function to evaluate IOU between two boxes
def iou(bbox1, bbox2):
    # shape of both tensors is (num_box, 4)
# value in format (xmin, ymin, xmax, ymax)
xmin_inter = np.maximum(bbox1[..., 0], bbox2[..., 0])
ymin_inter = np.maximum(bbox1[..., 1], bbox2[..., 1])
xmax_inter = np.minimum(bbox1[..., 2], bbox2[..., 2])
ymax_inter = np.minimum(bbox1[..., 3], bbox2[..., 3])
    # note: inter can be negative when the boxes do not overlap; those cases
    # are zeroed out by the masks below
    inter = (xmax_inter - xmin_inter) * (ymax_inter - ymin_inter)
bb1_ar = (bbox1[..., 2] - bbox1[..., 0]) * (bbox1[..., 3] - bbox1[..., 1])
bb2_ar = (bbox2[..., 2] - bbox2[..., 0]) * (bbox2[..., 3] - bbox2[..., 1])
union_ar = bb1_ar + bb2_ar - inter
iou_res = inter/union_ar
iou_res[xmax_inter < xmin_inter] = 0
iou_res[ymax_inter < ymin_inter] = 0
iou_res[iou_res < 0] = 0
iou_res[iou_res > 1] = 0
return iou_res
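# --- Illustrative usage (hypothetical boxes) ---
if __name__ == '__main__':
    a = np.array([[0., 0., 10., 10.]])
    b = np.array([[5., 5., 15., 15.]])
    # intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175
    print(iou(a, b))  # -> [0.14285714]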
|
11595057
|
from __future__ import unicode_literals
import json
import time
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
)
class HypemIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
'info_dict': {
'id': '1v6ga',
'ext': 'mp3',
'title': 'Tame',
'uploader': 'BODYWORK',
}
}
def _real_extract(self, url):
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data))
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
html_tracks = self._html_search_regex(
r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>',
response, 'tracks')
try:
track_list = json.loads(html_tracks)
track = track_list['tracks'][0]
except ValueError:
raise ExtractorError('Hypemachine contained invalid JSON.')
key = track['key']
track_id = track['id']
title = track['song']
request = sanitized_Request(
'http://hypem.com/serve/source/%s/%s' % (track_id, key),
'', {'Content-Type': 'application/json'})
song_data = self._download_json(request, track_id, 'Downloading metadata')
final_url = song_data['url']
artist = track.get('artist')
return {
'id': track_id,
'url': final_url,
'ext': 'mp3',
'title': title,
'uploader': artist,
}
|
11595091
|
if __name__ == '__main__':
from yapkernel import kernelapp as app
app.launch_new_instance()
|
11595093
|
from floem import *
Inject1 = create_inject("inject1", "int", 10, "gen_func")
Inject2 = create_inject("inject2", "int", 10, "gen_func")
add = Add(configure=[Int])
inject1 = Inject1()
inject2 = Inject2()
inject1 >> add.inp1
inject2 >> add.inp2
t = APIThread("run", [], "int")
t.run(add)
t1 = InternalThread("t1")
t1.run(inject1)
t2 = InternalThread("t2")
t2.run(inject2)
c = Compiler()
c.include = r'''int gen_func(int i) { return i; }'''
c.testing = r'''
for(int i=0;i<10;i++)
out(run());
'''
c.generate_code_and_run([2*i for i in range(10)])
|
11595104
|
import random
from collections import namedtuple, deque
import torch
import numpy as np
from .Replay_Buffer import Replay_Buffer
class Action_Balanced_Replay_Buffer(Replay_Buffer):
"""Replay buffer that provides sample of experiences that have an equal number of each action being conducted"""
def __init__(self, buffer_size, batch_size, seed, num_actions):
self.num_actions = num_actions
self.buffer_size_per_memory = int(buffer_size / self.num_actions)
print("NUM ACTIONS ", self.num_actions)
self.memories = {action: deque(maxlen=self.buffer_size_per_memory) for action in range(self.num_actions)}
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def add_experience(self, states, actions, rewards, next_states, dones):
"""Adds experience or list of experiences into the replay buffer"""
if type(dones) == list:
assert type(dones[0]) != list, "A done shouldn't be a list"
experiences = [self.experience(state, action, reward, next_state, done)
for state, action, reward, next_state, done in
zip(states, actions, rewards, next_states, dones)]
for experience in experiences:
action = experience.action
self.memories[action].append(experience)
else:
experience = self.experience(states, actions, rewards, next_states, dones)
self.memories[actions].append(experience)
def pick_experiences(self, num_experiences=None):
"""Picks the experiences that the sample function will return as a random sample of experiences. It works by picking
an equal number of experiences that used each action (as far as possible)"""
if num_experiences: batch_size = num_experiences
else: batch_size = self.batch_size
batch_per_action = self.calculate_batch_sizes_per_action(batch_size)
samples_split_by_action = self.sample_each_action_equally(batch_per_action)
combined_sample = []
for key in samples_split_by_action.keys():
combined_sample.extend(samples_split_by_action[key])
return combined_sample
def calculate_batch_sizes_per_action(self, batch_size):
"""Calculates the batch size we need to randomly draw from each action to make sure there is equal coverage
per action and that the batch gets filled up"""
min_batch_per_action = int(batch_size / self.num_actions)
batch_per_action = {k: min_batch_per_action for k in range(self.num_actions)}
current_batch_size = np.sum([batch_per_action[k] for k in range(self.num_actions)])
remainder = batch_size - current_batch_size
give_remainder_to = random.sample(range(self.num_actions), remainder)
for action in give_remainder_to:
batch_per_action[action] += 1
return batch_per_action
def sample_each_action_equally(self, batch_per_action):
"""Samples a number of experiences (determined by batch_per_action) from the memory buffer for each action"""
samples = {}
for action in range(self.num_actions):
memory = self.memories[action]
batch_size_for_action = batch_per_action[action]
action_memory_size = len(memory)
assert action_memory_size > 0, "Need at least 1 experience for each action"
if action_memory_size >= batch_size_for_action:
samples[action] = random.sample(memory, batch_size_for_action)
else:
print("Memory size {} vs. required batch size {}".format(action_memory_size, batch_size_for_action))
samples_for_action = []
while len(samples_for_action) < batch_per_action[action]:
remainder = batch_per_action[action] - len(samples_for_action)
sampled_experiences = random.sample(memory, min(remainder, action_memory_size))
samples_for_action.extend(sampled_experiences)
samples[action] = samples_for_action
return samples
def __len__(self):
return np.sum([len(memory) for memory in self.memories.values()])
def sample_experiences_with_certain_actions(self, allowed_actions, num_all_actions, required_batch_size):
"""Samples a number of experiences where the action conducted was in the list of required actions"""
assert isinstance(allowed_actions, list)
assert len(allowed_actions) > 0
num_new_actions = len(allowed_actions)
experiences_to_sample = int(required_batch_size * float(num_all_actions) / float(num_new_actions))
experiences = self.sample(num_experiences=experiences_to_sample)
states, actions, rewards, next_states, dones = experiences
matching_indexes = np.argwhere((np.in1d(actions.numpy(), allowed_actions)))
assert matching_indexes.shape[1] == 1
matching_indexes = matching_indexes[:, 0]
states = states[matching_indexes]
actions = actions[matching_indexes]
rewards = rewards[matching_indexes]
next_states = next_states[matching_indexes]
dones = dones[matching_indexes]
assert abs(states.shape[0] - required_batch_size) <= 0.05*required_batch_size, "{} vs. {}".format(states.shape[0], required_batch_size)
return (states, actions, rewards, next_states, dones)
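# --- Illustrative sketch (standalone; mirrors calculate_batch_sizes_per_action) ---
# The batch is split as evenly as possible across actions and the remainder is
# handed to randomly chosen actions, e.g. batch_size=10 over 3 actions gives
# every action 3 samples plus one extra sample for a random action.
def _split_batch_sketch(batch_size, num_actions):
    per_action = {a: batch_size // num_actions for a in range(num_actions)}
    for a in random.sample(range(num_actions), batch_size % num_actions):
        per_action[a] += 1
    return per_action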
|
11595113
|
import json
from matrix.lambdas.daemons.notification import NotificationHandler
def notification_handler(event, context):
notification = json.loads(event["Records"][0]["body"])
assert ('bundle_uuid' in notification and 'bundle_version' in notification and 'event_type' in notification)
bundle_uuid = notification["bundle_uuid"]
bundle_version = notification["bundle_version"]
event_type = notification["event_type"]
notification_handler = NotificationHandler(bundle_uuid, bundle_version, event_type)
notification_handler.run()
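# --- Illustrative event shape (hypothetical values, mirroring the keys the
# handler asserts on) ---
# notification_handler({
#     "Records": [{"body": json.dumps({
#         "bundle_uuid": "some-uuid",
#         "bundle_version": "some-version",
#         "event_type": "CREATE",  # hypothetical event type
#     })}]
# }, None)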
|
11595119
|
from .encoder import CKKSEncoder # noqa: F401
from .encrypter import CKKSEncrypter # noqa: F401
from .plaintext import CKKSPlaintext # noqa: F401
|
11595188
|
def test():
if foo in bar:
do_something()
if foo not in bar:
do_something_else()
if foo or bar:
good()
if foo and bar:
bad()
|
11595200
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import os
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
if not demisto.params().get('useProxy', False):
    # pop() instead of del so a missing variable does not raise KeyError
    os.environ.pop('HTTP_PROXY', None)
    os.environ.pop('HTTPS_PROXY', None)
    os.environ.pop('http_proxy', None)
    os.environ.pop('https_proxy', None)
''' GLOBAL VARS '''
SERVER_URL_V1 = 'https://www.cymon.io:443/api/nexus/v1'
SERVER_DASHBOARD_URL_V1 = 'https://www.cymon.io:443/api/dashboard/v1'
SERVER_URL_V2 = 'https://api.cymon.io/v2/ioc/search'
VERIFY_CERTIFICATES = False if demisto.params().get('unsecure') else True
DEFAULT_HEADERS = {
"Content-Type": "application/json"
}
''' HELPER FUNCTIONS '''
def cymon_says():
return_error('Cymon service discontinued. Please disable or delete the integration instance.')
def http_request(method, url, headers):
try:
res = requests.request(method,
url,
verify=VERIFY_CERTIFICATES,
headers=headers)
if res.status_code == 200:
return res.json()
# 204 HTTP status code is returned when api rate limit has been exceeded
elif res.status_code == 204:
return_error("You've reached your API call quota.")
elif res.status_code == 404:
return {}
res.raise_for_status()
    except Exception:
        raise
''' DOMAIN COMMAND '''
# def get_domain_full_report(domain):
# report_results = []
#
# from_param = 0
# size_param = 10
# total = None
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# while total is None or total > from_param:
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# hits = response.get('hits', [])
# for hit in hits:
# timestamp = datetime.strptime(
# hit.get('timestamp', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%S.%fZ')
#
# report_results.append({
# 'Title': hit.get('title', "").title(),
# 'Feed': hit.get('feed'),
# 'Timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
# # Formatting the timestamp to human readable date and time
# 'Tags': hit.get('tags'),
# 'Hostname': hit.get('ioc', {}).get('hostname'),
# 'IP': hit.get('ioc', {}).get('ip'),
# 'Domain': hit.get('ioc', {}).get('domain'),
# 'Reported By': hit.get('reported_by'),
# 'Location': hit.get('location', {}).get('country')
# })
#
# from_param = from_param + size_param
# total = int(response.get('total', 0))
#
# url = '{}/{}/{}?from={}&size={}'.format(SERVER_URL_V2, 'domain', domain, from_param, size_param)
#
# return report_results
# def get_domain_report(domain_full_report):
# reports = {} # type:dict
#
# for report in domain_full_report:
# title = report.get('Title')
# timestamp = datetime.strptime(
# report.get('Timestamp', datetime.now().strftime("%Y-%m-%d %H:%M:%S")), '%Y-%m-%d %H:%M:%S')
#
# if (title in reports and reports.get(title).get('Timestamp') < timestamp) or title not in reports: # type: ignore
# reports.update({title: {
# 'Feed': report.get('Feed'),
# 'Timestamp': timestamp,
# 'Tags': report.get('Tags'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP'),
# 'Domain': report.get('Domain'),
# 'Reported By': report.get('Reported By'),
# 'Location': report.get('Location')
# }})
#
# report_results = []
#
# for report in reports:
# report_results.append({
# 'Title': report,
# 'Feed': reports.get(report).get('Feed'), # type: ignore
# 'Timestamp': reports.get(report).get('Timestamp').strftime("%Y-%m-%d %H:%M:%S"), # type: ignore
# # Formatting the timestamp to human readable date and time
# 'Tags': reports.get(report).get('Tags'), # type: ignore
# 'Hostname': reports.get(report).get('Hostname'), # type: ignore
# 'IP': reports.get(report).get('IP'), # type: ignore
# 'Domain': reports.get(report).get('Domain'), # type: ignore
# 'Reported By': reports.get(report).get('Reported By'), # type: ignore
# 'Location': reports.get(report).get('Location') # type: ignore
# })
#
# return {
# 'reports': report_results,
# 'total': len(domain_full_report)
# }
# def create_domain_command_markdown(domain, total_hits, reports, domain_full_report, is_full_response):
# md = '## Cymon Domain report for: {}\n'.format(domain)
#
# md += '\n'
#
# md += '**Total Hits:** {}'.format(total_hits)
#
# md += '\n'
#
# md += tableToMarkdown("The following reports are the latest malicious hits resolved to the given domain:", reports,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By', 'Domain'])
#
# if is_full_response:
# md += tableToMarkdown("Full report list:", domain_full_report,
# ['Title', 'Hostname', 'IP', 'Timestamp', 'Feed', 'Tags', 'Location', 'Reported By',
# 'Domain'])
#
# return md
# def create_context_domain_command(domain, reports):
# cymon_domain_context_activities = []
# description = 'Reported suspicious activities: '
#
# for report in reports:
# cymon_domain_context_activities.append({
# 'Title': report.get('Title'),
# 'Tags': report.get('Tags'),
# 'Time': report.get('Timestamp'),
# 'Hostname': report.get('Hostname'),
# 'IP': report.get('IP')
# })
#
# description += '{}, '.format(report.get('Title'))
#
# description = description[:-2]
#
# context = {
# outputPaths['domain']: {
# 'Name': domain,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# },
# 'Cymon': {
# 'Domain': {
# 'Activities': cymon_domain_context_activities
# }
# }
# }
#
# return context
# def get_domain_report_command():
# args = demisto.args()
#
# domain = args.get('domain')
# is_full_response = args.get('fullResponse') == 'true'
#
# domain_full_report = get_domain_full_report(domain)
# domain_summarized_report = get_domain_report(domain_full_report)
#
# if len(domain_full_report) == 0:
# return "Domain " + domain + " is not in Cymons's dataset"
#
# markdown = create_domain_command_markdown(domain, domain_summarized_report.get('total'),
# domain_summarized_report.get('reports'), domain_full_report,
# is_full_response)
# context = create_context_domain_command(domain, domain_summarized_report.get('reports'))
#
# return {
# 'Type': entryTypes['note'],
# 'Contents': domain_full_report,
# 'ContentsFormat': formats['json'],
# 'HumanReadable': markdown,
# 'EntryContext': context
# }
''' IP COMMAND '''
# def get_ip_events_sources(ip):
# url = '{}/{}/{}'.format(SERVER_URL_V1, 'ip', ip)
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# return response.get('sources', None)
# def get_ip_events(ip):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'events', 100)
# events = {} # type:dict
#
# next_link = url
#
# while next_link is not None:
# response = http_request('GET', next_link, DEFAULT_HEADERS)
#
# for event in response.get('results', []):
# tag = event.get('tag')
# date = datetime.strptime(
# event.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# if (tag in events and events[tag] < date) or tag not in events:
# events.update({tag: date})
#
# next_link = response.get('next')
#
# for event in events:
# events[event] = events[event].strftime(
# "%Y-%m-%d %H:%M:%S") # Formatting the timestamp to human readable date and time
#
# return events
# def get_ip_location(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'geolocation', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# lon = response.get('longitude', None)
# lat = response.get('latitude', None)
#
# if not lon or not lat:
# return {}
# else:
# return {
# 'lon': lon,
# 'lat': lat
# }
# def get_ip_domains(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'domains', max_len)
# domains = []
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for domain in response.get('results', []):
# date = datetime.strptime(
# domain.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")), '%Y-%m-%dT%H:%M:%SZ')
#
# domains.append({'Hostname': domain.get('name'),
# 'Last Resolved': date.strftime("%Y-%m-%d %H:%M:%S")})
#
# return domains
# def get_ip_urls(ip, max_len):
# url = '{}/{}/{}/{}?limit={}'.format(SERVER_URL_V1, 'ip', ip, 'urls', max_len)
# urls = {} # type:dict
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# for response_url in response.get('results', []):
# url = response_url.get('location')
# if url.endswith("/"):
# url = url[:-1]
#
# date = datetime.strptime(
# response_url.get('updated', datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")),
# '%Y-%m-%dT%H:%M:%SZ')
#
# if (url in urls and urls[url] < date) or url not in urls:
# urls.update({url: date})
#
# urls_result = []
# for url in urls:
# urls_result.append({'Url': url, "Last Resolved": urls[url].strftime(
# "%Y-%m-%d %H:%M:%S")}) # Formatting the timestamp to human readable date and time
#
# return urls_result
# def get_ip_asn(ip):
# url = '{}/{}/{}'.format(SERVER_DASHBOARD_URL_V1, 'ipwhois', ip)
#
# response = http_request('GET', url, DEFAULT_HEADERS)
#
# asn = response.get('asn')
# asn_country_code = response.get('asn_country_code')
#
# if not asn or not asn_country_code:
# return {}
# else:
# return {
# 'asn': asn,
# 'country': asn_country_code
# }
# def create_ip_command_markdown(ip, sources, events, domains, urls, asn):
# md = '## Cymon IP report for: {}\n'.format(ip)
#
# if asn:
# md += 'ASN: **{}** ({})\n'.format(asn.get('asn'), asn.get('country'))
#
# md += '\n'
#
# if events:
# md += '### Reports\n'
# for event in events:
# md += '**{}** (Last reported on: {})\n'.format(event.title(), events[event])
#
# if sources:
# md += '#### Sources\n'
# for source in sources:
# md += '{}\n'.format(source)
#
# if domains and len(domains) > 0:
# md += tableToMarkdown("The following domains were resolved to the given IP address:", domains)
#
# if urls and len(urls) > 0:
# md += tableToMarkdown("The following urls were resolved to the given IP address:", urls)
#
# return md
# def create_ip_command_context(ip, asn, events, domains):
# if events:
# description = 'Reported suspicious activities: '
#
# for event in events:
# description += '{}, '.format(event)
#
# description = description[:-2]
# else:
# description = 'No suspicious activities were reported'
#
# asn_in_context = {} # type:dict
#
# if asn:
# asn_in_context = {
# 'ASN': asn.get('asn'),
# 'Geo': {
# 'Country': asn.get('country')
# }
# }
#
# context = {'Cymon': {
# 'IP': {
# 'Domains': domains
# }
# }, outputPaths['ip']: {
# 'Address': ip,
# 'Malicious': {
# 'Vendor': 'Cymon',
# 'Description': description
# }
# }}
#
# context[outputPaths['ip']].update(asn_in_context)
#
# return context
# def get_ip_report_command():
# args = demisto.args()
#
# full_response = args.get('fullResponse') == 'true'
#
# ip = args.get('ip')
# if not is_ip_valid(ip):
#         return_error('An invalid IP was specified')
#
# sources = get_ip_events_sources(ip)
#
# if not sources:
# return "IP " + ip + " is not in Cymons's dataset"
#
# if full_response:
# max_len = 1000
# else:
# max_len = 50
#
# events = get_ip_events(ip)
# location = get_ip_location(ip)
# domains = get_ip_domains(ip, max_len)
# urls = get_ip_urls(ip, max_len)
# asn = get_ip_asn(ip)
#
# markdown = create_ip_command_markdown(ip, sources, events, domains, urls, asn)
# context = create_ip_command_context(ip, asn, events, domains)
#
# return [
# {
# 'Type': entryTypes['map'],
# 'Contents': {
# 'lat': float(location.get('lat')),
# 'lng': float(location.get('lon'))
# },
# 'ContentsFormat': formats['json']
# },
# {
# 'Type': entryTypes['note'],
# 'Contents': {
# 'events': events,
# 'sources': sources,
# 'location': location,
# 'domains': domains,
# 'urls': urls,
# 'asn': asn
# },
# 'HumanReadable': markdown,
# 'EntryContext': context,
# 'ContentsFormat': formats['json']
# }]
''' EXECUTION CODE '''
try:
command = demisto.command()
if command == 'test-module':
demisto.results('Cymon has been Deprecated and is no longer in service. Please delete the instance.')
elif command == 'ip':
cymon_says()
elif command == 'domain':
cymon_says()
except Exception:
raise
|
11595205
|
import json
from vcx.api.connection import Connection
from utils import init_vcx, run_coroutine_in_new_loop
from connection import BaseConnection
class Inviter(BaseConnection):
async def start(self):
await init_vcx()
print("Create a connection to alice and print out the invite details")
connection_ = await Connection.create('alice')
await connection_.connect('{"use_public_did": true}')
await connection_.update_state()
details = await connection_.invite_details(False)
print("**invite details**")
print(json.dumps(details))
print("******************")
self.connection_data = await connection_.serialize()
connection_.release()
return json.dumps(details)
def connect(self):
run_coroutine_in_new_loop(self.update_state)
|
11595219
|
import sys
sys.path.append('..')
import unittest
import random
from armoryengine.ALL import *
class SigningTester(unittest.TestCase):
def testLowSig(self):
sbdPrivKey = SecureBinaryData(b'\x01'*32)
pub = CryptoECDSA().ComputePublicKey(sbdPrivKey).toBinStr()
for i in range(100):
msg = "some random msg %s" % random.random()
sbdSig = CryptoECDSA().SignData(SecureBinaryData(msg), sbdPrivKey, False)
binSig = sbdSig.toBinStr()
derSig = createDERSigFromRS(binSig[:32], binSig[32:])
r, s = getRSFromDERSig(derSig)
j = binary_to_int(s, BIGENDIAN)
self.assertTrue( j <= SECP256K1_ORDER / 2)
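# --- Context sketch (general ECDSA fact, not specific to armoryengine) ---
# (r, s) and (r, N - s) verify against the same key and message, so low-S
# signing folds s into the lower half of the curve order to keep signatures
# canonical; the test above checks that SignData with the low-S flag does this.
def _normalize_low_s(s_int, order=SECP256K1_ORDER):
    return order - s_int if s_int > order // 2 else s_int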
|
11595223
|
from types import MethodType
import onnx
import torch
from torch.onnx import OperatorExportTypes
from onnxsim import simplify
import argparse
import io
import sys
import torch.nn as nn
sys.path.insert(0, '.')
from configs import add_centernet_config
from detectron2.config import get_cfg
from inference.centernet import build_model
from detectron2.checkpoint import DetectionCheckpointer
from fvcore.common.file_io import PathManager
def centerX_forward(self, x):
x = self.normalizer(x / 255.)
y = self._forward(x)
    fmap_max = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)(y['cls'])
    # keypoint NMS via max-pooling: keep is ~1 where a location equals its
    # local 3x3 maximum and ~0 elsewhere, so only local peaks survive
    keep = (y['cls'] - fmap_max).float() + 1e-9
    keep = nn.ReLU()(keep)
    keep = keep * 1e9
    result = y['cls'] * keep
ret = [result,y['reg'],y['wh']] ## change dict to list
return ret
def load_model(config_file,model_path):
cfg = get_cfg()
add_centernet_config(cfg)
cfg.merge_from_file(config_file)
forward = {'centerX': centerX_forward}
# model
model = build_model(cfg)
model.forward = MethodType(forward['centerX'], model)
DetectionCheckpointer(model).load(model_path)
model.eval()
model.cuda()
return model
def get_parser():
parser = argparse.ArgumentParser(description="Convert Pytorch to ONNX model")
parser.add_argument(
"--config-file",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--model-path",
metavar="FILE",
help="path to model",
)
parser.add_argument(
"--name",
default="baseline",
help="name for converted model"
)
parser.add_argument(
"--output",
default='onnx_model',
help='path to save converted onnx model'
)
parser.add_argument(
"--input_w",
default=640,
type=int,
help='image_width'
)
parser.add_argument(
"--input_h",
default=384,
type=int,
help='image_height'
)
return parser
def remove_initializer_from_input(model):
if model.ir_version < 4:
        print(
            'Model with ir_version below 4 requires the initializer to be included in the graph input'
        )
        return model
inputs = model.graph.input
name_to_input = {}
for input in inputs:
name_to_input[input.name] = input
for initializer in model.graph.initializer:
if initializer.name in name_to_input:
inputs.remove(name_to_input[initializer.name])
return model
def export_onnx_model(model, inputs):
"""
Trace and export a model to onnx format.
Args:
model (nn.Module):
inputs (torch.Tensor): the model will be called by `model(*inputs)`
Returns:
an onnx model
"""
assert isinstance(model, torch.nn.Module)
# make sure all modules are in eval mode, onnx may change the training state
# of the module if the states are not consistent
def _check_eval(module):
assert not module.training
model.apply(_check_eval)
# Export the model to ONNX
with torch.no_grad():
with io.BytesIO() as f:
torch.onnx.export(
model,
inputs,
f,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
# verbose=True, # NOTE: uncomment this for debugging
# export_params=True,
)
onnx_model = onnx.load_from_string(f.getvalue())
# Apply ONNX's Optimization
all_passes = onnx.optimizer.get_available_passes()
passes = ["extract_constant_to_initializer", "eliminate_unused_initializer", "fuse_bn_into_conv"]
assert all(p in all_passes for p in passes)
onnx_model = onnx.optimizer.optimize(onnx_model, passes)
return onnx_model
if __name__ == '__main__':
args = get_parser().parse_args()
model = load_model(args.config_file, args.model_path)
inputs = torch.randn(1, 3, args.input_h, args.input_w).cuda()
onnx_model = export_onnx_model(model, inputs)
model_simp, check = simplify(onnx_model)
model_simp = remove_initializer_from_input(model_simp)
assert check, "Simplified ONNX model could not be validated"
PathManager.mkdirs(args.output)
onnx.save_model(model_simp, f"{args.output}/{args.name}.onnx")
print(f"Export onnx model in {args.output} successfully!")
|
11595264
|
import os
from pathlib import Path
project_dir = os.path.split(os.path.dirname(__file__))[0]
project_dir_path = Path(project_dir)
src_dir = os.path.join(project_dir, "src")
src_dir_path = Path(src_dir)
ch_src_dir = lambda: os.chdir(src_dir)
|
11595281
|
import socket, ssl
from binascii import hexlify, unhexlify
dump = open("everything.dump", "wb")
serverDump = open("fromserver.dump", "wb")
clientDump = open("fromclient.dump", "wb")
listensocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listensocket.bind(("127.0.0.1", 5222))
listensocket.listen(1) # 1 for now - you can add more if you want multiple clients but we only need one
clientsocket, clientaddress = listensocket.accept()
xmpp_msg1 = clientsocket.recv(8192)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.connect( ("10.11.1.25", 5222) )
serversocket.sendall(xmpp_msg1)
xmpp_msg2 = serversocket.recv(8192)
clientsocket.sendall(xmpp_msg2)
xmpp_msg3 = clientsocket.recv(8192)
serversocket.sendall(xmpp_msg3)
xmpp_msg4 = serversocket.recv(8192)
tlsclient = ssl.wrap_socket(clientsocket, keyfile="host.key", certfile="host.crt", server_side=True, cert_reqs=ssl.CERT_NONE)
tlsclient.setblocking(0)
tlsclient.settimeout(0.5)
tlsserver = ssl.wrap_socket(serversocket, server_side=False, cert_reqs=ssl.CERT_NONE)
tlsserver.setblocking(0)
tlsserver.settimeout(0.5)
while 1:
try:
msg_from_client = tlsclient.recv(8192)
dump.write ( "\n[+] Received from client:\n%s" % str(msg_from_client) )
clientDump.write( str(msg_from_client)+"\n" )
tlsserver.sendall(msg_from_client)
except socket.error as socket_exception:
if "timed out" not in str(socket_exception):
print "\n[+] Error receiving data from client\n%s" % str(socket_exception)
try:
msg_from_server = tlsserver.recv(8192)
dump.write( "\n[*] Received from server1:\n%s" % str(msg_from_server) )
serverDump.write( str(msg_from_server)+"\n" )
tlsclient.sendall(msg_from_server)
clientDump.flush()
serverDump.flush()
except socket.error as socket_exception:
if "timed out" not in str(socket_exception):
print "\n[+] Error receiving data from server\n%s" % str(socket_exception)
|
11595290
|
import h5py
import json
import sys
import cv2
import os.path as osp
import numpy as np
from tools import Timer
from config import CAFFE_ROOT, DATA_ROOT
sys.path.insert(0, osp.join(CAFFE_ROOT, 'python'))
import caffe
BS = 1
def cnn_patch_feature(net, transformer, image, bbox, include_whole_image=False):
im_list = [image] if include_whole_image else []
for j in xrange(bbox.shape[0]):
h1, w1, h2, w2 = bbox[j, :] # (h1, w1, h2, w2)
im_list.append(image[h1:h2, w1:w2, :])
im_list = [transformer.preprocess('data', im)[np.newaxis, :, :, :] for im in im_list]
feat = []
for i in xrange(0, bbox.shape[0], BS):
net.blobs['data'].data[...] = np.concatenate(im_list[i:i+BS], axis=0)
net.forward()
feat.append(net.blobs['pool5'].data.reshape([BS, -1]))
return np.vstack(feat)
def cnn_whole_feature(net, transformer, image):
im = transformer.preprocess('data', image)[np.newaxis, :, :, :]
net.blobs['data'].data[...] = im
net.forward()
feat = net.blobs['pool5'].data
return feat.flatten()
def load_network(proto_txt, caffe_model, device):
if 'gpu' in device:
caffe.set_mode_gpu()
device_id = int(device.split('gpu')[-1])
caffe.set_device(device_id)
else:
caffe.set_mode_cpu()
# load network
net = caffe.Net(proto_txt, caffe_model, caffe.TEST)
# tansformer
mu = np.load(osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet_mean.npy'))
mu = mu.mean(1).mean(1)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1)) # move image channels to outermost dimension
transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2, 1, 0)) # swap channels from RGB to BGR
# reshape input
net.blobs['data'].reshape(BS, 3, 224, 224)
return net, transformer
def _script_for_patch_feature(head, tail, device, dataset='mscoco', data_split='train'):
data_dir = osp.join(DATA_ROOT, dataset)
# set the model path here
prototxt = osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet-152-deploy.prototxt')
caffemodel = osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet-152-model.caffemodel')
# load network
net, transformer = load_network(prototxt, caffemodel, device)
# prepare image files
cap_file = osp.join(data_dir, 'captions_{}.json'.format(data_split))
image_ids = json.load(open(cap_file, 'r'))['image_ids']
im_files = ['COCO_{}2014_'.format(data_split) + str(i).zfill(12) + '.jpg' for i in image_ids]
im_files = [osp.join(data_dir, data_split+'2014', i) for i in im_files]
with h5py.File(osp.join(data_dir, 'bbox.h5'), 'r') as f:
bbox = np.array(f[data_split])
# initialize h5 file
save_file = osp.join(data_dir, 'features', 'features_30res.h5')
with h5py.File(save_file) as f:
if data_split not in f:
f.create_dataset(data_split, [len(im_files), 30, 2048], 'float32')
# computing
timer = Timer()
print '\n\n... computing'
for i in xrange(head, tail):
timer.tic()
im = caffe.io.load_image(im_files[i]) # (h, w, c)
with h5py.File(save_file) as f:
feat = cnn_patch_feature(net, transformer, im, bbox[i, :, :])
f[data_split][i, :] = feat
print '[{:d}] {} [{:.3f} sec]'.format(i, osp.split(im_files[i])[-1], timer.toc())
if __name__ == '__main__':
_script_for_patch_feature(0, 82783, 'gpu0', 'mscoco', 'train')
|