id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
140190
|
from subprocess import Popen
from subprocess import PIPE, STDOUT
from scripts.os_check import ADB, SUC, SEP
print("Removing lockscreen ...")
Popen([ADB, 'shell', SUC, 'rm', '/data/system/*.key'], stdout=PIPE, stderr=STDOUT).stdout.read().decode('UTF-8')
Popen([ADB, 'shell', SUC, 'rm', '/data/system/locksettings.*'], stdout=PIPE, stderr=STDOUT).stdout.read()\
.decode('UTF-8')
|
140206
|
from django.contrib import messages
from django.db import IntegrityError
from django.shortcuts import redirect
from .models import Subscriber
from .tasks import async_send_newsletter
def subscribe(request):
    """Subscribe the posted email address to the newsletter.

    On POST: creates a confirmed Subscriber and triggers the async newsletter
    task; shows an error if the address is already subscribed.  Any other
    method just redirects home.
    """
    if request.method != 'POST':
        return redirect('home')
    email = request.POST.get('email')
    # BUG FIX: the original compared a Subscriber instance to a QuerySet
    # (`subscriber == Subscriber.objects.filter(...)`), which is always False,
    # so the duplicate check never fired.  Use an existence query instead.
    if Subscriber.objects.filter(email=email).exists():
        messages.error(request, 'You are already subscribed to our newsletter!')
        return redirect('home')
    try:
        Subscriber(email=email, confirmed=True).save()
        async_send_newsletter.delay()
        messages.success(request, 'You have been subscribed to our newsletter!')
    except IntegrityError:
        # Race: another request inserted the same email between the check
        # and the save; report it as an existing subscription.
        messages.error(request, 'You are already subscribed to our newsletter!')
    return redirect('home')
def unsubscribe(request):
    """Remove every subscriber matching the ``email`` query parameter.

    BUG FIX: the original used ``objects.get(...)`` (a single instance) and
    then iterated over it, raising TypeError; ``request.GET['email']`` also
    raised KeyError when the parameter was missing.  ``filter`` handles zero,
    one, or many matches uniformly.
    """
    subscribers = Subscriber.objects.filter(email=request.GET.get('email'))
    for subscriber in subscribers:
        subscriber.delete()
    messages.success(request, 'You have successfully unsubscribed from our newsletter!')
    return redirect('home')
|
140214
|
from django.urls import path
from .schema import schema
from .views import AsyncGraphQLView
# Route GraphQL requests to the async view bound to the project schema.
urlpatterns = [
    path("graphql", AsyncGraphQLView.as_view(schema=schema)),
]
|
140216
|
from fints2ledger.transaction_retriever import TRetriever
from fints.client import FinTS3PinTanClient
from fints2ledger.csv_converter import CsvConverter
from mt940.models import Date
class Fints2Csv:
    """Fetch FinTS/HBCI transactions for the configured account and write
    them to the configured CSV file."""

    def __init__(self, config):
        # Nested config dict with "fints" (credentials/format) and "files" keys.
        self.config = config

    def retrieveAndSave(self):
        """Download all transactions since the configured start date and
        save them as CSV (headline first, one converted row per transaction)."""
        fints_cfg = self.config["fints"]
        client = FinTS3PinTanClient(
            fints_cfg["blz"],         # Your bank's BLZ
            fints_cfg["account"],     # your account number
            fints_cfg["password"],
            # e.g. 'https://fints.ing-diba.de/fints/'
            fints_cfg["endpoint"],
        )
        retriever = TRetriever(client, fints_cfg["selectedAccount"])
        converter = CsvConverter(fints_cfg["csv_separator"], fints_cfg["csv_date_format"])
        transactions = retriever.get_hbci_transactions(fints_cfg["start"], Date.today())
        rows = [converter.convert(transaction) for transaction in transactions]
        with open(self.config["files"]["csv_file"], 'w') as out:
            out.write(converter.get_headline())
            out.write("\n")
            out.write("\n".join(rows))
|
140232
|
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.segan.segan_model as segan
from awesome_gans.datasets import MNISTDataSet
# Output locations for generated sample images and saved model weights.
results = {'output': './gen_img/', 'checkpoint': './model/checkpoint', 'model': './model/SEGAN-model.ckpt'}
# Training schedule: total optimization steps and logging/sampling cadence.
train_step = {
    'global_step': 150001,
    'logging_interval': 1500,
}
def main():
    """Train the SEGAN model (TF1 graph/session API) on MNIST.

    Alternates discriminator/generator updates every step; every
    ``logging_interval`` steps it logs losses, writes summaries, saves
    sample image grids, and checkpoints the model.

    NOTE(review): the "UrbanSound8K" comment below is stale — the code
    actually loads MNIST.
    """
    start_time = time.time()  # Clocking start
    # UrbanSound8K Dataset load
    mnist = MNISTDataSet().data
    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # CoGAN Model
        model = segan.SEGAN(s)
        # Initializing
        s.run(tf.global_variables_initializer())
        sample_x, _ = mnist.test.next_batch(model.sample_num)
        # One-hot labels: one block of 10 samples per class for the fixed grid.
        sample_y = np.zeros(shape=[model.sample_num, model.n_classes])
        for i in range(10):
            sample_y[10 * i : 10 * (i + 1), i] = 1
        for step in range(train_step['global_step']):
            batch_x, batch_y = mnist.train.next_batch(model.batch_size)
            batch_x = np.reshape(batch_x, model.image_shape)
            batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
            # Update D network
            _, d_loss = s.run(
                [model.d_op, model.d_loss],
                feed_dict={
                    model.x_1: batch_x,
                    model.x_2: batch_x,
                    # model.y: batch_y,
                    model.z: batch_z,
                },
            )
            # Update G network
            _, g_loss = s.run(
                [model.g_op, model.g_loss],
                feed_dict={
                    model.x_1: batch_x,
                    model.x_2: batch_x,
                    # model.y: batch_y,
                    model.z: batch_z,
                },
            )
            if step % train_step['logging_interval'] == 0:
                # Re-evaluate losses on a fresh batch together with summaries.
                batch_x, batch_y = mnist.train.next_batch(model.batch_size)
                batch_x = np.reshape(batch_x, model.image_shape)
                batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
                d_loss, g_loss, summary = s.run(
                    [model.d_loss, model.g_loss, model.merged],
                    feed_dict={
                        model.x_1: batch_x,
                        model.x_2: batch_x,
                        # model.y: batch_y,
                        model.z: batch_z,
                    },
                )
                # Print loss
                print("[+] Step %08d => " % step, " D loss : {:.8f}".format(d_loss), " G loss : {:.8f}".format(g_loss))
                sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
                # Training G model with sample image and noise
                samples_1 = s.run(
                    model.g_sample_1,
                    feed_dict={
                        # model.y: sample_y,
                        model.z: sample_z,
                    },
                )
                samples_2 = s.run(
                    model.g_sample_2,
                    feed_dict={
                        # model.y: sample_y,
                        model.z: sample_z,
                    },
                )
                samples_1 = np.reshape(samples_1, [-1] + model.image_shape[1:])
                samples_2 = np.reshape(samples_2, [-1] + model.image_shape[1:])
                # Summary saver
                model.writer.add_summary(summary, global_step=step)
                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir_1 = results['output'] + 'train_1_{:08d}.png'.format(step)
                sample_dir_2 = results['output'] + 'train_2_{:08d}.png'.format(step)
                # Generated image save
                iu.save_images(samples_1, size=[sample_image_height, sample_image_width], image_path=sample_dir_1)
                iu.save_images(samples_2, size=[sample_image_height, sample_image_width], image_path=sample_dir_2)
                # Model save
                model.saver.save(s, results['model'], global_step=step)
    end_time = time.time() - start_time  # Clocking end
    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))
    # Close tf.Session
    # NOTE(review): redundant — the with-block above already closed the session.
    s.close()
if __name__ == '__main__':
    main()
|
140243
|
# Iteration counts for the cheap (interactive) vs. exact TPS fitting passes.
N_ITER_CHEAP = 10
N_ITER_EXACT = 50
EM_ITER_CHEAP = 1
# Regularization coefficient pairs; presumably (initial, final) bend
# coefficients for the annealed solve — TODO confirm against the solver.
DEFAULT_LAMBDA = (.1, .001)
# Caps on point-cloud size and trajectory length fed to the optimizer.
MAX_CLD_SIZE = 150
MAX_TRAJ_LEN = 100
EXACT_LAMBDA = (10, .001)
DATA_DIM = 3
#DS_SIZE = 0.03 # for fig8
DS_SIZE = 0.025 # for overhand
N_STREAMS = 10
DEFAULT_NORM_ITERS = 10
BEND_COEF_DIGITS = 6
GRIPPER_OPEN_CLOSE_THRESH = 0.04 # 0.07 for thick rope...
# Allow a site-local settings module to override any of the defaults above.
try:
    from lfd_settings.tpsopt.settings import *
except ImportError:
    pass
|
140244
|
def policy_threshold(threshold, belief, loc):
    """
    chooses whether to switch side based on whether the belief
    on the current site drops below the threshold
    Args:
        threshold (float): the threshold of belief on the current site,
            when the belief is lower than the threshold, switch side
        belief (numpy array of float, 2-dimensional): the belief on the
            two sites at a certain time
        loc (int) : the location of the agent at a certain time
            -1 for left side, 1 for right side
    Returns:
        act (string): "stay" or "switch"
    """
    # Map the location code onto a belief index: loc=-1 -> 0, loc=1 -> 1.
    current_site = (loc + 1) // 2
    # Switch once belief in the current site has dropped to the threshold.
    return "switch" if belief[current_site] <= threshold else "stay"
# NOTE(review): test_policy_threshold is not defined in this chunk — confirm
# it is provided elsewhere (e.g. a notebook helper) before running this module.
test_policy_threshold()
|
140261
|
from enum import Enum
from .fileEntryData import FileEntryData
class eFileTypes(Enum):
    """Kinds of entries a FileEntry can represent; the integer value is
    what gets stored on FileEntry.type."""
    NOTHING = 0
    MY_FOLDER = 1
    PUBLIC = 2
    COMPANY = 3
    MODELS_PATH = 4
    SHARED_WITH_ME = 5
class FileEntry(object):
    """A displayable file-tree entry: visibility flag, label text, the
    numeric file-type code, and its associated FileEntryData payload."""

    def __init__(self, show=True, text="", type=eFileTypes.NOTHING, data=None):
        # BUG FIX: the original default `data=FileEntryData()` was evaluated
        # once at definition time, so every FileEntry created without an
        # explicit payload shared (and could mutate) the same object.
        # Use None as the sentinel and build a fresh instance per entry.
        self.show = show
        self.text = text
        # Store the enum's integer value, not the enum member itself.
        self.type = type.value
        self.data = data if data is not None else FileEntryData()
|
140270
|
import os
import time
import unittest
from numpy.testing import assert_almost_equal
from supervised.tuner.time_controller import TimeController
class TimeControllerTest(unittest.TestCase):
    """Tests for TimeController: JSON round-tripping and stacking budget."""

    def test_to_and_from_json(self):
        """Logged step/model times survive a to_json/from_json round trip."""
        controller = TimeController(
            start_time=time.time(),
            total_time_limit=10,
            model_time_limit=None,
            steps=["simple_algorithms"],
            algorithms=["Baseline"],
        )
        controller.log_time("1_Baseline", "Baseline", "simple_algorithms", 123.1)
        restored = TimeController.from_json(controller.to_json())
        assert_almost_equal(restored.step_spend("simple_algorithms"), 123.1)
        assert_almost_equal(restored.model_spend("Baseline"), 123.1)

    def test_enough_time_for_stacking(self):
        """Stacking is permitted only while enough of the total budget remains."""
        steps = [
            "default_algorithms",
            "not_so_random",
            "golden_features",
            "insert_random_feature",
            "features_selection",
            "hill_climbing_1",
            "hill_climbing_3",
            "hill_climbing_5",
            "ensemble",
            "stack",
            "ensemble_stacked",
        ]
        logged_fits = [
            ("1_Xgboost", "default_algorithms"),
            ("2_Xgboost", "not_so_random"),
            ("3_Xgboost", "insert_random_feature"),
            ("4_Xgboost", "features_selection"),
            ("5_Xgboost", "hill_climbing_1"),
            ("6_Xgboost", "hill_climbing_2"),
            ("7_Xgboost", "hill_climbing_3"),
        ]
        for model_time in [5, 10, 20]:
            controller = TimeController(
                start_time=time.time(),
                total_time_limit=100,
                model_time_limit=None,
                steps=steps,
                algorithms=["Xgboost"],
            )
            for model_name, step_name in logged_fits:
                controller.log_time(model_name, "Xgboost", step_name, model_time)
            # Pretend the seven fits above consumed real wall-clock time too.
            controller._start_time = time.time() - 7 * model_time
            assert_almost_equal(controller.already_spend(), 7 * model_time)
            if model_time < 20:
                self.assertTrue(controller.enough_time("Xgboost", "stack"))
            else:
                self.assertFalse(controller.enough_time("Xgboost", "stack"))
            self.assertTrue(controller.enough_time("Ensemble_Stacked", "ensemble_stacked"))
|
140313
|
# Minimal NICOS startup setup: only declares the cache server connection.
description = 'minimal NICOS startup setup'
group = 'lowlevel'
sysconfig = dict(
    # host:port of the NICOS cache server for this instrument.
    cache = 'tofhw.toftof.frm2:14869',
)
|
140359
|
import numpy as np
import json
# with open("pattern.json", "r") as fh:
# patterns = json.load(fh)
class Pat_Match(object):
    """Surface-pattern relation matcher.

    Holds (label, pattern-string) pairs from ``config.patterns`` and, for a
    batch of tokenized sentences, records which patterns occur as substrings
    and which relation each sentence is therefore predicted to express.
    """

    def __init__(self, config, label_to_id, filt=None):
        self.config = config
        self.label_to_id = label_to_id
        self.patterns = config.patterns
        if filt is not None:
            # Drop patterns whose relation id is in the filtered set.
            self.patterns = [
                pattern for pattern in self.patterns
                if label_to_id[pattern[0]] not in filt
            ]

    def match(self, tokens):
        """Return (res, pred): per-sentence pattern hit counts and the
        argmax relation id (0 when no pattern matched at all)."""
        n_patterns = len(self.patterns)
        n_sentences = len(tokens)
        res = np.zeros([n_sentences, n_patterns])
        pred = np.zeros([n_sentences, self.config.num_class])
        # Join each token list once instead of per pattern.
        texts = [" ".join(sentence) for sentence in tokens]
        for col, (rel_label, pat) in enumerate(self.patterns):
            rel = self.label_to_id[rel_label]
            for row, text in enumerate(texts):
                if pat in text:
                    res[row, col] += 1
                    pred[row, rel] += 1
        # Sentences with no hits at all are forced to class 0.
        has_match = (np.amax(pred, axis=1) > 0).astype(np.int32)
        pred = np.argmax(pred, axis=1) * has_match
        return res, pred
|
140363
|
import collections
import dominoes
import unittest
class TestSeries(unittest.TestCase):
    """Tests for dominoes.Series: construction, completion, and game chaining."""

    def test_init(self):
        """A new Series starts one game, seeded either with the default [6|6],
        or with an explicitly supplied starting domino; target score defaults
        to 200 and is configurable."""
        s1 = dominoes.Series()
        self.assertEqual(len(s1.games), 1)
        self.assertEqual(len(s1.games[0].board), 1)
        self.assertEqual(s1.games[0].board.left_end(), 6)
        self.assertEqual(s1.games[0].board.right_end(), 6)
        # One player placed the starter (6 dominoes left), the other three hold 7.
        hand_lengths1 = collections.Counter(len(h) for h in s1.games[0].hands)
        self.assertEqual(hand_lengths1[6], 1)
        self.assertEqual(hand_lengths1[7], 3)
        self.assertTrue(s1.games[0].turn in range(4))
        self.assertTrue(bool(s1.games[0].valid_moves))
        self.assertIsNone(s1.games[0].result)
        self.assertEqual(s1.scores, [0, 0])
        self.assertEqual(s1.target_score, 200)
        s2 = dominoes.Series(target_score=100)
        self.assertEqual(len(s2.games), 1)
        self.assertEqual(len(s2.games[0].board), 1)
        self.assertEqual(s2.games[0].board.left_end(), 6)
        self.assertEqual(s2.games[0].board.right_end(), 6)
        hand_lengths2 = collections.Counter(len(h) for h in s2.games[0].hands)
        self.assertEqual(hand_lengths2[6], 1)
        self.assertEqual(hand_lengths2[7], 3)
        self.assertTrue(s2.games[0].turn in range(4))
        self.assertTrue(bool(s2.games[0].valid_moves))
        self.assertIsNone(s2.games[0].result)
        self.assertEqual(s2.scores, [0, 0])
        self.assertEqual(s2.target_score, 100)
        d = dominoes.Domino(1, 2)
        s3 = dominoes.Series(starting_domino=d)
        self.assertEqual(len(s3.games), 1)
        self.assertEqual(len(s3.games[0].board), 1)
        self.assertEqual(s3.games[0].board.left_end(), 1)
        self.assertEqual(s3.games[0].board.right_end(), 2)
        hand_lengths3 = collections.Counter(len(h) for h in s3.games[0].hands)
        self.assertEqual(hand_lengths3[6], 1)
        self.assertEqual(hand_lengths3[7], 3)
        self.assertTrue(s3.games[0].turn in range(4))
        self.assertTrue(bool(s3.games[0].valid_moves))
        self.assertIsNone(s3.games[0].result)
        self.assertEqual(s3.scores, [0, 0])
        self.assertEqual(s3.target_score, 200)

    def test_is_over(self):
        """The series is over once either team reaches the target score."""
        s = dominoes.Series()
        self.assertFalse(s.is_over())
        s.scores = [199, 199]
        self.assertFalse(s.is_over())
        s.scores = [200, 199]
        self.assertTrue(s.is_over())
        s.scores = [199, 200]
        self.assertTrue(s.is_over())
        s.scores = [200, 200]
        self.assertTrue(s.is_over())
        s.scores = [201, 201]
        self.assertTrue(s.is_over())

    def test_next_game(self):
        """next_game() applies the previous result, starts the following game
        with the correct first player, raises while a game is in progress or
        the series is over, and returns None once the series ends."""
        s = dominoes.Series()
        str1 = str(s)
        repr1 = repr(s)
        # Current game unfinished -> cannot start the next one.
        self.assertRaises(dominoes.GameInProgressException, s.next_game)
        self.assertEqual(len(s.games), 1)
        self.assertEqual(len(s.games[0].board), 1)
        self.assertTrue(bool(s.games[0].valid_moves))
        self.assertIsNone(s.games[0].result)
        self.assertEqual(s.scores, [0, 0])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str1)
        self.assertTrue('Team 0 has 0 points.' in str1)
        self.assertTrue('Team 1 has 0 points.' in str1)
        self.assertEqual(str1, repr1)
        scores = [200, 200]
        s.scores = scores
        str2 = str(s)
        repr2 = repr(s)
        # Series already over -> cannot start another game.
        self.assertRaises(dominoes.SeriesOverException, s.next_game)
        self.assertEqual(len(s.games), 1)
        self.assertEqual(len(s.games[0].board), 1)
        self.assertTrue(bool(s.games[0].valid_moves))
        self.assertIsNone(s.games[0].result)
        self.assertEqual(s.scores, scores)
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str2)
        self.assertTrue('Team 0 has 200 points.' in str2)
        self.assertTrue('Team 1 has 200 points.' in str2)
        self.assertEqual(str2, repr2)
        s.scores = [0, 0]
        s.games[0].result = dominoes.Result(0, True, 50)
        g1 = s.next_game()
        str3 = str(s)
        repr3 = repr(s)
        self.assertEqual(len(s.games), 2)
        self.assertEqual(len(g1.board), 0)
        self.assertEqual(g1.turn, 0)
        self.assertTrue(bool(g1.valid_moves))
        self.assertIsNone(g1.result)
        self.assertEqual(s.scores, [50, 0])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str3)
        self.assertTrue('Team 0 has 50 points.' in str3)
        self.assertTrue('Team 1 has 0 points.' in str3)
        self.assertEqual(str3, repr3)
        s.games[1].result = dominoes.Result(1, False, 50)
        g2 = s.next_game()
        str4 = str(s)
        repr4 = repr(s)
        self.assertEqual(len(s.games), 3)
        self.assertEqual(len(g2.board), 0)
        self.assertEqual(g2.turn, 2)
        self.assertTrue(bool(g2.valid_moves))
        self.assertIsNone(g2.result)
        self.assertEqual(s.scores, [100, 0])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str4)
        self.assertTrue('Team 0 has 100 points.' in str4)
        self.assertTrue('Team 1 has 0 points.' in str4)
        self.assertEqual(str4, repr4)
        s.games[2].result = dominoes.Result(2, True, 50)
        g3 = s.next_game()
        str5 = str(s)
        repr5 = repr(s)
        self.assertEqual(len(s.games), 4)
        self.assertEqual(len(g3.board), 0)
        self.assertEqual(g3.turn, 2)
        self.assertTrue(bool(g3.valid_moves))
        self.assertIsNone(g3.result)
        self.assertEqual(s.scores, [150, 0])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str5)
        self.assertTrue('Team 0 has 150 points.' in str5)
        self.assertTrue('Team 1 has 0 points.' in str5)
        self.assertEqual(str5, repr5)
        s.games[3].result = dominoes.Result(3, False, -50)
        g4 = s.next_game()
        str6 = str(s)
        repr6 = repr(s)
        self.assertEqual(len(s.games), 5)
        self.assertEqual(len(g4.board), 0)
        self.assertEqual(g4.turn, 3)
        self.assertTrue(bool(g4.valid_moves))
        self.assertIsNone(g4.result)
        self.assertEqual(s.scores, [150, 50])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str6)
        self.assertTrue('Team 0 has 150 points.' in str6)
        self.assertTrue('Team 1 has 50 points.' in str6)
        self.assertEqual(str6, repr6)
        s.games[4].result = dominoes.Result(2, False, 0)
        g5 = s.next_game()
        str7 = str(s)
        repr7 = repr(s)
        self.assertEqual(len(s.games), 6)
        self.assertEqual(len(g5.board), 0)
        self.assertEqual(g5.turn, 3)
        self.assertTrue(bool(g5.valid_moves))
        self.assertIsNone(g5.result)
        self.assertEqual(s.scores, [150, 50])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str7)
        self.assertTrue('Team 0 has 150 points.' in str7)
        self.assertTrue('Team 1 has 50 points.' in str7)
        self.assertEqual(str7, repr7)
        s.games[5].result = dominoes.Result(0, True, 100)
        # Final result pushes team 0 past 200 -> series over, no new game.
        self.assertIsNone(s.next_game())
        str8 = str(s)
        repr8 = repr(s)
        self.assertEqual(len(s.games), 6)
        self.assertEqual(s.scores, [250, 50])
        self.assertEqual(s.target_score, 200)
        self.assertTrue('Series to 200 points:' in str8)
        self.assertTrue('Team 0 has 250 points.' in str8)
        self.assertTrue('Team 1 has 50 points.' in str8)
        self.assertEqual(str8, repr8)
if __name__ == '__main__':
    unittest.main()
|
140365
|
import os
from typing import NamedTuple, Sequence
import subprocess
class UnoOfficePaths(NamedTuple):
    """A candidate office installation: where its UNO packages live and
    which python interpreter can import them."""
    uno_path: str
    uno_python_path: str
class EnvironmentVariables:
    """Names of the environment variables that override the defaults below."""
    UNO_PATH = 'UNO_PATH'
    UNO_PYTHON_PATH = 'UNO_PYTHON_PATH'
    UNO_OFFICE_BINARY_PATH = 'UNO_OFFICE_BINARY_PATH'
class DefaultValues:
    """Fallback paths used when the corresponding env var is unset/empty
    (Debian/Ubuntu LibreOffice layout)."""
    UNO_PATH = '/usr/lib/python3/dist-packages'
    UNO_PYTHON_PATH = 'python3'
    UNO_OFFICE_BINARY_PATH = '/usr/lib/libreoffice/program/soffice.bin'
def get_uno_path() -> str:
    """Return the UNO package path from $UNO_PATH, or the default.

    Consistency fix: use the EnvironmentVariables constant instead of
    repeating the 'UNO_PATH' string literal.
    """
    return os.environ.get(EnvironmentVariables.UNO_PATH) or DefaultValues.UNO_PATH
def get_uno_python_path() -> str:
    """Return the UNO-capable python interpreter from $UNO_PYTHON_PATH, or the
    default.  Consistency fix: reference the EnvironmentVariables constant."""
    return os.environ.get(EnvironmentVariables.UNO_PYTHON_PATH) or DefaultValues.UNO_PYTHON_PATH
def get_uno_office_binary_path() -> str:
    """Return the soffice binary path from $UNO_OFFICE_BINARY_PATH, or the
    default.  Consistency fix: reference the EnvironmentVariables constant."""
    return os.environ.get(EnvironmentVariables.UNO_OFFICE_BINARY_PATH) or DefaultValues.UNO_OFFICE_BINARY_PATH
def find_offices() -> Sequence[UnoOfficePaths]:
    """Enumerate candidate office installations (currently just the one
    described by the environment/defaults)."""
    office = UnoOfficePaths(
        uno_path=get_uno_path(),
        uno_python_path=get_uno_python_path(),
    )
    return [office]
def find_pyuno_office() -> UnoOfficePaths:
    """Return the first office installation whose interpreter can import
    ``uno``/``unohelper``; raise RuntimeError if none works."""
    offices = find_offices()
    if not offices:
        raise RuntimeError('no suitable office installation found')
    for candidate in offices:
        try:
            # Probe: run the candidate python with PYTHONPATH pointed at
            # the candidate's UNO packages.
            subprocess.check_output(
                [candidate.uno_python_path, '-c', 'import uno, unohelper'],
                env={'PYTHONPATH': candidate.uno_path}
            )
        except (subprocess.CalledProcessError, OSError):
            continue
        return candidate
    raise RuntimeError(
        'none of the potential office installations seem to function, tried: %s' % offices
    )
def get_start_listener_command(port: int) -> Sequence[str]:
    """Build the soffice command line that starts a headless UNO listener
    accepting URP connections on localhost:*port*."""
    accept_arg = (
        '--accept=socket,host=localhost,port={port};urp;StarOffice.ServiceManager'
        .format(port=port)
    )
    flags = [
        '--headless',
        '--invisible',
        '--nocrashreport',
        '--nodefault',
        '--nofirststartwizard',
        '--nologo',
        '--norestore',
    ]
    return [get_uno_office_binary_path()] + flags + [accept_arg]
|
140395
|
from mjcf import elements as e
def main():
    """Declaratively build the 'tendon' MuJoCo model with the mjcf element
    tree and write it out as tendon-gen.xml.

    The body/site/geom hierarchy is assembled level by level; the spatial
    tendon at the end routes through sites/geoms across bodies b1..b4.
    """
    #########################
    # Level 1
    mujoco = e.Mujoco(
        model="tendon"
    )
    #########################
    # Level 2
    compiler = e.Compiler(
        coordinate="global",
    )
    default = e.Default()
    visual = e.Visual()
    worldbody = e.Worldbody()
    tendon = e.Tendon()
    mujoco.add_children([
        compiler,
        default,
        visual,
        worldbody,
        tendon
    ])
    ######################
    # Level 3
    # Default
    d_geom = e.Geom(
        rgba=[0.9, 0.7, 0.1, 1],
        size=0.01
    )
    d_site = e.Site(
        type="sphere",
        rgba=[0.9, 0.9, 0.9, 1],
        size=0.005
    )
    d_joint = e.Joint(
        type="hinge",
        axis=[0, 1, 0],
        limited=True,
        range=[0, 60],
        solimplimit=[0.95, 0.95, 0.1]
    )
    default.add_children([
        d_geom,
        d_site,
        d_joint
    ])
    # Visual
    headlight = e.visual.Headlight(
        diffuse=[0.7, 0.7, 0.7]
    )
    visual.add_child(headlight)
    # Worldbody
    b1 = e.Body()
    s2 = e.Site(
        name="s2",
        pos=[-0.03, 0, 0.32]
    )
    b2 = e.Body()
    worldbody.add_children([
        b1,
        s2,
        b2
    ])
    # Tendon
    spatial_tendon = e.Spatial(
        width=0.002,
        rgba=[.95, .3, .3, 1],
        limited=True,
        range=[0, 0.33]
    )
    tendon.add_child(spatial_tendon)
    ######################
    # Level 4
    # b1
    b1_geom = e.Geom(
        type="cylinder",
        fromto=[-0.03, 0, 0.2, -0.03, 0, 0.15],
        size=0.03,
        rgba=[.2, .2, .5, 1],
        density=5000
    )
    b1_joint = e.Joint(
        type="slide",
        pos=[-0.03, 0, 0.2],
        axis=[0, 0, 1],
        limited=False,
    )
    s1 = e.Site(
        name="s1",
        pos=[-0.03, 0, 0.2]
    )
    b1.add_children([
        b1_geom,
        b1_joint,
        s1
    ])
    # b2
    b2_geom_1 = e.Geom(
        type="capsule",
        fromto=[0, 0, 0.3, 0.1, 0, 0.3]
    )
    g1 = e.Geom(
        name="g1",
        type="cylinder",
        fromto=[0.0, 0.015, 0.3, 0.0, -0.015, 0.3],
        size=0.02,
        rgba=[.3, .9, .3, .4]
    )
    b2_joint = e.Joint(
        pos=[0, 0, 0.3]
    )
    s3 = e.Site(
        name="s3",
        pos=[0.02, 0, 0.32],
        size=None
    )
    b3 = e.Body()
    b2.add_children([
        b2_geom_1,
        g1,
        b2_joint,
        s3,
        b3
    ])
    # spatial_tendon
    ss1 = e.spatial.Site(site="s1")
    ss2 = e.spatial.Site(site="s2")
    sg1 = e.spatial.Geom(geom="g1")
    ss3 = e.spatial.Site(site="s3")
    sp1 = e.spatial.Pulley(divisor=2)
    sg2 = e.spatial.Geom(geom="g2", sidesite="side2")
    ss4 = e.spatial.Site(site="s4")
    sp2 = e.spatial.Pulley(divisor=2)
    ss5 = e.spatial.Site(site="s5")
    sg3 = e.spatial.Geom(geom="g3", sidesite="side3")
    ss6 = e.spatial.Site(site="s6")
    # NOTE(review): ss3 and sg2 appear multiple times in this path —
    # presumably intentional (each pulley branch restarts from s3), but
    # confirm against the reference tendon.xml model.
    spatial_tendon.add_children([
        ss1,
        ss2,
        sg1,
        ss3,
        sp1,
        ss3,
        sg2,
        ss4,
        sp2,
        ss3,
        sg2,
        ss5,
        sg3,
        ss6
    ])
    ######################
    # Level 5
    # b3
    b3_geom_1 = e.Geom(
        type="capsule",
        fromto=[0.1, 0, 0.3, 0.2, 0, 0.3]
    )
    g2 = e.Geom(
        name="g2",
        type="cylinder",
        fromto=[0.1, 0.015, 0.3, 0.1, -0.015, 0.3],
        size=0.02,
        rgba=[.3, .9, .3, .4]
    )
    b3_joint = e.Joint(
        pos=[0.1, 0, 0.3]
    )
    s4 = e.Site(
        name="s4",
        pos=[0.13, 0, 0.31],
        size=None
    )
    s5 = e.Site(
        name="s5",
        pos=[0.15, 0, 0.32]
    )
    side2 = e.Site(
        name="side2",
        pos=[0.1, 0, 0.33]
    )
    b4 = e.Body()
    b3.add_children([
        b3_geom_1,
        g2,
        b3_joint,
        s4,
        s5,
        side2,
        b4
    ])
    ######################
    # Level 6
    # b4
    b4_geom_1 = e.Geom(
        type="capsule",
        fromto=[0.2, 0, 0.3, 0.27, 0, 0.3]
    )
    g3 = e.Geom(
        name="g3",
        type="cylinder",
        fromto=[0.2, 0.015, 0.3, 0.2, -0.015, 0.3],
        size=0.02,
        rgba=[.3, .9, .3, .4]
    )
    b4_joint = e.Joint(
        pos=[0.2, 0, 0.3]
    )
    s6 = e.Site(
        name="s6",
        pos=[0.23, 0, 0.31]
    )
    side3 = e.Site(
        name="side3",
        pos=[0.2, 0, 0.33]
    )
    b4.add_children([
        b4_geom_1,
        g3,
        b4_joint,
        s6,
        side3
    ])
    model_xml = mujoco.xml()
    # Output
    with open('tendon-gen.xml', 'w') as fh:
        fh.write(model_xml)
if __name__ == '__main__':
    main()
|
140460
|
import numpy as np
import teaserpp_python
from Config import Config
import gtsam as gt
from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2,
NonlinearFactorGraph, NonlinearISAM, Pose3,
PriorFactorPoint3, PriorFactorPose3, Rot3,
PinholeCameraCal3_S2, Values, Point3) # symbol_shorthand_X, symbol_shorthand_L)
from gtsam.symbol_shorthand import X, L
import matplotlib.pyplot as plt
# import g2o
# class PoseOptimizer(g2o.SparseOptimizer):
# def __init__(self, ):
# super().__init__()
# solver = g2o.BlockSolverX(g2o.LinearSolverDenseX())
# solver = g2o.OptimizationAlgorithmLevenberg(solver)
# super().set_algorithm(solver)
# self.edge_list = []
# self.edge_outlier = np.array([], dtype=bool)
# self.v_se3 = g2o.VertexSE3Expmap()
# self.v_se3.set_id(0) # internal id
# self.v_se3.set_fixed(False)
# super().add_vertex(self.v_se3)
# self.pose = []
# self.inv_lvl_sigma2 = np.zeros((8,), dtype=np.float)
# for idx in np.arange(8):
# self.inv_lvl_sigma2[idx] = 1./1.2**(2*idx-2)
#
# def optimize(self, max_iterations=10):
# self.edge_outlier = np.full(len(self.edge_list), False)
# for iteration in range(4):
# # self.v_se3.set_estimate(self.pose)
# super().initialize_optimization(0)
# super().optimize(max_iterations)
# print("ITER", self.vertex(0).estimate().to_vector())
# print("Initial Correspondence: ", np.count_nonzero(1-self.edge_outlier))
# n_bad = 0
# for idx in range(len(self.edge_list)):
# e = self.edge_list[idx]
# e.compute_error()
# chi2 = e.chi2()
# # print("Iter ", iteration, "Chi: " ,chi2)
# if chi2 > 7.815:
# self.edge_outlier[idx] = True
# e.set_level(1)
# n_bad += 1
# else:
# self.edge_outlier[idx] = False
# e.set_level(0)
# if iteration == 2:
# e.set_robust_kernel(None)
#
# print("NUM BADS: ", n_bad, ":", len(self.edge_list))
# return self.edge_outlier
#
# def add_pose(self, pose, fixed=False):
# self.v_se3.set_estimate(pose)
# self.pose = pose
#
# def add_point(self, world_pos,
# measurement_cam,
# octave,
# robust_kernel=g2o.RobustKernelHuber(np.sqrt(7.815))): # ??% CI
#
# edge = g2o.EdgeStereoSE3ProjectXYZOnlyPose()
# edge.set_vertex(0, self.vertex(0))
#
# fx = Config().fx
# fy = Config().fy
# cx = Config().cx
# cy = Config().cy
# bf = Config().bf
#
# edge.fx = fx
# edge.fy = fy
# edge.cx = cx
# edge.cy = cy
# edge.bf = bf
# edge.Xw = world_pos
#
# edge.set_measurement(measurement_cam) # projection
# information = self.inv_lvl_sigma2[octave]*np.identity(3)
# edge.set_information(information)
#
# if robust_kernel is not None:
# edge.set_robust_kernel(robust_kernel)
#
# super().add_edge(edge)
#
# self.edge_list.append(edge)
#
# def get_pose(self):
# return self.vertex(0).estimate()
class PoseOptimizerTeaser:
    """Rigid 3D registration via the TEASER++ robust solver; optimize()
    returns a 4x4 homogeneous transform from src to dst correspondences."""

    def __init__(self):
        self.NOISE_BOUND = 0.1  # 0.05
        params = teaserpp_python.RobustRegistrationSolver.Params()
        params.cbar2 = 0.6  # 1
        params.noise_bound = self.NOISE_BOUND
        params.estimate_scaling = False
        params.rotation_estimation_algorithm = \
            teaserpp_python.RobustRegistrationSolver.ROTATION_ESTIMATION_ALGORITHM.GNC_TLS
        params.rotation_gnc_factor = 1.4
        params.rotation_max_iterations = 200
        params.rotation_cost_threshold = 1e-12
        self.solver_params = params
        self.solver = teaserpp_python.RobustRegistrationSolver(params)

    def optimize(self, src, dst):
        """Solve src -> dst registration and assemble the 4x4 transform."""
        # start = time.time()
        self.solver.solve(src, dst)
        # end = time.time()
        solution = self.solver.getSolution()
        # [R | t] on top, homogeneous row [0 0 0 1] below.
        rt = np.hstack((solution.rotation, np.expand_dims(solution.translation, axis=1)))
        bottom_row = np.expand_dims(np.array([0, 0, 0, 1]), axis=1).T
        return np.concatenate((rt, bottom_row), axis=0)
class PoseOptimizerGTSAM:
    """Single-pose refinement with GTSAM.

    Landmarks are pinned with NonlinearEquality factors so only the camera
    pose X(1) is estimated.  Far points get mono (2-DOF) projection factors,
    near points stereo (3-DOF) factors; outliers are pruned over four LM
    rounds with tightening chi-square thresholds, and at round 2 the Huber
    robust kernels of surviving factors are dropped.
    """

    def __init__(self):
        fx = Config().fx
        fy = Config().fy
        cx = Config().cx
        cy = Config().cy
        bf = Config().bf
        # Create realistic calibration and measurement noise model
        # format: fx fy skew cx cy baseline
        baseline = bf/fx
        self.K_stereo = gt.Cal3_S2Stereo(fx, fy, 0.0, cx, cy, baseline)
        self.K_mono = gt.Cal3_S2(fx, fy, 0.0, cx, cy)
        # 95% chi-square cutoffs for 2-DOF (mono) and 3-DOF (stereo) residuals.
        self.deltaMono = np.sqrt(5.991)
        self.deltaStereo = np.sqrt(7.815)
        self.depth_threshold = bf/fx * 60
        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        # add a constraint on the starting pose
        # first_pose = gt.Pose3()
        # self.graph.add(gt.NonlinearEqualityPose3(X(1), first_pose))
        # FIX: `np.float` was removed in NumPy >= 1.20; plain float (float64)
        # is the documented equivalent.
        self.inv_lvl_sigma2 = np.zeros((8,), dtype=float)
        for idx in np.arange(8):
            # Inverse variance per image-pyramid octave (scale factor 1.2).
            self.inv_lvl_sigma2[idx] = 1. / 1.2 ** (2 * idx - 2)
        # point counter for landmarks and octave container
        self.counter = 1
        self.octave = []
        self.is_stereo = []

    def add_pose(self, R, t):
        """Insert the initial camera pose estimate for X(1) from a 3x3
        rotation R and a length-3 translation t."""
        t = t.reshape((3, 1))
        self.initialEstimate.insert(X(1), gt.Pose3(np.concatenate((R, t), axis=1)))

    def add_point(self, pointsInitial, measurements, octave):
        """Add one landmark: an equality factor pinning its 3D position plus
        a mono (far point) or stereo (near point) measurement factor wrapped
        in a Huber robust kernel."""
        if pointsInitial[-1] > self.depth_threshold:
            # Far point: depth is unreliable, use a mono projection factor.
            information = self.inv_lvl_sigma2[octave] * np.identity(2)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaMono)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            # measurements layout presumably (uL, uR, v) — index 0 and 2 pick
            # the left-image pixel; TODO confirm against the caller.
            factor = gt.GenericProjectionFactorCal3_S2(gt.Point2(measurements[0], measurements[2]), robust_model,
                                                       X(1), L(self.counter), self.K_mono)
            self.is_stereo.append(False)
        else:
            information = self.inv_lvl_sigma2[octave] * np.identity(3)
            stereo_model = gt.noiseModel_Diagonal.Information(information)
            huber = gt.noiseModel_mEstimator_Huber.Create(self.deltaStereo)
            robust_model = gt.noiseModel_Robust(huber, stereo_model)
            factor = gt.GenericStereoFactor3D(gt.StereoPoint2(*tuple(measurements)), robust_model,
                                              X(1), L(self.counter), self.K_stereo)
            self.is_stereo.append(True)
        # Equality factor at even graph index, measurement factor at odd index.
        self.graph.add(gt.NonlinearEqualityPoint3(L(self.counter), gt.Point3(pointsInitial)))
        self.initialEstimate.insert(L(self.counter), gt.Point3(pointsInitial))
        self.graph.add(factor)
        self.octave.append(octave)
        self.counter += 1

    def optimize(self, flag_verbose=False):
        """Run four LM rounds, pruning measurement factors whose chi-square
        error exceeds the per-round threshold.  Returns (pose, edge_outlier)
        where edge_outlier[k] is True if landmark k was rejected."""
        # optimize
        edge_outlier = np.full(self.counter-1, False)
        error_th_stereo = [7.815, 7.815, 5, 5]
        error_th_mono = [5.991, 5.991, 3.5, 3.5]
        # error_th_stereo = [7.815, 7.815, 7.815, 7.815]
        # error_th_mono = [5.991, 5.991, 5.991, 5.991]
        for iteration in range(4):
            if flag_verbose:
                errors = []
            optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
            result = optimizer.optimize()
            n_bad = 0
            if flag_verbose:
                print(f"Number of Factors: {self.graph.nrFactors()-self.graph.size()//2, self.graph.size()//2}")
            error_s = error_th_stereo[iteration]
            error_m = error_th_mono[iteration]
            # Odd indices are the measurement factors (even ones are the
            # landmark equality constraints).
            for idx in range(1, self.graph.size(), 2):
                try:
                    # BUG FIX: is_stereo holds one entry per landmark, so it
                    # must be indexed by the landmark number idx//2 (as octave
                    # already is), not by the raw graph index — the original
                    # raised IndexError with more than one landmark.
                    if self.is_stereo[idx // 2]:
                        factor = gt.dynamic_cast_GenericStereoFactor3D_NonlinearFactor(self.graph.at(idx))
                    else:
                        factor = gt.dynamic_cast_GenericProjectionFactorCal3_S2_NonlinearFactor(self.graph.at(idx))
                except Exception:  # narrowed from a bare except
                    # Factor was removed in an earlier round; skip it.
                    if flag_verbose:
                        errors.append(0)
                    continue
                error = factor.error(result)
                # print(error)
                if flag_verbose:
                    errors.append(error)
                # if error > 7.815:
                if (self.is_stereo[idx // 2] and error > error_s) or (not self.is_stereo[idx // 2] and error > error_m):
                    edge_outlier[idx//2] = True
                    self.graph.remove(idx)
                    n_bad += 1
                else:
                    edge_outlier[idx//2] = False
                    if iteration == 2:
                        # Survivors are trusted now: swap in non-robust factors.
                        if self.is_stereo[idx // 2]:
                            information = self.inv_lvl_sigma2[self.octave[idx//2]] * np.identity(3)
                            stereo_model = gt.noiseModel_Diagonal.Information(information)
                            new_factor = gt.GenericStereoFactor3D(factor.measured(), stereo_model, X(1),
                                                                  L(idx//2+1), self.K_stereo)
                        else:
                            information = self.inv_lvl_sigma2[self.octave[idx // 2]] * np.identity(2)
                            stereo_model = gt.noiseModel_Diagonal.Information(information)
                            new_factor = gt.GenericProjectionFactorCal3_S2(factor.measured(), stereo_model,
                                                                           X(1),
                                                                           L(idx // 2 + 1), self.K_mono)
                        self.graph.replace(idx, new_factor)
            if flag_verbose:
                fig, ax = plt.subplots()
                ax.bar(np.arange(0, len(errors)).tolist(), errors)
                plt.show()
            print("NUM BADS: ", n_bad)
        pose = result.atPose3(X(1))
        # marginals = gt.Marginals(self.graph, result)
        # cov = marginals.marginalCovariance(gt.X(1))
        return pose, edge_outlier  # self.graph, result
class PoseGraphOptimizerGTSAM:
    """Incremental keyframe pose-graph optimization with GTSAM.

    Keyframe X(0) is fixed with an equality factor; new keyframes add
    BetweenFactorPose3 edges to already-inserted neighbors and the whole
    graph is re-optimized with Levenberg-Marquardt.
    """

    def __init__(self):
        # Create graph container and add factors to it
        self.graph = gt.NonlinearFactorGraph()
        # Create initial estimate for camera poses and landmarks
        self.initialEstimate = gt.Values()
        # Odometry noise: 5 deg on each rotation axis, 5 cm on translation.
        sigmas = np.array([5*np.pi/180, 5*np.pi/180, 5*np.pi/180, 0.05, 0.05, 0.05])
        self.covariance = gt.noiseModel.Diagonal.Sigmas(sigmas)
        # Anchor the first keyframe at the identity pose.
        self.graph.add(gt.NonlinearEqualityPose3(X(0), gt.Pose3(np.eye(4))))
        self.result = None
        self.marginals = None

    def add_node(self, kf):
        """Insert keyframe *kf*'s pose estimate and relative-pose edges to
        neighbors with smaller ids (already present in the graph)."""
        self.initialEstimate.insert(X(kf.kfID), gt.Pose3(kf.pose_matrix()))
        for kf_n, rel_pose, _ in kf.neighbors:
            if kf_n.kfID > kf.kfID:
                continue
            self.graph.add(gt.BetweenFactorPose3(X(kf.kfID), X(kf_n.kfID),
                                                 gt.Pose3(rel_pose), self.covariance))

    def add_node_optimize(self, kf):
        """Convenience: add *kf* and immediately re-optimize the graph."""
        self.add_node(kf)
        result, marginals = self.optimize()
        return result, marginals

    def optimize(self):
        """Run LM on the current graph; return (result values, marginals)."""
        optimizer = gt.LevenbergMarquardtOptimizer(self.graph, self.initialEstimate)
        result = optimizer.optimize()
        marginals = gt.Marginals(self.graph, result)
        return result, marginals
class PoseOptimizerRANSAC:
def __init__(self):
self.n_iteration = 100
@classmethod
def procrustes(cls, X, Y, scaling=True, reflection='best'):
"""
A port of MATLAB's `procrustes` function to Numpy.
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
d, Z, [tform] = procrustes(X, Y)
Inputs:
------------
X, Y
matrices of target and input coordinates. they must have equal
numbers of points (rows), but Y may have fewer dimensions
(columns) than X.
scaling
if False, the scaling component of the transformation is forced
to 1
reflection
if 'best' (default), the transformation solution may or may not
include a reflection component, depending on which fits the data
best. setting reflection to True or False forces a solution with
reflection or no reflection respectively.
Outputs
------------
d
the residual sum of squared errors, normalized according to a
measure of the scale of X, ((X - X.mean(0))**2).sum()
Z
the matrix of transformed Y-values
tform
a dict specifying the rotation, translation and scaling that
maps X --> Y
"""
n, m = X.shape
ny, my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0 ** 2.).sum()
ssY = (Y0 ** 2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros(n, m - my)), 0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U, s, Vt = np.linalg.svd(A, full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection is not 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:, -1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standarised distance between X and b*Y*T + c
d = 1 - traceTA ** 2
# transformed coords
Z = normX * traceTA * np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY / ssX - 2 * traceTA * normY / normX
Z = normY * np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my, :]
c = muX - b * np.dot(muY, T)
# transformation values
tform = {'rotation': T, 'scale': b, 'translation': c}
return d, Z, tform
|
140499
|
from __future__ import print_function
import torch
import torch.utils.data as data
import torchvision
from torchvision import transforms
import random
import os
import numpy as np
from PIL import Image
class Base_Dataset(data.Dataset):
    """Episodic base dataset for open-set domain adaptation.

    Subclasses are expected to set ``source_path``, ``target_path``,
    ``class_name``, ``num_class``, ``alpha``, ``label_flag`` and (when pseudo
    labels exist) ``target_image_list`` in their ``__init__`` (see the
    Office/Home/Visda subclasses); this base class only builds the image
    transforms and assembles per-item episodes.
    """

    def __init__(self, root, partition, target_ratio=0.0):
        """
        Args:
            root: dataset root directory (stored, used by subclasses).
            partition: 'train' or 'test'; selects augmentation vs. center crop.
            target_ratio: fraction of support classes drawn from pseudo-labeled
                target data (0.0 disables the mixing).
        """
        super(Base_Dataset, self).__init__()
        # set dataset info
        self.root = root
        self.partition = partition
        self.target_ratio = target_ratio
        # self.target_ratio=0 no mixup
        # ImageNet channel statistics for input normalization.
        mean_pix = [0.485, 0.456, 0.406]
        std_pix = [0.229, 0.224, 0.225]
        normalize = transforms.Normalize(mean=mean_pix, std=std_pix)
        if self.partition == 'train':
            # Training: random flip + random 224x224 crop for augmentation.
            self.transformer = transforms.Compose([transforms.Resize(256),
                                                   transforms.RandomHorizontalFlip(),
                                                   transforms.RandomCrop(224),
                                                   transforms.ToTensor(),
                                                   normalize])
        else:
            # Evaluation: deterministic 224x224 center crop.
            self.transformer = transforms.Compose([transforms.Resize(256),
                                                   transforms.CenterCrop(224),
                                                   transforms.ToTensor(),
                                                   normalize])

    def __len__(self):
        # Number of episodes per epoch; each episode consumes
        # (num_class - 1) target samples.
        # NOTE(review): implicitly returns None for any partition other than
        # 'train'/'test' -- confirm those are the only values ever used.
        if self.partition == 'train':
            return int(min(sum(self.alpha), len(self.target_image)) / (self.num_class - 1))
        elif self.partition == 'test':
            return int(len(self.target_image) / (self.num_class - 1))

    def __getitem__(self, item):
        """Assemble one episode.

        Returns a 5-tuple of stacked tensors:
            image_data: support images (one per known class) followed by
                (num_class - 1) target images.
            label_data: class labels for supports; pseudo labels (train) or
                the 'unknown' sentinel ``num_class`` (test) for targets.
            real_label_data: ground-truth labels of the target samples only.
            domain_label: 1 for source-domain samples, 0 for target-domain.
            ST_split: 1 for the target samples to be evaluated, else 0.
        """
        image_data = []
        label_data = []
        target_real_label = []
        class_index_target = []
        domain_label = []
        ST_split = []  # Mask of targets to be evaluated
        # select index for support class
        num_class_index_target = int(self.target_ratio * (self.num_class - 1))
        if self.target_ratio > 0:
            # Known classes that already have pseudo-labeled target samples.
            available_index = [key for key in self.target_image_list.keys() if len(self.target_image_list[key]) > 0
                               and key < self.num_class - 1]
            class_index_target = random.sample(available_index, min(num_class_index_target, len(available_index)))
        # Remaining known classes take their support sample from the source domain.
        class_index_source = list(set(range(self.num_class - 1)) - set(class_index_target))
        random.shuffle(class_index_source)
        for classes in class_index_source:
            # select support samples from source domain or target domain
            image = Image.open(random.choice(self.source_image[classes])).convert('RGB')
            if self.transformer is not None:
                image = self.transformer(image)
            image_data.append(image)
            label_data.append(classes)
            domain_label.append(1)
            ST_split.append(0)
            # target_real_label.append(classes)
        for classes in class_index_target:
            # select support samples from source domain or target domain
            image = Image.open(random.choice(self.target_image_list[classes])).convert('RGB')
            if self.transformer is not None:
                image = self.transformer(image)
            image_data.append(image)
            label_data.append(classes)
            domain_label.append(0)
            ST_split.append(0)
            # target_real_label.append(classes)
        # adding target samples
        for i in range(self.num_class - 1):
            if self.partition == 'train':
                # Training targets are sampled at random with their pseudo label.
                if self.target_ratio > 0:
                    index = random.choice(list(range(len(self.label_flag))))
                else:
                    index = random.choice(list(range(len(self.target_image))))
                    # index = random.choice(list(range(len(self.label_flag))))
                target_image = Image.open(self.target_image[index]).convert('RGB')
                if self.transformer is not None:
                    target_image = self.transformer(target_image)
                image_data.append(target_image)
                label_data.append(self.label_flag[index])
                target_real_label.append(self.target_label[index])
                domain_label.append(0)
                ST_split.append(1)
            elif self.partition == 'test':
                # Test targets are taken sequentially so every sample is seen once.
                # For last batch
                # if item * (self.num_class - 1) + i >= len(self.target_image):
                #     break
                target_image = Image.open(self.target_image[item * (self.num_class - 1) + i]).convert('RGB')
                if self.transformer is not None:
                    target_image = self.transformer(target_image)
                image_data.append(target_image)
                label_data.append(self.num_class)
                target_real_label.append(self.target_label[item * (self.num_class - 1) + i])
                domain_label.append(0)
                ST_split.append(1)
        image_data = torch.stack(image_data)
        label_data = torch.LongTensor(label_data)
        real_label_data = torch.tensor(target_real_label)
        domain_label = torch.tensor(domain_label)
        ST_split = torch.tensor(ST_split)
        return image_data, label_data, real_label_data, domain_label, ST_split

    def load_dataset(self):
        """Read the source/target list files ("<path> <label>" per line).

        Returns:
            source_image_list: dict class_idx -> list of image paths; samples
                labeled num_class - 1 (the 'unk' class) are skipped for source.
            target_image_list: flat list of target image paths.
            target_label_list: ground-truth int label per target image.
        """
        source_image_list = {key: [] for key in range(self.num_class - 1)}
        target_image_list = []
        target_label_list = []
        with open(self.source_path) as f:
            for ind, line in enumerate(f.readlines()):
                image_dir, label = line.split(' ')
                label = label.strip()
                if label == str(self.num_class-1):
                    continue
                source_image_list[int(label)].append(image_dir)
                # source_image_list.append(image_dir)
        with open(self.target_path) as f:
            for ind, line in enumerate(f.readlines()):
                image_dir, label = line.split(' ')
                label = label.strip()
                # target_image_list[int(label)].append(image_dir)
                target_image_list.append(image_dir)
                target_label_list.append(int(label))
        return source_image_list, target_image_list, target_label_list
class Office_Dataset(Base_Dataset):
    """Office-31 episodic dataset: 10 known classes plus the 'unk' bucket.

    ``source``/``target`` are single-letter domain codes
    (A=amazon, W=webcam, D=dslr) mapped to list files by :meth:`getFilePath`.
    """

    def __init__(self, root, partition, label_flag=None, source='A', target='W', target_ratio=0.0):
        super(Office_Dataset, self).__init__(root, partition, target_ratio)
        # set dataset info
        src_name, tar_name = self.getFilePath(source, target)
        self.source_path = os.path.join(root, src_name)
        self.target_path = os.path.join(root, tar_name)
        self.class_name = ["back_pack", "bike", "bike_helmet", "bookcase", "bottle",
                           "calculator", "desk_chair", "desk_lamp", "desktop_computer", "file_cabinet", "unk"]
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes, bucket the target images per pseudo class
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])

        # Class-balancing weights: rarer classes get larger alpha values.
        if self.target_ratio > 0:
            self.alpha_value = [len(self.source_image[key]) + len(self.target_image_list[key])
                                for key in self.source_image.keys()]
        else:
            self.alpha_value = self.alpha

        self.alpha_value = np.array(self.alpha_value)
        self.alpha_value = (self.alpha_value.max() + 1 - self.alpha_value) / self.alpha_value.mean()
        self.alpha_value = torch.tensor(self.alpha_value).float().cuda()

    def getFilePath(self, source, target):
        """Map single-letter domain codes to (source, target) list-file names.

        Raises:
            ValueError: if either code is not one of 'A', 'W', 'D'.
                (The original only printed a message and then crashed with
                UnboundLocalError at the return statement.)
        """
        src_names = {'A': 'amazon_src_list.txt',
                     'W': 'webcam_src_list.txt',
                     'D': 'dslr_src_list.txt'}
        tar_names = {'A': 'amazon_tar_list.txt',
                     'W': 'webcam_tar_list.txt',
                     'D': 'dslr_tar_list.txt'}
        if source not in src_names:
            raise ValueError("Unknown Source Type, only supports A W D.")
        if target not in tar_names:
            raise ValueError("Unknown Target Type, only supports A W D.")
        return src_names[source], tar_names[target]
class Home_Dataset(Base_Dataset):
    """Office-Home episodic dataset: 25 known classes plus the 'unk' bucket.

    ``source``/``target`` are single-letter domain codes
    (A=art, C=clipart, P=product, R=real) mapped to list files by
    :meth:`getFilePath`.
    """

    def __init__(self, root, partition, label_flag=None, source='A', target='R', target_ratio=0.0):
        super(Home_Dataset, self).__init__(root, partition, target_ratio)
        src_name, tar_name = self.getFilePath(source, target)
        self.source_path = os.path.join(root, src_name)
        self.target_path = os.path.join(root, tar_name)
        self.class_name = ['Alarm_Clock', 'Backpack', 'Batteries', 'Bed', 'Bike', 'Bottle', 'Bucket', 'Calculator',
                           'Calendar', 'Candles', 'Chair', 'Clipboards', 'Computer', 'Couch', 'Curtains', 'Desk_Lamp',
                           'Drill', 'Eraser', 'Exit_Sign', 'Fan', 'File_Cabinet', 'Flipflops', 'Flowers', 'Folder',
                           'Fork', 'unk']
        self.num_class = len(self.class_name)
        self.source_image, self.target_image, self.target_label = self.load_dataset()
        self.alpha = [len(self.source_image[key]) for key in self.source_image.keys()]
        self.label_flag = label_flag

        # create the unlabeled tag
        if self.label_flag is None:
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # if pseudo label comes, bucket the target images per pseudo class
            self.target_image_list = {key: [] for key in range(self.num_class + 1)}
            for i in range(len(self.label_flag)):
                self.target_image_list[self.label_flag[i].item()].append(self.target_image[i])
        # NOTE(review): unlike Office_Dataset, the class-balancing alpha_value
        # computation was deliberately left disabled here (it existed only as
        # commented-out code).

    def getFilePath(self, source, target):
        """Map single-letter domain codes to (source, target) list-file names.

        Raises:
            ValueError: if either code is not one of 'A', 'C', 'P', 'R'.
                (The original only printed a message and then crashed with
                UnboundLocalError at the return statement.)
        """
        src_names = {'A': 'art_source.txt',
                     'C': 'clip_source.txt',
                     'P': 'product_source.txt',
                     'R': 'real_source.txt'}
        tar_names = {'A': 'art_tar.txt',
                     'C': 'clip_tar.txt',
                     'P': 'product_tar.txt',
                     'R': 'real_tar.txt'}
        if source not in src_names:
            raise ValueError("Unknown Source Type, only supports A C P R.")
        if target not in tar_names:
            raise ValueError("Unknown Target Type, only supports A C P R.")
        return src_names[source], tar_names[target]
class Visda_Dataset(Base_Dataset):
    """VisDA episodic dataset: 6 known classes plus the 'unk' bucket."""

    def __init__(self, root, partition, label_flag=None, target_ratio=0.0):
        super(Visda_Dataset, self).__init__(root, partition, target_ratio)
        # List files with "<path> <label>" records, one sample per line.
        self.source_path = os.path.join(root, 'source_list.txt')
        self.target_path = os.path.join(root, 'target_list.txt')
        self.class_name = ["bicycle", "bus", "car", "motorcycle", "train", "truck", 'unk']
        self.num_class = len(self.class_name)
        (self.source_image,
         self.target_image,
         self.target_label) = self.load_dataset()
        self.alpha = [len(paths) for paths in self.source_image.values()]
        self.label_flag = label_flag
        if label_flag is None:
            # No pseudo labels yet: tag every target sample as "unlabeled".
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # Pseudo labels available: bucket the target images per pseudo class.
            self.target_image_list = {cls: [] for cls in range(self.num_class + 1)}
            for idx, flag in enumerate(self.label_flag):
                self.target_image_list[flag.item()].append(self.target_image[idx])
class Visda18_Dataset(Base_Dataset):
    """VisDA-18 episodic dataset: 12 known classes plus the 'unk' bucket."""

    def __init__(self, root, partition, label_flag=None, target_ratio=0.0):
        super(Visda18_Dataset, self).__init__(root, partition, target_ratio)
        # List files with "<path> <label>" records, one sample per line.
        self.source_path = os.path.join(root, 'source_list_k.txt')
        self.target_path = os.path.join(root, 'target_list.txt')
        # NOTE(review): "areoplane" looks like a typo for "aeroplane", but the
        # label string may be matched elsewhere, so it is kept verbatim.
        self.class_name = ["areoplane", "bicycle", "bus", "car", "horse", "knife", "motorcycle", "person", "plant",
                           "skateboard", "train", "truck", 'unk']
        self.num_class = len(self.class_name)
        (self.source_image,
         self.target_image,
         self.target_label) = self.load_dataset()
        self.alpha = [len(paths) for paths in self.source_image.values()]
        self.label_flag = label_flag
        if label_flag is None:
            # No pseudo labels yet: tag every target sample as "unlabeled".
            self.label_flag = torch.ones(len(self.target_image)) * self.num_class
        else:
            # Pseudo labels available: bucket the target images per pseudo class.
            self.target_image_list = {cls: [] for cls in range(self.num_class + 1)}
            for idx, flag in enumerate(self.label_flag):
                self.target_image_list[flag.item()].append(self.target_image[idx])
|
140532
|
import os
import json
import requests
import networkx as nx
from urllib import parse
from itertools import chain
from multiprocessing import Pool
def link_to_title(link):
    """Extract the page title from a MediaWiki link record (a dict)."""
    title = link["title"]
    return title
def clean_if_key(page, key):
    """Return the link titles stored under *key* in *page*.

    Args:
        page: a MediaWiki API page record (dict).
        key: "links" or "linkshere".

    Returns:
        A list of title strings; empty when the key is absent.  (The original
        returned a lazy ``map`` for present keys and a list otherwise; a list
        is returned in both cases now, which remains iterable for all callers.)
    """
    return [link["title"] for link in page.get(key, [])]
def get_wiki_linked_pages(page_title, page_number_limit=500):
    """
    Retrieve the inbound and outbound page links for a wikipedia page.

    In the MediaWiki API, ``prop=links`` lists the pages this page links TO
    (outbound), while ``prop=linkshere`` lists the pages that link HERE
    (inbound).  The original code had the two assignments swapped, which
    reversed every edge direction downstream in ``pages_to_edges``.
    """
    # Properly quote the title to turn into a url
    safe_title = parse.quote(page_title)
    url = (
        f"https://en.wikipedia.org/w/api.php?action=query&prop=links|linkshere&pllimit={page_number_limit}&lhlimit={page_number_limit}&titles={safe_title}&format=json&formatversion=2"
    )
    content = requests.get(url).content
    json_content = json.loads(content)
    json_page = json_content["query"]["pages"][0]
    # "links" = outbound links (pages this page links to)
    outbound_pages = clean_if_key(json_page, "links")
    # "linkshere" = inbound links (pages that link to this page)
    inbound_pages = clean_if_key(json_page, "linkshere")
    return dict(
        title=page_title,
        inbound_pages=list(inbound_pages),
        outbound_pages=list(outbound_pages),
    )
def flatten_network(wiki_linked_pages):
    """
    Return a list of the combine inbound and outbound links
    """
    combined = list(wiki_linked_pages["inbound_pages"])
    combined.extend(wiki_linked_pages["outbound_pages"])
    return combined
def pages_to_edges(wiki_linked_pages):
    """
    Create edges linking to (inbound) and from (outbound) a page
    """
    page = wiki_linked_pages["title"]
    edges = [(linking_page, page) for linking_page in wiki_linked_pages["inbound_pages"]]
    edges += [(page, linked_page) for linked_page in wiki_linked_pages["outbound_pages"]]
    return edges
if __name__ == "__main__":
    # Build a two-hop link graph around a seed Wikipedia article and export it
    # in GEXF format for Gephi.
    title = "Freeman_Dyson"
    page_number_limit = 50
    # Retrieve the pages associated with the central page
    # These are first degree pages
    root_linked_pages = get_wiki_linked_pages(
        title, page_number_limit=page_number_limit
    )
    initial_network = flatten_network(root_linked_pages)
    # Retrieve the pages associated with the pages on the central page
    # These are second degree pages
    # NOTE(review): second-degree fetches use the function's default
    # page_number_limit (500), not the local limit of 50 -- confirm intended.
    with Pool(processes=os.cpu_count()) as pool:
        all_linked_pages = pool.map(get_wiki_linked_pages, initial_network)
        # Get a list of lists of the edges from all the pages
        edges = pool.map(pages_to_edges, all_linked_pages)
    # Flatten the list of lists to a single list (a lazy chain here)
    edges = chain.from_iterable(edges)
    # Directed graph creation
    graph = nx.DiGraph()
    for edge in edges:
        graph.add_edge(*edge)
    # Write in gexf format for Gephi
    nx.readwrite.gexf.write_gexf(
        graph, f"./{title}-{page_number_limit}_links_wiki_graph.gexf"
    )
|
140547
|
import asyncio
import mqttools
async def subscriber():
    """Connect to a local MQTT broker, subscribe to two topics, and print
    every received message until the broker connection is lost."""
    client = mqttools.Client('localhost', 1883)
    await client.start()
    # Subscribe to two topics in parallel.
    await asyncio.gather(
        client.subscribe('$SYS/#'),
        client.subscribe('/test/mqttools/foo')
    )
    print('Waiting for messages.')
    while True:
        message = await client.messages.get()
        # A None message signals that the client lost its broker connection.
        if message is None:
            print('Broker connection lost!')
            break
        print(f'Topic: {message.topic}')
        print(f'Message: {message.message}')

asyncio.run(subscriber())
|
140553
|
from django import forms
from apps.Testings.models import Phase
from .models import Argument, Source, Command
from django.utils.safestring import mark_safe
class ArgumentForm(forms.ModelForm):
    """Form for creating/editing an Argument bound to a Command.

    Accepts an optional ``cmd`` kwarg (a Command instance) used to preset the
    hidden ``command`` field and to restrict the include/exclude querysets to
    sibling arguments of that command.
    """

    class Meta:
        model = Argument
        fields = '__all__'
        widgets = {
            'command': forms.HiddenInput(),
            'name': forms.TextInput(attrs={'data-length': 30, 'id': 'args_name'}),
            'description': forms.Textarea(attrs={'class': 'materialize-textarea',
                                                 'data-length': 70, 'id': 'args_description'}),
        }

    def __init__(self, *args, **kwargs):
        # ``cmd`` is an extra kwarg that must be removed before ModelForm sees
        # it; pop with a default instead of the original try/except KeyError.
        cmd = kwargs.pop('cmd', None)
        super(ArgumentForm, self).__init__(*args, **kwargs)
        if cmd:
            self.initial["command"] = cmd.id
            self.fields['include'].queryset = Argument.objects.filter(command=cmd)
            self.fields['exclude'].queryset = Argument.objects.filter(command=cmd)
        try:
            if self.instance:
                # Editing an existing argument: restrict include/exclude to
                # sibling arguments of the same command, minus itself.
                self.fields['include'].queryset = Argument.objects.filter(
                    command=self.instance.command).exclude(id=self.instance.id)
                self.fields['exclude'].queryset = Argument.objects.filter(
                    command=self.instance.command).exclude(id=self.instance.id)
            else:
                self.initial["command"] = cmd.id
        except Exception:
            # self.instance.command may be unset on a brand-new form (related
            # object lookup raises); fall back to whatever the ``if cmd``
            # branch configured above.  The original used a bare ``except:``,
            # which also swallowed KeyboardInterrupt/SystemExit.
            pass
class PhaseForm(forms.ModelForm):
    """Form for a Phase; the ``product`` choices are limited to a Source subset."""

    class Meta:
        model = Phase
        # Use a list, not a set, so the rendered field order is deterministic.
        fields = [
            'name',
            'product',
        ]

    def __init__(self, *args, **kwargs):
        """Restrict ``product`` to sources with category=3.

        NOTE(review): the original comment claimed "category 4 (Robot)" but the
        queryset filters on category=3 -- confirm which is intended.
        """
        super(PhaseForm, self).__init__(*args, **kwargs)
        self.fields['product'].queryset = Source.objects.filter(category=3)
class SourceProductForm(forms.ModelForm):
    """Create-form for a product Source, with optional remote-host fields
    (host/port/username/password/path) and a regex used to scrape commands."""

    # Extra, non-model fields for locating and parsing the product remotely.
    path = forms.CharField(widget=forms.TextInput(), required=False)
    regex = forms.CharField(widget=forms.Textarea(attrs={'rows': 6, 'class': 'materialize-textarea'}), required=False)
    host = forms.CharField(required=False)
    port = forms.IntegerField(required=False)
    username = forms.CharField(required=False)
    password = forms.CharField(widget=forms.PasswordInput(), required=False)

    class Meta:
        model = Source
        fields = [
            'name',
            'version',
            'depends',
            'host',
            'port',
            'username',
            'password',
            'path',
            'regex',
        ]
        labels = {"name": "Product Name", "depends": "Dependence Requirement (Optional)"}

    def __init__(self, *args, **kwargs):
        """This filter only for sources in the category 4(Robot)"""
        super(SourceProductForm, self).__init__(*args, **kwargs)
        self.fields['depends'].queryset = Source.objects.filter(category=4)
        # Default regex: matches CLI option flags (e.g. "  -f, --flag") as
        # found in a product's --help output.
        self.fields[
            'regex'].initial = '( {2}-\w+, --\w+[ \\n=]| {2}-\w+[ \\n=]| {2}--\w+[ \\n=]| {2}--\w+-\w+[ \\n=]| {2}-\w+, --\w+-\w+[ \\n=])(?=[ <]*)'
class SourceEditProductForm(forms.ModelForm):
    """Edit-form for a product Source (name/version/dependency only)."""

    class Meta:
        model = Source
        fields = ['name', 'version', 'depends']
        labels = {"name": "Product Name", "depends": "Dependence Requirement (Optional)"}

    def __init__(self, *args, **kwargs):
        """Limit the ``depends`` choices to category-4 (Robot) sources."""
        super(SourceEditProductForm, self).__init__(*args, **kwargs)
        robot_sources = Source.objects.filter(category=4)
        self.fields['depends'].queryset = robot_sources
class SourceRobotForm(forms.ModelForm):
    """Upload-form for a Robot Framework Source: version plus a zip archive."""

    # The framework distribution is uploaded as a zip file (non-model field).
    zip_file = forms.FileField()

    class Meta:
        model = Source
        fields = [
            'version',
            'zip_file'
        ]
        labels = {"version": "Robot Framework Version"}
class SourceLibraryForm(forms.ModelForm):
    """Create-form for a library Source, with a documentation URL field."""

    # Non-model field pointing at the library's documentation page.
    url = forms.CharField(label='Documentation URL')

    class Meta:
        model = Source
        fields = ['name', 'version', 'url', 'depends']
        labels = {"name": "Library Name", "depends": "Robot Version Requirement"}

    def __init__(self, *args, **kwargs):
        """Limit the ``depends`` choices to category-4 (Robot) sources."""
        super(SourceLibraryForm, self).__init__(*args, **kwargs)
        robot_versions = Source.objects.filter(category=4)
        self.fields['depends'].queryset = robot_versions
class SourceEditLibraryForm(forms.ModelForm):
    """Edit-form for a library Source (name/version/dependency only)."""

    class Meta:
        model = Source
        fields = ['name', 'version', 'depends']
        labels = {"name": "Library Name", "depends": "Robot Version Requirement"}

    def __init__(self, *args, **kwargs):
        """Limit the ``depends`` choices to category-4 (Robot) sources."""
        super(SourceEditLibraryForm, self).__init__(*args, **kwargs)
        robot_versions = Source.objects.filter(category=4)
        self.fields['depends'].queryset = robot_versions
class CommandForm(forms.ModelForm):
    """Form for creating a Command; labels carry materialize tooltips."""

    class Meta:
        model = Command
        fields = [
            'name',
            'source',
            'description',
        ]
        # mark_safe: the labels embed raw HTML for the tooltip icons.
        labels = {
            "name" : mark_safe('<b>Command <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Provide the command that will be used in the product">help_outline</i></b>'),
            "source" : mark_safe('<b>Source <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Select the product associated with the new command">help_outline</i></b>'),
            "description" : mark_safe('<b>Description <i style="float: right" class="tiny material-icons tooltipped" data-position="bottom" data-tooltip="Provide a brief description about the command">help_outline</i></b>'),
        }
        widgets = {
            'name': forms.TextInput(attrs={'data-length': 30}),
            'description': forms.Textarea(attrs={'class': 'materialize-textarea', 'data-length': 70})
        }

    def __init__(self, *args, **kwargs):
        """ This filter exclude the control flow sentences """
        super(CommandForm, self).__init__(*args, **kwargs)
        self.fields['source'].queryset = Source.objects.exclude(category=1)
        """ This Make required the source field """
        self.fields['source'].required = True
|
140555
|
import argparse
import torch
from stereo import MinSumStereo, BlockMatchStereo, RefinedMinSumStereo
import data
import imageio
import numpy as np
import matplotlib.pyplot as plt
# Command-line interface: run one of the stereo models on an image pair and
# write the resulting disparity map as a .pfm file.
parser = argparse.ArgumentParser()
# Input image pair (left/right).
parser.add_argument('--im0', action='store', required=True, type=str)
parser.add_argument('--im1', action='store', required=True, type=str)
# Disparity search range and stride configuration.
parser.add_argument('--min-disp', action='store', default=0, type=int)
parser.add_argument('--max-disp', action='store', default=127, type=int)
parser.add_argument('--stride-in', action='store', default=1, type=int)
parser.add_argument('--stride-out', action='store', default=1, type=int)
# Network architecture options.
parser.add_argument('--multi-level-output', action='store_true', default=False)
parser.add_argument('--activation', action='store', choices=['relu', 'leakyrelu', 'elu'], default='leakyrelu')
parser.add_argument('--with-bn', action='store_true', default=False)
parser.add_argument('--with-upconv', action='store_true', default=False)
parser.add_argument('--with-output-bn', action='store_true', default=False)
parser.add_argument('--pad', action='store', default=(0, 0), nargs=2, type=int,
                    help='extra padding of in height and in width on every side')
# Model variant selection.
parser.add_argument('--model', action='store', default='bp+ms+h',
                    choices=['wta', 'bp+ms', 'bp+ms+h', 'bp+ms+ref+h'])
# Pretrained checkpoint paths for the individual sub-networks.
parser.add_argument('--checkpoint-unary', action='store', default=None, type=str)
parser.add_argument('--checkpoint-matching', action='store', default=[], nargs='+', type=str)
parser.add_argument('--checkpoint-affinity', action='store', default=None, type=str)
parser.add_argument('--checkpoint-crf', action='append', default=[], type=str, nargs='+')
parser.add_argument('--checkpoint-refinement', action='store', default=None, type=str)
# Belief-propagation / inference options.
parser.add_argument('--lbp-min-disp', action='store_true', default=False)
parser.add_argument('--max-iter', action='store', default=1, type=int)
parser.add_argument('--num-bp-layers', action='store', default=1, type=int)
parser.add_argument('--bp-inference', action='store', default='sub-exp',
                    choices=['wta', 'expectation', 'sub-exp'], type=str)
parser.add_argument('--matching', action='store', choices=['corr', 'sad', 'conv3d'],
                    default='sad', type=str)
parser.add_argument('--input-level-offset', action='store', default=1, type=int,
                    help='1 means that level 1 is the input resolution')
parser.add_argument('--output-level-offset', action='store', default=1, type=int,
                    help="0 means that level 0 (=full res) is the output resolution")
args = parser.parse_args()

I0_pyramid, I1_pyramid = data.load_sample(args.im0, args.im1)
device = 'cuda:0'
with torch.no_grad():
    # Instantiate the requested variant.
    # NOTE(review): 'bp+ms' and 'bp+ms+h' map to the same class -- confirm
    # the hierarchical variant is supposed to differ only via args.
    if args.model == 'wta':
        model = BlockMatchStereo(device, args)
    elif args.model == 'bp+ms':
        model = MinSumStereo(device, args)
    elif args.model == 'bp+ms+h':
        model = MinSumStereo(device, args)
    elif args.model == 'bp+ms+ref+h':
        model = RefinedMinSumStereo(device, args)

    max_disp = None  # NOTE(review): dead variable -- args.max_disp is passed below, not this.
    res_dict = model.to(device).forward(I0_pyramid, I1_pyramid, max_disp=args.max_disp, step=1)

    # PFM stores scanlines bottom-up, hence the vertical flip.
    imageio.imwrite("data/output/stereo/" + args.model + ".pfm",
                    np.flipud(res_dict['disps0'][0].squeeze().float().detach().cpu().numpy()))
|
140560
|
from test.functional.test_framework import BitcoinTestFramework
class TestCaseBase(BitcoinTestFramework):
    """Test harness that discovers and runs phase methods by name prefix.

    ``run_test`` calls, in order: every ``initialize*`` method, then every
    ``test_*`` method, then every ``finalize*`` method (each group in the
    alphabetical order produced by ``dir``).
    """

    def set_test_params(self):
        pass

    def _run_phase(self, names, prefix, label):
        # Invoke every attribute whose name starts with *prefix*,
        # announcing it with *label* first.
        for name in names:
            if name.startswith(prefix):
                print(label, self.__class__.__name__ + '.' + name)
                getattr(self, name)()

    def run_test(self):
        names = dir(self)
        self._run_phase(names, "initialize", 'Initialize test case:')
        self._run_phase(names, "test_", 'Test case:')
        self._run_phase(names, "finalize", 'Finalize test case:')
|
140570
|
import re
from ja_timex.extract_filter import DecimalFilter, NumexpFilter, PartialNumFilter
from ja_timex.pattern.place import Pattern
from ja_timex.tag import Extract
def make_extract(target, original, type_name="abstime"):
    """Build an Extract whose match is *target* searched inside *original*,
    carrying a dummy pass-through Pattern."""
    match = re.search(target, original)
    dummy_pattern = Pattern(re_pattern=None, parse_func=lambda x: x, option=None)
    return Extract(type_name=type_name, re_match=match, pattern=dummy_pattern)
def test_numexp_filter():
    """NumexpFilter should fire for numbers followed by a unit, and stay quiet
    for plain date-like numbers."""
    f = NumexpFilter()
    assert f.filter(make_extract("7.18", "7.18キロメートル"), "7.18キロメートル")
    assert f.filter(make_extract("7.18", "7.18 キロメートル"), "7.18 キロメートル")
    assert f.filter(make_extract("7.18", "7.18cm"), "7.18cm")
    assert f.filter(make_extract("7.18", "7.18mm"), "7.18mm")
    assert f.filter(make_extract("7.18", "7.18%"), "7.18%")
    assert f.filter(make_extract("7.18", "7.18インチ"), "7.18インチ")
    assert f.filter(make_extract("7.18", "7.18GHz"), "7.18GHz")
    assert f.filter(make_extract("7.18", "7.18円"), "7.18円")
    assert f.filter(make_extract("2.4", "2.4GHz"), "2.4GHz")

    assert not f.filter(make_extract("7.18", "7.18は晴れ"), "7.18は晴れ")
    assert not f.filter(make_extract("7.18", "7.18に釣り上げられた10メートルの魚"), "7.18に釣り上げられた10メートルの魚")

    # Spans that split into three or more numbers are outside this filter's scope
    assert not f.filter(make_extract("2020.7.18", "2020.7.18"), "2020.7.18")
    assert not f.filter(make_extract("2020.7.18", "2020.7.18円相場は"), "2020.7.18円相場は")  # likewise even when a unit follows
def test_partial_num_filter():
    """PartialNumFilter should fire when the match is embedded in a longer
    numeric expression."""
    f = PartialNumFilter()
    # Cases with a digit or "+" adjacent to the match.
    # "-" is excluded because range expressions like 1/12-1/20 exist.
    assert f.filter(make_extract("13/1", "13/13"), "13/13")
    assert f.filter(make_extract("3/13", "13/13"), "13/13")
    assert f.filter(make_extract("13/1", "13/1+2"), "13/1+2")
    assert f.filter(make_extract("3/13", "+3/13"), "+3/13")

    # Cases where the adjacent character is not a digit.
    assert not f.filter(make_extract("13/1", "13/1は"), "13/1は")
    assert not f.filter(make_extract("3/13", "は3/13"), "は3/13")

    # A "." directly before the trailing 0.1 is also judged True (e.g. IP addresses).
    assert f.filter(make_extract("0.1", "127.0.0.1"), "127.0.0.1")
def test_decimal_filter():
    """DecimalFilter should reject leading-zero decimals/ratios as dates."""
    f = DecimalFilter()
    # In expressions like 0.1, 0/1 or 0-1 the leading 0 never denotes year 0000.
    assert f.filter(make_extract("0.18", "0.18"), "0.18")
    assert f.filter(make_extract("0/10", "0/10"), "0/10")
    assert f.filter(make_extract("0-10", "0-10"), "0-10")

    # DURATION is out of scope (a notation like "0.1 months" is plausible).
    assert not f.filter(make_extract("0.18", "0.18", "duration"), "0.18")
|
140574
|
from __future__ import absolute_import
import collections
from huskar_sdk_v2.consts import OVERALL
from huskar_api import settings
from huskar_api.models.const import ROUTE_DEFAULT_INTENT
# Parsed form of a route key string: "<application_name>[@<intent>]".
RouteKey = collections.namedtuple('RouteKey', 'application_name intent')
def make_route_key(application_name, intent=None):
    """Format a route key: "app@intent", or just "app" when the intent is
    absent or the default intent."""
    if not intent or intent == ROUTE_DEFAULT_INTENT:
        return application_name
    return u'{0}@{1}'.format(application_name, intent)
def parse_route_key(route_key):
    """Parse "app[@intent]" into a RouteKey.

    Only the first '@' splits; a missing intent part yields the default
    intent, while an explicitly empty one ("app@") is preserved as ''.
    """
    application_name, sep, intent = route_key.partition('@')
    if not sep:
        return RouteKey(application_name, ROUTE_DEFAULT_INTENT)
    return RouteKey(application_name, intent)
def try_to_extract_ezone(cluster_name, default=OVERALL):
    """Return the ezone whose "<ezone>-" prefix starts *cluster_name*,
    or *default* when no configured ezone matches."""
    matches = (
        ezone for ezone in settings.ROUTE_EZONE_LIST
        if cluster_name.startswith(u'{0}-'.format(ezone))
    )
    return next(matches, default)
|
140580
|
# Golden fixture: the parsed structure expected from a "show vrf detail"-style
# device command (keys: per-VRF address families, interfaces, RD, flags).
# NOTE(review): semantics inferred from the key names -- confirm against the
# parser under test.
expected_output = {
    "Mgmt-intf": {
        "address_family": {
            "ipv4 unicast": {
                "flags": "0x0",
                "table_id": "0x1",
                "vrf_label": {"allocation_mode": "per-prefix"},
            }
        },
        "cli_format": "New",
        "flags": "0x1808",
        "interface": {"GigabitEthernet1": {"vrf": "Mgmt-intf"}},
        "interfaces": ["GigabitEthernet1"],
        "support_af": "multiple address-families",
        "vrf_id": 1,
    },
    "VRF1": {
        "address_family": {
            "ipv4 unicast": {
                "flags": "0x0",
                "table_id": "0x2",
                "vrf_label": {
                    "allocation_mode": "per-prefix",
                    "distribution_protocol": "LDP",
                },
            }
        },
        "cli_format": "New",
        "flags": "0x180C",
        "interface": {
            "GigabitEthernet2.390": {"vrf": "VRF1"},
            "GigabitEthernet2.410": {"vrf": "VRF1"},
            "GigabitEthernet2.415": {"vrf": "VRF1"},
            "GigabitEthernet2.420": {"vrf": "VRF1"},
            "GigabitEthernet3.390": {"vrf": "VRF1"},
            "GigabitEthernet3.410": {"vrf": "VRF1"},
            "GigabitEthernet3.415": {"vrf": "VRF1"},
            "GigabitEthernet3.420": {"vrf": "VRF1"},
            "Loopback300": {"vrf": "VRF1"},
            "Tunnel1": {"vrf": "VRF1"},
            "Tunnel3": {"vrf": "VRF1"},
            "Tunnel4": {"vrf": "VRF1"},
            "Tunnel6": {"vrf": "VRF1"},
            "Tunnel8": {"vrf": "VRF1"},
        },
        "interfaces": [
            "Tunnel1",
            "Loopback300",
            "GigabitEthernet2.390",
            "GigabitEthernet2.410",
            "GigabitEthernet2.415",
            "GigabitEthernet2.420",
            "GigabitEthernet3.390",
            "GigabitEthernet3.410",
            "GigabitEthernet3.415",
            "GigabitEthernet3.420",
            "Tunnel3",
            "Tunnel4",
            "Tunnel6",
            "Tunnel8",
        ],
        "route_distinguisher": "65000:1",
        "support_af": "multiple address-families",
        "vrf_id": 2,
    },
}
|
140601
|
from .model import VLG
# Registry mapping architecture names (cfg.MODEL.ARCHITECTURE) to model classes.
ARCHITECTURES = {"VLG": VLG}
def build_model(cfg):
    """Instantiate the model class named by ``cfg.MODEL.ARCHITECTURE``."""
    architecture_cls = ARCHITECTURES[cfg.MODEL.ARCHITECTURE]
    return architecture_cls(cfg)
|
140640
|
import copy
import logging
from datetime import datetime
from http.client import IncompleteRead
from typing import Dict
import pika
from requests.exceptions import (ConnectionError as ReqConnectionError,
ReadTimeout, ChunkedEncodingError,
MissingSchema, InvalidSchema, InvalidURL)
from urllib3.exceptions import ProtocolError
from web3 import Web3
from src.configs.nodes.evm import EVMNodeConfig
from src.message_broker.rabbitmq import RabbitMQApi
from src.monitors.monitor import Monitor
from src.utils.constants.rabbitmq import (RAW_DATA_EXCHANGE,
EVM_NODE_RAW_DATA_ROUTING_KEY)
from src.utils.exceptions import (PANICException, NodeIsDownException,
DataReadingException, InvalidUrlException)
class EVMNodeMonitor(Monitor):
    """Monitor that polls an EVM node over HTTP JSON-RPC once per monitoring
    round and publishes the raw result (or the retrieval error) to the
    raw-data exchange on RabbitMQ.
    """
    def __init__(self, monitor_name: str, node_config: EVMNodeConfig,
                 logger: logging.Logger, monitor_period: int,
                 rabbitmq: RabbitMQApi) -> None:
        super().__init__(monitor_name, logger, monitor_period, rabbitmq)
        self._node_config = node_config
        # This interface performs RPC requests, therefore no connection needs
        # to be managed. We can just perform the requests immediately and catch
        # errors. DISCLAIMER: There might be an issue with open connections not
        # being closed.
        self._w3_interface = Web3(Web3.HTTPProvider(
            self.node_config.node_http_url, request_kwargs={'timeout': 2}))
    @property
    def node_config(self) -> EVMNodeConfig:
        """Configuration of the monitored EVM node."""
        return self._node_config
    @property
    def w3_interface(self) -> Web3:
        """Web3 HTTP-provider interface used to query the node."""
        return self._w3_interface
    def _display_data(self, data: Dict) -> str:
        """Human-readable one-liner of a successful monitoring round."""
        # This function assumes that the data has been obtained and processed
        # successfully by the node monitor
        return "current_height={}, syncing={}".format(
            data['current_height'], data['syncing'])
    def _get_data(self) -> Dict:
        """
        w3_interface.eth.syncing
        Returns either False if the node is not syncing or a dictionary
        showing sync status. We only care about the bool state of the node's
        syncing status.
        """
        return {
            'current_height': self.w3_interface.eth.block_number,
            'syncing': bool(self.w3_interface.eth.syncing)
        }
    def _process_error(self, error: PANICException) -> Dict:
        """Wrap a retrieval *error* together with node/monitor meta-data."""
        processed_data = {
            'error': {
                'meta_data': {
                    'monitor_name': self.monitor_name,
                    'node_name': self.node_config.node_name,
                    'node_id': self.node_config.node_id,
                    'node_parent_id': self.node_config.parent_id,
                    'time': datetime.now().timestamp()
                },
                'message': error.message,
                'code': error.code,
            }
        }
        return processed_data
    def _process_retrieved_data(self, data: Dict) -> Dict:
        """Wrap successfully retrieved *data* together with meta-data."""
        # Add some meta-data to the processed data
        processed_data = {
            'result': {
                'meta_data': {
                    'monitor_name': self.monitor_name,
                    'node_name': self.node_config.node_name,
                    'node_id': self.node_config.node_id,
                    'node_parent_id': self.node_config.parent_id,
                    'time': datetime.now().timestamp()
                },
                'data': copy.deepcopy(data),
            }
        }
        return processed_data
    def _send_data(self, data: Dict) -> None:
        """Publish *data* to the raw-data exchange (persistent delivery,
        mandatory routing)."""
        self.rabbitmq.basic_publish_confirm(
            exchange=RAW_DATA_EXCHANGE,
            routing_key=EVM_NODE_RAW_DATA_ROUTING_KEY, body=data,
            is_body_dict=True, properties=pika.BasicProperties(delivery_mode=2),
            mandatory=True)
        self.logger.debug("Sent data to '%s' exchange", RAW_DATA_EXCHANGE)
    def _monitor(self) -> None:
        """One monitoring round: retrieve, process, publish, heartbeat."""
        data_retrieval_exception = None
        data = None
        data_retrieval_failed = True
        try:
            data = self._get_data()
            data_retrieval_failed = False
        except (ReqConnectionError, ReadTimeout):
            # Node unreachable or too slow -> treat as node down.
            data_retrieval_exception = NodeIsDownException(
                self.node_config.node_name)
            self.logger.error("Error when retrieving data from %s",
                              self.node_config.node_http_url)
            self.logger.exception(data_retrieval_exception)
        except (IncompleteRead, ChunkedEncodingError, ProtocolError):
            # Transport-level read failure while the node was reachable.
            data_retrieval_exception = DataReadingException(
                self.monitor_name, self.node_config.node_name)
            self.logger.error("Error when retrieving data from %s",
                              self.node_config.node_http_url)
            self.logger.exception(data_retrieval_exception)
        except (InvalidURL, InvalidSchema, MissingSchema):
            data_retrieval_exception = InvalidUrlException(
                self.node_config.node_http_url)
            self.logger.error("Error when retrieving data from %s",
                              self.node_config.node_http_url)
            self.logger.exception(data_retrieval_exception)
        try:
            processed_data = self._process_data(data_retrieval_failed,
                                                [data_retrieval_exception],
                                                [data])
        except Exception as error:
            self.logger.error("Error when processing data obtained from %s",
                              self.node_config.node_http_url)
            self.logger.exception(error)
            # Do not send data if we experienced processing errors
            return
        self._send_data(processed_data)
        if not data_retrieval_failed:
            # Only output the gathered data if there was no error
            self.logger.info(self._display_data(
                processed_data['result']['data']))
        # Send a heartbeat only if the entire round was successful
        heartbeat = {
            'component_name': self.monitor_name,
            'is_alive': True,
            'timestamp': datetime.now().timestamp()
        }
        self._send_heartbeat(heartbeat)
|
140661
|
from html.parser import HTMLParser
from urllib import request
import os.path
import re
import json
import sys
class ImgListScraper( HTMLParser ):
    """HTML parser that extracts the image list embedded in an imgur album
    page's inline JavaScript.

    The album page contains a ``new ImgurAlbum(...)`` call whose ``images:``
    field holds a JSON object listing every image's hash and extension.
    After a successful ``feed()`` the parsed object is in ``self.data`` and
    ``img_urls()`` yields the direct image URLs.
    """
    IMG_URL = "http://i.imgur.com/{hash}{ext}"
    def __init__( self, *args, **kwargs ):
        super().__init__( *args, **kwargs )
        self.in_javascript = False  # inside a <script type="text/javascript">
        self.data = None            # parsed "images" JSON, once found
    def handle_starttag( self, tag, attrs ):
        attrs = dict( attrs )
        # Bug fix: use .get() -- a bare <script> tag without a "type"
        # attribute used to raise KeyError here.
        if tag == "script" and attrs.get( 'type' ) == "text/javascript":
            self.in_javascript = True
    def handle_data( self, data ):
        """Scan script text for the ImgurAlbum images JSON and parse it."""
        if self.in_javascript:
            img_block = False
            for line in data.splitlines():
                if line.find("ImgurAlbum") > -1:
                    img_block = True
                elif img_block and line.strip().startswith("images:"):
                    # Strip the "images: " prefix and the trailing comma.
                    data = line.strip()[ len( "images: " ) : -1 ]
                    self.data = json.loads( data )
                    img_block = False
            self.in_javascript = False
    def img_urls( self ):
        """Yield the direct URL of every image in the parsed album.

        Requires a successful ``feed()`` first (``self.data`` must be set).
        """
        for image in self.data['items']:
            yield self.IMG_URL.format( **{
                'hash': image['hash'],
                'ext': image['ext']
            })
def download_image( url, folder ):
    """Download a single image from *url* into *folder*.

    The file name is taken from the last path segment of the URL.
    Bug fix: both the HTTP response and the output file are now closed even
    when an error occurs -- the original only closed the response on the
    happy path and leaked it if the write raised.
    """
    path = os.path.join( folder, url.split("/")[-1] )
    with request.urlopen( url ) as res, open( path, 'wb' ) as f:
        f.write( res.read() )
def download_album( album_url, folder ):
    """Scrape an imgur album page and download every image into *folder*.

    Args:
        album_url: URL of the imgur album page.
        folder: existing destination directory for the images.
    """
    print( "Scraping album..." )
    scraper = ImgListScraper()
    # Close the response deterministically (the original leaked it).
    with request.urlopen( album_url ) as res:
        html = res.read().decode( 'utf8' )
    scraper.feed( html )
    total = scraper.data['count']
    # start=1 so progress reads "1 of N" .. "N of N" instead of the
    # original off-by-one "0 of N" .. "N-1 of N".
    for ( pos, img_url ) in enumerate( scraper.img_urls(), start = 1 ):
        print( "downloading {img_url} ({pos} of {total})".format(
            img_url = img_url,
            pos = pos,
            total = total ) )
        download_image( img_url, folder )
if __name__ == '__main__':
    # Expect exactly two positional arguments: album URL and target folder.
    if len( sys.argv ) >= 3:
        download_album( sys.argv[1], sys.argv[2] )
    else:
        print( "Usage: {script} ALBUM_URL FOLDER".format( script = sys.argv[0]
                                                          ) )
|
140693
|
from abc import ABCMeta, abstractmethod
from tgt_grease.core import Logging, GreaseContainer
from datetime import datetime
import sys
import os
import traceback
class Command(object):
    """Abstract class for commands in GREASE
    Attributes:
        __metaclass__ (ABCMeta): Metadata class object
        purpose (str): The purpose of the command
        help (str): Help string for the command line
        __author__ (str): Authorship string
        __version__ (str): Command Version
        os_needed (str): If a specific OS is needed then set this
        ioc (GreaseContainer): IOC container for access to system resources
        variable_storage (pymongo.collection): collection object for command
    """
    ###
    # Command Metadata information
    ###
    purpose = "Default"
    help = """
    No Help Information Provided
    """
    __author__ = "<NAME>"
    __version__ = "1.0.0"
    os_needed = None
    __metaclass__ = ABCMeta
    def __init__(self, Logger=None):
        # Reuse the provided logger's container when given a Logging
        # instance, otherwise create a fresh IOC container.
        if Logging and isinstance(Logger, Logging):
            self.ioc = GreaseContainer(Logger)
        else:
            self.ioc = GreaseContainer()
        # Per-command MongoDB collection, named after the concrete class.
        self.variable_storage = self.ioc.getMongo()\
            .Client()\
            .get_database(self.ioc.getConfig().get('Connectivity', 'MongoDB').get('db', 'grease'))\
            .get_collection(self.__class__.__name__)
        self.start_time = datetime.utcnow()
        # Execution telemetry: execVal = execute() ran without exception,
        # retVal = boolean result of execute(), data = extra telemetry.
        self.exec_data = {'execVal': False, 'retVal': False, 'data': {}}
        self.__failures = 0
    @property
    def failures(self):
        # Failure counter for this command instance.
        return self.__failures
    @failures.setter
    def failures(self, val):
        self.__failures = val
    def getExecVal(self):
        """Get the execution attempt success
        Returns:
            bool: If the command executed without exception
        """
        return self.exec_data.get('execVal', False)
    def getRetVal(self):
        """Get the execution boolean return state
        Returns:
            bool: the boolean return value of execute
        """
        return self.exec_data.get('retVal', False)
    def getData(self):
        """Get any data the execute method wanted to put into telemetry
        Returns:
            dict: The Key/Value pairs from the execute method execution
        """
        return self.exec_data.get('data', {})
    def setData(self, Key, Data):
        """Put Data into the data object to be inserted into telemetry
        Args:
            Key (str): Key for the data to be stored
            Data (object): JSON-able object to store
        Returns:
            None: Void Method to put data
        """
        self.exec_data['data'][Key] = Data
    def __del__(self):
        # close mongo connection
        self.ioc.getMongo().Close()
    def safe_execute(self, context=None):
        """Attempt execution and prevent MOST exceptions
        Args:
            context (dict): context for the command to use
        Returns:
            None: Void Method to attempt exceptions
        """
        if not context:
            context = {}
        try:
            try:
                # Mark the attempt, then record execute()'s boolean outcome.
                self.exec_data['execVal'] = True
                self.exec_data['retVal'] = bool(self.execute(context))
            except BaseException:
                self.exec_data['execVal'] = False
                # Extract file/line of the failure for structured logging.
                exc_type, exc_obj, exc_tb = sys.exc_info()
                tb = traceback.format_exception(exc_type, exc_obj, exc_tb)
                self.ioc.getLogger().error(
                    "Failed to execute [{0}] execute got exception!".format(self.__class__.__name__),
                    additional={
                        'file': os.path.split(str(str(tb[2]).split('"')[1]))[1],
                        'type': str(exc_type),
                        'line': str(str(tb[2]).split(",")[1]).split(' ')[2]
                    }
                )
            except:
                # NOTE(review): unreachable -- the BaseException clause above
                # already catches everything this bare except could.
                self.ioc.getLogger().error(
                    "Failed to execute [{0}] execute got exception!".format(self.__class__.__name__),
                )
        except:
            # Guard against failures raised by the error handling itself
            # (e.g. traceback introspection or the logger).
            self.ioc.getLogger().error(
                "Failed to execute [{0}] execute major exception".format(self.__class__.__name__),
            )
    @abstractmethod
    def execute(self, context):
        """Base Execute Method
        This method should *always* be overridden in child classes. This is the code that will run when your command
        is called. If this method is not implemented then the class will fail loading.
        Args:
            context (dict): context for the command to use
        Returns:
            bool: Command Success
        """
        pass
    def prevent_retries(self):
        """
        Sets a flag in the command's return data that will signal to stop retrying, even before the default
        retry limit is met.
        """
        self.setData("no_retry", True)
|
140721
|
import torch
import torch.onnx
from models.slim import Slim
# Dummy input matching the model's expected shape: a batch of one
# 3-channel 160x160 image.
x = torch.randn(1, 3, 160, 160)
model = Slim()
model.load_state_dict(torch.load("../pretrained_weights/slim_160_latest.pth", map_location="cpu"))
model.eval()
# Trace with the dummy input and export to ONNX alongside the weights.
torch.onnx.export(model, x, "../pretrained_weights/slim_160_latest.onnx", input_names=["input1"], output_names=['output1'])
|
140742
|
import sys
from set_up import Setup
from estimator import CommonEstimator
import json
import h5py
#from utils import get_memory_usage
import numpy as np
SEED = 12939 #from random.org
np.random.seed(SEED)
# Bug fix: the usage messages omitted the trainingSetSize argument even
# though sys.argv[3] is consumed below.
print('python main.py fpType fpSize trainingSetSize estimators.json dataset')
fpType = sys.argv[1]
fpSize = int(sys.argv[2])
trainingSetSize = int(sys.argv[3])
json_name = sys.argv[4]
dataset = sys.argv[5]
print('Running:')
print(f'python main.py {fpType} {fpSize} {trainingSetSize} {json_name} {dataset}')
# Close the config file deterministically (was json.load(open(...))).
with open(json_name, 'r') as json_file:
    estimators = json.load(json_file)['estimators']
if __name__=='__main__':
    #setup the data:
    setup = Setup(fpType, dataset, verbose=True)
    try:
        setup.write_fingerprints()
    except Exception:
        # Best-effort: the fingerprint file already exists from a prior run.
        # (Was a bare "except:", which also swallowed KeyboardInterrupt.)
        print('Already written fpfile')
    setup.load_fingerprints()
    setup.load_scores()
    feature_matrix = setup.fold_to_size(fpSize)
    #evaluation stuff goes here:
    for estimator in estimators:
        for repeat in range(5):
            # Fresh random train/test split for every repeat.
            setup.random_split(trainingSetSize)
            common_estimator = CommonEstimator(estimator, cutoff=0.3, verbose=setup.verbose)
            print(setup.train_idx.shape)
            print(setup.scores.shape)
            common_estimator.fit(feature_matrix[setup.train_idx], setup.scores[setup.train_idx])
            pred = common_estimator.chunked_predict(feature_matrix[setup.test_idx])
            setup.write_results(pred, fpSize, trainingSetSize, estimator['name'], repeat)
|
140766
|
from six.moves.urllib.parse import quote_plus
from . import get_cookies
def setup_session(openid, password, username=None,
                  check_url=None,
                  session=None, verify=False):
    """
    A special call to get_cookies.setup_session that is tailored for
    ESGF credentials.
    username should only be necessary for a CEDA openid.
    """
    esgf_session = get_cookies.setup_session(
        _uri(openid),
        username=username,
        password=password,
        check_url=check_url,
        session=session,
        verify=verify,
    )
    # Connections can be kept alive on the ESGF:
    esgf_session.headers.update([('Connection', 'keep-alive')])
    return esgf_session
def _uri(openid):
    """
    Build a callable that maps a destination URL onto the corresponding
    ESGF OpenID authentication URL.
    This function might be sensitive to a
    future evolution of the ESGF security.
    """
    def generate_url(dest_url):
        dest_node = _get_node(dest_url)
        path = ('/esg-orp/j_spring_openid_security_check.htm'
                '?openid_identifier=')
        try:
            url = dest_node + path + quote_plus(openid)
        except TypeError:
            raise UserWarning('OPENID was not set. '
                              'ESGF connection cannot succeed.')
        # CEDA openids are signalled with a [url, None] pair instead of a
        # bare url string.
        if _get_node(openid) == 'https://ceda.ac.uk':
            return [url, None]
        return url
    return generate_url
def _get_node(url):
return '/'.join(url.split('/')[:3]).replace('http:', 'https:')
|
140812
|
import json
from telethon import events, Button
from asyncio import exceptions
from .. import jdbot, chat_id, BOT_SET_JSON_FILE_USER, BOT_SET, ch_name
from .utils import split_list, logger, press_event
@jdbot.on(events.NewMessage(from_users=chat_id, pattern='^/set$'))
async def bot_set(event):
    """Interactive /set command.
    Shows the current bot settings from BOT_SET_JSON_FILE_USER as inline
    buttons, lets the user pick one, prompts for a new value, asks for
    confirmation, then writes the file back. Changes take effect after a
    restart. Times out after 90s of inactivity.
    """
    SENDER = event.sender_id
    try:
        msg = await jdbot.send_message(chat_id, '请稍后,正在查询')
        with open(BOT_SET_JSON_FILE_USER, 'r', encoding='utf-8') as f:
            myset = json.load(f)
        info = '您目前设置如下:\n'
        # List every top-level setting except the command-alias section,
        # which is handled by /setname.
        for i in myset:
            if '命令别名' in i:
                continue
            else:
                info = info + f'\t\t- {i}-->{myset[i]} \n'
        info = info + '请点击您要设置的项目,选择后,输入要设置的值,重启生效,垃圾话以 | 进行区隔,黑名单以空格或逗号或顿号区隔'
        # One inline button per non-dict setting plus a cancel button,
        # laid out three per row.
        btn = [Button.inline(i, i) for i in myset if not isinstance(myset[i],dict)]
        btn.append(Button.inline('取消', data='cancel'))
        btn = split_list(btn, 3)
        async with jdbot.conversation(SENDER, timeout=90) as conv:
            msg = await jdbot.edit_message(msg, info, buttons=btn, link_preview=False)
            # Wait for the user's button press; payload is the setting key.
            convdata = await conv.wait_event(press_event(SENDER))
            res = bytes.decode(convdata.data)
            if res == 'cancel':
                msg = await jdbot.edit_message(msg, '对话已取消')
                conv.cancel()
            else:
                await jdbot.delete_messages(chat_id, msg)
                msg = await conv.send_message(f'请输入您要修改的{res}\n如果需要取消,请输入`cancel`或`取消`\n如需自定义或快速修改,请直接修改config/botset.json\n如果为True或False首字符大写\n```{myset[res]}```')
                data = await conv.get_response()
                if data.raw_text == 'cancel' or data.raw_text == '取消':
                    await jdbot.delete_messages(chat_id,msg)
                    await jdbot.send_message(chat_id, '对话已取消')
                    conv.cancel()
                else:
                    # Confirmation step before persisting the new value.
                    markup = [Button.inline('确认',data='yes'),Button.inline('取消',data='cancel')]
                    await jdbot.delete_messages(chat_id,msg)
                    msg = await jdbot.send_message(chat_id, f'是否确认将 ** {res} ** 设置为 **{data.raw_text}**', buttons=markup)
                    convdata2 = await conv.wait_event(press_event(SENDER))
                    res2 = bytes.decode(convdata2.data)
                    if res2 == 'yes':
                        myset[res] = data.raw_text
                        with open(BOT_SET_JSON_FILE_USER, 'w+', encoding='utf-8') as f:
                            json.dump(myset, f)
                        await jdbot.delete_messages(chat_id, msg)
                        msg = await jdbot.send_message(chat_id, '已完成修改,重启后生效')
                    else:
                        conv.cancel()
                        await jdbot.delete_messages(chat_id, msg)
                        msg = await jdbot.send_message(chat_id, '对话已取消')
                        return
    except exceptions.TimeoutError:
        msg = await jdbot.edit_message(msg, '选择已超时,对话已停止')
    except Exception as e:
        msg = await jdbot.edit_message(msg, f'something wrong,I\'m sorry\n{str(e)}')
        logger.error(f'something wrong,I\'m sorry\n{str(e)}')
@jdbot.on(events.NewMessage(from_users=chat_id, pattern='^/setname$'))
async def bot_setname(event):
    """Interactive /setname command.
    Same flow as bot_set, but edits only the '命令别名' (command alias)
    section of BOT_SET_JSON_FILE_USER: pick an alias via inline buttons,
    enter the new pattern, confirm, write back. Takes effect on restart.
    """
    SENDER = event.sender_id
    try:
        msg = await jdbot.send_message(chat_id, '请稍后,正在查询')
        with open(BOT_SET_JSON_FILE_USER, 'r', encoding='utf-8') as f:
            myset = json.load(f)
        info = '您目前命令别名设置如下:\n'
        for i in myset['命令别名']:
            info = info + f'\t\t- {i}-->{myset["命令别名"][i]} \n'
        info = info + '请点击您要设置的项目,选择后,输入要设置的值,重启生效\n**请注意尽量不要重复,否则可能发生未知错误**'
        # One inline button per alias plus a cancel button, three per row.
        btn = [Button.inline(i, i) for i in myset['命令别名']]
        btn.append(Button.inline('取消', data='cancel'))
        btn = split_list(btn, 3)
        async with jdbot.conversation(SENDER, timeout=90) as conv:
            msg = await jdbot.edit_message(msg, info, buttons=btn, link_preview=False)
            # Wait for the user's button press; payload is the alias key.
            convdata = await conv.wait_event(press_event(SENDER))
            res = bytes.decode(convdata.data)
            if res == 'cancel':
                msg = await jdbot.edit_message(msg, '对话已取消')
                conv.cancel()
            else:
                await jdbot.delete_messages(chat_id, msg)
                msg = await conv.send_message(f'请输入您要修改的{res}\n如果需要取消,请输入`cancel`或`取消`\n如需自定义或快速修改,请直接修改config/botset.json\n如果为True或False首字符大写\n```{myset["命令别名"][res]}```')
                data = await conv.get_response()
                if data.raw_text == 'cancel' or data.raw_text == '取消':
                    await jdbot.delete_messages(chat_id,msg)
                    msg = await jdbot.send_message(chat_id, '对话已取消')
                    conv.cancel()
                    return
                else:
                    # Confirmation step before persisting the new alias.
                    markup = [Button.inline('确认',data='yes'),Button.inline('取消',data='cancel')]
                    await jdbot.delete_messages(chat_id,msg)
                    msg = await jdbot.send_message(chat_id, f'是否确认将 ** {res} ** 设置为 **{data.raw_text}**', buttons=markup)
                    convdata2 = await conv.wait_event(press_event(SENDER))
                    res2 = bytes.decode(convdata2.data)
                    if res2 == 'yes':
                        myset['命令别名'][res] = data.raw_text
                        with open(BOT_SET_JSON_FILE_USER, 'w+', encoding='utf-8') as f:
                            json.dump(myset, f)
                        await jdbot.delete_messages(chat_id, msg)
                        msg = await jdbot.send_message(chat_id, '已完成修改,重启后生效')
                    else:
                        conv.cancel()
                        await jdbot.delete_messages(chat_id, msg)
                        msg = await jdbot.send_message(chat_id, '对话已取消')
                        return
    except exceptions.TimeoutError:
        msg = await jdbot.edit_message(msg, '选择已超时,对话已停止')
    except Exception as e:
        msg = await jdbot.edit_message(msg, f'something wrong,I\'m sorry\n{str(e)}')
        logger.error(f'something wrong,I\'m sorry\n{str(e)}')
# When command aliasing is enabled, also register both handlers under the
# user-configured alias patterns from BOT_SET.
if ch_name:
    jdbot.add_event_handler(bot_set, events.NewMessage(
        from_users=chat_id, pattern=BOT_SET['命令别名']['set']))
    jdbot.add_event_handler(bot_setname, events.NewMessage(
        from_users=chat_id, pattern=BOT_SET['命令别名']['setname']))
|
140821
|
import json
import numpy as np
import os
from photogrammetry_importer.types.camera import Camera
from photogrammetry_importer.types.point import Point
from photogrammetry_importer.file_handlers.utility import (
check_radial_distortion,
)
from photogrammetry_importer.blender_utility.logging_utility import log_report
class MeshroomFileHandler:
    """Class to read and write :code:`Meshroom` files and workspaces."""
    # Note: *.SfM files are actually just *.JSON files.
    @staticmethod
    def _get_element(data_list, id_string, query_id):
        """Return the first dict in *data_list* whose *id_string* field
        equals *query_id* (asserts that a match exists)."""
        result = None
        for ele in data_list:
            if int(ele[id_string]) == query_id:
                result = ele
                break
        assert result is not None
        return result
    @classmethod
    def _parse_cameras_from_json_data(
        cls,
        json_data,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        op,
    ):
        """Build Camera objects from the "views", "intrinsics" and "poses"
        sections of a Meshroom SfM/JSON document.
        Returns a (cams, image_index_to_camera_index) pair; both are empty
        when the required sections are missing."""
        cams = []
        image_index_to_camera_index = {}
        is_valid_file = (
            "views" in json_data
            and "intrinsics" in json_data
            and "poses" in json_data
        )
        if not is_valid_file:
            log_report(
                "ERROR",
                "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain the"
                + " SfM reconstruction results: view, intrinsics and poses.",
                op,
            )
            return cams, image_index_to_camera_index
        views = json_data["views"]  # is a list of dicts (view)
        intrinsics = json_data["intrinsics"]  # is a list of dicts (intrinsic)
        extrinsics = json_data["poses"]  # is a list of dicts (extrinsic)
        # IMPORTANT:
        # Views contain the number of input images
        # Extrinsics may contain only a subset of views!
        # (Not all views are necessarily contained in the reconstruction)
        for rec_index, extrinsic in enumerate(extrinsics):
            camera = Camera()
            view_index = int(extrinsic["poseId"])
            image_index_to_camera_index[view_index] = rec_index
            corresponding_view = cls._get_element(views, "poseId", view_index)
            camera.image_fp_type = image_fp_type
            camera.image_dp = image_dp
            camera._absolute_fp = str(corresponding_view["path"])
            camera._relative_fp = os.path.basename(
                str(corresponding_view["path"])
            )
            # Meshroom stores undistorted images as <poseId>.exr.
            camera._undistorted_relative_fp = str(extrinsic["poseId"]) + ".exr"
            if image_dp is None:
                camera._undistorted_absolute_fp = None
            else:
                camera._undistorted_absolute_fp = os.path.join(
                    image_dp, camera._undistorted_relative_fp
                )
            camera.width = int(corresponding_view["width"])
            camera.height = int(corresponding_view["height"])
            id_intrinsic = int(corresponding_view["intrinsicId"])
            intrinsic_params = cls._get_element(
                intrinsics, "intrinsicId", id_intrinsic
            )
            focal_length = float(intrinsic_params["pxFocalLength"])
            cx = float(intrinsic_params["principalPoint"][0])
            cy = float(intrinsic_params["principalPoint"][1])
            if (
                "distortionParams" in intrinsic_params
                and len(intrinsic_params["distortionParams"]) > 0
            ):
                # TODO proper handling of distortion parameters
                radial_distortion = float(
                    intrinsic_params["distortionParams"][0]
                )
            else:
                radial_distortion = 0.0
            if not suppress_distortion_warnings:
                check_radial_distortion(
                    radial_distortion, camera._relative_fp, op
                )
            camera_calibration_matrix = np.array(
                [[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]]
            )
            camera.set_calibration(
                camera_calibration_matrix, radial_distortion
            )
            extrinsic_params = extrinsic["pose"]["transform"]
            cam_rotation_list = extrinsic_params["rotation"]
            # Meshroom stores the rotation row-major; transpose after reshape.
            camera.set_rotation_with_rotation_mat(
                np.array(cam_rotation_list, dtype=float).reshape(3, 3).T
            )
            camera.set_camera_center_after_rotation(
                np.array(extrinsic_params["center"], dtype=float)
            )
            camera.view_index = view_index
            cams.append(camera)
        return cams, image_index_to_camera_index
    @staticmethod
    def _parse_points_from_json_data(
        json_data, image_index_to_camera_index, op
    ):
        """Create Point objects from the "structure" section of a Meshroom
        SfM/JSON document; returns [] when the section is missing."""
        points = []
        is_valid_file = "structure" in json_data
        if not is_valid_file:
            log_report(
                "ERROR",
                "FILE FORMAT ERROR: Incorrect SfM/JSON file. Must contain "
                + " the SfM reconstruction results: structure.",
                op,
            )
            return points
        structure = json_data["structure"]
        for json_point in structure:
            custom_point = Point(
                coord=np.array(json_point["X"], dtype=float),
                color=np.array(json_point["color"], dtype=int),
                id=int(json_point["landmarkId"]),
                scalars=[],
            )
            points.append(custom_point)
        return points
    @classmethod
    def parse_meshroom_sfm_file(
        cls,
        sfm_ifp,
        image_idp,
        image_fp_type,
        suppress_distortion_warnings,
        op=None,
    ):
        """Parse a :code:`Meshroom` (:code:`.sfm` or :code:`.json`) file.
        Parse different file formats created with the
        :code:`StructureFromMotion` / :code:`ConvertSfMFormat` node in
        :code:`Meshroom`.
        """
        log_report("INFO", "parse_meshroom_sfm_file: ...", op)
        log_report("INFO", "sfm_ifp: " + sfm_ifp, op)
        input_file = open(sfm_ifp, "r")  # NOTE(review): handle is never closed
        json_data = json.load(input_file)
        (
            cams,
            image_index_to_camera_index,
        ) = cls._parse_cameras_from_json_data(
            json_data,
            image_idp,
            image_fp_type,
            suppress_distortion_warnings,
            op,
        )
        if "structure" in json_data:
            points = cls._parse_points_from_json_data(
                json_data, image_index_to_camera_index, op
            )
        else:
            points = []
        log_report("INFO", "parse_meshroom_sfm_file: Done", op)
        return cams, points
    @staticmethod
    def _get_latest_node(json_graph, node_type):
        """Return the highest-numbered node of *node_type* in the graph,
        or None when no such node exists."""
        i = 0
        while node_type + "_" + str(i + 1) in json_graph:
            i = i + 1
        if i == 0:
            return None
        else:
            return json_graph[node_type + "_" + str(i)]
    @classmethod
    def _get_node(cls, json_graph, node_type, node_number, op):
        """Return node "<node_type>_<node_number>"; node_number == -1 means
        the latest node of that type. Asserts on an invalid combination."""
        if node_number == -1:
            return cls._get_latest_node(json_graph, node_type)
        else:
            node_key = node_type + "_" + str(node_number)
            if node_key in json_graph:
                return json_graph[node_key]
            else:
                log_report(
                    "ERROR",
                    "Invalid combination of node type (i.e. "
                    + node_type
                    + ") "
                    + "and node number (i.e. "
                    + str(node_number)
                    + ") provided",
                    op,
                )
                assert False
    @staticmethod
    def _get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list):
        """Return the first existing file among *fn_or_fn_list* inside the
        node's cache folder (cache_dp/nodeType/uid), or None."""
        if isinstance(fn_or_fn_list, str):
            fn_list = [fn_or_fn_list]
        else:
            fn_list = fn_or_fn_list
        if data_node is None:
            return None
        node_type = data_node["nodeType"]
        uid_0 = data_node["uids"]["0"]
        data_fp = None
        for fn in fn_list:
            possible_data_fp = os.path.join(cache_dp, node_type, uid_0, fn)
            if os.path.isfile(possible_data_fp):
                data_fp = possible_data_fp
                break
        return data_fp
    @classmethod
    def _get_node_data_fp(
        cls, cache_dp, json_graph, node_type, node_number, fn_or_fn_list, op
    ):
        """Resolve a node, then locate one of its data files (or None)."""
        data_node = cls._get_node(json_graph, node_type, node_number, op)
        data_fp = cls._get_data_fp_of_node(cache_dp, data_node, fn_or_fn_list)
        return data_fp
    @staticmethod
    def _get_data_dp_of_node(cache_dp, data_node):
        """Return the node's cache directory (cache_dp/nodeType/uid),
        or None when the node is missing."""
        if data_node is None:
            return None
        node_type = data_node["nodeType"]
        uid_0 = data_node["uids"]["0"]
        return os.path.join(cache_dp, node_type, uid_0)
    @classmethod
    def _get_node_data_dp(
        cls, cache_dp, json_graph, node_type, node_number, op
    ):
        """Resolve a node, then return its cache directory (or None)."""
        data_node = cls._get_node(json_graph, node_type, node_number, op)
        data_dp = cls._get_data_dp_of_node(cache_dp, data_node)
        return data_dp
    @classmethod
    def _get_sfm_fp(
        cls, sfm_node_type, cache_dp, json_graph, sfm_node_number, op
    ):
        """Locate the SfM result file for the requested node type/number.
        "AUTOMATIC" prefers ConvertSfMFormat output, falling back to
        StructureFromMotion."""
        if sfm_node_type == "ConvertSfMFormatNode":
            sfm_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "ConvertSfMFormat",
                sfm_node_number,
                ["sfm.sfm", "sfm.json"],
                op,
            )
        elif sfm_node_type == "StructureFromMotionNode":
            sfm_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "StructureFromMotion",
                sfm_node_number,
                "cameras.sfm",
                op,
            )
        elif sfm_node_type == "AUTOMATIC":
            sfm_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "ConvertSfMFormat",
                sfm_node_number,
                ["sfm.sfm", "sfm.json"],
                op,
            )
            if sfm_fp is None:
                sfm_fp = cls._get_node_data_fp(
                    cache_dp,
                    json_graph,
                    "StructureFromMotion",
                    sfm_node_number,
                    "cameras.sfm",
                    op,
                )
        else:
            log_report("ERROR", "Selected SfM node is not supported", op)
            assert False
        return sfm_fp
    @classmethod
    def _get_mesh_fp(
        cls, mesh_node_type, cache_dp, json_graph, mesh_node_number, op
    ):
        """Locate the mesh file for the requested node type/number.
        "AUTOMATIC" tries Texturing, then MeshFiltering, then Meshing."""
        if mesh_node_type == "Texturing":
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "Texturing",
                mesh_node_number,
                "texturedMesh.obj",
                op,
            )
        elif mesh_node_type == "MeshFiltering":
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "MeshFiltering",
                mesh_node_number,
                "mesh.obj",
                op,
            )
        elif mesh_node_type == "Meshing":
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "Meshing",
                mesh_node_number,
                "mesh.obj",
                op,
            )
        elif mesh_node_type == "AUTOMATIC":
            mesh_fp = cls._get_node_data_fp(
                cache_dp,
                json_graph,
                "Texturing",
                mesh_node_number,
                "texturedMesh.obj",
                op,
            )
            if mesh_fp is None:
                mesh_fp = cls._get_node_data_fp(
                    cache_dp,
                    json_graph,
                    "MeshFiltering",
                    mesh_node_number,
                    "mesh.obj",
                    op,
                )
            if mesh_fp is None:
                mesh_fp = cls._get_node_data_fp(
                    cache_dp,
                    json_graph,
                    "Meshing",
                    mesh_node_number,
                    "mesh.obj",
                    op,
                )
        else:
            log_report("ERROR", "Select Mesh node is not supported!", op)
            assert False
        return mesh_fp
    @classmethod
    def _get_image_dp(cls, cache_dp, json_graph, prepare_node_number, op):
        """Return the PrepareDenseScene node's data directory (or None)."""
        prepare_dp = cls._get_node_data_dp(
            cache_dp,
            json_graph,
            "PrepareDenseScene",
            prepare_node_number,
            op,
        )
        return prepare_dp
    @classmethod
    def parse_meshrom_mg_file(
        cls,
        mg_fp,
        sfm_node_type,
        sfm_node_number,
        mesh_node_type,
        mesh_node_number,
        prepare_node_number,
        op=None,
    ):
        """Parse a :code:`Meshroom` project file (:code:`.mg`)."""
        # The MeshroomCache directory lives next to the project file.
        cache_dp = os.path.join(os.path.dirname(mg_fp), "MeshroomCache")
        json_data = json.load(open(mg_fp, "r"))  # NOTE(review): handle not closed
        json_graph = json_data["graph"]
        sfm_fp = cls._get_sfm_fp(
            sfm_node_type, cache_dp, json_graph, sfm_node_number, op
        )
        mesh_fp = cls._get_mesh_fp(
            mesh_node_type, cache_dp, json_graph, mesh_node_number, op
        )
        image_dp = cls._get_image_dp(
            cache_dp, json_graph, prepare_node_number, op
        )
        if sfm_fp is not None:
            log_report("INFO", "Found the following sfm file: " + sfm_fp, op)
        else:
            log_report(
                "INFO",
                "Request target SfM result does not exist in this meshroom"
                " project.",
                op,
            )
        if mesh_fp is not None:
            log_report("INFO", "Found the following mesh file: " + mesh_fp, op)
        else:
            log_report(
                "INFO",
                "Request target mesh does not exist in this meshroom project.",
                op,
            )
        return sfm_fp, mesh_fp, image_dp
    @classmethod
    def parse_meshroom_file(
        cls,
        meshroom_ifp,
        use_workspace_images,
        image_dp,
        image_fp_type,
        suppress_distortion_warnings,
        sfm_node_type,
        sfm_node_number,
        mesh_node_type,
        mesh_node_number,
        prepare_node_number,
        op=None,
    ):
        """Parse a :code:`Meshroom` file.
        Supported file formats are :code:`.mg`, :code:`.sfm` or :code:`.json`.
        """
        log_report("INFO", "parse_meshroom_file: ...", op)
        log_report("INFO", "meshroom_ifp: " + meshroom_ifp, op)
        ext = os.path.splitext(meshroom_ifp)[1].lower()
        if ext == ".mg":
            # For a project file, first resolve the actual SfM/mesh/image
            # artifacts from the MeshroomCache; meshroom_ifp is rebound to
            # the resolved SfM file.
            (
                meshroom_ifp,
                mesh_fp,
                image_idp_workspace,
            ) = cls.parse_meshrom_mg_file(
                meshroom_ifp,
                sfm_node_type,
                sfm_node_number,
                mesh_node_type,
                mesh_node_number,
                prepare_node_number,
                op,
            )
            if (
                use_workspace_images
                and image_idp_workspace is not None
                and os.path.isdir(image_idp_workspace)
            ):
                image_dp = image_idp_workspace
                log_report("INFO", "Using image directory in workspace.", op)
        else:
            assert ext == ".json" or ext == ".sfm"
            mesh_fp = None
        if meshroom_ifp is not None:
            cams, points = cls.parse_meshroom_sfm_file(
                meshroom_ifp,
                image_dp,
                image_fp_type,
                suppress_distortion_warnings,
                op,
            )
        else:
            log_report(
                "WARNING",
                "Meshroom project does not contain cameras or points. Have"
                " you saved the project (i.e. the *.mg file)?",
                op,
            )
            cams = []
            points = []
        log_report("INFO", "parse_meshroom_file: Done", op)
        return cams, points, mesh_fp, image_dp
|
140826
|
from pymtl3 import *
class RegisterFile( Component ):
    """Parameterized register file (PyMTL3).
    nregs registers of the given data Type, with rd_ports combinational
    read ports and wr_ports sequential (update_ff) write ports. When
    const_zero is True, writes to address 0 are ignored, so register 0
    keeps its value.
    """
    def construct( s, Type, nregs=32, rd_ports=1, wr_ports=1,
                   const_zero=False ):
        # Address width: at least 1 bit, even for a single register.
        addr_type = mk_bits( max( 1, clog2( nregs ) ) )
        s.raddr = [ InPort( addr_type ) for i in range( rd_ports ) ]
        s.rdata = [ OutPort( Type ) for i in range( rd_ports ) ]
        s.waddr = [ InPort( addr_type ) for i in range( wr_ports ) ]
        s.wdata = [ InPort( Type ) for i in range( wr_ports ) ]
        s.wen = [ InPort( Bits1 ) for i in range( wr_ports ) ]
        s.regs = [ Wire( Type ) for i in range(nregs) ]
        # Combinational read: rdata continuously reflects regs[raddr].
        @update
        def up_rf_read():
            for i in range( rd_ports ):
                s.rdata[i] @= s.regs[ s.raddr[i] ]
        if const_zero:
            # Sequential write that drops writes targeting register 0.
            @update_ff
            def up_rf_write_constzero():
                for i in range( wr_ports ):
                    if s.wen[i] & (s.waddr[i] != 0):
                        s.regs[ s.waddr[i] ] <<= s.wdata[i]
        else:
            @update_ff
            def up_rf_write():
                for i in range( wr_ports ):
                    if s.wen[i]:
                        s.regs[ s.waddr[i] ] <<= s.wdata[i]
|
140832
|
from typing import List, Dict
import numpy as np
import torch
from tqdm import tqdm
from utils.fov_expansion import Expander
from inversion.video.video_config import VideoConfig
from utils.common import tensor2im, get_identity_transform
def postprocess_and_smooth_inversions(results: Dict, net, opts: VideoConfig):
    """Smooth per-frame inversion latents and landmark transforms, then
    re-generate the video frames with the FOV expander.
    Args:
        results: dict holding "result_latents" (frame -> latent) and the
            "landmarks_transforms" list from the inversion step.
        net: wrapper exposing the generator as ``net.decoder``.
        opts: video configuration (expansion amounts, transforms path).
    Returns:
        List of smoothed frames as numpy arrays.
    """
    result_latents = np.array(list(results["result_latents"].values()))
    # average fine layers
    # Layers 9+ are replaced by their mean across all frames -- presumably
    # to stabilise fine detail over time (NOTE(review): confirm intent).
    result_latents[:, 9:, :] = result_latents[:, 9:, :].mean(axis=0)
    # smooth latents and landmarks transforms
    smoothed_latents, smoothed_transforms = smooth_latents_and_transforms(result_latents,
                                                                          results["landmarks_transforms"],
                                                                          opts=opts)
    # generate the smoothed video frames
    result_images_smoothed = []
    expander = Expander(G=net.decoder)
    print("Generating smoothed frames...")
    for latent, trans in tqdm(zip(smoothed_latents, smoothed_transforms)):
        with torch.no_grad():
            if trans is None:
                # No landmarks transform available: fall back to identity.
                trans = get_identity_transform()
            im = expander.generate_expanded_image(ws=latent.unsqueeze(0),
                                                  landmark_t=trans.cpu().numpy(),
                                                  pixels_left=opts.expansion_amounts[0],
                                                  pixels_right=opts.expansion_amounts[1],
                                                  pixels_top=opts.expansion_amounts[2],
                                                  pixels_bottom=opts.expansion_amounts[3])
            result_images_smoothed.append(np.array(tensor2im(im[0])))
    return result_images_smoothed
def smooth_latents_and_transforms(result_latents: np.ndarray, result_landmarks_transforms: List[torch.tensor],
                                  opts: VideoConfig):
    """Apply temporal smoothing to the latents (moved to GPU as float) and,
    when landmarks transforms are in use, to the transforms as well."""
    latents = torch.from_numpy(smooth_ws(result_latents)).float().cuda()
    if opts.landmarks_transforms_path is None:
        # No transforms configured: one None placeholder per frame.
        transforms = [None] * len(latents)
    else:
        stacked = torch.cat([t.unsqueeze(0) for t in result_landmarks_transforms])
        transforms = smooth_ws(stacked)
    return latents, transforms
def smooth_ws(ws: np.ndarray):
    """Temporal smoothing along the first axis: a 5-tap weighted average
    with weights (0.25, 0.75, 1, 0.75, 0.25) / 3, so the output is four
    entries shorter than the input."""
    center = ws[2:-2]
    neighbors = ws[1:-3] + ws[3:-1]
    outer = ws[:-4] + ws[4:]
    return (center + 0.75 * neighbors + 0.25 * outer) / 3
def smooth_s(s):
    """Temporally smooth a list of per-frame style dicts.
    Stacks each style entry across frames, smooths it with smooth_ws, and
    re-splits the result into one dict per (smoothed) frame.
    """
    stacked = {key: torch.cat([frame[key] for frame in s]) for key in s[0]}
    smoothed = {key: smooth_ws(value) for key, value in stacked.items()}
    n_frames = smoothed['input'].shape[0]
    return [
        {key: smoothed[key][i].unsqueeze(0) for key in smoothed}
        for i in range(n_frames)
    ]
|
140863
|
import FWCore.ParameterSet.Config as cms
#Tracks without extra and hits
#AOD content
RecoTrackerAOD = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep recoTracks_ctfWithMaterialTracksP5_*_*',
'keep recoTracks_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep recoTracks_rsWithMaterialTracksP5_*_*',
'keep recoTracks_cosmictrackfinderP5_*_*',
'keep recoTracks_beamhaloTracks_*_*',
'keep recoTracks_splittedTracksP5_*_*',
'keep recoTracks_ctfWithMaterialTracksP5Top_*_*',
'keep recoTracks_rsWithMaterialTracksP5Top_*_*',
'keep recoTracks_cosmictrackfinderP5Top_*_*',
'keep recoTracks_ctfWithMaterialTracksP5Bottom_*_*',
'keep recoTracks_rsWithMaterialTracksP5Bottom_*_*',
'keep recoTracks_cosmictrackfinderP5Bottom_*_*',
'keep recoTracks_regionalCosmicTracks_*_*',
'keep *_dedxHitInfo_*_*',
'keep *_dedxHarmonic2_*_*',
'keep *_dedxHitInfoCTF_*_*',
'keep *_dedxHarmonic2CTF_*_*',
'keep *_dedxHitInfoCosmicTF_*_*',
'keep *_dedxHarmonic2CosmicTF_*_*')
)
#RECO content
RecoTrackerRECO = cms.PSet(
outputCommands = cms.untracked.vstring(
'keep recoTrackExtras_ctfWithMaterialTracksP5_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5LHCNavigation_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5_*_*',
'keep recoTrackExtras_cosmictrackfinderP5_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5_*_*',
'keep recoTrackExtras_beamhaloTracks_*_*',
'keep TrackingRecHitsOwned_beamhaloTracks_*_*',
'keep recoTrackExtras_splittedTracksP5_*_*',
'keep TrackingRecHitsOwned_splittedTracksP5_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5Top_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5Top_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5Top_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5Top_*_*',
'keep recoTrackExtras_cosmictrackfinderP5Top_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5Top_*_*',
'keep recoTrackExtras_ctfWithMaterialTracksP5Bottom_*_*',
'keep TrackingRecHitsOwned_ctfWithMaterialTracksP5Bottom_*_*',
'keep recoTrackExtras_rsWithMaterialTracksP5Bottom_*_*',
'keep TrackingRecHitsOwned_rsWithMaterialTracksP5Bottom_*_*',
'keep recoTrackExtras_cosmictrackfinderP5Bottom_*_*',
'keep TrackingRecHitsOwned_cosmictrackfinderP5Bottom_*_*',
'keep recoTrackExtras_regionalCosmicTracks_*_*',
'keep TrackingRecHitsOwned_regionalCosmicTracks_*_*',
'keep *_dedxTruncated40_*_*',
'keep *_dedxTruncated40CTF_*_*',
'keep *_dedxTruncated40CosmicTF_*_*',
'keep recoTracks_cosmicDCTracks_*_*',
'keep recoTrackExtras_cosmicDCTracks_*_*',
'keep TrackingRecHitsOwned_cosmicDCTracks_*_*')
)
RecoTrackerRECO.outputCommands.extend(RecoTrackerAOD.outputCommands)
#Full Event content
# RecoTrackerFEVT: the full-event tier starts with no extra commands of its
# own and simply inherits the complete RECO list (which already includes AOD).
RecoTrackerFEVT = cms.PSet(
    outputCommands = cms.untracked.vstring()
)
RecoTrackerFEVT.outputCommands.extend(RecoTrackerRECO.outputCommands)
|
140890
|
from django.conf import settings
from mapentity.registry import registry
from . import models
# URL namespace of the maintenance app (used by Django URL reversing).
app_name = 'maintenance'
# Let mapentity generate the CRUD URLConf for each model; the `menu` flag
# controls whether the model gets an entry in the navigation menu.
urlpatterns = registry.register(models.Intervention, menu=settings.INTERVENTION_MODEL_ENABLED)
urlpatterns += registry.register(models.Project, menu=settings.PROJECT_MODEL_ENABLED)
|
140901
|
import logging
from urllib.parse import urlparse
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import StatementReference, User, Statement, Issue
from dbas.input_validator import is_integer
from dbas.lib import get_profile_picture, get_enabled_arguments_as_query, \
get_enabled_premises_as_query
LOG = logging.getLogger(__name__)
def get_references_for_argument(uid, main_page):
    """
    Collect every reference attached to the premise group (and conclusion) of
    the given argument.

    :param uid: uid of the argument
    :param main_page: current overview page
    :return: two dicts keyed by statement uid: the references and the statement texts
    """
    LOG.debug("%s", uid)
    if not is_integer(uid):
        return {}, {}
    argument = get_enabled_arguments_as_query().filter_by(uid=uid).first()
    if not argument:
        return {}, {}
    premises = get_enabled_premises_as_query() \
        .filter_by(premisegroup_uid=argument.premisegroup_uid).all()
    data = {}
    text = {}
    for premise in premises:
        statement_uid = premise.statement_uid
        data[statement_uid] = __get_references_for_statement(statement_uid, main_page)[statement_uid]
        text[statement_uid] = premise.get_text()
    if argument.conclusion_uid is None:
        # Undercut: the conclusion is itself an argument, so recurse into it.
        inner_data, inner_text = get_references_for_argument(argument.argument_uid, main_page)
        data.update(inner_data)
        text.update(inner_text)
    else:
        conclusion_uid = argument.conclusion_uid
        data[conclusion_uid] = __get_references_for_statement(conclusion_uid, main_page)[conclusion_uid]
        text[conclusion_uid] = DBDiscussionSession.query(Statement).get(conclusion_uid).get_text()
    return data, text
def get_references_for_statements(uids, main_page):
    """
    Collect the references and the text of every given statement.

    :param uids: uids of the statements
    :param main_page: current overview page
    :return: two dicts keyed by statement uid: the references and the statement texts
    """
    data = {}
    text = {}
    for statement_uid in uids:
        refs = __get_references_for_statement(statement_uid, main_page)
        data[statement_uid] = refs[statement_uid]
        text[statement_uid] = DBDiscussionSession.query(Statement).get(statement_uid).get_text()
    return data, text
def __get_references_for_statement(uid, main_page):
    """
    Fetch all reference rows of one statement and render them as dicts.

    :param uid: uid of the statement
    :param main_page: current overview page
    :return: dict mapping the uid to a list of reference dicts
    """
    LOG.debug("%s", uid)
    rows = DBDiscussionSession.query(StatementReference).filter_by(statement_uid=uid).all()
    return {uid: [__get_values_of_reference(row, main_page) for row in rows]}
def __get_values_of_reference(reference: StatementReference, main_page) -> dict:
    """
    Creates dictionary with all values of the column

    :param reference: Current database row
    :param main_page: current overview page
    :return: Dictionary with uid, reference text, host/path, author info,
        creation date and the referenced statement's text
    """
    user: User = DBDiscussionSession.query(User).get(int(reference.author_uid))
    img_path: str = get_profile_picture(user, 20, True)
    name: str = user.global_nickname
    link: str = main_page + '/user/' + str(user.uid)
    return {'uid': reference.uid,
            'reference': reference.text,
            'host': reference.host,
            'path': reference.path,
            'author': {'img': img_path,
                       'name': name,
                       'link': link},
            # BUGFIX: `humanize` is a method on arrow timestamps; the previous
            # code stringified the *bound method* object instead of its result.
            'created': str(reference.created.humanize()),
            'statement_text': reference.get_statement_text()}
def set_reference(text: str, url: str, user: User, statement: Statement, issue: Issue) -> bool:
    """
    Creates a new reference for a statement.

    :param text: Text of the reference.
    :param url: The url for the reference.
    :param user: User who referenced the statement.
    :param statement: Statement which should be referenced.
    :param issue: The issue of the referenced statement.
    :return: True (the reference is added to the session and flushed)
    """
    # BUGFIX: the annotation previously read `parsed_url: url`, i.e. it
    # "annotated" the variable with the str *parameter* instead of a type.
    parsed_url = urlparse(url)
    host: str = '{}://{}'.format(parsed_url.scheme, parsed_url.netloc)
    path: str = '{}?{}'.format(parsed_url.path, parsed_url.query)
    DBDiscussionSession.add(StatementReference(text, host, path, user, statement, issue))
    DBDiscussionSession.flush()
    return True
def get_references(uids, is_argument, application_url) -> dict:
    """
    Returns references for arguments or statements.

    :param uids: IDs of statements or arguments as list
    :param is_argument: boolean if the ids are for arguments
    :param application_url: url of the application
    :rtype: dict
    :return: collection with 'data' and 'text' fields
    """
    fetch = get_references_for_argument if is_argument else get_references_for_statements
    data, text = fetch(uids, application_url)
    return {
        'data': data,
        'text': text
    }
|
140911
|
import torch
class AlphaFoldLRScheduler(torch.optim.lr_scheduler._LRScheduler):
    """ Implements the learning rate schedule defined in the AlphaFold 2
        supplement. A linear warmup is followed by a plateau at the maximum
        learning rate and then exponential decay.

        Note that the initial learning rate of the optimizer in question is
        ignored; use this class' base_lr parameter to specify the starting
        point of the warmup.

        Schedule (with step_no = last_epoch):
          * step_no <= warmup_no_steps:
                base_lr + (step_no / warmup_no_steps) * max_lr
          * warmup_no_steps < step_no <= start_decay_after_n_steps:
                max_lr (plateau)
          * afterwards:
                max_lr * decay_factor ** ((steps_since_decay // decay_every_n_steps) + 1)
    """
    def __init__(self,
        optimizer,
        last_epoch: int = -1,
        verbose: bool = False,
        base_lr: float = 0.,
        max_lr: float = 0.001,
        warmup_no_steps: int = 1000,
        start_decay_after_n_steps: int = 50000,
        decay_every_n_steps: int = 50000,
        decay_factor: float = 0.95,
    ):
        # Validate the phase boundaries before wiring up the base class.
        step_counts = {
            "warmup_no_steps": warmup_no_steps,
            "start_decay_after_n_steps": start_decay_after_n_steps,
        }
        for k,v in step_counts.items():
            if(v < 0):
                raise ValueError(f"{k} must be nonnegative")
        if(warmup_no_steps > start_decay_after_n_steps):
            raise ValueError(
                "warmup_no_steps must not exceed start_decay_after_n_steps"
            )
        # Stash the schedule parameters *before* calling the base-class
        # constructor: _LRScheduler.__init__ performs an initial step() that
        # calls get_lr(), so these attributes must already exist.
        self.optimizer = optimizer
        self.last_epoch = last_epoch
        self.verbose = verbose
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.warmup_no_steps = warmup_no_steps
        self.start_decay_after_n_steps = start_decay_after_n_steps
        self.decay_every_n_steps = decay_every_n_steps
        self.decay_factor = decay_factor
        super(AlphaFoldLRScheduler, self).__init__(
            optimizer,
            last_epoch=last_epoch,
            verbose=verbose,
        )
    def state_dict(self):
        # Exclude the optimizer: it is not serializable schedule state and is
        # restored separately.
        state_dict = {
            k:v for k,v in self.__dict__.items() if k not in ["optimizer"]
        }
        return state_dict
    def load_state_dict(self, state_dict):
        # Inverse of state_dict(): blindly restores every stored attribute.
        self.__dict__.update(state_dict)
    def get_lr(self):
        # Guard mandated by torch's _LRScheduler protocol: get_lr() is only
        # valid while step() is executing.
        if(not self._get_lr_called_within_step):
            raise RuntimeError(
                "To get the last learning rate computed by the scheduler, use "
                "get_last_lr()"
            )
        step_no = self.last_epoch
        if(step_no <= self.warmup_no_steps):
            # Linear ramp; reaches base_lr + max_lr at the end of warmup
            # (with the default base_lr of 0 this is exactly max_lr).
            lr = self.base_lr + (step_no / self.warmup_no_steps) * self.max_lr
        elif(step_no > self.start_decay_after_n_steps):
            # Stepwise exponential decay; the +1 makes the first decay apply
            # immediately after start_decay_after_n_steps is crossed.
            steps_since_decay = step_no - self.start_decay_after_n_steps
            exp = (steps_since_decay // self.decay_every_n_steps) + 1
            lr = self.max_lr * (self.decay_factor ** exp)
        else: # plateau
            lr = self.max_lr
        # Same LR for every param group.
        return [lr for group in self.optimizer.param_groups]
|
140927
|
import time
import PyQt5.QtCore
from nn_sandbox.backend.algorithms import RbfnAlgorithm
from . import Bridge, BridgeProperty
from .observer import Observable
class RbfnBridge(Bridge):
    """Qt bridge exposing RBFN training controls and live state to the UI.

    BridgeProperty attributes are bound to the QML front end; slots start and
    stop the background RBFN training algorithm.
    """
    # UI refresh / dataset selection
    ui_refresh_interval = BridgeProperty(0.0)
    dataset_dict = BridgeProperty({})
    training_dataset = BridgeProperty([])
    testing_dataset = BridgeProperty([])
    current_dataset_name = BridgeProperty('')
    # Training hyperparameters (set from the UI)
    total_epoches = BridgeProperty(10)
    most_correct_rate_checkbox = BridgeProperty(True)
    most_correct_rate = BridgeProperty(0.98)
    acceptable_range = BridgeProperty(0.5)
    initial_learning_rate = BridgeProperty(0.8)
    search_iteration_constant = BridgeProperty(10000)
    cluster_count = BridgeProperty(3)
    test_ratio = BridgeProperty(0.3)
    # Live progress values pushed back to the UI during training
    current_iterations = BridgeProperty(0)
    current_learning_rate = BridgeProperty(0.0)
    best_correct_rate = BridgeProperty(0.0)
    current_correct_rate = BridgeProperty(0.0)
    test_correct_rate = BridgeProperty(0.0)
    has_finished = BridgeProperty(True)
    current_neurons = BridgeProperty([])
    def __init__(self):
        super().__init__()
        # Created lazily on start; None while no training has been launched.
        self.rbfn_algorithm = None
    @PyQt5.QtCore.pyqtSlot()
    def start_rbfn_algorithm(self):
        # Build a fresh observable algorithm from the current UI settings and
        # run it (ObservableRbfnAlgorithm.start runs in its own thread).
        self.rbfn_algorithm = ObservableRbfnAlgorithm(
            self,
            self.ui_refresh_interval,
            dataset=self.dataset_dict[self.current_dataset_name],
            total_epoches=self.total_epoches,
            most_correct_rate=self._most_correct_rate,
            acceptable_range=self.acceptable_range,
            initial_learning_rate=self.initial_learning_rate,
            search_iteration_constant=self.search_iteration_constant,
            cluster_count=self.cluster_count,
            test_ratio=self.test_ratio
        )
        self.rbfn_algorithm.start()
    @PyQt5.QtCore.pyqtSlot()
    def stop_rbfn_algorithm(self):
        self.rbfn_algorithm.stop()
    @property
    def _most_correct_rate(self):
        # None disables the early-stop-on-correct-rate criterion.
        if self.most_correct_rate_checkbox:
            return self.most_correct_rate
        return None
class ObservableRbfnAlgorithm(Observable, RbfnAlgorithm):
    """RbfnAlgorithm that notifies an observer (the bridge) on state changes."""
    def __init__(self, observer, ui_refresh_interval, **kwargs):
        # Guard flag: RbfnAlgorithm.__init__ already assigns attributes that
        # __setattr__ intercepts, so suppress notifications until it finishes.
        self.has_initialized = False
        Observable.__init__(self, observer)
        RbfnAlgorithm.__init__(self, **kwargs)
        self.has_initialized = True
        self.ui_refresh_interval = ui_refresh_interval
    def __setattr__(self, name, value):
        # Forward selected attribute writes to the observer so the UI tracks
        # training progress in real time.
        super().__setattr__(name, value)
        if name == 'current_iterations' and self.has_initialized:
            self.notify(name, value)
            # Push a serializable snapshot of the non-threshold neurons.
            self.notify('current_neurons', [{
                'mean': neuron.mean.tolist(),
                'standard_deviation': float(neuron.standard_deviation),
                'synaptic_weight': float(neuron.synaptic_weight)
            } for neuron in self._neurons if not neuron.is_threshold])
            self.notify('test_correct_rate', self.test())
        if name in ('best_correct_rate', 'current_correct_rate'):
            self.notify(name, value)
        if name in ('training_dataset', 'testing_dataset') and value is not None:
            self.notify(name, value.tolist())
    def run(self):
        # Bracket the whole training run with has_finished False/True and
        # publish the final neurons and test accuracy at the end.
        self.notify('has_finished', False)
        self.notify('test_correct_rate', 0)
        super().run()
        self.notify('current_neurons', [{
            'mean': neuron.mean.tolist(),
            'standard_deviation': float(neuron.standard_deviation),
            'synaptic_weight': float(neuron.synaptic_weight)
        } for neuron in self._neurons if not neuron.is_threshold])
        self.notify('test_correct_rate', self.test())
        self.notify('has_finished', True)
    def _iterate(self):
        super()._iterate()
        # the following line keeps the GUI from blocking
        time.sleep(self.ui_refresh_interval)
    @property
    def current_learning_rate(self):
        # Reading the rate also publishes it to the observer.
        ret = super().current_learning_rate
        self.notify('current_learning_rate', ret)
        return ret
|
140947
|
import tempfile
from pathlib import Path
from typing import List
import numpy as np
from .lib.indexing import IndexCreate
from .embed import SentenceEncoder, EncodeFile
from .lib.text_processing import Token, BPEfastApply
def lines_to_index(lang: str, lines: List, model_path: str, bpe_code_path: str, use_cpu: bool = False, batch_size: int = 32):
    """Build a FAISS index from in-memory lines. Suitable for small amounts of data."""
    with tempfile.TemporaryDirectory() as workdir:
        source_path = Path(workdir) / "source"
        source_path.write_text("\n".join(lines))
        # Must return while the temporary directory still exists.
        return text_file_pipeline(
            lang, str(source_path), model_path, bpe_code_path, use_cpu, returns="index", batch_size=batch_size
        )
def lines_to_embeddings(lang: str, lines: List, model_path: str, bpe_code_path: str, use_cpu: bool = False, batch_size: int = 32):
    """Embed in-memory lines with LASER. Suitable for small amounts of data."""
    with tempfile.TemporaryDirectory() as workdir:
        source_path = Path(workdir) / "source"
        source_path.write_text("\n".join(lines))
        # Must return while the temporary directory still exists.
        return text_file_pipeline(
            lang, str(source_path), model_path, bpe_code_path, use_cpu, returns="embeddings", batch_size=batch_size
        )
def text_file_pipeline(lang: str, input_path: str, model_path: str, bpe_code_path: str, use_cpu: bool, batch_size: int, returns="index"):
    """Suitable for small amounts of data.

    Run the LASER pipeline (tokenize -> BPE -> encode) on a text file.

    :param lang: language code used by the tokenizer
    :param input_path: path of the plain-text input (one sentence per line)
    :param model_path: path of the LASER sentence-encoder model
    :param bpe_code_path: path of the BPE codes file
    :param use_cpu: run the encoder on CPU instead of GPU
    :param batch_size: max sentences per encoder batch
    :param returns: "index" -> (embeddings, FAISS FlatL2 index);
        "embeddings" -> raw float32 vector read from the encoder output
    """
    encoder = SentenceEncoder(
        model_path,
        max_sentences=batch_size,
        max_tokens=10000,
        cpu=use_cpu)
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmpdir = Path(tmpdirname)
        # Stage 1: tokenization (lower-cased, no romanization).
        Token(
            input_path,
            str(tmpdir / "token"),
            lang=lang,
            romanize=False,
            lower_case=True, gzip=False,
            verbose=True)
        # Stage 2: apply BPE codes to the tokenized text.
        BPEfastApply(
            str(tmpdir / "token"),
            str(tmpdir / "bpe"),
            bpe_code_path,
            verbose=True, over_write=True)
        # Stage 3: encode to a raw float32 binary file.
        EncodeFile(
            encoder,
            str(tmpdir / "bpe"),
            str(tmpdir / "enc"),
            verbose=True, over_write=True)
        if returns == "embeddings":
            # NOTE(review): returns a flat 1-D float32 array; callers
            # presumably reshape by the embedding dimension -- confirm.
            return np.fromfile(str(tmpdir / "enc"), dtype=np.float32, count=-1)
        data, index = IndexCreate(
            str(tmpdir / "enc"), 'FlatL2',
            verbose=True, save_index=False)
        return data, index
|
141034
|
from toga_cocoa.libs import NSObject, objc_method
class TogaData(NSObject):
    """Objective-C object used by toga-cocoa as an opaque, immutable row handle.

    Implements NSCopying by returning itself (with an extra retain), since the
    object is never mutated after creation.
    """
    @objc_method
    def copyWithZone_(self):
        # TogaData is used as an immutable reference to a row
        # so the same object can be returned as a copy.
        # NOTE(review): the `copyWithZone:` selector normally receives a zone
        # argument; presumably the objc_method bridge tolerates the missing
        # parameter here -- confirm against rubicon-objc's dispatch rules.
        # Manual retain: Cocoa's copy contract returns a +1 reference.
        self.retain()
        return self
|
141099
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
    """Auto-generated type stub for a botocore service client.

    The method names (check_domain_availability, register_domain,
    transfer_domain, view_billing, ...) match the Route 53 Domains API --
    presumably this stub was generated for that service; confirm against the
    generator's service list. Bodies are intentionally `pass`: the real client
    is created dynamically by botocore at runtime, and this class only exists
    so IDEs and type checkers can resolve method signatures.
    """
    def can_paginate(self, operation_name: str = None):
        pass
    def check_domain_availability(self, DomainName: str, IdnLangCode: str = None) -> Dict:
        pass
    def check_domain_transferability(self, DomainName: str, AuthCode: str = None) -> Dict:
        pass
    def delete_tags_for_domain(self, DomainName: str, TagsToDelete: List) -> Dict:
        pass
    def disable_domain_auto_renew(self, DomainName: str) -> Dict:
        pass
    def disable_domain_transfer_lock(self, DomainName: str) -> Dict:
        pass
    def enable_domain_auto_renew(self, DomainName: str) -> Dict:
        pass
    def enable_domain_transfer_lock(self, DomainName: str) -> Dict:
        pass
    def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
        pass
    def get_contact_reachability_status(self, domainName: str = None) -> Dict:
        pass
    def get_domain_detail(self, DomainName: str) -> Dict:
        pass
    def get_domain_suggestions(self, DomainName: str, SuggestionCount: int, OnlyAvailable: bool) -> Dict:
        pass
    def get_operation_detail(self, OperationId: str) -> Dict:
        pass
    def get_paginator(self, operation_name: str = None) -> Paginator:
        pass
    def get_waiter(self, waiter_name: str = None) -> Waiter:
        pass
    def list_domains(self, Marker: str = None, MaxItems: int = None) -> Dict:
        pass
    def list_operations(self, SubmittedSince: datetime = None, Marker: str = None, MaxItems: int = None) -> Dict:
        pass
    def list_tags_for_domain(self, DomainName: str) -> Dict:
        pass
    def register_domain(self, DomainName: str, DurationInYears: int, AdminContact: Dict, RegistrantContact: Dict, TechContact: Dict, IdnLangCode: str = None, AutoRenew: bool = None, PrivacyProtectAdminContact: bool = None, PrivacyProtectRegistrantContact: bool = None, PrivacyProtectTechContact: bool = None) -> Dict:
        pass
    def renew_domain(self, DomainName: str, CurrentExpiryYear: int, DurationInYears: int = None) -> Dict:
        pass
    def resend_contact_reachability_email(self, domainName: str = None) -> Dict:
        pass
    def retrieve_domain_auth_code(self, DomainName: str) -> Dict:
        pass
    def transfer_domain(self, DomainName: str, DurationInYears: int, AdminContact: Dict, RegistrantContact: Dict, TechContact: Dict, IdnLangCode: str = None, Nameservers: List = None, AuthCode: str = None, AutoRenew: bool = None, PrivacyProtectAdminContact: bool = None, PrivacyProtectRegistrantContact: bool = None, PrivacyProtectTechContact: bool = None) -> Dict:
        pass
    def update_domain_contact(self, DomainName: str, AdminContact: Dict = None, RegistrantContact: Dict = None, TechContact: Dict = None) -> Dict:
        pass
    def update_domain_contact_privacy(self, DomainName: str, AdminPrivacy: bool = None, RegistrantPrivacy: bool = None, TechPrivacy: bool = None) -> Dict:
        pass
    def update_domain_nameservers(self, DomainName: str, Nameservers: List, FIAuthKey: str = None) -> Dict:
        pass
    def update_tags_for_domain(self, DomainName: str, TagsToUpdate: List = None) -> Dict:
        pass
    def view_billing(self, Start: datetime = None, End: datetime = None, Marker: str = None, MaxItems: int = None) -> Dict:
        pass
|
141131
|
import os
from abc import abstractmethod
from time import sleep
from typing import TYPE_CHECKING
from pyramid.httpexceptions import HTTPBadGateway
from weaver.formats import CONTENT_TYPE_APP_JSON
from weaver.utils import get_cookie_headers, get_settings, request_extra
from weaver.wps.utils import get_wps_output_dir, get_wps_output_url
if TYPE_CHECKING:
from typing import Dict
from pywps.app import WPSRequest
from weaver.typedefs import CWL_RuntimeInputsMap
class WpsProcessInterface(object):
    """
    Common interface for WpsProcess to be used in ``CWL`` jobs.
    """
    @abstractmethod
    def execute(self,
                workflow_inputs,        # type: CWL_RuntimeInputsMap
                out_dir,                # type: str
                expected_outputs,       # type: Dict[str, str]
                ):                      # type: (...) -> None
        """
        Execute a remote process using the given inputs.

        The function is expected to monitor the process and update the status.
        Retrieve the expected outputs and store them in the ``out_dir``.

        :param workflow_inputs: `CWL` job dict
        :param out_dir: directory where the outputs must be written
        :param expected_outputs: expected value outputs as `{'id': 'value'}`
        """
        raise NotImplementedError
    def __init__(self, request):
        # type: (WPSRequest) -> None
        self.request = request
        # Propagate the caller's auth cookies (if any HTTP request exists) so
        # remote-process calls act on behalf of the original user.
        if self.request.http_request:
            self.cookies = get_cookie_headers(self.request.http_request.headers)
        else:
            self.cookies = {}
        self.headers = {"Accept": CONTENT_TYPE_APP_JSON, "Content-Type": CONTENT_TYPE_APP_JSON}
        self.settings = get_settings()
    def make_request(self, method, url, retry, status_code_mock=None, **kwargs):
        """Issue an HTTP request with the interface's headers/cookies.

        :param retry: when True, retry once after 10s on a 502 Bad Gateway.
        :param status_code_mock: if set and the (final) response is still a
            502, overwrite the status code with this value.
        """
        response = request_extra(method, url=url, settings=self.settings,
                                 headers=self.headers, cookies=self.cookies, **kwargs)
        # TODO: Remove patch for Geomatys unreliable server
        if response.status_code == HTTPBadGateway.code and retry:
            sleep(10)
            # Note: the retry call passes retry=False, so at most one retry,
            # and the status-code mock is only applied by this outer call.
            response = self.make_request(method, url, False, **kwargs)
        if response.status_code == HTTPBadGateway.code and status_code_mock:
            response.status_code = status_code_mock
        return response
    def host_file(self, file_name):
        """Map a file inside the WPS output directory to its public URL.

        :raises Exception: if the file is outside the configured output dir.
        """
        weaver_output_url = get_wps_output_url(self.settings)
        weaver_output_dir = get_wps_output_dir(self.settings)
        file_name = os.path.realpath(file_name.replace("file://", ""))  # in case CWL->WPS outputs link was made
        if not file_name.startswith(weaver_output_dir):
            raise Exception("Cannot host files outside of the output path : {0}".format(file_name))
        return file_name.replace(weaver_output_dir, weaver_output_url)
|
141132
|
import os
import cv2
import numpy as np
def convert(size, box):
    '''
    Convert a corner-format box (xmin, ymin, xmax, ymax) into the normalized
    center format (cx/w, cy/h, bw/w, bh/h) used by YOLO-style label files.

    param:
        size: tuple (im_width, im_height)
        box: list [xmin, ymin, xmax, ymax]
    return:
        list [cx, cy, bw, bh], each normalized by the image size
    '''
    scale_x = 1. / size[0]
    scale_y = 1. / size[1]
    cx = (box[0] + box[2]) / 2.0 * scale_x
    cy = (box[1] + box[3]) / 2.0 * scale_y
    bw = (box[2] - box[0]) * scale_x
    bh = (box[3] - box[1]) * scale_y
    return [cx, cy, bw, bh]
def gen_train_list(imgs_file, target_file):
    '''
    Convert an image/label list file into a YOLO-style training list.

    Each input line is "<image path> x1 y1 x2 y2 [x1 y1 x2 y2 ...]" with
    corner-format boxes; each output line is the image path followed by
    normalized (cx, cy, w, h, class) values truncated to 6 characters each.
    The image size is hard-coded to 960x960 and the class id to 0.

    param:
        imgs_file: path of the input list
        target_file: path of the output list (overwritten)
    '''
    # BUGFIX: use context managers so both file handles are always closed
    # (the original opened them and never closed either).
    with open(imgs_file, 'r') as imgs_f, open(target_file, 'w') as tf:
        for i in imgs_f.readlines():
            boxes = np.array([float(j) for j in i.strip().split(' ')[1:]]).reshape(-1, 4)
            box_list = []
            for b in boxes:
                # Append class id 0.0 after the normalized box.
                bb = convert((960.0, 960.0), b) + [0.0]
                box_list.append(bb)
            box_string = ' '.join([str(j)[:6] for j in np.array(box_list).reshape(-1,)])
            img_label = i.strip().split(' ')[0] + ' ' + box_string
            tf.write(img_label + '\n')
if __name__ == '__main__':
    # Build training lists for the base image set and the cv4aug4-augmented
    # set. Paths are relative to this script's location in the repo layout.
    imgs_file = '../../data/img_list/img_label_list.txt'
    target_file = '../../data/train_list/train_list.txt'
    gen_train_list(imgs_file, target_file)
    imgs_file = '../../data/img_list/img_label_list_cv4aug4.txt'
    target_file = '../../data/train_list/train_list_cv4aug4.txt'
    gen_train_list(imgs_file, target_file)
|
141152
|
import torch
def reference_orgqr(v, tau, checks=True):
    """
    Unoptimized differentiable implementation of ORGQR.
    Memory consumption linear in matrix rank.
    Intended for use only within the test suite.

    Expects v in *explicit* form (unit diagonal / zeros above the diagonal
    written out), unlike LAPACK's packed representation.
    """
    if checks:
        assert torch.is_tensor(v) and torch.is_tensor(tau)
        assert v.dim() == 2 and tau.dim() == 1
        assert v.shape[0] >= v.shape[1]
        assert v.shape[1] == tau.numel()
        assert v.device == tau.device
    # Accumulate Q = H_0 @ H_1 @ ... @ H_{k-1} applied to the m x n identity,
    # folding the reflectors in from the right-most one.
    q = torch.eye(*v.shape, dtype=v.dtype, device=v.device)
    for col in reversed(range(tau.numel())):
        scale = tau[col]
        reflector = v[:, col].unsqueeze(1)
        # H @ q = q - tau * u (u^T q)
        q = q - reflector * (scale * reflector.T).mm(q)
    return q
def reference_orgqr_batch(v, tau, checks=True):
    """
    Unoptimized differentiable implementation of batched ORGQR.
    Memory consumption linear in matrix rank.
    Intended for use only within the test suite.

    Expects v in explicit form (unit diagonal / zeros above the diagonal).
    """
    if checks:
        assert torch.is_tensor(v) and torch.is_tensor(tau)
        assert v.dim() == 3 and tau.dim() == 2
        assert v.shape[1] >= v.shape[2]
        assert v.shape[2] == tau.shape[1]
        assert v.device == tau.device
    batch = v.shape[0]
    # One m x n identity per batch element.
    q = torch.eye(*v.shape[1:], dtype=v.dtype, device=v.device).unsqueeze(0).repeat(batch, 1, 1)
    for col in reversed(range(tau.shape[1])):
        scale = tau[:, col].view(-1, 1, 1)
        reflector = v[:, :, col].unsqueeze(2)
        # Batched H @ q = q - tau * u (u^T q)
        q = q - (scale * reflector).transpose(1, 2).bmm(q) * reflector
    return q
def pytorch_orgqr_with_roundtrip(v, tau, checks=True):
    """
    Compute ORGQR with torch, round-tripping through the CPU when the inputs
    live on another device (torch.orgqr historically lacked CUDA support).

    :param v: (m, n) tensor of Householder reflectors, m >= n
    :param tau: (n,) tensor of Householder scales
    :param checks: validate shapes/devices before computing
    :return: the (m, n) orthonormal factor on v's original device
    """
    if checks:
        assert torch.is_tensor(v) and torch.is_tensor(tau)
        assert v.dim() == 2 and tau.dim() == 1
        assert v.shape[0] >= v.shape[1]
        assert v.shape[1] == tau.numel()
        assert v.device == tau.device
    # BUGFIX: compare the device *type*. The previous `v.device == 'cpu'`
    # relied on torch.device/str equality, which older torch versions do not
    # support, silently sending CPU tensors through the useless copy path.
    if v.device.type == 'cpu':
        return torch.orgqr(v, tau)
    device = v.device
    v, tau = v.cpu(), tau.cpu()
    out = torch.orgqr(v, tau).to(device)
    return out
|
141167
|
import hooks
import logging
from rpc.jsonrpc import Dsuccess, Derror
log = logging.getLogger('social.network')
from util import try_this, Storage
from common import AccountBase, profile, UpdateMixin, FromNetMixin, pref
class SocialNetwork(UpdateMixin, AccountBase, FromNetMixin):
    """Base class for social-network accounts (legacy Python 2 code).

    Combines persisted account data (AccountBase), periodic refreshing
    (UpdateMixin) and network-originated state (FromNetMixin). Concrete
    services must override service/protocol/Connect/Disconnect/disconnect.
    """
    filters = {}
    header_funcs = []
    timer = Null
    def __init__(self, enabled = True, **options):
        AccountBase.__init__(self, **options)
        UpdateMixin.__init__(self, **options)
        FromNetMixin.__init__(self, **options)
        self.enabled = enabled
        self._dirty_error = True # The next error is new
    @property
    def dirty(self):
        return self._dirty
    @property
    def display_name(self):
        # Fall back to the username if the preferred display attribute is missing.
        return try_this(lambda: getattr(self, pref('social.display_attr')), self.username)
    def _decryptedpw(self):
        return profile.plain_pw(self.password)
    def update_info(self, **info):
        """Apply attribute updates, notify observers and persist to the server."""
        force = info.pop('force', None)
        self._dirty_error = True
        for k, v in info.iteritems():
            setattr(self, k, v)
        self.notify()
        # if self.OFFLINE and self.enabled:
        #     self.update_now()
        # Tell the server.
        profile.update_account(self, force = force)
    def get_options(self):
        try:
            get_opts = super(SocialNetwork, self).get_options
        except AttributeError:
            opts = {}
        else:
            opts = get_opts()
        #updatefreq is not user settable, so we don't need to store it
        opts.pop('updatefreq', None)
        return opts
    @property
    def icon(self):
        from gui import skin
        from util import try_this
        return try_this(lambda: skin.get('serviceicons.%s' % self.protocol), None)
    def error_link(self):
        """Return a (label, callback) pair matching the current offline reason,
        or None when no action applies."""
        reason = self.Reasons
        if self.protocol_info().get('needs_password', True):
            bplinkref = (_('Edit Account'), lambda *a: profile.account_manager.edit(self, True))
        else:
            bplinkref = (_('Retry'), lambda *a: self.Connect())
        linkref = {
            # BUGFIX: restored `bplinkref` here -- the previous source held a
            # "<PASSWORD>" scrubbing placeholder, which is not even valid
            # syntax; bplinkref is built just above for exactly this entry.
            reason.BAD_PASSWORD : bplinkref,
            reason.CONN_FAIL : (_('Retry'), lambda *a: self.Connect()),
            reason.OTHER_USER : (_('Reconnect'), lambda *a: self.update_now()),
            reason.CONN_LOST : (_('Retry'), lambda *a: self.update_now()),
            reason.WILL_RECONNECT : (_('Retry'), lambda *a: self.update_now()),
            reason.NONE : None,
        }
        if self.offline_reason in linkref:
            return linkref[self.offline_reason]
        else:
            log.debug('Couldn\'t find offline reason %r in linkref dictionary. Returning None for error_link',
                      self.offline_reason)
            return None
    @property
    def service(self):
        raise NotImplementedError
    @property
    def protocol(self):
        raise NotImplementedError
    def Connect(self, *a, **k):
        raise NotImplementedError
    #     self.change_state(self.Statuses.ONLINE)
    def Disconnect(self, *a, **k):
        raise NotImplementedError
    #     self.change_state(self.Statuses.OFFLINE)
    # disconnect = Disconnect
    def disconnect(self, *a, **k):
        raise NotImplementedError
    def observe_count(self,callback):
        return NotImplemented
        #self.add_gui_observer(callback, 'count')
    def observe_state(self, callback):
        return NotImplemented
        #self.add_gui_observer(callback, 'enabled')
    def unobserve_count(self,callback):
        return NotImplemented
        #self.remove_gui_observer(callback, 'count')
    def unobserve_state(self,callback):
        return NotImplemented
        #self.remove_gui_observer(callback)
import weakref
# Maps SocialFeed.id -> SocialFeed without keeping feeds alive; entries
# disappear automatically once a feed is garbage-collected.
weak_socialfeeds = weakref.WeakValueDictionary()
def on_dirty(ctx):
    # Hook target for 'social.feed.mark_dirty': `ctx` is the feed's globally
    # unique id; forward the dirty notification if the feed still exists.
    try:
        feed = weak_socialfeeds[ctx]
    except KeyError:
        log.warning('SocialFeed marked dirty but not in weak dictionary: %r', ctx)
    else:
        feed.set_dirty()
hooks.register('social.feed.mark_dirty', on_dirty)
class SocialFeed(object):
    '''
    allows plugins to use social.feed.* hooks to inject things into social feeds

    Wraps a feed-item generator per webview and renders items to HTML on
    demand via JSON-RPC callbacks (legacy Python 2 code).
    '''
    def __init__(self, id_, feed_context, get_feed_items, render_items, set_dirty=None):
        assert hasattr(render_items, '__call__')
        assert hasattr(get_feed_items, '__call__')
        self.id = id_ # globally unique, i.e. account_id + name + subtype. Must be hashable
        self.context = feed_context # for use by whatever is creating the SocialFeed
        self.get_feed_items = get_feed_items   # () -> iterator of feed items
        self.render_items = render_items       # (items, context) -> HTML string
        self.iterators = {}                    # one item iterator per webview
        hooks.notify('social.feed.created', self.id)
        self.set_dirty_cb = set_dirty
        # Register in the weak map so 'social.feed.mark_dirty' can find us.
        weak_socialfeeds[self.id] = self
    def set_dirty(self):
        if self.set_dirty_cb is not None:
            self.set_dirty_cb()
        else:
            log.warning('%r dirty hook called, but has no callback', self)
    def get_iterator(self):
        iterator_info = Storage(id=self.id,
                                context=self.context,
                                iterator=self.get_feed_items())
        # allow plugins to wrap/transform the generator
        return hooks.reduce('social.feed.iterator', iterator_info).iterator
    def new_ids(self, ids):
        hooks.notify('social.feed.updated', self.id, ids)
    def jscall_initialize_feed(self, webview, _id):
        # Drop any cached iterator so the next next_item call starts fresh.
        self.iterators.pop(webview, None)
    def jscall_next_item(self, webview, id):
        # Lazily create the per-webview iterator, then answer the JS request
        # with either the next rendered item or an error on exhaustion.
        try:
            it = self.iterators[webview]
        except KeyError:
            it = self.iterators[webview] = self.get_iterator()
        try:
            item = it.next()  # Python 2 iterator protocol
        except StopIteration:
            Derror(id, webview)
        else:
            Dsuccess(id, webview, html=self.render_items([item], self.context))
|
141173
|
import boto3
import pandas as pd
import numpy as np
# user analysis
def get_user_log():
    """
    Download user-access logs from the 'nlprankings' S3 bucket and write them
    to ../dat/log_info.csv.

    Every log/<datetime>.txt object holds one comma-separated access record;
    only records with more than 14 fields are kept (shorter ones are
    malformed). Prints the number of records collected.
    """
    s3 = boto3.resource('s3', region_name='us-east-2')
    bucket = s3.Bucket('nlprankings')
    columns = ['datetime','IP','startYear','endYear','num_uni','num_author','CL','TACL','ACL_C','NAACL_C','EMNLP_C',
               'CoNLL_C','EACL_C','COLING','IJCNLP','WKSPDEMO']
    rows = []
    for obj in bucket.objects.filter(Prefix="log/"):
        if '.txt' in obj.key:
            datetime = obj.key[4:-4]  # strip "log/" prefix and ".txt" suffix
            body = obj.get()['Body'].read()
            log_info = body.decode("utf-8").split(',')
            if len(log_info) > 14:
                # BUGFIX/modernization: collect rows in a plain list and build
                # the frame once -- DataFrame.append was O(n^2) and was removed
                # in pandas 2.0.
                rows.append([datetime] + log_info)
    print(len(rows))
    logs = pd.DataFrame(rows, columns=columns)
    logs.to_csv('../dat/log_info.csv')
def user_analysis(logs):
    """Exploratory analysis of the access-log DataFrame.

    Prints the access count and a table of per-IP re-visit counts (capped at
    15), then shows a histogram of re-visits. Note: `plt` is imported at
    module level further down this file, which is fine at call time.
    """
    logs['date'] = logs.datetime.apply(lambda x: x.split(' ')[0])
    print(len(logs))  # num of accesses
    # print(logs.date.nunique())  # num of dates
    # print(logs.IP.nunique())  # num of uniq IP
    #
    # # timeframe checked
    # print(logs.groupby(['startYear', 'endYear']).size())  # .reset_index(name='pubCounts')
    #
    # # weights
    # for l in logs.groupby(['CL','TACL','ACL_C','NAACL_C','EMNLP_C','CoNLL_C',
    #                        'EACL_C','COLING','IJCNLP','WKSPDEMO']).size().reset_index(name='Counts').values.tolist():
    #     print(l)
    uniq_access = logs.groupby(['IP', 'date']).size().reset_index(name='count')
    re_access = uniq_access.groupby('IP').size().reset_index(name='count')
    re_access = re_access[re_access['count'] <= 15]
    print(re_access)
    re_access_count = re_access.groupby('count').size().reset_index(name='users')
    # NOTE(review): 1219 is a hard-coded total -- presumably the number of
    # unique IPs at analysis time; confirm before reusing.
    re_access_count['percent'] = re_access_count.users.apply(lambda x: x/1219)
    print(re_access_count)
    plt.figure(figsize=(15,10))
    plt.hist(re_access['count'], bins=15)
    plt.ylabel('count', fontsize=12)
    plt.xlabel('number of re-visits', fontsize=12)
    plt.show()
def get_dataset(df, CL, TACL, ACL_C, NAACL_C, EMNLP_C, CoNLL_C, EACL_C, COLING, IJCNLP, WKSPDEMO):
    """
    Attach a per-publication 'score' column to df (modified in place and
    returned).

    A publication scores venue_weight / numAuthor. The weight comes from the
    caller-supplied per-venue parameters; conference weights only apply when
    the publication type is 'conference'; anything unmatched falls back to a
    default weight by type (journal/conference 3, workshop/demonstration 1).
    """
    type_defaults = {'journal': 3, 'conference': 3, 'workshop': 1, 'demonstration': 1}
    plain_venues = {'CL': CL, 'TACL': TACL, 'COLING': COLING, 'IJCNLP': IJCNLP}
    conference_venues = {'ACL': ACL_C, 'NAACL': NAACL_C, 'EMNLP': EMNLP_C,
                         'CoNLL': CoNLL_C, 'EACL': EACL_C}

    def venue_weight(venue, pub_type):
        # One-line purpose: resolve the weight for a single publication row.
        if venue in plain_venues:
            return plain_venues[venue]
        if venue in conference_venues and pub_type == 'conference':
            return conference_venues[venue]
        if venue in ('workshop', 'demonstration'):
            return WKSPDEMO
        return type_defaults[pub_type]

    df['score'] = df.apply(lambda row: 1 / row.numAuthor * venue_weight(row.venue, row.type), axis=1)
    return df
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
import matplotlib.pyplot as plt
def university_trend_clustering(df):
    """Hierarchically cluster universities by their yearly score trends.

    Builds a university x year score matrix (missing years filled with 0),
    runs Ward-linkage clustering and shows the dendrogram. The commented-out
    code below is earlier exploratory analysis kept for reference.
    """
    trend_df = df.groupby(['university', 'year'])['score'].sum().reset_index()
    trend_df = trend_df.pivot(index='university',columns='year',values='score')
    trend_df = trend_df.fillna(0)
    trend_df.columns.name = None
    Z = linkage(trend_df, 'ward')
    plt.figure(figsize=(40,60))
    plt.xlabel('distance')
    dendrogram(Z, orientation='right', labels=trend_df.index, leaf_font_size=12)
    plt.show()  # blocks until the dendrogram window is closed
    # k=3
    # clusters = fcluster(Z, k, criterion='maxclust')
    # clusters = list(clusters)
    #
    #
    # trend_df['total'] = trend_df.sum(axis=1)
    # trend_df['rank'] = trend_df['total'].rank(ascending=False)
    # trend_df['cluster'] = clusters
    # print(trend_df)
    # print(trend_df[(trend_df['cluster'] == 1) & (trend_df['rank'] < 30)]['rank'])
    #
    # print(clusters.count(1))
    # c1 = ['University of Maryland', 'University of California, Berkeley',
    #       'University of Illinois at Urbana-Champaign', 'Information Sciences Institute']
    #
    # pub2016 = df[(df['university'].isin(c1)) & (df['year'] == 2016) & (~df['type'].isin(['workshop', 'demonstration']))]
    #
    #
    #
    # for p in pub2016.values.tolist():
    #     # print(p)
    #     with open('../dat/acl_anthology/json/'+p[4]+'.json') as f:
    #         pub = pd.read_json(f).set_index('id')
    #     records = pub.to_dict()
    #     print(p[-2],records['title'][p[3]])
    # trend_df = trend_df.reset_index()
    #
    # c1_df = trend_df[trend_df['university'].isin(c1)]
    #
    # c1_df = c1_df.drop(columns=['total', 'rank', 'cluster'])
    # c1_df = c1_df.set_index('university').T
    # c1_df = c1_df.reset_index()
    #
    # # print(c1_df.columns)
    #
    # palette = plt.get_cmap('Set1')
    #
    # num = 0
    # plt.figure()
    # for column in c1_df.drop(columns=['index'], axis=1):
    #     num += 1
    #     plt.plot(c1_df['index'], c1_df[column], marker='', color=palette(num), linewidth=1, alpha=0.9, label=column)
    #
    # # Add legend
    # plt.legend(ncol=2, loc='upper center', bbox_to_anchor=(0.5, -0.15))
    #
    # # Add titles
    # plt.xlabel("Year")
    # plt.ylabel("Score")
    # plt.xticks(np.arange(2010,2020))
    # plt.show()
    return
def find_venue(pub_id):
    """Strip the paper number from an ACL Anthology id, leaving the venue/volume prefix.

    Workshop-style ids (containing 'W', or the D19-5x / D19-6x workshop
    volumes) carry a 2-character paper number; all other ids carry 3.
    """
    workshop_like = 'W' in pub_id or 'D19-5' in pub_id or 'D19-6' in pub_id
    return pub_id[:-2] if workshop_like else pub_id[:-3]
from matplotlib.ticker import MaxNLocator
def university_attended(df, k):
    """Plot how many of the top-k scoring authors attended each university.

    Selects the k authors with the highest total score, counts the distinct
    universities each is affiliated with, prints the per-university counts and
    shows a horizontal bar chart (largest count on top).
    """
    df = df.drop(columns=['Unnamed: 0'])
    df_author = df.groupby('authorID')['score'].sum().reset_index()
    df_author = df_author.sort_values(by='score', ascending=False)
    df_author = df_author.head(n=k)
    topk_authors = df_author['authorID'].tolist()
    df = df[df['authorID'].isin(topk_authors)]
    # Deduplicate author/university pairs, then count authors per university.
    df = df.groupby(['authorID', 'university']).size().reset_index(name='count')
    df = df.groupby('university').size().reset_index(name='count')
    df = df.sort_values(by='count', ascending=False).reset_index()
    df = df.drop(columns=['index'])
    print(df)
    plt.figure(figsize=(20,10))
    height = df['count']
    bars = df['university']
    y_pos = np.arange(len(bars))
    plt.barh(y_pos, height)
    # Create names on the y-axis
    plt.yticks(y_pos, bars)
    plt.xlabel('Count')
    plt.gca().invert_yaxis()  # largest bar at the top
    plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
    # Show graphic
    plt.show()
    return
def ranking_overtime(df, k):
    """Print how the score-based rank of the top-k universities changed
    between 2010 and 2019.

    ``df`` must have columns 'university', 'year' (covering 2010-2019) and
    'score'.  Prints the per-year rank table, one row per top university,
    the mean rank change, and the counts of universities that dropped,
    improved, or kept their rank.  Returns None.
    """
    # Top-k universities by total score over the whole period.
    topk_u = df.groupby('university')['score'].sum().reset_index()
    topk_u = topk_u.sort_values(by='score', ascending=False)
    topk_u = topk_u.head(n=k).reset_index()
    top = topk_u.university.tolist()
    # One row per university, one (int-labelled) column per year.
    df = df.groupby(['university', 'year'])['score'].sum().reset_index()
    df = df.pivot(index='university', columns='year', values='score')
    df = df.fillna(0)
    df.columns.name = None
    df = df.reset_index()
    # Sum only the numeric year columns: summing across the string
    # 'university' column raises TypeError on modern pandas.
    df['total'] = df[list(range(2010, 2020))].sum(axis=1)
    for year in range(2010, 2020):
        # rank 1 = highest score in that year
        df[str(year) + '_Rank'] = df[year].rank(ascending=False)
    print(df)
    df = df[df['university'].isin(top)]
    df = df.drop(columns=list(range(2010, 2020)))
    df = df.sort_values(by='total', ascending=False)
    df = df.drop(columns=['total'])
    df.loc[:, df.columns != 'university'] = df.loc[:, df.columns != 'university'].astype(int)
    # Positive = the university improved (moved up) between 2010 and 2019.
    df['2010-2019'] = df['2010_Rank'] - df['2019_Rank']
    for d in df.values.tolist():
        print(d)
    print(df['2010-2019'].mean())
    # Replaces the opaque sum(sum([...])) idiom with a direct boolean sum.
    print((df['2010-2019'] < 0).sum())
    print((df['2010-2019'] > 0).sum())
    print((df['2010-2019'] == 0).sum())
def university_ranking(df):
    """Print the 50 highest-scoring universities with their distinct
    author counts and total scores (rounded to 2 decimals)."""
    print(df.columns)
    ranking = (df.groupby('university')
                 .agg({'authorID': 'nunique', 'score': 'sum'})
                 .reset_index()
                 .sort_values(by='score', ascending=False)
                 .head(n=50)
                 .round(2))
    print(ranking)
def wc_index(df):
    """Print, for the 30 highest-scoring authors, how many publications with
    score >= 1 they had in each year from 2015 onwards.

    Note: adds an integer 'index_count' column to the caller's frame.
    """
    # The 30 authors with the largest total score.
    leaders = df.groupby('authorID')['score'].sum().reset_index()
    leaders = leaders.sort_values(by='score', ascending=False)
    leaders = leaders.head(n=30).reset_index()
    # print(leaders['authorID'])
    # A publication counts towards the index when its score is >= 1.
    df['index_count'] = (df['score'] >= 1).astype(int)
    table = df[df['authorID'].isin(leaders['authorID'])]
    table = table.groupby(['authorID', 'year'])['index_count'].sum().reset_index()
    table = table[table['year'] >= 2015]
    table = table.pivot(index='authorID', columns='year', values='index_count')
    print(table)
    return
if __name__ == '__main__':
    # Earlier experiment entry points, kept for reference:
    # get_user_log()
    # logs = pd.read_csv('../dat/log_info.csv')
    # user_analysis(logs)
    df = pd.read_csv('../dat/graph_data.csv')
    # NOTE(review): the ten positional ints are presumably per-category
    # weights consumed by get_dataset (defined elsewhere in this file) --
    # confirm against its definition.
    df = get_dataset(df, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1)
    # university_trend_clustering(df)
    # university_attended(df, 100) # by top k authors
    # ranking_overtime(df, 50)
    # university_ranking(df)
    wc_index(df)
|
141193
|
import tensorflow as tf
import argparse


def load_graph(frozen_graph_filename):
    """Read a frozen TF1 GraphDef protobuf from disk and import it into a
    fresh tf.Graph, which is returned."""
    # Parse the serialized GraphDef out of the .pb file.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())
    # Import into a brand-new graph; the empty name prefix keeps the
    # original op names unchanged.
    imported = tf.Graph()
    with imported.as_default():
        tf.import_graph_def(graph_def, name="")
    return imported
if __name__ == '__main__':
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    # parser.add_argument("--frozen_model_filename", default="/home/scott/Downloads/ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb", type=str, help="Frozen model file to import")
    parser.add_argument("--frozen_model_filename", default="/home/scott/Downloads/faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017/frozen_inference_graph.pb", type=str, help="Frozen model file to import")
    args = parser.parse_args()
    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)
    # We can verify that we can access the list of operations in the graph.
    # Listing op names is a cheap sanity check that the import succeeded.
    for op in graph.get_operations():
        print(op.name)
        # prefix/Placeholder/inputs_placeholder
        # ...
        # prefix/Accuracy/predictions
    # y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')
|
141207
|
import argparse
import warnings
from pytorch_lightning.models.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from test_tube import Experiment
import models
warnings.filterwarnings('ignore')
def main():
    """Parse CLI options, build the selected super-resolution model and
    train it with pytorch-lightning."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', choices=['srcnn', 'srgan'], required=True)
    parser.add_argument('--scale_factor', type=int, default=4)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--patch_size', type=int, default=96)
    parser.add_argument('--gpus', type=str, default='0')
    args = parser.parse_args()
    # Pick the model class matching the requested architecture
    # (argparse `choices` guarantees it is one of the two).
    if args.model == 'srcnn':
        model_cls = models.SRCNNModel
    elif args.model == 'srgan':
        model_cls = models.SRGANModel
    # Let the chosen model register its own CLI flags, then re-parse.
    parser = model_cls.add_model_specific_args(parser)
    args = parser.parse_args()
    # test-tube experiment logger, one directory per model type.
    exp = Experiment(save_dir=f'./logs/{args.model}')
    exp.argparse(args)
    model = model_cls(args)
    # Checkpoints go under the experiment's media path.
    checkpoint_callback = ModelCheckpoint(
        filepath=exp.get_media_path(exp.name, exp.version),
    )
    # Trainer validates every 10 epochs and logs every 50 rows.
    trainer = Trainer(
        experiment=exp,
        max_nb_epochs=4000,
        add_log_row_interval=50,
        check_val_every_n_epoch=10,
        checkpoint_callback=checkpoint_callback,
        gpus=[int(g) for g in args.gpus.split(',')]
    )
    # start training!
    trainer.fit(model)
if __name__ == "__main__":
main()
|
141223
|
# Configuration module for a jet-actuated wave/flow control RL experiment:
# numerical-scheme parameters, environment/episode sizing, jet geometry,
# observation/reward scaling, and rendering switches.
import numpy as np
########################## method used - "1env_1jet", or "1env_njet" ##################################################
method='1env_1jet'
########################## nature of neural network - "mlp", "mlp_shared" or "cnn" ##################################################
policy_name='mlp'
########################## architecture of NN - default = [128, 64, 64] ##################################################
## this should be like this: [nb of neurons in first layer, nb of neurons in second layer, ...]
arch=[128, 64, 64]
############################################### about the simulation ##############################################################
###### if you change these params, next time you launch a training, set the following param to 1 ############
new_params = 0 ### will recalculate the initial system_state with the new params #TODO - make that automatic
L = 300.0 # length in mm - default: 300
C = 1e-4 # default: 1e-4
dx = 10e-2 # default: 1e-1
dt = C / dx # C / dx - default value: 1e-3
NUM = int(L/dx) # number of points in numerical resolution
simulation_step_time = 5.0e-2 # time in s of a step of the simulation - default: 5e-2
n_step = int(simulation_step_time / dt) # number of times the numerical scheme will be applied in a single step
# it will affect how much time passes between each step of the environment (= dt*n_step)
simulation_time=20 # time in s of an episode
initial_waiting_time = 200 # the number of s before saving the system state that will serve as the initial system state for all trainings to come
initial_waiting_n_step = int(initial_waiting_time/simulation_step_time)
########################################################################################################################################
############################################### about the environment ##############################################################
nb_timestep_per_simulation = int(simulation_time/simulation_step_time) # number of steps per epoch
# nb_total_epoch = 10000 # total number of episodes
# save_every_n_epoch = 100 # the model is saved every n episodes
total_nb_timesteps = int(3e5)
nb_saves_per_training = 10
nb_epoch = total_nb_timesteps // nb_timestep_per_simulation
save_every_n_epoch = (nb_epoch-1) // nb_saves_per_training
n_cpu=1
threshold_hq = 5 # max value in obs, to not give too high inputs to the nn
#################### about the jets ##################
n_jets=10
JET_MAX_POWER=5
JET_WIDTH_mm=5.0
space_between_jets=10
position_first_jet=150
# Jet width converted from mm to grid points.
JET_WIDTH = int(JET_WIDTH_mm/dx)
jets_position = np.array(
    [position_first_jet + space_between_jets*i for i in range(n_jets)]) # in mm
# Convert mm positions to grid indices.
jets_position = np.array(jets_position/dx, dtype="int")
# we can add perturbation jets to challenge a policy that would adapted only to the case of a an unpertubated simulation
perturbation_jets_position=[]
perturbation_jets_power = JET_MAX_POWER
#################### about the obs/reward ###################
cut_obs_before_jet = 1.0 # to change the size of the jet without changing the position of its left extremity - default: 1
size_obs=20
size_obs_to_reward=20
reward_param = 5.66 # chosen so that a no jet policy gives a reward of ~0
obs_param = 1
nan_punition=-500.0 # reward given when the simulation collapses
true_reset_every_n_episodes = False
############################################################################################################################
########################################## about rendering ##################################################################
render = False
MAX_TIMEFRAME_CONTROL_PLOT = 64 # Max number of points to plot the control+h/time
MAX_TIMEFRAME_FULL_CONTROL_PLOT = 48
POSITION_JET_PLOT = 0.5 # Where to plot the jets and sensors
POSITION_REWARD_SPOTS = 0.4
POSITION_CONTROL_SPOTS = 0.6
N_PLOTS = 3 # nb of plots
show_control = False
# It's where we start the plotting (we're most interested in the zone where waves are already fully formed)
start_h_plot = 0
RENDER_PERIOD=1
SAVE_PERIOD = 1000
obs_at_jet_render_param = 4.0
reward_multiplier_render = 1
########################################################################################################################################
########################################## parameters that should not be changed ###################################################
obs_h, obs_q = True, True
delta = 0.1
noise_mag = 1e-4
hq_base_value = 1.0
max_h = 1 # Important for plot and obs space
max_q=3
normalize_value_q=1
normalize_value_h=1
########################################################################################################################################
# misc
tensorboard_integration = True
monitor_reward = True
is_dummy_vec_env = False
|
141229
|
"""Demonstrate mean/median imputation of missing values, and compare model
quality (cross-validated R^2) with imputation vs. discarding incomplete
samples on the diabetes data set."""
import numpy as np
# sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# sklearn.impute.SimpleImputer is its drop-in replacement
# (missing values are given as np.nan instead of the string 'NaN').
from sklearn.impute import SimpleImputer
# Represent the unknown value by np.nan in numpy
data_origin = [[30, 100],
               [20, 50],
               [35, np.nan],
               [25, 80],
               [30, 70],
               [40, 60]]
# Imputation with the mean value
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(data_origin)
data_mean_imp = imp_mean.transform(data_origin)
print(data_mean_imp)
# Imputation with the median value
imp_median = SimpleImputer(missing_values=np.nan, strategy='median')
imp_median.fit(data_origin)
data_median_imp = imp_median.transform(data_origin)
print(data_median_imp)
# New samples
new = [[20, np.nan],
       [30, np.nan],
       [np.nan, 70],
       [np.nan, np.nan]]
new_mean_imp = imp_mean.transform(new)
print(new_mean_imp)
# Effects of discarding missing values and imputation
from sklearn import datasets
dataset = datasets.load_diabetes()
X_full, y = dataset.data, dataset.target
# Simulate a corrupted data set by adding 25% missing values
m, n = X_full.shape
m_missing = int(m * 0.25)
print(m, m_missing)
# Randomly select m_missing samples (seeded for reproducibility)
np.random.seed(42)
missing_samples = np.array([True] * m_missing + [False] * (m - m_missing))
np.random.shuffle(missing_samples)
# For each missing sample, randomly select 1 out of n features
missing_features = np.random.randint(low=0, high=n, size=m_missing)
# Represent missing values by nan
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = np.nan
# Discard samples containing missing values
X_rm_missing = X_missing[~missing_samples, :]
y_rm_missing = y[~missing_samples]
# Estimate R^2 on the data set with missing samples removed
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_rm_missing = cross_val_score(regressor, X_rm_missing, y_rm_missing).mean()
print('Score with the data set with missing samples removed: {0:.2f}'.format(score_rm_missing))
# Imputation with mean value
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
X_mean_imp = imp_mean.fit_transform(X_missing)
# Estimate R^2 on the mean-imputed data set
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
score_mean_imp = cross_val_score(regressor, X_mean_imp, y).mean()
print('Score with the data set with missing values replaced by mean: {0:.2f}'.format(score_mean_imp))
# Estimate R^2 on the full (uncorrupted) data set as an upper reference
regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=500)
score_full = cross_val_score(regressor, X_full, y).mean()
print('Score with the full data set: {0:.2f}'.format(score_full))
# # Imputation with median value
# imp_median = SimpleImputer(missing_values=np.nan, strategy='median')
# X_median_imp = imp_median.fit_transform(X_missing)
# # Estimate R^2 on the median-imputed data set
# regressor = RandomForestRegressor(random_state=42, max_depth=10, n_estimators=100)
# score_median_imp = cross_val_score(regressor, X_median_imp, y).mean()
# print('Score with the data set with missing values replaced by median: {0:.2f}'.format(score_median_imp))
|
141231
|
# Demo for the "deutschland" client of the German federal Autobahn API:
# list all autobahns, then fetch one charging station by id.
from pprint import pprint
from deutschland import autobahn
from deutschland.autobahn.api import default_api
autobahn_api_instance = default_api.DefaultApi()
try:
    # List all autobahns
    api_response = autobahn_api_instance.list_autobahnen()
    pprint(api_response)
    # Details of one charging station, addressed by its base64 station id
    station_id = "RUxFQ1RSSUNfQ0hBUkdJTkdfU1RBVElPTl9fMTczMzM="  # str
    api_response = autobahn_api_instance.get_charging_station(station_id)
    pprint(api_response)
except autobahn.ApiException as e:
    print("Exception when calling DefaultApi->get_charging_station: %s\n" % e)
|
141233
|
import torch
from manopth.demo import generate_random_hand


def test_generate_random_hand():
    """Random MANO hand generation yields correctly shaped vertex and joint
    tensors for every sample in the batch."""
    n_samples = 3
    hand_info = generate_random_hand(batch_size=n_samples, ncomps=6)
    # 778 mesh vertices and 21 joints per hand, 3-D coordinates each.
    assert hand_info['verts'].shape == (n_samples, 778, 3)
    assert hand_info['joints'].shape == (n_samples, 21, 3)
|
141247
|
# Processing (Python mode) sketch: run three edge detectors from the
# opencv_processing library on one image and show them in a 2x2 grid.
add_library('opencv_processing')
src = loadImage("test.jpg")
size(src.width, src.height, P2D)
opencv = OpenCV(this, src)
# Canny edges (args are presumably the low/high hysteresis thresholds -- confirm)
opencv.findCannyEdges(20, 75)
canny = opencv.getSnapshot()
# Scharr edges, horizontal gradient only
opencv.loadImage(src)
opencv.findScharrEdges(OpenCV.HORIZONTAL)
scharr = opencv.getSnapshot()
# Sobel edges, first derivative in x (dx=1, dy=0)
opencv.loadImage(src)
opencv.findSobelEdges(1, 0)
sobel = opencv.getSnapshot()
# Draw all four images at half scale: source, Canny, Scharr, Sobel
with pushMatrix():
    scale(0.5)
    image(src, 0, 0)
    image(canny, src.width, 0)
    image(scharr, 0, src.height)
    image(sobel, src.width, src.height)
# Label each quadrant
text("Source", 10, 25)
text("Canny", src.width / 2 + 10, 25)
text("Scharr", 10, src.height / 2 + 25)
text("Sobel", src.width / 2 + 10, src.height / 2 + 25)
|
141279
|
mx = 0
for i in xrange(1, 100):
j = 1
while True:
num = i * j
s = str(num)
if s.count('1') + s.count('0') == len(s):
mx = max(mx, j)
break
j += 1
print mx
|
141297
|
import inspect
import os
from pathlib import Path
from typing import Any, Dict, List, cast
import schema_salad.metaschema as cg_metaschema
from schema_salad import codegen
from schema_salad.avro.schema import Names
from schema_salad.schema import load_schema
from .test_java_codegen import cwl_file_uri, metaschema_file_uri
def test_cwl_gen(tmp_path: Path) -> None:
    """Generated CWL python module exists and defines the Workflow class."""
    out_file = tmp_path / "src.py"
    python_codegen(cwl_file_uri, out_file)
    assert os.path.exists(out_file)
    with open(out_file) as handle:
        assert "class Workflow(Process)" in handle.read()
def test_meta_schema_gen(tmp_path: Path) -> None:
    """Generated metaschema module exists and defines RecordSchema."""
    out_file = tmp_path / "src.py"
    python_codegen(metaschema_file_uri, out_file)
    assert os.path.exists(out_file)
    with open(out_file) as handle:
        assert "class RecordSchema(Savable):" in handle.read()
def test_meta_schema_gen_up_to_date(tmp_path: Path) -> None:
    """Freshly generated metaschema code matches the checked-in module."""
    out_file = tmp_path / "src.py"
    python_codegen(metaschema_file_uri, out_file)
    assert os.path.exists(out_file)
    with open(out_file) as handle:
        assert handle.read() == inspect.getsource(cg_metaschema)
def python_codegen(file_uri: str, target: Path) -> None:
    """Run schema-salad's python code generator on *file_uri* and write the
    generated module to *target*."""
    document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(file_uri)
    assert isinstance(avsc_names, Names)
    # Fetch and fully resolve the schema document before generating code.
    raw_doc = metaschema_loader.fetch(file_uri)
    resolved_doc, schema_metadata = metaschema_loader.resolve_all(raw_doc, file_uri)
    codegen.codegen(
        "python",
        cast(List[Dict[str, Any]], resolved_doc),
        schema_metadata,
        document_loader,
        target=str(target),
    )
|
141319
|
import numpy as np
import tensorflow as tf
import scipy
import lib_gcnn.graph as graph
class GraphCNN(object):
    """
    A graph CNN for text classification. Composed of graph convolutional + max-pooling layer(s) and a
    softmax layer.

    Constructor parameters:
    filter_name = Filter name (i.e. "chebyshev", "spline", "fourier")
    L = List of graph Laplacians (one per coarsening level).
    K = List of filter sizes i.e. support sizes (no. of hops)
        (Polynomial orders for Chebyshev; K[i] = L[i].shape[0] for non-param Fourier)
    F = List of no. of features (per filter).
    P = List of pooling sizes (per filter); each must be 1 or a power of 2.
    FC = List of fully-connected layers.
    batch_size / num_vertices / num_classes = fixed input/output dimensions.
    l2_reg_lambda = weight of the L2 penalty on dense-layer weights.

    Exposes TF1 graph nodes: input_x, input_y, train_flag, dropout_keep_prob
    (placeholders) and scores, predictions, loss, accuracy (outputs).

    Paper for Chebyshev Filter: https://arxiv.org/abs/1606.09375
    Paper for Spline Filter: https://arxiv.org/abs/1312.6203
    Code adapted from https://github.com/mdeff/cnn_graph
    """
    def __init__(self, filter_name, L, K, F, P, FC, batch_size, num_vertices, num_classes, l2_reg_lambda):
        # Sanity checks
        assert len(L) >= len(F) == len(K) == len(P)  # verify consistency w.r.t. the no. of GCLs
        assert np.all(np.array(P) >= 1)  # all pool sizes >= 1
        p_log2 = np.where(np.array(P) > 1, np.log2(P), 0)
        assert np.all(np.mod(p_log2, 1) == 0)  # all pool sizes > 1 should be powers of 2
        assert len(L) >= 1 + np.sum(p_log2)  # enough coarsening levels for pool sizes
        # Retrieve convolutional filter: dispatch to graph_conv_<filter_name>
        assert filter_name == "chebyshev" or filter_name == "spline" or filter_name == "fourier"
        self.graph_conv = getattr(self, "graph_conv_" + filter_name)
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.float32, [batch_size, num_vertices], name="input_x")
        self.input_y = tf.placeholder(tf.int32, [batch_size], name="input_y")
        self.train_flag = tf.placeholder(tf.bool, name="train_flag")
        self.dropout_keep_prob = tf.placeholder_with_default(1.0, shape=[], name="dropout_keep_prob")
        # Keeping track of L2 regularization loss
        l2_loss = tf.constant(0.0)
        # Keep the useful Laplacians only (may be zero): each pool of size p
        # skips log2(p) coarsening levels.
        M_0 = L[0].shape[0]
        j = 0
        L_tmp = []
        for p_i in P:
            L_tmp.append(L[j])
            j += int(np.log2(p_i)) if p_i > 1 else 0
        L = L_tmp
        # Expand dims for convolution operation
        x = tf.expand_dims(self.input_x, 2)  # B x V x F=1
        # Graph convolution + max-pool layer(s)
        for i in range(len(K)):
            with tf.variable_scope("conv-maxpool-{}".format(i)):
                with tf.variable_scope("conv-{}-{}".format(K[i], F[i])):
                    # Graph convolution operation
                    x = self.graph_conv(x, L[i], K[i], F[i])
                    # Add bias & apply non-linearity
                    b = tf.Variable(tf.constant(0.1, shape=[1, 1, F[i]]), name="b")
                    x = tf.nn.relu(x + b, name="relu")
                with tf.variable_scope("maxpool-{}".format(P[i])):
                    # Graph max-pooling operation
                    x = self.graph_max_pool(x, P[i])
        # Add dropout
        with tf.variable_scope("dropout"):
            x = tf.nn.dropout(x, self.dropout_keep_prob)
        # Reshape x for fully-connected layers
        with tf.variable_scope("reshape"):
            B, V, F = x.get_shape()
            B, V, F = int(B), int(V), int(F)
            x = tf.reshape(x, [B, V * F])
        # Add fully-connected layers (if any); each is affine + batch-norm
        # (driven by train_flag) + ReLU + dropout.
        for i, num_units in enumerate(FC):
            with tf.variable_scope("fc-{}-{}".format(i, num_units)):
                W = tf.get_variable("W",
                                    shape=[x.get_shape().as_list()[1], num_units],
                                    initializer=tf.contrib.layers.xavier_initializer())
                b = tf.Variable(tf.constant(0.1, shape=[num_units]), name="b")
                l2_loss += tf.nn.l2_loss(W)
                x = tf.nn.xw_plus_b(x, W, b)
                x = tf.layers.batch_normalization(x, training=self.train_flag)
                x = tf.nn.relu(x)
                x = tf.nn.dropout(x, self.dropout_keep_prob)
        # Final (unnormalized) scores and predictions
        with tf.variable_scope("output"):
            W = tf.get_variable("W",
                                shape=[x.get_shape().as_list()[1], num_classes],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            self.scores = tf.nn.xw_plus_b(x, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
            self.predictions = tf.cast(self.predictions, tf.int32)
        # Calculate mean cross-entropy loss
        with tf.variable_scope("loss"):
            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        # Calculate accuracy
        with tf.variable_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, self.input_y)
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

    def graph_conv_chebyshev(self, x, L, K, F_out):
        """
        Graph convolutional operation (Chebyshev polynomial filter).
        """
        # K = Chebyshev polynomial order & support size
        # F_out = No. of output features (per vertex)
        # B = Batch size
        # V = No. of vertices
        # F_in = No. of input features (per vertex)
        B, V, F_in = x.get_shape()
        B, V, F_in = int(B), int(V), int(F_in)
        # Rescale Laplacian and store as a TF sparse tensor (copy to not modify the shared L)
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        L = tf.cast(L, tf.float32)
        # Transform to Chebyshev basis via the recurrence T_k = 2 L T_{k-1} - T_{k-2}
        x0 = tf.transpose(x, perm=[1, 2, 0])  # V x F_in x B
        x0 = tf.reshape(x0, [V, F_in * B])  # V x F_in*B
        x = tf.expand_dims(x0, 0)  # 1 x V x F_in*B
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x V x F_in*B
            return tf.concat([x, x_], axis=0)  # K x V x F_in*B
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # V x F_in*B
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, V, F_in, B])  # K x V x F_in x B
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # B x V x F_in x K
        x = tf.reshape(x, [B * V, F_in * K])  # B*V x F_in*K
        # Compose linearly F_in features to get F_out features
        W = tf.Variable(tf.truncated_normal([F_in * K, F_out], stddev=0.1), name="W")
        x = tf.matmul(x, W)  # B*V x F_out
        x = tf.reshape(x, [B, V, F_out])  # B x V x F_out
        return x

    def graph_conv_spline(self, x, L, K, F_out):
        """
        Graph convolution operation (spline-parameterized spectral filter).
        """
        B, V, F_in = x.get_shape()
        B, V, F_in = int(B), int(V), int(F_in)
        # Fourier basis
        lamb, U = graph.fourier(L)
        U = tf.constant(U.T, dtype=tf.float32)  # V x V
        # Spline basis evaluated at the Laplacian eigenvalues
        basis = self.bspline_basis(K, lamb, degree=3)  # V x K
        basis = tf.constant(basis, dtype=tf.float32)
        # Weight multiplication
        W = tf.Variable(tf.truncated_normal([K, F_in * F_out], stddev=0.1), name="W")
        W = tf.matmul(basis, W)  # V x F_out*F_in
        W = tf.reshape(W, [V, F_out, F_in])
        return self.filter_in_fourier(x, L, K, F_out, U, W)

    def graph_conv_fourier(self, x, L, K, F_out):
        """
        Graph convolution operation (non-parametric Fourier filter).
        """
        assert K == L.shape[0]  # artificial but useful to compute number of parameters
        B, V, F_in = x.get_shape()
        B, V, F_in = int(B), int(V), int(F_in)
        # Fourier basis
        _, U = graph.fourier(L)
        U = tf.constant(U.T, dtype=tf.float32)
        # Weights: one free parameter per vertex/feature pair
        W = tf.Variable(tf.truncated_normal([V, F_out, F_in], stddev=0.1), name="W")
        return self.filter_in_fourier(x, L, K, F_out, U, W)

    def graph_max_pool(self, x, p):
        """
        Graph max pooling operation. p must be 1 or a power of 2.
        """
        if p > 1:
            x = tf.expand_dims(x, 3)  # B x V x F x 1
            x = tf.nn.max_pool(x, ksize=[1, p, 1, 1], strides=[1, p, 1, 1], padding="SAME")
            return tf.squeeze(x, [3])  # B x V/p x F
        else:
            return x

    def filter_in_fourier(self, x, L, K, F_out, U, W):
        # Apply spectral filter W in the graph Fourier domain defined by U.
        B, V, F_in = x.get_shape()
        B, V, F_in = int(B), int(V), int(F_in)
        x = tf.transpose(x, perm=[1, 2, 0])  # V x F_in x B
        # Transform to Fourier domain
        x = tf.reshape(x, [V, F_in * B])  # V x F_in*B
        x = tf.matmul(U, x)  # V x F_in*B
        x = tf.reshape(x, [V, F_in, B])  # V x F_in x B
        # Filter
        x = tf.matmul(W, x)  # for each feature
        x = tf.transpose(x)  # B x F_out x V
        x = tf.reshape(x, [B * F_out, V])  # B*F_out x V
        # Transform back to graph domain
        x = tf.matmul(x, U)  # B*F_out x V
        x = tf.reshape(x, [B, F_out, V])  # B x F_out x V
        return tf.transpose(x, perm=[0, 2, 1])  # B x V x F_out

    def bspline_basis(self, K, x, degree=3):
        """
        Return the B-spline basis.
        K: Number of control points.
        x: Evaluation points or number of evenly distributed evaluation points.
        degree: Degree of the spline. Cubic spline by default.
        """
        if np.isscalar(x):
            x = np.linspace(0, 1, x)
        # Evenly distributed knot vectors (clamped: endpoints repeated `degree` times)
        kv1 = x.min() * np.ones(degree)
        kv2 = np.linspace(x.min(), x.max(), K - degree + 1)
        kv3 = x.max() * np.ones(degree)
        kv = np.concatenate((kv1, kv2, kv3))
        # Cox-DeBoor recursive function to compute one spline over x
        def cox_deboor(k, d):
            # Test for end conditions, the rectangular degree zero spline
            if (d == 0):
                return ((x - kv[k] >= 0) & (x - kv[k + 1] < 0)).astype(int)
            denom1 = kv[k + d] - kv[k]
            term1 = 0
            if denom1 > 0:
                term1 = ((x - kv[k]) / denom1) * cox_deboor(k, d - 1)
            denom2 = kv[k + d + 1] - kv[k + 1]
            term2 = 0
            if denom2 > 0:
                term2 = ((-(x - kv[k + d + 1]) / denom2) * cox_deboor(k + 1, d - 1))
            return term1 + term2
        # Compute basis for each point
        basis = np.column_stack([cox_deboor(k, degree) for k in range(K)])
        # The half-open intervals above leave the last point uncovered; pin it.
        basis[-1, -1] = 1
        return basis
|
141342
|
import unittest
import pandas as pd
from osu.local.beatmap.beatmapIO import BeatmapIO
from analysis.osu.std.map_data import StdMapData
class TestStdMapData(unittest.TestCase):
    """Unit tests for StdMapData: a hand-built map_data fixture (one slider
    with aimpoints at 100/200/300/400 ms and two circles at 1100 and 2100 ms)
    plus a real beatmap loaded from disk."""

    @classmethod
    def setUpClass(cls):
        # Real beatmap used by the get_map_data-based tests.
        cls.beatmap = BeatmapIO.open_beatmap('unit_tests\\maps\\osu\\test\\abraker - unknown (abraker) [250ms].osu')
        # Synthetic fixture: slider (press/hold/hold/release) + two circles.
        map_data = [
            pd.DataFrame(
            [
                [ 100, 0, 0, StdMapData.TYPE_PRESS, StdMapData.TYPE_SLIDER ],
                [ 200, 0, 0, StdMapData.TYPE_HOLD, StdMapData.TYPE_SLIDER ],
                [ 300, 0, 0, StdMapData.TYPE_HOLD, StdMapData.TYPE_SLIDER ],
                [ 400, 0, 0, StdMapData.TYPE_RELEASE, StdMapData.TYPE_SLIDER ],
            ],
            columns=['time', 'x', 'y', 'type', 'object']),
            pd.DataFrame(
            [
                [ 1100, 0, 0, StdMapData.TYPE_PRESS, StdMapData.TYPE_CIRCLE ],
                [ 1101, 0, 0, StdMapData.TYPE_RELEASE, StdMapData.TYPE_CIRCLE ],
            ],
            columns=['time', 'x', 'y', 'type', 'object']),
            pd.DataFrame(
            [
                [ 2100, 0, 0, StdMapData.TYPE_PRESS, StdMapData.TYPE_CIRCLE ],
                [ 2101, 0, 0, StdMapData.TYPE_RELEASE, StdMapData.TYPE_CIRCLE ],
            ],
            columns=['time', 'x', 'y', 'type', 'object']),
        ]
        # MultiIndex (hitobject, aimpoint), matching StdMapData's layout.
        cls.map_data = pd.concat(map_data, axis=0, keys=range(len(map_data)), names=[ 'hitobject', 'aimpoint' ])

    @classmethod
    def tearDown(cls):
        # NOTE(review): named tearDown but decorated @classmethod --
        # tearDownClass was probably intended; confirm.
        pass

    def test_std_hitobject_to_aimpoints(self):
        # Smoke test: conversion must not raise for any hitobject.
        for hitobject in self.beatmap.hitobjects:
            aimpoint_data = StdMapData.std_hitobject_to_aimpoints(hitobject)

    def test_get_map_data(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)

    def test_get_num_hitobjects(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        num_hitobjects = StdMapData.get_num_hitobjects(map_data)

    def test_get_presses(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        presses = StdMapData.get_presses(map_data)

    def test_get_releases(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        presses = StdMapData.get_releases(map_data)

    def test_get_scorepoint_before(self):
        # Time: Before start
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 0)
        self.assertEqual(scorepoint_data, None)
        # Time: At first aimpoint
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 100)
        self.assertEqual(scorepoint_data, None)
        # Time: At second aimpoint
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 200)
        self.assertEqual(scorepoint_data['time'], 100)
        # Time: At slider release
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 400)
        self.assertEqual(scorepoint_data['time'], 300)
        # Time: At 2nd hitobject
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 1100)
        self.assertEqual(scorepoint_data['time'], 400)
        # Time: At last hitobject
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 2100)
        self.assertEqual(scorepoint_data['time'], 1101)
        # Time: After last hitobject
        scorepoint_data = StdMapData.get_scorepoint_before(self.map_data, 2200)
        self.assertEqual(scorepoint_data['time'], 2101)

    def test_get_scorepoint_after(self):
        # Time: Before start
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 0)
        self.assertEqual(scorepoint_data['time'], 100)
        # Time: At first aimpoint
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 100)
        self.assertEqual(scorepoint_data['time'], 200)
        # Time: At second aimpoint
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 200)
        self.assertEqual(scorepoint_data['time'], 300)
        # Time: At slider release
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 400)
        self.assertEqual(scorepoint_data['time'], 1100)
        # Time: At 2nd hitobject
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 1100)
        self.assertEqual(scorepoint_data['time'], 1101)
        # Time: At last hitobject
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 2100)
        self.assertEqual(scorepoint_data['time'], 2101)
        # Time: After last hitobject
        scorepoint_data = StdMapData.get_scorepoint_after(self.map_data, 2200)
        self.assertEqual(scorepoint_data, None)

    '''
    def test_get_next_hitobject_idx(self):
        beatmap = BeatmapIO.open_beatmap('unit_tests\\maps\\osu\\playable\\Within Temptation - The Unforgiving (Armin) [Marathon].osu')
        map_data = StdMapData.get_map_data(beatmap.hitobjects)
        idx = -1
        while idx != None:
            idx = StdMapData.get_next_hitobject_idx(map_data, idx)
    '''

    def test_get_visible_at(self):
        # Sweep a range of times around the map; must not raise.
        for time in range(-1000, 10000, 100):
            visible = StdMapData.get_visible_at(self.map_data, time, 400)

    def test_get_note_before(self):
        # Time: Before start
        hitobject_data = StdMapData.get_note_before(self.map_data, 0)
        self.assertEqual(hitobject_data, None)
        # Time: At first aimpoint
        hitobject_data = StdMapData.get_note_before(self.map_data, 100)
        self.assertEqual(hitobject_data, None)
        # Time: At second aimpoint
        hitobject_data = StdMapData.get_note_before(self.map_data, 200)
        self.assertEqual(hitobject_data.iloc[0]['time'], 100)
        # Time: At slider release
        hitobject_data = StdMapData.get_note_before(self.map_data, 400)
        self.assertEqual(hitobject_data.iloc[0]['time'], 100)
        # Time: At 2nd hitobject
        hitobject_data = StdMapData.get_note_before(self.map_data, 1100)
        self.assertEqual(hitobject_data.iloc[0]['time'], 100)
        # Time: At last hitobject
        hitobject_data = StdMapData.get_note_before(self.map_data, 2100)
        self.assertEqual(hitobject_data.iloc[0]['time'], 1100)
        # Time: After last hitobject
        hitobject_data = StdMapData.get_note_before(self.map_data, 2101)
        self.assertEqual(hitobject_data.iloc[0]['time'], 2100)

    def test_get_note_after(self):
        # Time: Before start
        hitobject_data = StdMapData.get_note_after(self.map_data, 0)
        self.assertEqual(hitobject_data.iloc[0]['time'], 100)
        # Time: At first aimpoint
        hitobject_data = StdMapData.get_note_after(self.map_data, 100)
        self.assertEqual(hitobject_data.iloc[0]['time'], 1100)
        # Time: At second aimpoint
        hitobject_data = StdMapData.get_note_after(self.map_data, 200)
        self.assertEqual(hitobject_data.iloc[0]['time'], 1100)
        # Time: At slider release
        hitobject_data = StdMapData.get_note_after(self.map_data, 400)
        self.assertEqual(hitobject_data.iloc[0]['time'], 1100)
        # Time: At 2nd hitobject
        hitobject_data = StdMapData.get_note_after(self.map_data, 1100)
        self.assertEqual(hitobject_data.iloc[0]['time'], 2100)
        # Time: At last hitobject
        hitobject_data = StdMapData.get_note_after(self.map_data, 2100)
        self.assertEqual(hitobject_data, None)
        # Time: After last hitobject
        hitobject_data = StdMapData.get_note_after(self.map_data, 2101)
        self.assertEqual(hitobject_data, None)

    def test_time_slice(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        map_data = StdMapData.time_slice(map_data, 1000, 2000, True)
        # Sliced data must start within the requested window.
        self.assertGreaterEqual(map_data['time'].values[0], 1000)
        self.assertLessEqual(map_data['time'].values[0], 2000)

    def test_start_times(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        start_times = StdMapData.start_times(map_data)

    def test_end_times(self):
        map_data = StdMapData.get_map_data(self.beatmap.hitobjects)
        end_times = StdMapData.end_times(map_data)
|
141361
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.decomposition import FastICA
from ..utils.parallel import ParallelBackend, get_backend
from ..utils.kde import kde
from ..utils.cubic import cubic_spline
from ..utils.sobol import multivariate_normal
from ..utils.random import get_generator
from itertools import starmap
import copy
import warnings
try:
from getdist import plots, MCSamples
HAS_GETDIST = True
except Exception:
HAS_GETDIST = False
__all__ = ['SIT']
# TODO: vectorize this
# TODO: update when sklearn supports random_generator
# https://github.com/scikit-learn/scikit-learn/issues/16988
# TODO: do not activate the backend if not use_parallel
class SIT:
    """
    Sliced Iterative Transform.

    Each iteration applies a FastICA rotation followed by a per-axis
    kde-based 1-d Gaussianization, so that arbitrary samples can be
    mapped to (forward) and from (backward) a unit multivariate normal.

    Parameters
    ----------
    n_iter : positive int, optional
        Number of iterations to perform. Set to 10 by default.
    parallel_backend : None, int, Pool, Client or ParallelBackend, optional
        The backend for parallelization. If `None`, will use the bayesfast
        global parallel backend. Otherwise, will be passed to initialize
        a ParallelBackend.
    bw_factor : positive float, optional
        Multiplicative factor for the kde bandwidth. Set to 1. by default.
    m_ica : positive int, optional
        Max number of points used to compute FastICA. Set to 20000 by default.
    random_generator : None, int, array_like[ints], SeedSequence, BitGenerator or Generator, optional
        The numpy random generator. If `None`, will use the bayesfast global
        random generator. Otherwise, will be passed to
        `numpy.random.default_rng` to initialize a random generator.
    m_plot : int, optional
        Max number of dims for triangle_plot. If non-positive, will be
        interpreted as no limits. Set to 8 by default.
    cubic_options : dict, optional
        Additional keyword arguments for the cubic spline. Set to {} by default.
    ica_options : dict, optional
        Additional keyword arguments for FastICA. Set to {'max_iter': 100} by
        default.
    mvn_generator : None or callable, optional
        Random number generator for the multivariate normal distribution. Should
        have signature `(mean, cov, size) -> samples`. If `None`, will use
        `bayesfast.utils.sobol.multivariate_normal`. Set to `None` by default.
    """
    def __init__(self, n_iter=10, parallel_backend=None, bw_factor=1.,
                 m_ica=20000, random_generator=None, m_plot=8,
                 cubic_options=None, ica_options=None, mvn_generator=None):
        self._data = None
        self._cubic = []
        self.n_iter = n_iter
        self.parallel_backend = parallel_backend
        self.bw_factor = bw_factor
        self.m_ica = m_ica
        self.random_generator = random_generator
        self.m_plot = m_plot
        self.cubic_options = cubic_options
        self.ica_options = ica_options
        self.mvn_generator = mvn_generator
    def __getstate__(self):
        """We need this to make self._parallel_backend work correctly."""
        # The backend may hold unpicklable handles (pools, clients), so it
        # is dropped on pickling and falls back to the global backend.
        self_dict = self.__dict__.copy()
        self_dict['_parallel_backend'] = None
        return self_dict
    @property
    def data(self):
        # Current (partially Gaussianized) data after `i_iter` iterations.
        return self._data
    @property
    def data_init(self):
        # Copy of the data as passed to `fit`, before any transform.
        return self._data_init
    @property
    def dim(self):
        return self._data.shape[-1]
    @property
    def weights(self):
        return self._weights
    @property
    def n_iter(self):
        return self._n_iter
    @n_iter.setter
    def n_iter(self, n):
        try:
            n = int(n)
            assert n > 0
        except Exception:
            raise ValueError('n_iter should be a positive int.')
        self._n_iter = n
    @property
    def i_iter(self):
        # Number of iterations performed so far (one cubic-spline set per
        # iteration).
        return len(self._cubic)
    def add_iter(self, n):
        """Increase the target number of iterations by `n`."""
        self.n_iter = self.n_iter + n
    @property
    def parallel_backend(self):
        if self._parallel_backend is None:
            return get_backend()
        else:
            return self._parallel_backend
    @parallel_backend.setter
    def parallel_backend(self, backend):
        if backend is None:
            self._parallel_backend = None
        else:
            self._parallel_backend = ParallelBackend(backend)
    @property
    def bw_factor(self):
        return self._bw_factor
    @bw_factor.setter
    def bw_factor(self, bw):
        try:
            bw = float(bw)
            assert bw > 0
        except Exception:
            raise ValueError('bw_factor should be a positive float.')
        self._bw_factor = bw
    @property
    def m_ica(self):
        return self._m_ica
    @m_ica.setter
    def m_ica(self, m):
        try:
            m = int(m)
            assert m > 0
        except Exception:
            raise ValueError('m_ica should be a positive int.')
        self._m_ica = m
    @property
    def random_generator(self):
        if self._random_generator is None:
            return get_generator()
        else:
            return self._random_generator
    @random_generator.setter
    def random_generator(self, generator):
        if generator is None:
            self._random_generator = None
        else:
            self._random_generator = np.random.default_rng(generator)
    @property
    def m_plot(self):
        return self._m_plot
    @m_plot.setter
    def m_plot(self, m):
        try:
            m = int(m)
        except Exception:
            raise ValueError('m_plot should be an int.')
        self._m_plot = m
    @property
    def cubic_options(self):
        return self._cubic_options
    @cubic_options.setter
    def cubic_options(self, co):
        try:
            if co is None:
                co = {}
            self._cubic_options = dict(co)
        except Exception:
            raise ValueError('cubic_options should be a dict.')
    @property
    def ica_options(self):
        return self._ica_options
    @ica_options.setter
    def ica_options(self, io):
        try:
            if io is None:
                io = {'max_iter': 100}
            self._ica_options = dict(io)
        except Exception:
            raise ValueError('ica_options should be a dict.')
    @property
    def mvn_generator(self):
        return self._mvn_generator
    @mvn_generator.setter
    def mvn_generator(self, mg):
        if mg is None:
            mg = multivariate_normal
        if callable(mg):
            self._mvn_generator = mg
        else:
            raise ValueError('invalid value for mvn_generator.')
    def _gaussianize_1d(self, x):
        """Fit a cubic spline mapping the 1-d sample `x` to a standard normal
        via the kde CDF followed by the normal PPF."""
        k = kde(x, bw_factor=self._bw_factor, weights=self._weights)
        c = cubic_spline(x, lambda xx: norm.ppf(k.cdf(xx)),
                         **self._cubic_options)
        return c
    def _gaussianize_nd(self, x):
        """Gaussianize every column of `x` independently and record the fitted
        splines for later forward/backward transforms."""
        map_result = self.parallel_backend.map(self._gaussianize_1d, x.T)
        self._cubic.append(map_result)
        y = np.array([map_result[i](x[:, i]) for i in range(self.dim)]).T
        return y
    def _ica(self, x):
        """Run FastICA on (a subsample of) `x`.

        Returns the transformed data `y` (normalized to unit std), the
        forward matrix `A`, its inverse `B`, and the data mean `m`.
        """
        io = self._ica_options.copy()
        # TODO: pass the Generator directly once sklearn supports it,
        # https://github.com/scikit-learn/scikit-learn/issues/16988
        if 'random_state' not in io:
            io['random_state'] = self.random_generator.integers(0, 2**32)
        ica = FastICA(**io)
        if self._m_ica is None:
            ica.fit(x)
        else:
            # Fit on at most m_ica points to bound the FastICA cost.
            n_ica = min(x.shape[0], self.m_ica)
            ica.fit(x[self.random_generator.choice(x.shape[0], n_ica, False)])
        y = ica.transform(x)
        # NOTE(review): `m` is the mean of the full data, while FastICA
        # centers with the mean of the fitted subsample — presumably close
        # enough for this use; confirm if exact invertibility is needed.
        m = np.mean(x, axis=0)
        s = np.std(y, axis=0)
        y /= s
        A = ica.components_ / s[:, np.newaxis]
        B = np.linalg.inv(A)
        return y, A, B, m
    def _init_data(self, data, weights):
        """Validate and store `data` and `weights`, and reset the fit state."""
        if data is None:
            if self._data is None:
                raise ValueError('you have not given me the data to fit.')
        else:
            try:
                data = np.array(data)
                assert data.size > 0
            except Exception:
                raise ValueError('invalid value for data.')
            if data.ndim == 2:
                self._data = data
            elif data.ndim >= 3:
                # Flatten leading axes (e.g. multiple chains) into one.
                self._data = data.reshape((-1, data.shape[-1]))
            else:
                raise ValueError('invalid shape for data.ndim.')
        self._data_init = self._data.copy()
        if self.dim == 1:
            raise ValueError('I cannot do rotations for only one variable.')
        _n = self._data.shape[0]
        if weights is not None:
            try:
                weights = np.asarray(weights)
                assert weights.shape == (_n,)
            except Exception:
                raise ValueError('invalid value for weights.')
            self._weights = weights
        else:
            self._weights = np.ones(_n) / _n
        # Per-iteration records: splines, rotation matrices, means, log-dets.
        self._cubic = []
        self._A = np.zeros((0, self.dim, self.dim))
        self._B = np.zeros((0, self.dim, self.dim))
        self._m = np.zeros((0, self.dim))
        self._logdetA = np.zeros(0)
    def fit(self, data=None, weights=None, n_run=None, plot=0):
        """Fit the transform by running `n_run` ICA + Gaussianization
        iterations (all remaining iterations by default).

        `plot` controls triangle plots: 0 for none, k > 0 to plot every k
        iterations, negative to plot only at the end. Requires getdist.
        """
        self._init_data(data, weights)
        try:
            plot = int(plot)
        except Exception:
            raise ValueError('plot should be an int.')
        if (not HAS_GETDIST) and (plot != 0):
            plot = 0
            warnings.warn('you have not installed getdist, so I can only do '
                          'plot=0.', RuntimeWarning)
        if n_run is None:
            n_run = self.n_iter - self.i_iter
        else:
            try:
                n_run = int(n_run)
                assert n_run > 0
            except Exception:
                raise ValueError('invalid value for n_run.')
            if n_run > self.n_iter - self.i_iter:
                self.n_iter = self.i_iter + n_run
        with self.parallel_backend:
            for i in range(n_run):
                if plot != 0 and self.i_iter == 0:
                    self.triangle_plot()
                try:
                    y, A, B, m = self._ica(self._data)
                    self._data = self._gaussianize_nd(y)
                except Exception:
                    # FastICA occasionally fails to converge; a different
                    # random seed usually works, so retry once.
                    warnings.warn(
                        "we found that sometimes it goes wrong, but actually "
                        "it can work if we use a different random seed, so "
                        "let's give it one more chance.", RuntimeWarning)
                    y, A, B, m = self._ica(self._data)
                    self._data = self._gaussianize_nd(y)
                self._A = np.concatenate((self._A, A[np.newaxis]), axis=0)
                self._B = np.concatenate((self._B, B[np.newaxis]), axis=0)
                self._m = np.concatenate((self._m, m[np.newaxis]), axis=0)
                self._logdetA = np.append(self._logdetA,
                                          np.log(np.abs(np.linalg.det(A))))
                finite_index = np.isfinite(self._data).all(axis=1)
                # BUGFIX: the old check `len(finite_index) < shape[0]` could
                # never be True, since the boolean mask always has one entry
                # per row; count the finite rows instead.
                if np.count_nonzero(finite_index) < self._data.shape[0]:
                    warnings.warn(
                        'inf encountered for some data points. We will remove '
                        'these inf points for now.', RuntimeWarning)
                    self._data = self._data[finite_index, :]
                    self._weights = self._weights[finite_index]
                if (plot > 0) and (not (self.i_iter + 1) % plot):
                    self.triangle_plot()
        if plot < 0:
            self.triangle_plot()
    def triangle_plot(self):
        """Show a getdist triangle plot of (up to `m_plot` dims of) the
        current data."""
        if not HAS_GETDIST:
            raise RuntimeError(
                'you need to install getdist to get the triangle plot.')
        if 0 < self.m_plot < self.dim:
            plot_data = self._data[:, :self.m_plot]
        else:
            plot_data = self._data
        samples = MCSamples(samples=plot_data)
        g = plots.getSubplotPlotter()
        g.triangle_plot([samples,], filled=True, contour_args={'alpha':0.8},
                        diag1d_kwargs={'normalized':True})
        if self.i_iter:
            plt.suptitle("triangle plot after iteration " + str(self.i_iter),
                         fontsize=plot_data.shape[-1] * 4, ha='left')
        else:
            plt.suptitle('triangle plot for the initial data',
                         fontsize=plot_data.shape[-1] * 4, ha='left')
        plt.show()
    def sample(self, n, use_parallel=False):
        """Draw `n` samples by pushing unit-normal draws through the
        backward transform. Returns `(x, log_j, y)`."""
        try:
            n = int(n)
            assert n > 0
        except Exception:
            raise ValueError('n should be a positive int.')
        y = self.mvn_generator(np.zeros(self.dim), np.eye(self.dim), n)
        x, log_j = self.backward_transform(y, use_parallel)
        return x, log_j, y
    def _do_evaluate(self, c, x):
        # Thin wrappers so splines can be applied via `map`/`starmap`.
        return c.evaluate(x)
    def _do_derivative(self, c, x):
        return c.derivative(x)
    def _do_solve(self, c, x):
        return c.solve(x)
    def forward_transform(self, x, use_parallel=False):
        """Map data-space points `x` to the Gaussianized space.

        Returns `(y, log_j)` where `log_j` is the log abs determinant of
        the Jacobian dy/dx accumulated over all iterations.
        """
        try:
            y = np.array(x)
        except Exception:
            raise ValueError('invalid value for x.')
        if y.ndim == 1:
            y = y[np.newaxis, :]
        if y.shape[-1] != self.dim:
            raise ValueError('invalid shape for x.')
        _original_shape = y.shape
        y = y.reshape((-1, _original_shape[-1]))
        log_j = np.zeros(y.shape[0])
        with self.parallel_backend:
            for i in range(self.i_iter):
                y = (y - self._m[i]) @ self._A[i].T
                if use_parallel:
                    map_result = self.parallel_backend.map(
                        self._do_derivative, self._cubic[i], y.T)
                else:
                    map_result = list(
                        starmap(self._do_derivative, zip(self._cubic[i], y.T)))
                log_j += np.sum(np.log(map_result), axis=0)
                if use_parallel:
                    map_result = self.parallel_backend.map(
                        self._do_evaluate, self._cubic[i], y.T)
                else:
                    map_result = list(
                        starmap(self._do_evaluate, zip(self._cubic[i], y.T)))
                y = np.array(map_result).T
        log_j += np.sum(self._logdetA)
        y = y.reshape(_original_shape)
        log_j = log_j.reshape(_original_shape[:-1])
        return y, log_j
    def backward_transform(self, y, use_parallel=False):
        """Map Gaussianized-space points `y` back to the data space.

        Returns `(x, log_j)`; `log_j` is the forward log-Jacobian dy/dx
        evaluated along the inverse path (same sign convention as
        `forward_transform`, as consumed by `logq`).
        """
        try:
            x = np.array(y)
        except Exception:
            raise ValueError('invalid value for y.')
        if x.ndim == 1:
            x = x[np.newaxis, :]
        if x.shape[-1] != self.dim:
            raise ValueError('invalid shape for y.')
        _original_shape = x.shape
        x = x.reshape((-1, _original_shape[-1]))
        log_j = np.zeros(x.shape[0])
        with self.parallel_backend:
            for i in reversed(range(self.i_iter)):
                if use_parallel:
                    map_result = self.parallel_backend.map(
                        self._do_solve, self._cubic[i], x.T)
                else:
                    map_result = list(
                        starmap(self._do_solve, zip(self._cubic[i], x.T)))
                x = np.array(map_result).T
                if use_parallel:
                    map_result = self.parallel_backend.map(
                        self._do_derivative, self._cubic[i], x.T)
                else:
                    map_result = list(
                        starmap(self._do_derivative, zip(self._cubic[i], x.T)))
                log_j += np.sum(np.log(map_result), axis=0)
                x = x @ self._B[i].T + self._m[i]
        log_j += np.sum(self._logdetA)
        x = x.reshape(_original_shape)
        log_j = log_j.reshape(_original_shape[:-1])
        return x, log_j
    def logq(self, x, use_parallel=False):
        """Log density of the fitted approximation at data-space points `x`:
        standard-normal log-pdf in the transformed space plus the
        log-Jacobian."""
        y, log_j = self.forward_transform(x, use_parallel)
        return np.sum(norm.logpdf(y), axis=-1) + log_j
|
141371
|
from __future__ import absolute_import
from __future__ import division
# ** This is the file quisk_conf_defaults.py which contains defaults for Quisk. **
#
# Please do not change this configuration file quisk_conf_defaults.py.
# Instead copy one of the other quisk_conf_*.py files to your own
# configuration file and make changes there.
#
# For Linux, your standard configuration file name is .quisk_conf.py in your home directory.
#
# For Windows, your standard configuration file name is quisk_conf.py in your Documents folder.
#
# You can specify a different configuration file with the -c or --config command line argument.
#
# Check the config screen to make sure that the correct configuration file is in use.
#
#
# PLEASE DO **NOT** COPY THIS FILE AND USE IT AS A START FOR YOUR CONFIGURATION FILE!
# YOUR CONFIGURATION FILE SHOULD ONLY HAVE LINES THAT DIFFER FROM THIS FILE. QUISK
# IMPORTS THIS FILE FIRST, AND THEN YOUR CONFIG FILE OVERWRITES A FEW ITEMS SUCH AS
# SOUND CARD NAMES.
#
# Quisk imports quisk_conf_defaults.py to set its configuration.
# If you have a configuration file, it then overwrites the defaults
# with your parameters.
#
# Quisk uses a hardware file to control your transceiver and optionally other station hardware.
# Your config file specifies the hardware file to use. Quisk comes with several hardware
# files, and you can write your own hardware file in Python to do anything you want.
#
# Quisk has a custom decimation scheme for each sample rate. The allowable sample rates
# are the four SDR-IQ rates plus 24, 48, 96, 192, 240, 384, 480, and 960 ksps. Other rates
# can be added.
import sys
import wx
# Import the default Hardware module. You can import a different module in
# your configuration file.
import quisk_hardware_model as quisk_hardware
# Module for additional widgets (advanced usage). See n2adr/quisk_widgets.py for an example.
# import n2adr.quisk_widgets as quisk_widgets
quisk_widgets = None
################ Receivers SoftRock USB, Devices controlled by USB that capture samples from a sound card, and (for Tx) play samples to a sound card
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'softrock/hardware_usb.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = 'softrock/widgets_tx.py'
use_sdriq = 0 # Get ADC samples from SDR-IQ is not used
use_rx_udp = 0 # Get ADC samples from UDP is not used
use_soapy = 0 # Get ADC samples from SoapySDR is not used
sample_rate = 48000 # name_of_sound_capt hardware sample rate in Hertz
if sys.platform == "win32":
name_of_sound_capt = "Primary"
name_of_sound_play = "Primary"
elif sys.platform == "darwin":
name_of_sound_capt = "pulse"
name_of_sound_play = "pulse"
else:
name_of_sound_capt = "hw:0" # Name of soundcard capture hardware device.
name_of_sound_play = "hw:0"
channel_i = 0 # Soundcard index of in-phase channel: 0, 1, 2, ...
channel_q = 1 # Soundcard index of quadrature channel: 0, 1, 2, ...
## usb_vendor_id Vendor ID for USB control, integer
# USB devices have a vendor ID and a product ID.
usb_vendor_id = 0x16c0
## usb_product_id Product ID for USB control, integer
# USB devices have a vendor ID and a product ID.
usb_product_id = 0x05dc
# I2C-address of the Si570 in the softrock; Thanks to <NAME>, DB6QS
## si570_i2c_address I2C address, integer
# I2C-address of the Si570 in the softrock.
si570_i2c_address = 0x55
#si570_i2c_address = 0x70
# Thanks to <NAME>, KB8OJH, for this patch for the Si570 (many SoftRock's):
## si570_direct_control Use Si570 direct control, boolean
# If you are using a DG8SAQ interface to set a Si570 clock directly, set
# this to True. Complex controllers which have their own internal
# crystal calibration do not require this.
si570_direct_control = False
#si570_direct_control = True
## si570_xtal_freq Si570 crystal frequency, integer
# This is the Si570 startup frequency in Hz. 114.285MHz is the typical
# value from the data sheet; you can use 'usbsoftrock calibrate' to find
# the value for your device.
si570_xtal_freq = 114285000
## key_poll_msec Key poll time msec, integer
# Softrock hardware must be polled to get the key up/down state. This is the time between
# polls in milliseconds. Use zero to turn off the poll if your SoftRock does not have a key
# jack or USB key control.
key_poll_msec = 0
#key_poll_msec = 5
## key_hang_time Key hang time secs, number
# Softrock transmit hardware uses semi break-in for CW operation. This is the time in
# seconds before changing back to receive.
key_hang_time = 0.7
## repeater_delay Repeater delay secs, number
# The fixed delay for changing the repeater Rx/Tx frequency in seconds.
repeater_delay = 0.25
## rx_max_amplitude_correct Max ampl correct, number
# If you get your I/Q samples from a sound card, you will need to correct the
# amplitude and phase for inaccuracies in the analog hardware. The correction is
# entered using the controls from the "Rx Phase" button on the config screen.
# You must enter a positive number. This controls the range of the control.
rx_max_amplitude_correct = 0.2
## rx_max_phase_correct Max phase correct, number
# If you get your I/Q samples from a sound card, you will need to correct the
# amplitude and phase for inaccuracies in the analog hardware. The correction is
# entered using the controls from the "Rx Phase" button on the config screen.
# You must enter a positive number. This controls the range of the control in degrees.
rx_max_phase_correct = 10.0
## mic_out_volume Tx audio level, number
# The level of the Tx audio sent to the sound card after all processing as a fraction 0.0 to 0.7.
# The level is limited to 0.7 to allow headroom for amplitude and phase adjustments.
mic_out_volume = 0.7
# The bandAmplPhase dictionary gives the amplitude and phase corrections for
# sound card data. The format is a dictionary with key "band", giving a dictionary
# with key "rx" or "tx", giving a list of tuples (VFO, tune, amplitude, phase).
#
# If you use Quisk as a panadapter, the corrections will not depend on the band.
# In that case create a band "panadapter" in your config file, and all corrections
# will be read/written to that band.
bandAmplPhase = {} # Empty dictionary to start
#bandAmplPhase = {'panadapter':{}} # Create "panadapter" band for all corrections
################ Receivers SoftRock Fixed, Fixed frequency devices that capture samples from a sound card, and (for Tx) play samples to a sound card
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'quisk_hardware_fixed.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## fixed_vfo_freq Fixed VFO frequency, integer
# The fixed VFO frequency. That is, the frequency in the center of the screen.
fixed_vfo_freq = 7056000
## rx_max_amplitude_correct Max ampl correct, number
# If you get your I/Q samples from a sound card, you will need to correct the
# amplitude and phase for inaccuracies in the analog hardware. The correction is
# entered using the controls from the "Rx Phase" button on the config screen.
# No correction is 1.00. This controls the range of the control.
rx_max_amplitude_correct = 0.2
## rx_max_phase_correct Max phase correct, number
# If you get your I/Q samples from a sound card, you will need to correct the
# amplitude and phase for inaccuracies in the analog hardware. The correction is
# entered using the controls from the "Rx Phase" button on the config screen.
# No correction is 0.00. This controls the range of the control in degrees.
rx_max_phase_correct = 10.0
## mic_out_volume Tx audio level, number
# The level of the Tx audio sent to the sound card after all processing as a fraction 0.0 to 0.7.
# The level is limited to 0.7 to allow headroom for amplitude and phase adjustments.
mic_out_volume = 0.7
# The bandAmplPhase dictionary gives the amplitude and phase corrections for
# sound card data. The format is a dictionary with key "band", giving a dictionary
# with key "rx" or "tx", giving a list of tuples (VFO, tune, amplitude, phase).
#
# If you use Quisk as a panadapter, the corrections will not depend on the band.
# In that case create a band "panadapter" in your config file, and all corrections
# will be read/written to that band.
bandAmplPhase = {} # Empty dictionary to start
#bandAmplPhase = {'panadapter':{}} # Create "panadapter" band for all corrections
################ Receivers HiQSDR, The original N2ADR hardware and the improved HiQSDR using UDP
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hiqsdr/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
# For the N2ADR 2010 transceiver described in QEX, and for the improved version HiQSDR,
# see the sample config file in the hiqsdr package directory, and set these:
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. Use 1 for the original hardware by N2ADR.
# Use 2 for the HiQSDR.
#use_rx_udp = 2
#use_rx_udp = 1
#use_rx_udp = 17
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# power adjustment range of 20 dB, so zero is still a small amount of power.
tx_level = {
None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
digital_tx_level = 20
## HiQSDR_BandDict IO Bus, dict
# If you use the HiQSDR hardware, set these:
# The HiQSDR_BandDict sets the preselect (4 bits) on the X1 connector.
HiQSDR_BandDict = {
'160':1, '80':2, '40':3, '30':4, '20':5, '15':6, '17':7,
'12':8, '10':9, '6':10, '500k':11, '137k':12 }
## cw_delay CW Delay, integer
# This is the delay for CW from 0 to 255.
cw_delay = 0
## rx_udp_ip IP address, text
# This is the IP address of your hardware.
# For FPGA firmware version 1.4 and newer, and if enabled, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
rx_udp_ip = "192.168.2.196"
#rx_udp_ip = "192.168.1.196"
## rx_udp_port Hardware UDP port, integer
# This is the base UDP port number of your hardware.
rx_udp_port = 0xBC77
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit.
tx_ip = ""
#tx_ip = "disable"
#tx_ip = "192.168.1.201"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
rx_udp_clock = 122880000
## sndp_active Enable setting IP, boolean
# If possible, set the IP address to the address entered.
# For FPGA firmware version 1.4 and newer, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
sndp_active = True
#sndp_active = False
################ Receivers Hermes, The Hermes-Lite Project and possibly other hardware with the Hermes FPGA code.
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hermes/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = 'hermes/quisk_widgets.py'
# Quisk has support for the Hermes-Lite project. This support will be extended to the original Hermes.
# Use the file hermes/quisk_conf.py as a model config file. The Hermes can obtain its IP address from
# DHCP. Set rx_udp_ip to the null string in this case. Or use rx_udp_ip to specify an IP address, but
# be sure it is unique and not in use by a DHCP server. The tx_ip and tx_audio_port are not used.
# Note: Setting the IP fails for the Hermes-Lite.
# You can set these options:
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. Use 10 for the Hermes protocol.
#use_rx_udp = 10
## rx_udp_ip IP change, text
# This item should be left blank. It is used to change the IP address of the hardware to a different
# IP once the hardware is found. Not all Hermes firmware supports changing the IP address.
#rx_udp_ip = ""
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
#rx_udp_port = 1024
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit. This item is normally blank.
tx_ip = ""
#tx_ip = "disable"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz. For HermesLite ver2 use 76800000.
#rx_udp_clock = 73728000
#rx_udp_clock = 61440000
#rx_udp_clock = 76800000
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# limited adjustment range, so zero is still a small amount of power.
tx_level = {
None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
#digital_tx_level = 20
## hermes_code_version Hermes code version, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_code_version = -1
## hermes_board_id Hermes board ID, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_board_id = -1
## hermes_LNA_dB Initial LNA dB, integer
# The initial value for the low noise Rx amplifier gain in dB.
hermes_LNA_dB = 20
## hermes_lowpwr_tr_enable Disable T/R in low power, boolean
# This option only applies to the Hermes Lite 2.
# Normally, the T/R relay and external PTT output switch on and off when keying the transmitter.
# But if you set this option, and if you are in low power mode (final amp off) then the T/R relay
# remains in receive mode. This is useful for VNA operation as you can use the low power Tx output
# as the generator and the normal connector as the detector.
# Changes are immediate (no need to restart).
hermes_lowpwr_tr_enable = False
#hermes_lowpwr_tr_enable = True
## hermes_bias_adjust Enable bias adjust, boolean
# This option only applies to the Hermes Lite 2.
# Below are controls that adjust the bias on the power output transistors. Before you enable adjustment,
# make sure you know the correct drain current and how to monitor the current.
# Then set this to True. When you are finished, set it back to False. The bias adjustment
# is stored in the hardware only when the "Write" button is pressed.
# Changes are immediate (no need to restart).
hermes_bias_adjust = False
#hermes_bias_adjust = True
## hermes_power_amp Enable power amp, boolean
# This option only applies to the Hermes Lite 2.
# When True, the power amp is turned on. Otherwise, the low power output is used.
# Changes are immediate (no need to restart).
hermes_power_amp = False
#hermes_power_amp = True
## power_meter_calib_name Power meter calibration, text choice
# This is the calibration table used to convert the power sensor voltage measured by the ADC to the transmit power display.
# It is a table of ADC codes and the corresponding measured power level. If you have a power meter, you can create your own
# table by selecting "New". Then enter ten or more power measurements from low to full power.
# For the Hermes-Lite version E3 filter board, use the built-in table "HL2FilterE3".
# Changes are immediate (no need to restart).
power_meter_calib_name = 'HL2FilterE3'
## hermes_disable_sync Disable Power Supply Sync, boolean
# This option only applies to the Hermes Lite 2.
# When True, the FPGA will not generate a switching frequency for the power supply to
# move the harmonics out of amateur bands.
# Changes are immediate (no need to restart).
hermes_disable_sync = False
#hermes_disable_sync = True
# These are known power meter calibration tables. This table is not present in the JSON settings file.
power_meter_std_calibrations = {}
power_meter_std_calibrations['HL2FilterE3'] = [[ 0, 0.0 ], [ 25.865384615384617, 0.0025502539351328003 ], [ 101.02453987730061, 0.012752044999999998 ],
[ 265.2901234567901, 0.050600930690879994 ], [ 647.9155844155844, 0.21645831264800003 ], [ 1196.5935483870967, 0.66548046472992 ],
[ 1603.7032258064517, 1.1557229391679997 ], [ 2012.3271604938273, 1.811892166688 ], [ 2616.7727272727275, 3.0085848760319993 ],
[ 3173.818181818182, 4.3927428485119995 ], [ 3382.7922077922076, 4.9791328857920005 ], [ 3721.0714285714284, 6.024750791808321 ],
[ 4093.1785714285716, 7.28994845808807 ], [ 4502.496428571429, 8.820837634286566 ], [ 4952.746071428572, 10.673213537486745 ] ]
#power_meter_std_calibrations['HL2FilterE1'] = [[0, 0.0], [9.07, 0.002], [54.98, 0.014], [148.6, 0.057],
# [328.0, 0.208], [611.1, 0.646], [807.0, 1.098], [982.1, 1.6], [1223.3, 2.471], [1517.7, 3.738], [1758.7, 5.02]]
## Hermes_BandDict Rx IO Bus, dict
# The Hermes_BandDict sets the 7 bits on the J16 connector for Rx.
Hermes_BandDict = {
'160':0b0000001, '80':0b1000010, '60':0b1000100, '40':0b1000100, '30':0b1001000, '20':0b1001000, '17':0b1010000,
'15':0b1010000, '12':0b1100000, '10':0b1100000}
## Hermes_BandDictTx Tx IO Bus, dict
# The Hermes_BandDictTx sets the 7 bits on the J16 connector for Tx if enabled.
Hermes_BandDictTx = {'160':0, '80':0, '60':0, '40':0, '30':0, '20':0, '17':0, '15':0, '12':0, '10':0}
## Hermes_BandDictEnTx Enable Tx Filt, boolean
# Enable the separate Rx and Tx settings for the J16 connector.
Hermes_BandDictEnTx = False
#Hermes_BandDictEnTx = True
## AlexHPF Alex High Pass Filters, list
# This is a list of frequencies and high pass filter settings.
AlexHPF = [
['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0]] * 6
## AlexLPF Alex Low Pass Filters, list
# This is a list of frequencies and low pass filter settings.
AlexLPF = [
['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0]] * 6
## AlexHPF_TxEn Alex HPF Tx Enable, boolean
AlexHPF_TxEn = False
#AlexHPF_TxEn = True
## AlexLPF_TxEn Alex LPF Tx Enable, boolean
AlexLPF_TxEn = False
#AlexLPF_TxEn = True
################ Receivers Red Pitaya, The Red Pitaya Project by <NAME>. This uses the Hermes FPGA code.
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hermes/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. Use 10 for the Hermes protocol.
#use_rx_udp = 10
## rx_udp_ip IP change, text
# This item should be left blank. It is used to change the IP address of the hardware to a different
# IP once the hardware is found. Not all Hermes firmware supports changing the IP address.
#rx_udp_ip = ""
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
#rx_udp_port = 1024
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit. This item is normally blank.
tx_ip = ""
#tx_ip = "disable"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
#rx_udp_clock = 125000000
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# limited adjustment range, so zero is still a small amount of power.
tx_level = {
None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
#digital_tx_level = 20
## hermes_code_version Hermes code version, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_code_version = -1
## hermes_board_id Hermes board ID, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_board_id = -1
## hermes_LNA_dB Initial LNA dB, integer
# The initial value for the low noise Rx amplifier gain in dB.
hermes_LNA_dB = 20
## Hermes_BandDict Hermes Bus, dict
# The Hermes_BandDict sets the 7 bits on the J16 connector.
Hermes_BandDict = {
'160':0b0000001, '80':0b0000010, '60':0b0000100, '40':0b0001000, '30':0b0010000, '20':0b0100000, '15':0b1000000}
## Hermes_BandDictTx Tx IO Bus, dict
# The Hermes_BandDictTx sets the 7 bits on the J16 connector for Tx if enabled.
Hermes_BandDictTx = {'160':0, '80':0, '60':0, '40':0, '30':0, '20':0, '17':0, '15':0, '12':0, '10':0}
## Hermes_BandDictEnTx Enable Tx Filt, boolean
# Enable the separate Rx and Tx settings for the J16 connector.
Hermes_BandDictEnTx = False
#Hermes_BandDictEnTx = True
## AlexHPF Alex High Pass Filters, list
# This is a list of frequencies and high pass filter settings.
# Two preset rows plus six blank rows. The blank rows are built with a
# comprehension instead of list repetition: "[row] * 6" would store six
# references to ONE row object, so editing a single blank filter entry
# in place would silently edit all six rows.
AlexHPF = [
  ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0] for _ in range(6)]
## AlexLPF Alex Low Pass Filters, list
# This is a list of frequencies and low pass filter settings.
# Two preset rows plus six blank rows. The blank rows are built with a
# comprehension instead of list repetition: "[row] * 6" would store six
# references to ONE row object, so editing a single blank filter entry
# in place would silently edit all six rows.
AlexLPF = [
  ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0] for _ in range(6)]
## AlexHPF_TxEn Alex HPF Tx Enable, boolean
AlexHPF_TxEn = False
#AlexHPF_TxEn = True
## AlexLPF_TxEn Alex LPF Tx Enable, boolean
AlexLPF_TxEn = False
#AlexLPF_TxEn = True
################ Receivers SoapySDR, The SoapySDR interface to multiple hardware SDRs.
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'soapypkg/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## use_soapy Use SoapySDR, integer
# Enter 1 to turn on SoapySDR.
#use_soapy = 1
# Further items are present in the radio dictionary with names soapy_*
################ Receivers SdrIQ, The SDR-IQ receiver by RfSpace
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'sdriqpkg/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
#
# For the SDR-IQ the soundcard is not used for capture.
## use_sdriq Hardware by RF-Space, integer choice
# This is the type of hardware. For the SdrIQ, use_sdriq is 1.
#use_sdriq = 1
## sdriq_name Serial port, text
# The name of the SDR-IQ serial port to open.
#sdriq_name = "/dev/ft2450"
#sdriq_name = "/dev/ttyUSB2"
#sdriq_name = "COM6"
## sdriq_clock Clock frequency Hertz, number
# This is the clock frequency of the hardware in Hertz.
#sdriq_clock = 66666667.0
## sdriq_decimation Decimation, integer choice
# This is the decimation from the SDR-IQ clock. Decimation by 1250, 600, 500, 360 results in a
# sample rate of 53333, 111111, 133333, 185185 samples per second.
#sdriq_decimation = 1250
#sdriq_decimation = 600
#sdriq_decimation = 500
#sdriq_decimation = 360
################ Receivers Odyssey, The Odyssey project using a UDP protocol similar to the HiQSDR
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hiqsdr/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. The Odyssey uses type 2.
#use_rx_udp = 2
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# power adjustment range of 20 dB, so zero is still a small amount of power.
tx_level = {
None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
digital_tx_level = 20
## HiQSDR_BandDict IO Bus, dict
# This sets the preselect (4 bits) on the X1 connector.
# Band name -> 4-bit preselect code on the X1 connector, one entry per line.
# Insertion order is preserved exactly as before (it is observable to any
# caller that iterates this dict).
HiQSDR_BandDict = {
    '160': 1,
    '80': 2,
    '40': 3,
    '30': 4,
    '20': 5,
    '15': 6,
    '17': 7,
    '12': 8,
    '10': 9,
    '6': 10,
    '500k': 11,
    '137k': 12,
}
## cw_delay CW Delay, integer
# This is the delay for CW from 0 to 255.
cw_delay = 0
## rx_udp_ip IP address, text
# This is the IP address of your hardware.
# For FPGA firmware version 1.4 and newer, and if enabled, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
rx_udp_ip = "192.168.2.160"
#rx_udp_ip = "192.168.1.196"
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
rx_udp_port = 48247
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit.
tx_ip = ""
#tx_ip = "disable"
#tx_ip = "192.168.1.201"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
rx_udp_clock = 122880000
## sndp_active Enable setting IP, boolean
# If possible, set the IP address to the address entered.
# For FPGA firmware version 1.4 and newer, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
sndp_active = True
#sndp_active = False
## radio_sound_ip IP sound play, text
# This option sends radio playback sound to a UDP device. Some SDR hardware devices have an
# audio codec that can play radio sound with less latency than a soundcard. The sample rate
# is the same as the soundcard sample rate, but probably you will want 48000 sps. The UDP
# data consists of two bytes of zero, followed by the specified number of samples. Each
# sample consists of two bytes (a short) of I data and two bytes of Q data in little-endian order.
# For radio_sound_nsamples = 360, the total number of UDP data bytes is 1442.
#radio_sound_ip = "192.168.2.160"
## radio_sound_port UDP port play, integer
# The UDP port of the radio sound play device.
#radio_sound_port = 48250
## radio_sound_nsamples Num play samples, integer
# The number of play samples per UDP block.
#radio_sound_nsamples = 360
## radio_sound_mic_ip IP microphone, text
# This option receives microphone samples from a UDP device. The UDP
# data consists of two bytes of zero, followed by the specified number of samples. Each
# sample consists of two bytes (a short) of monophonic microphone data in little-endian order.
# For radio_sound_mic_nsamples = 720, the total number of UDP data bytes is 1442.
#radio_sound_mic_ip = "192.168.2.160"
## radio_sound_mic_port UDP port mic, integer
# The UDP port of the microphone device.
#radio_sound_mic_port = 48251
## radio_sound_mic_nsamples Num mic samples, integer
# The number of mic samples per UDP block.
#radio_sound_mic_nsamples = 720
## radio_sound_mic_boost Mic boost, boolean
# Use False for no microphone boost, or True for +20 dB boost.
#radio_sound_mic_boost = False
#radio_sound_mic_boost = True
################ Receivers Odyssey2, The Odyssey-2 project using the HPSDR Hermes protocol
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hermes/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = 'hermes/quisk_widgets.py'
# Use the file hermes/quisk_conf.py as a model config file. The Hermes can obtain its IP address from
# DHCP. Set rx_udp_ip to the null string in this case. Or use rx_udp_ip to specify an IP address, but
# be sure it is unique and not in use by a DHCP server.
# You can set these options:
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. Use 10 for the Hermes protocol.
#use_rx_udp = 10
## rx_udp_ip IP change, text
# This item should be left blank. It is used to change the IP address of the hardware to a different
# IP once the hardware is found. Not all Hermes firmware supports changing the IP address.
#rx_udp_ip = ""
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
#rx_udp_port = 1024
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit. This item is normally blank.
tx_ip = ""
#tx_ip = "disable"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz. For Odyssey use 122880000.
#rx_udp_clock = 122880000
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# limited adjustment range, so zero is still a small amount of power.
tx_level = {
None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
#digital_tx_level = 20
## hermes_code_version Hermes code version, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# Hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_code_version = -1
## hermes_board_id Hermes board ID, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# Hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_board_id = -1
## hermes_LNA_dB Initial LNA dB, integer
# The initial value for the low noise Rx amplifier gain in dB.
hermes_LNA_dB = 20
## Hermes_BandDict Hermes Bus, dict
# The Hermes_BandDict sets the 7 bits on the J16 connector.
Hermes_BandDict = {
'160':0b0000001, '80':0b0000010, '60':0b0000100, '40':0b0001000, '30':0b0010000, '20':0b0100000, '15':0b1000000}
## Hermes_BandDictTx Tx IO Bus, dict
# The Hermes_BandDictTx sets the 7 bits on the J16 connector for Tx if enabled.
Hermes_BandDictTx = {'160':0, '80':0, '60':0, '40':0, '30':0, '20':0, '17':0, '15':0, '12':0, '10':0}
## Hermes_BandDictEnTx Enable Tx Filt, boolean
# Enable the separate Rx and Tx settings for the J16 connector.
Hermes_BandDictEnTx = False
#Hermes_BandDictEnTx = True
## AlexHPF Alex High Pass Filters, list
# This is a list of frequencies and high pass filter settings.
# Two preset rows plus six blank rows. The blank rows are built with a
# comprehension instead of list repetition: "[row] * 6" would store six
# references to ONE row object, so editing a single blank filter entry
# in place would silently edit all six rows.
AlexHPF = [
  ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0] for _ in range(6)]
## AlexLPF Alex Low Pass Filters, list
# This is a list of frequencies and low pass filter settings.
# Two preset rows plus six blank rows. The blank rows are built with a
# comprehension instead of list repetition: "[row] * 6" would store six
# references to ONE row object, so editing a single blank filter entry
# in place would silently edit all six rows.
AlexLPF = [
  ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0] for _ in range(6)]
## AlexHPF_TxEn Alex HPF Tx Enable, boolean
AlexHPF_TxEn = False
#AlexHPF_TxEn = True
## AlexLPF_TxEn Alex LPF Tx Enable, boolean
AlexLPF_TxEn = False
#AlexLPF_TxEn = True
################ Receivers Afedri, The Afedri SDR receiver with the Ethernet interface.
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'afedrinet/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## rx_udp_ip IP address, text
# This is the IP address of your hardware. Enter 0.0.0.0 to search for the address.
#rx_udp_ip = "0.0.0.0"
#rx_udp_ip = "192.168.0.200"
#rx_udp_ip = "192.168.1.196"
## rx_udp_port Hardware UDP port, integer
# This is the base UDP port number of your hardware.
#rx_udp_port = 50000
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
#rx_udp_clock = 80000000
## default_rf_gain Default RF gain, integer
# This is the RF gain when starting.
#default_rf_gain = 11
################ Sound Devices. Quisk recognizes eight sound capture and playback devices.
# Playback devices:
# name_of_sound_play Play radio sound on speakers or headphones
# playback_rate The sample rate, normally 48000, 96000 or 192000
# name_of_mic_play For sound card modes (like SoftRock), play I/Q transmit audio
# mic_playback_rate The sample rate
# mic_play_chan_I Channel number 0, 1, ... for I samples
# mic_play_chan_Q Channel number 0, 1, ... for Q samples
# tx_channel_delay Channel number for delay, or -1
# digital_output_name Output monophonic digital samples to another program
# sample_playback_name Output digital I/Q samples to another program
# digital_rx1_name Output monophonic digital samples from Rx1 to another program
# Capture devices:
# microphone_name The monophonic microphone source
# mic_sample_rate The sample rate; must be 48000
# mic_channel_I The channel number for samples
# mic_channel_Q Not used.
# name_of_sound_capt For sound card modes (like SoftRock), capture I/Q samples
# sample_rate The sample rate
# channel_i Channel number 0, 1, ... for I samples
# channel_q Channel number 0, 1, ... for Q samples
# channel_delay Channel number for delay, or -1
# digital_input_name Receive monophonic digital samples from another program
#
# Unused devices have the null string "" as the name. For example, name_of_sound_play="" for a panadapter.
#
# On Linux, Quisk can access your sound card through ALSA, PortAudio or PulseAudio.
# On Windows, Quisk uses DirectX for sound card access.
## channel_i Sample channel I, integer
# Soundcard index of in-phase channel: 0, 1, 2, ...
channel_i = 0
#channel_i = 1
## channel_q Sample channel Q, integer
# Soundcard index of quadrature channel: 0, 1, 2, ...
channel_q = 1
#channel_q = 0
# Thanks to <NAME> for this fix:
## channel_delay Rx channel delay, integer
# The H101 hardware using the PCM2904 chip has a one-sample delay between
# channels, which must be fixed in software. If you have this problem,
# change channel_delay to either channel_i or channel_q. Use -1 for no delay.
channel_delay = -1
#channel_delay = 0
#channel_delay = 1
## tx_channel_delay Tx channel delay, integer
# This is for mic playback (SoftRock transmit)
tx_channel_delay = -1
#tx_channel_delay = 0
#tx_channel_delay = 1
## playback_rate Playback rate, integer choice
# This is the received radio sound playback rate. The default will
# be 48 kHz for the SDR-IQ and UDP port samples, and sample_rate for sound
# card capture. Set it yourself for other rates or hardware.
# The playback_rate must be 24000, 48000, 96000 or 192000.
# The preferred rate is 48000 for use with digital modes and transmit of recorded audio.
#playback_rate = 48000
#playback_rate = 24000
#playback_rate = 96000
#playback_rate = 192000
## lin_sample_playback_name Sample playback name, text
# This option sends the raw I/Q samples to another program using a loopback device (Linux) or
# a Virtual Audio Cable (Windows). The sample rate is the same as the hardware sample rate.
# Read the samples from the loopback device with another program.
lin_sample_playback_name = ""
#lin_sample_playback_name = "hw:Loopback,0"
## win_sample_playback_name Sample playback name, text
# This option sends the raw I/Q samples to another program using a loopback device (Linux) or
# a Virtual Audio Cable (Windows). The sample rate is the same as the hardware sample rate.
# Read the samples from the loopback device with another program.
win_sample_playback_name = ""
#win_sample_playback_name = "COM6"
sample_playback_name = ""
# When you use the microphone input, the mic_channel_I and Q are the two capture
# microphone channels. Quisk uses a monophonic mic, so audio is taken from the I
# channel, and the Q channel is (currently) ignored. It is OK to set the same
# channel number for both, and this is necessary for a USB mono mic. The mic sample rate
# should be 48000 to enable digital modes and the sound recorder to work, but 8000 can be used.
# Mic samples can be sent to an Ethernet device (use tx_ip and name_of_mic_play = "")
# or to a sound card (use name_of_mic_play="hw:1" or other device).
#
# If mic samples are sent to a sound card for Tx, the samples are tuned to the audio
# transmit frequency, and are set to zero unless the key is down. You must set both
# microphone_name and name_of_mic_play even for CW. For softrock hardware, you usually
# capture radio samples and play Tx audio on one soundcard; and capture the mic and play radio
# sound on the other sound card at 48000 sps. For example:
# name_of_sound_capt = "hw:0" # high quality sound card at 48, 96, or 192 ksps
# name_of_sound_play = "hw:1" # lower quality sound card at 48 ksps
# microphone_name = name_of_sound_play
# name_of_mic_play = name_of_sound_capt
## lin_name_of_sound_play Play radio sound, text
# Name of device to play demodulated radio audio.
lin_name_of_sound_play = "hw:0"
## win_name_of_sound_play Play radio sound, text
# Name of device to play demodulated radio audio.
win_name_of_sound_play = "Primary"
## lin_name_of_sound_capt Capture audio samples, text
# Name of device to capture samples from an audio device.
lin_name_of_sound_capt = "hw:0"
## win_name_of_sound_capt Capture audio samples, text
# Name of device to capture samples from an audio device.
win_name_of_sound_capt = "Primary"
## sample_rate Sample rate, integer
# The sample rate when capturing samples from a sound card.
#sample_rate = 48000
#sample_rate = 96000
#sample_rate = 192000
# Microphone capture:
## lin_microphone_name Microphone name, text
# Name of microphone capture device (or "hw:1")
lin_microphone_name = ""
## win_microphone_name Microphone name, text
# Name of microphone capture device (or "hw:1")
win_microphone_name = ""
microphone_name = ''
## mic_sample_rate Mic sample rate, integer choice
# Microphone capture sample rate in Hertz, should be 48000, can be 8000
mic_sample_rate = 48000
#mic_sample_rate = 8000
## mic_channel_I Mic channel I, integer
# Soundcard index of mic capture audio channel
mic_channel_I = 0
## mic_channel_Q Mic channel Q, integer
# Soundcard index of ignored capture channel
mic_channel_Q = 0
## lin_name_of_mic_play Mic play name, text
# Tx audio samples sent to soundcard (SoftRock).
# Name of play device if Tx audio I/Q is sent to a sound card.
lin_name_of_mic_play = ""
## win_name_of_mic_play Mic play name, text
# Tx audio samples sent to soundcard (SoftRock).
# Name of play device if Tx audio I/Q is sent to a sound card.
win_name_of_mic_play = ""
name_of_mic_play = ""
## mic_playback_rate Mic playback rate, integer
# Playback rate must be a multiple 1, 2, ... of mic_sample_rate
mic_playback_rate = 48000
#mic_playback_rate = 24000
#mic_playback_rate = 96000
#mic_playback_rate = 192000
## mic_play_chan_I Mic play channel I, integer
# Soundcard index of Tx audio I play channel
mic_play_chan_I = 0
#mic_play_chan_I = 1
## mic_play_chan_Q Mic play channel Q, integer
# Soundcard index of Tx audio Q play channel
mic_play_chan_Q = 1
#mic_play_chan_Q = 0
## lin_digital_input_name Digital input name, text
# Input audio from an external program for use with digital modes. The input must be
# stereo at 48000 sps, and you must set mic_sample_rate to 48000 also.
lin_digital_input_name = ""
## win_digital_input_name Digital input name, text
# Input audio from an external program for use with digital modes. The input must be
# stereo at 48000 sps, and you must set mic_sample_rate to 48000 also.
win_digital_input_name = ""
digital_input_name = ""
## lin_digital_output_name Digital output name, text
# Output audio to an external program for use with digital modes. The output is
# stereo at the same sample rate as the radio sound playback.
lin_digital_output_name = ""
## win_digital_output_name Digital output name, text
# Output audio to an external program for use with digital modes. The output is
# stereo at the same sample rate as the radio sound playback.
win_digital_output_name = ""
digital_output_name = ""
## lin_digital_rx1_name Digital sub-receiver 1 output name, text
# Output audio to an external program for use with digital modes.
lin_digital_rx1_name = ""
## win_digital_rx1_name Digital sub-receiver 1 output name, text
# Output audio to an external program for use with digital modes.
win_digital_rx1_name = ""
digital_rx1_name = ""
## digital_output_level Digital output level, number
# This is the volume control 0.0 to 1.0 for digital playback to fldigi, etc.
# Changes are immediate (no need to restart).
digital_output_level = 0.7
# Sound card names:
#
# In PortAudio, soundcards have an index number 0, 1, 2, ... and a name.
# The name can be something like "HDA NVidia: AD198x Analog (hw:0,0)" or
# "surround41". In Quisk, all PortAudio device names start with "portaudio".
# A device name like "portaudio#6" directly specifies the index. A name like
# "portaudio:text" means to search for "text" in all available devices. And
# there is a default device "portaudiodefault". So these portaudio names are useful:
#name_of_sound_capt = "portaudio:(hw:0,0)" # First sound card
#name_of_sound_capt = "portaudio:(hw:1,0)" # Second sound card, etc.
#name_of_sound_capt = "portaudio#1" # Directly specified index
#name_of_sound_capt = "portaudiodefault" # May give poor performance on capture
#
# In ALSA, soundcards have these names. The "hw" devices are the raw
# hardware devices, and should be used for soundcard capture.
#name_of_sound_capt = "hw:0" # First sound card
#name_of_sound_capt = "hw:1" # Second sound card, etc.
#name_of_sound_capt = "plughw"
#name_of_sound_capt = "plughw:1"
#name_of_sound_capt = "default"
#
# It is usually best to use ALSA names because they provide minimum latency. But
# you may need to use PulseAudio to connect to other programs such as wsjt-x.
#
# Pulseaudio support was added by <NAME>. Many thanks!
# More pulse audio support was added by <NAME>, KM4DSJ. Many thanks!
#
# For PulseAudio devices, use the name "pulse:name" and connect the streams
# to your hardware devices using a PulseAudio control program. The name "pulse"
# alone refers to the "default" device. The PulseAudio names are quite long;
# for example "alsa_output.pci-0000_00_1b.0.analog-stereo". Look on the screen
# Config/Sound to see the device names. There is a description, a PulseAudio name,
# and for ALSA devices, the ALSA name. An example is:
#
# CM106 Like Sound Device Analog Stereo
# alsa_output.usb-0d8c_USB_Sound_Device-00-Device.analog-stereo
# USB Sound Device USB Audio (hw:1,0)
#
# Instead of the long PulseAudio name, you can enter a substring of any of
# these three strings.
#
# Use the default pulse device for radio sound:
#name_of_sound_play = "pulse"
# Use a PulseAudio name for radio sound:
#name_of_sound_play = "pulse:alsa_output.usb-0d8c_USB_Sound_Device-00-Device.analog-stereo"
# Abbreviate the PulseAudio name:
#name_of_sound_play = "pulse:alsa_output.usb"
# Another abbreviation:
#name_of_sound_play = "pulse:CM106"
#
# The option show_pulse_audio_devices (defined further below) controls whether
# the PulseAudio devices are shown in the device list. If you don't have
# PulseAudio, you must set it to False. Thanks to Simon, S54MI.
## lin_latency_millisecs Play latency msec, integer
# Play latency determines how many samples are in the radio sound play buffer.
# A larger number makes it less likely that you will run out of samples to play,
# but increases latency. It is OK to suffer a certain number of play buffer
# underruns in order to get lower latency.
lin_latency_millisecs = 150
#lin_latency_millisecs = 50
#lin_latency_millisecs = 100
#lin_latency_millisecs = 250
## win_latency_millisecs Play latency msec, integer
# Play latency determines how many samples are in the radio sound play buffer.
# A larger number makes it less likely that you will run out of samples to play,
# but increases latency. It is OK to suffer a certain number of play buffer
# underruns in order to get lower latency.
win_latency_millisecs = 150
#win_latency_millisecs = 50
#win_latency_millisecs = 100
#win_latency_millisecs = 250
# Generic latency value in milliseconds. NOTE(review): the lin_*/win_* variants
# above appear to be the per-platform settings — confirm how the config loader
# chooses between this and those before changing only one of them.
latency_millisecs = 150
# If False, no list of PulseAudio devices is available.
# show_pulse_audio_devices Show PulseAudio, boolean
# This controls whether PulseAudio devices are shown in the list of sound devices.
show_pulse_audio_devices = True
#show_pulse_audio_devices = False
################ Options
## max_record_minutes Max minutes record time, number
# Quisk has record and playback buttons to save radio sound. If there is no more room for
# sound, the old sound is discarded and the most recent sound is retained. This controls
# the maximum time of sound storage in minutes for this recorded audio, and also the record
# time for the Tx Audio test screen. If you want to transmit recorded sound, then mic_sample_rate
# must equal playback_rate and both must be 48000.
max_record_minutes = 1.00
# Quisk can save radio sound and samples to files, and can play recorded sound. There is a button on the
# Config/Config screen to set the file names. You can set the initial names with these variables:
file_name_audio = ""
#file_name_audio = "/home/jim/tmp/qaudio.wav"
file_name_samples = ""
#file_name_samples = "C:/tmp/qsamples.wav"
# The file for playback must be 48 ksps, 16-bit, one channel (monophonic); the same as the mic input. When
# you play a file, the PTT button (if any) is pushed. There is a control to repeat the playback. This
# feature is intended to transmit a "CQ CQ" message, for example, during a contest.
file_name_playback = ""
#file_name_playback = "/home/jim/sounds/cqcq_contest.wav"
## do_repeater_offset Use repeater offset, boolean
# Quisk can implement the frequency shift needed for repeaters. If the repeater frequency
# is on the favorites screen, and you tune close (500 Hz) to that frequency, and there
# is an entry in the "offset" column, and the mode is FM, and do_repeater_offset is True,
# then Quisk will shift the Tx frequency by the offset when transmitting. Your hardware
# file must define the method RepeaterOffset(self, offset=None).
do_repeater_offset = False
#do_repeater_offset = True
## correct_smeter S-meter correction in S units, number
# This converts from dB to S-units for the S-meter (it is in S-units).
correct_smeter = 15.5
#correct_smeter = 7.7
#correct_smeter = 21.6
## agc_max_gain Maximum AGC gain, number
# There is a button to turn AGC on or off,
# but AGC still limits the peak amplitude to avoid clipping even if it is off.
# Right click the AGC button to show the adjustment slider. If the slider is at maximum,
# all signals will have the same (maximum) amplitude. For lower values, weak signals
# will be somewhat less loud than strong signals; that is, some variation in signal
# amplitude remains.
# agc_max_gain controls the maximum AGC gain and thus the scale of the AGC slider control. If
# it is too high, all signals reach the same amplitude at much less than 100% slider.
# If it is too low, then all signals fail to have the same amplitude even at 100%. But
# the value is not critical, because you can adjust the slider a bit more.
agc_max_gain = 15000.0
#agc_max_gain = 10000.0
#agc_max_gain = 20000.0
## agc_release_time AGC release time in seconds, number
# This is the AGC release time in seconds. It must be greater than zero. It is the time
# constant for gain recovery after a strong signal disappears.
agc_release_time = 1.0
#agc_release_time = 2.0
#agc_release_time = 0.5
## freq_spacing Frequency rounding spacing, integer
# If freq_spacing is not zero, frequencies are rounded to the freq_base plus the
# freq_spacing; frequency = freq_base + N * freq_spacing. This is useful at
# VHF and higher when Quisk is used with a transverter.
# This option is incompatible with "Frequency round for SSB".
freq_spacing = 0
#freq_spacing = 25000
#freq_spacing = 15000
## freq_round_ssb Frequency round for SSB, integer
# If freq_round_ssb is not zero, when the left mouse button is clicked
# the frequency is rounded for voice modes but not for CW. Mouse wheel etc. are unaffected.
# This is useful for HF when many SSB, AM etc. stations are at multiples of 500 or 1000 Hertz.
# This option is incompatible with "Frequency rounding spacing".
freq_round_ssb = 0
#freq_round_ssb = 1000
## freq_base Frequency rounding base, integer
# If freq_spacing is not zero, frequencies are rounded to the freq_base plus the
# freq_spacing; frequency = freq_base + N * freq_spacing. This is useful at
# VHF and higher when Quisk is used with a transverter.
# This option is incompatible with "Frequency round for SSB".
freq_base = 0
#freq_base = 12500
## cwTone CW tone frequency in Hertz, integer
# This is the CW tone frequency in Hertz.
cwTone = 600
#cwTone = 400
#cwTone = 800
## invertSpectrum Invert the RF spectrum, integer choice
# If your mixing scheme inverts the RF spectrum, set this option to un-invert it.
invertSpectrum = 0 # Do not invert
#invertSpectrum = 1 # Invert spectrum
# This is a list of mixer settings. It only works for Linux; it has no effect in Windows.
# Use "amixer -c 1 contents" to get a list of mixer controls and their numid's for
# card 1 (or "-c 0" for card 0). Then make a list of (device_name, numid, value)
# for each control you need to set. For a decimal fraction, use a Python float; for example,
# use "1.0", not the integer "1".
#mixer_settings = [
# ("hw:1", 2, 0.80), # numid of microphone volume control, volume 0.0 to 1.0;
# ("hw:1", 1, 1) # numid of capture on/off control, turn on with 1;
# ]
## modulation_index FM modulation index, number
# For FM transmit, this is the modulation index.
modulation_index = 1.67
## pulse_audio_verbose_output Debug PulseAudio, integer choice
# Use 1 to turn on PulseAudio debug and status messages. This allows for debugging of both devices and performance.
pulse_audio_verbose_output = 0
#pulse_audio_verbose_output = 1
## favorites_file_path Path to favorites file, text
# The quisk config screen has a "favorites" tab where you can enter the frequencies and modes of
# stations. The data is stored in this file. If this is blank, the default is the file
# quisk_favorites.txt in the directory where your config file is located.
favorites_file_path = ''
## reverse_tx_sideband Reverse Tx sideband, integer
# Set to 1 if you want to reverse the sideband when transmitting.
# For example, to receive on LSB but transmit on USB. This may be necessary for satellite operation
# depending on the mixing scheme.
# Changes are immediate (no need to restart).
reverse_tx_sideband = 0
#reverse_tx_sideband = 1
## dc_remove_bw DC remove bandwidth, integer
# This is the 3 dB bandwidth of the filter centered at zero Hertz that is used to remove DC bias.
# Choose a bandwidth that suppresses DC and low frequency noise.
# Enter 1 to select a different filter based on block removal.
# Enter zero to disable the filter.
# Changes are immediate (no need to restart).
#dc_remove_bw = 0
#dc_remove_bw = 1
#dc_remove_bw = 20
#dc_remove_bw = 50
dc_remove_bw = 100
#dc_remove_bw = 200
#dc_remove_bw = 400
################ Remote
# DX cluster telnet login data, thanks to DJ4CM. Must have station_display_lines > 0.
## dxClHost Dx cluster host name, text
# The Dx cluster options log into a Dx cluster server, and put station information
# on the station window under the graph and waterfall screens.
# dxClHost is the telnet host name.
dxClHost = ''
#dxClHost = 'example.host.net'
## dxClPort Dx cluster port number, integer
# The Dx cluster options log into a Dx cluster server, and put station information
# on the station window under the graph and waterfall screens.
# dxClPort is the telnet port number.
dxClPort = 7373
## user_call_sign Call sign for Dx cluster, text
# The Dx cluster options log into a Dx cluster server, and put station information
# on the station window under the graph and waterfall screens.
# user_call_sign is your call sign which may be needed for login.
user_call_sign = ''
## dxClPassword Password for Dx cluster, text
# The Dx cluster options log into a Dx cluster server, and put station information
# on the station window under the graph and waterfall screens.
# dxClPassword is the telnet password for the server.
dxClPassword = ''
#dxClPassword = '<PASSWORD>'
## dxClExpireTime Dx cluster expire minutes, integer
# The Dx cluster options log into a Dx cluster server, and put station information
# on the station window under the graph and waterfall screens.
# dxClExpireTime is the time in minutes until DX Cluster entries are removed.
dxClExpireTime = 20
## IQ_Server_IP Pulse server IP address, text
# IP address of the remote PulseAudio IQ server.
IQ_Server_IP = ""
## hamlib_ip IP address for Hamlib Rig 2, text
# You can control Quisk from Hamlib. Set the Hamlib rig to 2 and the device for rig 2 to
# localhost:4575. Or choose a different name and port here. Set the same name and port
# in the controlling program.
# hamlib_ip is the IP name or address.
hamlib_ip = "localhost"
## hamlib_port IP port for Hamlib, integer
# You can control Quisk from Hamlib. For direct control, set the external program to rig 2
# "Hamlib NET rigctl", and set the Quisk hamlib port to 4532. To use the rigctld program to control
# Quisk, set the Quisk hamlib port to 4575. To turn off Hamlib control, set the Quisk port to zero.
#hamlib_port = 4575
hamlib_port = 4532
#hamlib_port = 0
## digital_xmlrpc_url URL for control by XML-RPC, text
# This option is used by the digital modes that send audio to an external
# program, and receive audio to transmit. Set Fldigi to upper sideband, XML-RPC control.
digital_xmlrpc_url = "http://localhost:7362"
#digital_xmlrpc_url = ""
## lin_hamlib_com1_name CAT serial port name, text
# Enter a name to create a serial port so that an external program like N1MM+ or WSJT-X can
# control Quisk. Then enter that name into the other program and specify a radio of type "Flex". This is addition to the
# "Hamlib NET rigctl" mechanism which is based on a network connection. Leave this blank
# to turn off the serial port. The port settings are 9600 baud, 8 bits of data, no parity and one stop bit,
# although other settings are OK too.
# On Linux, the serial port names are of the form "/tmp/QuiskTTYx"
# where "x" is 0, 1, 2, etc. Quisk will create the serial port when it starts.
lin_hamlib_com1_name = ""
#lin_hamlib_com1_name = "/tmp/QuiskTTY0"
## lin_hamlib_com2_name CAT serial-2 name, text
# This is a second serial port for external control of Quisk. Use a different serial port name.
lin_hamlib_com2_name = ""
#lin_hamlib_com2_name = "/tmp/QuiskTTY1"
## win_hamlib_com1_name CAT serial port name, text
# Enter the name of the serial port that Quisk uses to connect to an external program like N1MM+ or WSJT-X.
# You must first create a pair of virtual serial ports with a program like vspMgr or HHD Software.
# Then enter the second name into the other program and specify a radio of type "Flex". This control method is in addition to the
# "Hamlib NET rigctl" mechanism which is based on a network connection. Leave this blank
# to turn off the serial port. The port settings are 9600 baud, 8 bits of data, no parity and one stop bit.
win_hamlib_com1_name = ""
#win_hamlib_com1_name = "COM5"
#win_hamlib_com1_name = "COM6"
## win_hamlib_com2_name CAT serial-2 name, text
# This is a second serial port for external control of Quisk. Use a different serial port pair.
win_hamlib_com2_name = ""
#win_hamlib_com2_name = "COM15"
#win_hamlib_com2_name = "COM16"
hamlib_com1_name = ""
hamlib_com2_name = ""
################ Keys
## hot_key_ptt1 PTT Key 1, keycode
# Set a keyboard shortcut that will press the PTT button.
# For a regular key, use the ord() of the key. For example, ord('a') or ord('b'). For the space bar
# use ord(' '). Then restart Quisk.
# If you do not want a hot key, set this to None.
# Do not choose a key that interferes with other features
# on your system such as system menus.
hot_key_ptt1 = None
#hot_key_ptt1 = ord(' ')
#hot_key_ptt1 = ord('z')
#hot_key_ptt1 = ord('a')
#hot_key_ptt1 = wx.WXK_F5
## hot_key_ptt2 PTT Key 2, keycode
# If the Control or Shift key must be pressed too, set that key modifier here.
# Otherwise, set NORMAL here.
# For example, if you want control-A, set CTRL in "PTT Key 2", and ord('a') in "PTT Key 1".
hot_key_ptt2 = wx.ACCEL_NORMAL
#hot_key_ptt2 = wx.ACCEL_CTRL
#hot_key_ptt2 = wx.ACCEL_SHIFT
#hot_key_ptt2 = wx.ACCEL_CTRL | wx.ACCEL_SHIFT
#hot_key_ptt2 = wx.ACCEL_ALT
## hot_key_ptt_toggle PTT Key Toggle, boolean
# Set to True if you want PTT to remain on when you release the key. A second key press will
# then release PTT. This is toggle mode. If False, you must keep pressing the key, and releasing
# it will release PTT.
# Changes are immediate (no need to restart).
hot_key_ptt_toggle = False
#hot_key_ptt_toggle = True
## hot_key_ptt_if_hidden PTT Key if Hidden, boolean
# Set to True if you want PTT to be active when the Quisk window is not visible.
# Otherwise, the Quisk window must be active and on top.
hot_key_ptt_if_hidden = False
#hot_key_ptt_if_hidden = True
################ Windows
# Station info display configuration, thanks to DJ4CM. This displays a window of station names
# below the graph frequency (X axis).
## station_display_lines Number of station lines, integer
# The number of station info display lines below the graph X axis.
station_display_lines = 1
#station_display_lines = 0
#station_display_lines = 3
## display_fraction Display fraction, number
# This is the fraction of spectrum to display from zero to one. It causes the edges
# of the display to be suppressed. For example, 0.85 displays the central 85% of the spectrum.
display_fraction = 1.00
## default_screen Startup screen, text choice
# Select the default screen when Quisk starts.
default_screen = 'Graph'
#default_screen = 'WFall'
#default_screen = 'Config'
## graph_width Startup graph width, number
# The width of the graph data as a fraction of the total screen size. This
# controls the width of the Quisk window, but
# will be adjusted by Quisk to accommodate preferred FFT sizes.
# It can not be made too small because
# of the space needed for all the buttons.
graph_width = 0.8
## window_width Window width pixels, integer
# The use of startup graph width provides an optimal size for PC screens. But when running
# full screen, for example, on a tablet screen or a dedicated display, greater control
# is required. These options exactly set the Quisk window geometry. When window pixel width
# is used, graph width is ignored. You may need to reduce button_font_size. Use -1
# to ignore this feature, and use graph width.
window_width = -1
#window_width = 640
## window_height Window height pixels, integer
# The use of startup graph width provides an optimal size for PC screens. But when running
# full screen, for example, on a tablet screen or a dedicated display, greater control
# is required. These options exactly set the Quisk window geometry. When window pixel width
# is used, graph width is ignored. You may need to reduce button_font_size. Use -1
# to ignore this feature, and use graph width.
window_height = -1
#window_height = 480
## window_posX Window X position, integer
# The use of startup graph width provides an optimal size for PC screens. But when running
# full screen, for example, on a tablet screen or a dedicated display, greater control
# is required. These options exactly set the Quisk window geometry. When window pixel width
# is used, graph width is ignored. You may need to reduce button_font_size. Use -1
# to ignore this feature, and use graph width.
window_posX = -1
#window_posX = 0
## window_posY Window Y position, integer
# The use of startup graph width provides an optimal size for PC screens. But when running
# full screen, for example, on a tablet screen or a dedicated display, greater control
# is required. These options exactly set the Quisk window geometry. When window pixel width
# is used, graph width is ignored. You may need to reduce button_font_size. Use -1
# to ignore this feature, and use graph width.
window_posY = -1
#window_posY = 0
## button_layout Button layout, text choice
# This option controls how many buttons are displayed on the screen. The large screen
# layout is meant for a PC. The small screen layout is meant for small touch screens, and
# small screens used in embedded systems.
button_layout = 'Large screen'
#button_layout = 'Small screen'
# These are the initial values for the Y-scale and Y-zero sliders for each screen.
# The sliders go from zero to 160.
graph_y_scale = 100
graph_y_zero = 0
waterfall_y_scale = 80 # Initial value; new values are saved for each band
waterfall_y_zero = 40 # Initial value; new values are saved for each band
waterfall_graph_y_scale = 100
waterfall_graph_y_zero = 60
scope_y_scale = 80
scope_y_zero = 0 # Currently doesn't do anything
filter_y_scale = 90
filter_y_zero = 0
# Select the way the waterfall screen scrolls:
# waterfall_scroll_mode = 0 # scroll at a constant rate.
waterfall_scroll_mode = 1 # scroll faster at the top so that a new signal appears sooner.
# Select the initial size in pixels (minimum 1) of the graph at the top of the waterfall.
waterfall_graph_size = 80
# Quisk saves radio settings in a settings file. The default directory is the same as the config
# file, and the file name is quisk_settings.json. You can set a different name here. If you dual
# boot Windows and Linux, you can set the same path in your Windows and Linux config files, so that
# settings are shared. Even if Windows and Linux settings are shared, the sound device names and a
# few other settings are kept separate.
settings_file_path = ''
#settings_file_path = /path/to/my/file/quisk_settings.json
################ Timing
## lin_data_poll_usec Hardware poll usecs, integer
# Quisk polls the hardware for samples at intervals. This is the poll time in microseconds.
# A lower time reduces latency. A higher time is less taxing on the hardware.
#lin_data_poll_usec = 5000
#lin_data_poll_usec = 10000
#lin_data_poll_usec = 15000
#lin_data_poll_usec = 20000
## win_data_poll_usec Hardware poll usecs, integer
# Quisk polls the hardware for samples at intervals. This is the poll time in microseconds.
# A lower time reduces latency. A higher time is less taxing on the hardware.
#win_data_poll_usec = 15000
#win_data_poll_usec = 5000
#win_data_poll_usec = 10000
#win_data_poll_usec = 20000
# Hardware poll interval in microseconds. Windows needs a longer period than
# Linux; see the lin_/win_data_poll_usec help text above for the rationale.
data_poll_usec = 20000 if sys.platform == "win32" else 5000
## keyupDelay Keyup delay msecs, integer
# For the Hermes protocol including the Hermes-Lite2, this is the key-up hang time,
# the time in milliseconds 0 to 1023 to hold the T/R relay after the CW key goes up
# or the PTT button goes up. For all
# hardware, it adds a silent period to the audio after key up.
# A large key up delay may be needed to accommodate
# antenna switching or other requirements of your hardware.
# Changes are immediate (no need to restart).
keyupDelay = 23
## fft_size_multiplier FFT size multiplier, integer
# The fft_size is the width of the data on the screen (about 800 to
# 1200 pixels) times the fft_size_multiplier. Multiple FFTs are averaged
# together to achieve your graph refresh rate. If fft_size_multiplier is
# too small you will get many fft errors. You can specify fft_size_multiplier,
# or enter a large number (use 9999) to maximize it, or enter zero to let
# quisk calculate it for you.
# Your fft_size_multiplier should have many small factors. Avoid 7 and 13, and
# use 8 or 12 instead.
# If your hardware can change the decimation, there are further complications.
# The FFT size is fixed, and only the average count can change to adjust the
# refresh rate.
fft_size_multiplier = 0
## graph_refresh Graph refresh Hertz, integer
# The graph_refresh is the frequency at which the graph is updated,
# and should be about 5 to 10 Hertz. Higher rates require more processor power.
graph_refresh = 7
################ Controls
## graph_peak_hold_1 Graph peak hold 1, number
# This controls the speed of the graph peak hold for the two settings
# of the Graph button. Lower numbers give a longer time constant.
graph_peak_hold_1 = 0.25
## graph_peak_hold_2 Graph peak hold 2, number
# This controls the speed of the graph peak hold for the two settings
# of the Graph button. Lower numbers give a longer time constant.
graph_peak_hold_2 = 0.10
## use_sidetone Use sidetone, integer choice
# This controls whether Quisk will display a sidetone volume control "Sto",
# and whether Quisk will generate a CW sidetone.
use_sidetone = 0
#use_sidetone = 1
## add_imd_button Add IMD button, integer choice
# If you want Quisk to add a button to generate a 2-tone IMD test signal,
# set this to 1.
add_imd_button = 0
#add_imd_button = 1
## add_extern_demod Add ext demod button, text
# If you want to write your own I/Q filter and demodulation module, set
# this to the name of the button to add, and change extdemod.c.
add_extern_demod = ""
#add_extern_demod = "WFM"
## add_fdx_button Add FDX button, integer choice
# If you want Quisk to add a full duplex button (transmit and receive at the
# same time), set this to 1.
add_fdx_button = 0
#add_fdx_button = 1
## add_freedv_button Add FreeDv button, integer choice
# These buttons add up to two additional mode buttons after CW, USB, etc.
# Set this to add the FDV mode button for digital voice:
add_freedv_button = 1
#add_freedv_button = 0
## freedv_tx_msg FreeDv Tx message, text
# For freedv, this is the text message to send.
freedv_tx_msg = ''
#freedv_tx_msg = 'N2XXX Jim, New Jersey, USA \n'
# This is the list of FreeDV modes and their index number. The starting mode is the first listed.
# Each entry is (button label, mode index). Modes 2400A/2400B/800XA (indexes
# 3-5) are commented out; "Future8"/"Future9" appear to be placeholders for
# modes not yet released -- confirm against the FreeDV codec library in use.
freedv_modes = (('Mode 1600', 0), ('Mode 700', 1), ('Mode 700B', 2),
#	('Mode 2400A', 3), ('Mode 2400B', 4), ('Mode 800XA', 5),
	('Mode 700C', 6), ('Mode 700D', 7), ('Future8', 8), ('Future9', 9))
# These are the filter bandwidths for each mode. Quisk has built-in optimized filters
# for these values, but you can change them if you want.
# Each tuple lists the six bandwidths in Hertz shown on the filter buttons
# for the named mode.
FilterBwCW = (200, 400, 600, 1000, 1500, 3000)
FilterBwSSB = (2000, 2200, 2500, 2800, 3000, 3300)
FilterBwAM = (4000, 5000, 6000, 8000, 10000, 9000)	# NOTE(review): last entry (9000) breaks the ascending order -- confirm intended
FilterBwFM = (8000, 10000, 12000, 16000, 18000, 20000)
FilterBwIMD = FilterBwSSB	# the IMD test mode reuses the SSB bandwidths
FilterBwDGT = (200, 400, 1500, 3200, 4800, 10000)
FilterBwEXT = (8000, 10000, 12000, 15000, 17000, 20000)
FilterBwFDV = (1500, 2000, 3000, '', '', '')	# '' presumably leaves that button blank -- confirm
# If your hardware file defines the method OnButtonPTT(self, event), then Quisk will
# display a PTT button you can press. The method must switch your hardware to
# transmit somehow, for example, by setting a serial port pin to high.
## spot_button_keys_tx Key Tx on Spot, boolean
# If you want the Spot button to key the transmitter immediately when you press it, set this option.
# Your hardware must have a working PTT button for this to work.
spot_button_keys_tx = True
#spot_button_keys_tx = False
# Thanks to Christof, DJ4CM, for button fonts.
################ Fonts
## button_font_size Button font size, integer
# If the Quisk screen is too wide or the buttons are too crowded, perhaps due to a low screen
# resolution, you can reduce the font sizes.
button_font_size = 10
#button_font_size = 9
#button_font_size = 8
## default_font_size Default font size, integer
# These control the font size on the named screen.
default_font_size = 12
## status_font_size Status font size, integer
# These control the font size on the named screen.
status_font_size = 14
## config_font_size Config font size, integer
# These control the font size on the named screen.
config_font_size = 14
## graph_font_size Graph font size, integer
# These control the font size on the named screen.
graph_font_size = 10
## graph_msg_font_size Graph message font size, integer
# These control the font size on the named screen.
graph_msg_font_size = 14
## favorites_font_size Favorites font size, integer
# These control the font size on the named screen.
favorites_font_size = 14
## lin_quisk_typeface Typeface, text
# This controls the typeface used in fonts. The objective is to choose an available font that
# offers good support for the Unicode characters used on buttons and windows.
#lin_quisk_typeface = ''
## win_quisk_typeface Typeface, text
# This controls the typeface used in fonts. The objective is to choose an available font that
# offers good support for the Unicode characters used on buttons and windows.
#win_quisk_typeface = 'Lucida Sans Unicode'
#win_quisk_typeface = 'Arial Unicode MS'
# Typeface used for Quisk fonts. On Windows a face with good Unicode symbol
# coverage is chosen; elsewhere the toolkit default ('') is used.
# An alternative Windows choice is 'Arial Unicode MS'.
quisk_typeface = 'Lucida Sans Unicode' if sys.platform == "win32" else ''
## use_unicode_symbols Use Unicode symbols, boolean
# This controls whether the "U" unicode symbols or the "T" text symbols are used on buttons and windows.
# You can change the "U" and "T" symbols to anything you want in your config file.
use_unicode_symbols = True
#use_unicode_symbols = False
# These are the Unicode symbols used in the station window. Thanks to Christof, DJ4CM.
Usym_stat_fav = unichr(0x2605) # Symbol for favorites, a star
Usym_stat_mem = unichr(0x24C2) # Symbol for memory stations, an "M" in a circle
#Usym_stat_dx = unichr(0x2691) # Symbol for DX Cluster stations, a flag
Usym_stat_dx = unichr(0x25B2) # Symbol for DX Cluster stations, a Delta
# These are the text symbols used in the station window.
Tsym_stat_fav = 'F'
Tsym_stat_mem = 'M'
Tsym_stat_dx = 'Dx'
#
# These are the Unicode symbols to display on buttons. Thanks to Christof, DJ4CM.
Ubtn_text_range_dn = unichr(0x2B07) # Down band, a down arrow (U+2B07)
Ubtn_text_range_up = unichr(0x2B06) # Up band, an up arrow (U+2B06)
Ubtn_text_play = unichr(0x25BA) # Play button, a right-pointing triangle (U+25BA)
Ubtn_text_rec = unichr(0x25CF) # Record button, a filled dot (U+25CF)
Ubtn_text_file_rec = "File " + unichr(0x25CF) # Record to file
Ubtn_text_file_play = "File " + unichr(0x25BA) # Play from file
Ubtn_text_fav_add = unichr(0x2605) + unichr(0x2191) # Add to favorites, star + up arrow
Ubtn_text_fav_recall = unichr(0x2605) + unichr(0x2193) # Jump to favorites screen, star + down arrow
Ubtn_text_mem_add = unichr(0x24C2) + unichr(0x2191) # Add to memory, circled M + up arrow
Ubtn_text_mem_next = unichr(0x24C2) + unichr(0x27B2) # Next memory, circled M + curved arrow
Ubtn_text_mem_del = unichr(0x24C2) + unichr(0x2613) # Delete from memory, circled M + saltire
# These are the text symbols to display on buttons.
Tbtn_text_range_dn = "Dn"
Tbtn_text_range_up = "Up"
Tbtn_text_play = "Tmp Play"
Tbtn_text_rec = "Tmp Rec"
Tbtn_text_file_rec = "File Rec"
Tbtn_text_file_play = "File Play"
Tbtn_text_fav_add = ">Fav"
Tbtn_text_fav_recall = "Fav"
Tbtn_text_mem_add = "Save"
Tbtn_text_mem_next = "Next"
Tbtn_text_mem_del = "Del"
## decorate_buttons Decorate buttons, boolean
# This controls whether to add the button decorations that mark cycle and adjust buttons.
decorate_buttons = True
#decorate_buttons = False
btn_text_cycle = unichr(0x21B7) # Character to display on multi-push buttons (U+21B7, clockwise arrow)
btn_text_cycle_small = unichr(0x2193) # Smaller version when there is little space (U+2193, down arrow)
btn_text_switch = unichr(0x21C4) # Character to switch left-right (U+21C4, paired left/right arrows)
## color_scheme Color scheme, text choice
# This controls the color scheme used by Quisk. The default color scheme is A, and you can change this scheme
# in your config file. Other color schemes are available here.
color_scheme = 'A'
#color_scheme = 'B'
#color_scheme = 'C'
## waterfall_palette Waterfall colors, text choice
# This controls the colors used in the waterfall. The default color scheme is A, and you can change this scheme
# in your config file. Other color schemes are available here.
waterfall_palette = 'A'
#waterfall_palette = 'B'
#waterfall_palette = 'C'
################ Colors
# Thanks to <NAME>, KB8RWQ for the patch adding additional color control.
# Thanks to Christof, DJ4CM for the patch adding additional color control.
# Define colors used by all widgets in wxPython colour format.
# This is the default color scheme, color scheme A. You can change these colors in your config file:
color_bg = 'light steel blue' # Lower screen background
color_bg_txt = 'black' # Lower screen text color
color_graph = 'lemonchiffon1' # Graph background
color_config2 = 'lemonchiffon3' # color in tab row of config screen
color_gl = 'grey' # Lines on the graph
color_graphticks = 'black' # Graph ticks
color_graphline = '#005500' # graph data line color
color_graphlabels = '#555555' # graph label color
color_btn = 'steelblue2' # button color
color_check_btn = 'yellow2' # color of a check button when it is checked
color_cycle_btn = 'goldenrod3' # color of a cycle button when it is checked
color_adjust_btn = 'orange3' # color of an adjustable button when it is checked
color_test = 'hot pink' # color of a button used for test (turn off for tx)
color_freq = 'lightcyan1' # background color of frequency and s-meter
color_freq_txt = 'black' # text color of frequency display
color_entry = color_freq # frequency entry box
color_entry_txt = 'black' # text color of entry box
color_enable = 'black' # text color for an enabled button
color_disable = 'white' # text color for a disabled button
color_popchoice = 'maroon' # text color for button that pops up a row of buttons
color_bandwidth = 'lemonchiffon3' # color for bandwidth display; thanks to WB4JFI
color_txline = 'red' # vertical line color for tx in graph
color_rxline = 'green' # vertical line color for rx in graph
color_graph_msg_fg = 'black' # text messages on the graph screen
color_graph_msg_bg = 'lemonchiffon2' # background of text messages on the graph screen
# This color scheme B, a dark color scheme designed by <NAME>, KB8RWQ.
# Additional colors added by N2ADR.
# Keys mirror the color_* variables of the default scheme A defined above.
color_scheme_B = dict(
  color_bg = '#111111',
  color_bg_txt = 'white',
  color_graph = '#111111',
  color_config2 = '#111111',
  color_gl = '#555555',
  color_graphticks = '#DDDDDD',
  color_graphline = '#00AA00',
  color_graphlabels = '#FFFFFF',
  color_btn = '#666666',
  color_check_btn = '#996699',
  color_cycle_btn = '#666699',
  color_adjust_btn = '#669999',
  color_test = 'hot pink',
  color_freq = '#333333',
  color_freq_txt = 'white',
  color_entry = '#333333',
  color_entry_txt = 'white',
  color_enable = 'white',
  color_disable = 'black',
  color_popchoice = 'maroon',
  color_bandwidth = '#333333',
  color_txline = 'red',
  color_rxline = 'green',
  color_graph_msg_fg = 'white',
  color_graph_msg_bg = '#111111',
  )
# This is color scheme C:
#######################################################################################
#
# Color scheme designed by Sergio, IK8HTM. 04/06/2016
# '#red red green green blue blue' x00 to xFF
# '#FFFFFF' = white
# '#000000' = black
#
#######################################################################################
# Keys mirror the color_* variables of the default scheme A defined above.
color_scheme_C = dict(
  color_bg = '#123456',
  color_bg_txt = '#FFFFFF',
  color_graph = 'lightcyan3',
  color_config2 = '#0000FF',
  color_gl = '#555555',
  color_graphticks = '#DDDDDD',
  color_graphline = '#00AA00',
  color_graphlabels = '#000000',
  color_btn = '#223344',
  color_check_btn = '#A07315',
  color_cycle_btn = '#0031C4',
  color_adjust_btn = '#669999',
  color_test = '#E73EE7',
  color_freq = '#333333',
  color_freq_txt = '#FEF80A',
  color_entry = '#333333',
  color_entry_txt = '#FEF80A',
  color_enable = '#FFFFFF',
  color_disable = '#000000',
  color_popchoice = '#D76B00',
  color_bandwidth = 'lemonchiffon1',
  color_txline = '#FF0000',
  color_rxline = '#3CC918',
  color_graph_msg_fg = '#000000',
  color_graph_msg_bg = 'lemonchiffon2',
  )
#############################################################################################
# These are the palettes for the waterfall. The one used is named waterfallPalette,
# so to use a different one, overwrite this name in your configuration file.
# Each entry is (signal level 0-255, red, green, blue). Colors for levels
# between entries are presumably interpolated by the display code -- TODO confirm.
waterfallPalette = (
     ( 0,   0,   0,   0),
     ( 36,  85,   0, 255),
     ( 73, 153,   0, 255),
     (109, 255,   0, 128),
     (146, 255, 119,   0),
     (182,  85, 255, 100),
     (219, 255, 255,   0),
     (255, 255, 255, 255)
     )
digipanWaterfallPalette = (
( 0, 0, 0, 0),
( 32, 0, 0, 62),
( 64, 0, 0, 126),
( 96, 145, 142, 96),
(128, 181, 184, 48),
(160, 223, 226, 105),
(192, 254, 254, 4),
(255, 255, 58, 0)
)
waterfallPaletteB = ( # from <NAME>
(0, 0, 0, 0),
(13, 0, 14, 14),
(26, 0, 40, 40),
(39, 0, 73, 73),
(43, 0, 94, 94),
(56, 0, 115, 121),
(69, 0, 87, 190),
(72, 0, 110, 252),
(85, 0, 166, 252),
(98, 0, 216, 252),
(112, 0, 247, 234),
(125, 2, 255, 124),
(138, 5, 255, 64),
(151, 154, 255, 0),
(164, 219, 255, 0),
(177, 247, 250, 0),
(190, 254, 233, 0),
(214, 254, 185, 0),
(227, 255, 125, 0),
(241, 255, 59, 0),
(255, 255, 0, 0)
)
waterfallPaletteC = ( # from <NAME>
(0, 0, 0, 0),
(32, 0, 25, 25),
(64, 6, 58, 41),
(96, 16, 78, 43),
(128, 29, 120, 41),
(160, 51, 144, 35),
(192, 116, 141, 43),
(224, 195, 198, 35),
(255, 245, 99, 3)
)
# This is the data used to draw colored lines on the frequency X axis to
# indicate CW and Phone sub-bands. You can make it anything you want.
# These are the colors used for sub-bands:
CW = '#FF4444' # General class CW
eCW = '#FF8888' # Extra class CW
Phone = '#4444FF' # General class phone
ePhone = '#8888FF' # Extra class phone
# ARRL band plan special frequencies
Data = '#FF9900'
DxData = '#CC6600'
RTTY = '#FF9900'
SSTV = '#FFFF00'
AM = '#00FF00'
Packet = '#00FFFF'
Beacons = '#66FF66'
Satellite = '#22AA88'
Repeater = '#AA00FF' # Repeater outputs
RepInput = '#AA88FF' # Repeater inputs
Simplex = '#00FF44'
Special = 'hot pink'
Other = '#888888'
# Colors start at the indicated frequency and continue until the
# next frequency. The special color "None" turns off color.
#
################ Bands
# Band plans vary by country, so they can be changed here.
# To change BandPlan in your config file, first remove any frequencies in the range
# you want to change; then add your frequencies; and then sort the list. Or you could just
# replace the whole list.
# These are the suppressed carrier frequencies for 60 meters
freq60 = (5330500, 5346500, 5357000, 5371500, 5403500)
# Band plan
BandPlan = [
# Test display of colors
#[ 0, CW], [ 50000, eCW], [ 100000, Phone], [ 150000, ePhone], [ 200000, Data], [ 250000, DxData], [ 300000, RTTY], [ 350000, SSTV],
#[ 400000, AM], [ 450000, Packet], [ 500000, Beacons], [ 550000, Satellite], [ 600000, Repeater], [ 650000, RepInput], [ 700000, Simplex],
#[ 750000, Other], [ 800000, Special], [ 850000, None],
# 137k
[ 130000, Data],
[ 150000, None],
# 500k
[ 490000, Data],
[ 510000, None],
# 160 meters
[ 1800000, Data],
[ 1809000, Other],
[ 1811000, CW],
[ 1843000, Phone],
[ 1908000, Other],
[ 1912000, Phone],
[ 1995000, Other],
[ 2000000, None],
# 80 meters
[ 3500000, eCW],
[ 3525000, CW],
[ 3570000, Data],
[ 3589000, DxData],
[ 3591000, Data],
[ 3600000, ePhone],
[ 3790000, Other],
[ 3800000, Phone],
[ 3844000, SSTV],
[ 3846000, Phone],
[ 3880000, AM],
[ 3890000, Phone],
[ 4000000, None],
# 60 meters
[ freq60[0], Phone],
[ freq60[0] + 2800, None],
[ freq60[1], Phone],
[ freq60[1] + 2800, None],
[ freq60[2], Phone],
[ freq60[2] + 2800, None],
[ freq60[3], Phone],
[ freq60[3] + 2800, None],
[ freq60[4], Phone],
[ freq60[4] + 2800, None],
# 40 meters
[ 7000000, eCW],
[ 7025000, CW],
[ 7039000, DxData],
[ 7041000, CW],
[ 7080000, Data],
[ 7125000, ePhone],
[ 7170000, SSTV],
[ 7172000, ePhone],
[ 7175000, Phone],
[ 7285000, AM],
[ 7295000, Phone],
[ 7300000, None],
# 30 meters
[10100000, CW],
[10130000, RTTY],
[10140000, Packet],
[10150000, None],
# 20 meters
[14000000, eCW],
[14025000, CW],
[14070000, RTTY],
[14095000, Packet],
[14099500, Other],
[14100500, Packet],
[14112000, CW],
[14150000, ePhone],
[14225000, Phone],
[14229000, SSTV],
[14231000, Phone],
[14281000, AM],
[14291000, Phone],
[14350000, None],
# 17 meters
[18068000, CW],
[18100000, RTTY],
[18105000, Packet],
[18110000, Phone],
[18168000, None],
# 15 meters
[21000000, eCW],
[21025000, CW],
[21070000, RTTY],
[21110000, CW],
[21200000, ePhone],
[21275000, Phone],
[21339000, SSTV],
[21341000, Phone],
[21450000, None],
# 12 meters
[24890000, CW],
[24920000, RTTY],
[24925000, Packet],
[24930000, Phone],
[24990000, None],
# 10 meters
[28000000, CW],
[28070000, RTTY],
[28150000, CW],
[28200000, Beacons],
[28300000, Phone],
[28679000, SSTV],
[28681000, Phone],
[29000000, AM],
[29200000, Phone],
[29300000, Satellite],
[29520000, Repeater],
[29590000, Simplex],
[29610000, Repeater],
[29700000, None],
# 6 meters
[50000000, Beacons],
[50100000, Phone],
[54000000, None],
# 4 meters
[70000000, Phone],
[70500000, None],
# 2 meters
[144000000, CW],
[144200000, Phone],
[144275000, Beacons],
[144300000, Satellite],
[144380000, Special],
[144400000, Satellite],
[144500000, RepInput],
[144900000, Other],
[145100000, Repeater],
[145500000, Other],
[145800000, Satellite],
[146010000, RepInput],
[146400000, Simplex],
[146510000, Special], # Simplex calling frequency
[146530000, Simplex],
[146610000, Repeater],
[147420000, Simplex],
[147600000, RepInput],
[148000000, None],
# 1.25 meters
[222000000, Phone],
[222250000, RepInput],
[223400000, Simplex],
[223520000, Data],
[223640000, Repeater],
[225000000, None],
#70 centimeters
[420000000, SSTV],
[432000000, Satellite],
[432070000, Phone],
[432300000, Beacons],
[432400000, Phone],
[433000000, Repeater],
[435000000, Satellite],
[438000000, Repeater],
[445900000, Simplex],
[445990000, Special], # Simplex calling frequency
[446010000, Simplex],
[446100000, Repeater],
[450000000, None],
# 33 centimeters
[902000000, Other],
[928000000, None],
# 23 centimeters
[1240000000, Other],
[1300000000, None],
# 13 centimeters
[2300000000, Other],
[2450000000, None],
# 9 centimeters
[3300000000, Other],
[3500000000, None],
# 5 centimeters
[5650000000, Other],
[5925000000, None],
# 3 centimeters
[10000000000, Other],
[10500000000, None],
]
## BandEdge Band Edge, dict
# For each band, this dictionary gives the lower and upper band edges. Frequencies
# outside these limits will not be remembered as the last frequency in the band.
# Keys are band names as used in bandLabels/bandState; values are (low_Hz, high_Hz).
BandEdge = {
 '137k':( 136000, 138000), '500k':( 400000, 600000),
 '160':( 1800000, 2000000), '80' :( 3500000, 4000000),
 '60' :( 5300000, 5430000), '40' :( 7000000, 7300000),
 '30' :(10100000, 10150000), '20' :(14000000, 14350000),
 '17' :(18068000, 18168000), '15' :(21000000, 21450000),
 '12' :(24890000, 24990000), '10' :(28000000, 29700000),
 '6' :( 50000000, 54000000),
 '4' :( 70000000, 70500000),
 '2' :( 144000000, 148000000),
 '1.25' :( 222000000, 225000000),
 '70cm' :( 420000000, 450000000),
 '33cm' :( 902000000, 928000000),
 '23cm' :(1240000000, 1300000000),
 '13cm' :(2300000000, 2450000000),
 '9cm' :(3300000000, 3500000000),
 '5cm' :(5650000000, 5925000000),
 '3cm' :(10000000000,10500000000),
 }
# For the Time band, this is the center frequency, tuning frequency and mode:
# Each entry is (center_freq_Hz, tune_offset_Hz, mode); the centers are the
# standard-time broadcast frequencies offset by 10 kHz so the 10 kHz tune
# offset lands on the carrier.
bandTime = [
 ( 2500000-10000, 10000, 'AM'),
 ( 3330000-10000, 10000, 'AM'),
 ( 5000000-10000, 10000, 'AM'),
 ( 7335000-10000, 10000, 'AM'),
 (10000000-10000, 10000, 'AM'),
 (14670000-10000, 10000, 'AM'),
 (15000000-10000, 10000, 'AM'),
 (20000000-10000, 10000, 'AM'),
 ]
## bandLabels Band Buttons, list
# This is the list of band buttons that Quisk displays, and it should have
# a length of 14 or less. Empty buttons can have a null string "" label.
# Note that the 60 meter band and the Time band have buttons that support
# multiple presses.
bandLabels = [
 'Audio', '160', '80', ('60',) * 5, '40', '30', '20', '17',
 '15', '12', '10', ('Time',) * len(bandTime)]
# This is a dictionary of shortcut keys for each band. If you do not want a shortcut, use ''. The shortcut
# character will be underlined in the label if present.
# NOTE(review): several shortcuts are reused ('1' for 160 and 12, '6' for 60 and 6,
# '4' for 40 and 4, ...) — presumably only one of each pair is shown at a time; verify.
bandShortcuts = {'Audio':'', '160':'1', '80':'8', '60':'6', '40':'4', '30':'3', '20':'2', '17':'7',
 '15':'5', '12':'1', '10':'0', 'Time':'e', '6':'6', '4':'4', '2':'2', '1.25':'5', '70cm':'7',
 '33cm':'3', '23cm':'', '13cm':'', '9cm':'', '5cm':'', '3cm':''}
## bandTransverterOffset Transverter Offset, dict
# If you use a transverter, you need to tune your hardware to a frequency lower than
# the frequency displayed by Quisk. For example, if you have a 2 meter transverter,
# you may need to tune your hardware from 28 to 30 MHz to receive 144 to 146 MHz.
# Enter the transverter offset in Hertz in this dictionary. For this to work, your
# hardware must support it. Currently, the HiQSDR, SDR-IQ and SoftRock are supported.
# Empty by default; uncomment/add entries per band, e.g. the 2 m example below.
bandTransverterOffset = {
# '2': 144000000 - 28000000
}
################ Obsolete
filter_display = 1 # Display the filter bandwidth on the graph screen; 0 or 1; thanks to WB4JFI
# For each band, this dictionary gives the initial center frequency, tuning
# frequency as an offset from the center frequency, and the mode. This is
# no longer too useful because the persistent_state feature saves and then
# overwrites these values anyway.
bandState = {'Audio':(0, 0, 'LSB'),
 '160':( 1890000, -10000, 'LSB'), '80' :( 3660000, -10000, 'LSB'),
 '60' :( 5370000, 1500, 'USB'), '40' :( 7180000, -5000, 'LSB'), '30':(10120000, -10000, 'CWL'),
 'Time':( 5000000, 0, 'AM')}
# Seed a default state for every band above 13.5 MHz that has no explicit entry:
# mid-band frequency rounded to the nearest 10 kHz, +10 kHz tune offset, USB mode.
for band in BandEdge:
  f1, f2 = BandEdge[band]
  if f1 > 13500000:
    f = (f1 + f2) // 2
    f = (f + 5000) // 10000
    f *= 10000
    bandState[band] = (f, 10000, 'USB')
# Select the method to test the state of the key; see is_key_down.c
key_method = "" # No keying, or internal method
# key_method = "/dev/parport0" # Use the named parallel port
# key_method = "/dev/ttyS0" # Use the named serial port
# key_method = "192.168.1.44" # Use UDP from this address
#
# Quisk can save its current state in a file on exit, and restore it when you restart.
# State includes band, frequency and mode, but not every item of state (not screen).
# The file is .quisk_init.pkl in the same directory as your config file. If this file
# becomes corrupted, just delete it and it will be reconstructed.
#persistent_state = False
persistent_state = True
# Select the default mode when Quisk starts (overruled by persistent_state):
# default_mode = 'FM'
default_mode = 'USB'
# If you use a soundcard with Ethernet control of the VFO, set these parameters:
rx_ip = "" # Receiver IP address for VFO control
# This determines what happens when you tune by dragging the mouse. The correct
# choice depends on how your hardware performs tuning. You may want to use a
# custom hardware file with a custom ChangeFrequency() method too.
mouse_tune_method = 0 # The Quisk tune frequency changes and the VFO frequency is unchanged.
#mouse_tune_method = 1 # The Quisk tune frequency is unchanged and the VFO changes.
# configurable mouse wheel thanks to DG7MGY
mouse_wheelmod = 50 # Round frequency when using mouse wheel (50 Hz)
|
141373
|
import threading
from data import config
import method
from script.x_developer import developer_tools
class interface(threading.Thread):
    """Terminal front-end thread: reads commands from stdin and dispatches them
    to the script library or the developer tools.

    Recognized commands (see TERMINAL_COMMAND): exit, run a script by name or
    number, list the script library, open developer tools, or the one-shot
    form ``run.script.<name>``.
    """
    scriptLibrary = None
    developerTools = None
    SCA = [
        "ONLINE",
        "OFFLINE",
    ]
    TERMINAL_COMMAND = [
        "sca.exit",
        "run.script",
        "run.developer",
        "sca.library",
        "run",
    ]
    TERMINAL_INTERFACE = [
        "SCA: ",
        " (NOT FOUND)",
        " SCRIPT LOOP BREAK",
        "STARTING LOOP | ",
        ": ONLINE",
        "SCRIPT PAUSED, NEW START IN 30 SEC",
        "TASK IS DONE",
        "PRESS[e] TO EXIT SCA, PRESS[p] TO PAUSE THE SCRIPT: ",
        "SCRIPT PAUSES, NEW START AFTER [{}] SECONDS",
    ]
    TERMINAL_LOGIN = [
        "USERNAME: ",
        "PASSWORD: ",
        "USER NOT DETECTED, IF YOU WANT RUN SCRIPT WITHOUT LOGIN, PRESS[y] AND ENTER: ",
        "CLIENT NOT DETECTED, IF YOU WANT RUN SCRIPT WITHOUT LOGIN, PRESS[y] AND ENTER: ",
        "CLIENT TYPE NOT DETECTED, IF YOU WANT RUN SCRIPT WITHOUT LOGIN, PRESS[y] AND ENTER: ",
    ]
    CLIENT_PROCESS = [
        "- | OSRS: NOT FOUND",
        "+ | OSRS: DETECTED",
    ]
    CLIENT_TYPE = [
        "+ | RUNELITE: DETECTED",
        "+ | OSBUDDY: DETECTED",
        "+ | OSRS: DETECTED",
    ]
    ENGINE_INTERFACE = [
        "(DEV) ",
        "(DEV)(P): ",
    ]
    ENGINE_INPUT = [
        "HOT MUCH TIME GET POSITION COORDINATES: ",
    ]
    OBJECT_DETECTION = [
        "WRITE TARGET: ",
        "TO START SCRIPT WRITE 'start'",
        "ADDED --> ",
        "KILL COUNT --> ",
        "+ | SYSTEM READY, STARTING SCRIPT@",
        "AMES DETECTED[FAILSAFE]@",
    ]
    MODULE = [
        "INITIALIZING SCRIPT...",
        "SCRIPT STARTING AT: ",
        "SCRIPT START!",
    ]
    SCRIPT = [
        "CHOOSE SHOP(Varrock: 1)(Falador: 2)(PestControl: 3)(Yanille: 4): "
    ]
    INTERFACE = [
        "----------------------------------------------------------------------------------",
    ]
    LOOP = 0
    STATUS = True
    COMMAND = None
    COMMAND2 = None
    COMMAND_ARRAY = None

    def __init__(self):
        self.scriptLibrary = method.method()
        self.developerTools = developer_tools.developer_tools()
        threading.Thread.__init__(self)

    def run(self):
        # Thread main loop: keep prompting until a dispatched command clears STATUS.
        while self.STATUS:
            self.terminal()

    def printLogo(self):
        """Print the ASCII-art banner and the version footer."""
        print(" ____ ____ ____ _____ _____ _ _ ____ _ ___ ____ _ __ _ ___ ");
        print("/ ___| / ___| _ \| ____| ____| \ | | / ___| | |_ _/ ___| |/ / / \ |_ _| ");
        print("\___ \| | | |_) | _| | _| | \| | | | | | | | | | ' / / _ \ | |");
        print(" ___) | |___| _ <| |___| |___| |\ | | |___| |___ | | |___| . \ / ___ \ | | ");
        print("|____/ \____|_| \_\_____|_____|_| \_|___\____|_____|___\____|_|\_\___/_/ \_\___|");
        print(" |_____| |_____| \n");
        print(" POWERED BY " + config.CLIENT_TECHNOLOGY + " | WRITTED BY " + config.CLIENT_AUTHOR + " | VERSION " + str(config.CLIENT_VERSION) + "\n\n");

    def printClientProcess(self, value):
        """Print the OSRS client process detection line for the given index."""
        print("{}".format(self.CLIENT_PROCESS[value]))

    def printClientType(self, value):
        """Print the detected client-type line for the given index."""
        print("{}".format(self.CLIENT_TYPE[value]))

    def printSuc(self):
        """Print the 'SCA: ONLINE' status line."""
        print(self.TERMINAL_INTERFACE[0] + self.SCA[0])

    def clear(self):
        """Scroll the terminal by printing 21 blank lines."""
        self.LOOP = 0
        while self.LOOP <= 20:
            print("\n")
            self.LOOP += 1

    def terminal(self):
        """Read one command from stdin and dispatch it."""
        self.COMMAND = input(self.TERMINAL_INTERFACE[0])
        self.COMMAND_ARRAY = self.COMMAND.split(".")
        if self.COMMAND == self.TERMINAL_COMMAND[0]:
            quit()
        elif self.COMMAND == self.TERMINAL_COMMAND[1]:
            # "run.script": prompt for a script name, then start it.
            self.COMMAND = input(self.TERMINAL_INTERFACE[0]+"run.script --> ")
            if self.COMMAND == self.scriptLibrary.getScript(self.COMMAND,None):
                self.scriptLibrary.setScript(self.COMMAND,None)
                self.STATUS = False
        elif self.COMMAND == self.TERMINAL_COMMAND[4]:
            # "run": prompt for a numeric script id, then start it.
            self.COMMAND2 = int(input(self.TERMINAL_INTERFACE[0]+"run.script --> "))
            if self.COMMAND2 == self.scriptLibrary.getScript(None,self.COMMAND2):
                self.scriptLibrary.setScript(None,self.COMMAND2)
                self.STATUS = False
        elif self.COMMAND == self.TERMINAL_COMMAND[3]:
            self.scriptLibrary.printNameScript()
        elif self.COMMAND == self.TERMINAL_COMMAND[2]:
            self.developerTools.getPosition()
        elif len(self.COMMAND_ARRAY) >= 3:
            # One-shot "run.script.<name>" form. BUGFIX: the original branch
            # accepted any dotted command (len != 1) and then read
            # COMMAND_ARRAY[2], raising IndexError for two-part commands such
            # as "run.foo"; require at least three dotted parts instead.
            if self.COMMAND == self.TERMINAL_COMMAND[1] + "." + self.COMMAND_ARRAY[2]:
                self.scriptLibrary.setScript(self.COMMAND_ARRAY[2], None)
                self.STATUS = False
|
141390
|
from dataclasses import dataclass
from typing import Any, Literal, Optional
from aiohttp import ClientSession
@dataclass
class Response:
    """Lightweight container for an HTTP response: status code plus decoded body."""
    status: int
    body: Any
class Request:
    """Thin async HTTP helper that lazily creates and reuses one aiohttp session."""

    def __init__(self, client_session: Optional[ClientSession] = None) -> None:
        self.client_session = client_session

    async def close(self):
        """Close the underlying session, if one exists."""
        if self.client_session:
            await self.client_session.close()

    async def request(
        self,
        method: Literal["GET", "POST"],
        url: str,
        return_method: Literal["json", "text", "read"],
        **kwargs: Any
    ):
        """Issue *method* on *url*; decode the body via the named response method."""
        if not self.client_session:
            # Lazily create the session on first use.
            self.client_session = ClientSession()
        async with self.client_session.request(method, url, **kwargs) as resp:
            body = await getattr(resp, return_method)()
            return Response(resp.status, body)

    async def get(
        self, url: str, return_method: Literal["json", "text", "read"], **kwargs: Any
    ):
        """Shorthand for ``request("GET", ...)``."""
        return await self.request("GET", url, return_method, **kwargs)

    async def post(
        self, url: str, return_method: Literal["json", "text", "read"], **kwargs: Any
    ):
        """Shorthand for ``request("POST", ...)``."""
        return await self.request("POST", url, return_method, **kwargs)
|
141437
|
from pathlib import Path
import abc
import logging
import io
import importlib
import time
from _collections import OrderedDict
import traceback
import pandas as pd
import numpy as np
import shutil
from graphviz import Digraph
from ibllib.misc import version
import one.params
from one.alf.files import add_uuid_string
from iblutil.io.parquet import np2str
from ibllib.oneibl.registration import register_dataset
from ibllib.oneibl.patcher import FTPPatcher, SDSCPatcher, SDSC_ROOT_PATH, SDSC_PATCH_PATH
from one.util import filter_datasets
_logger = logging.getLogger('ibllib')
class Task(abc.ABC):
    """Abstract base class for a single pipeline job.

    Subclasses implement :meth:`_run`; :meth:`run` wraps it with status
    management, log capture and timing, and :meth:`register_datasets`
    registers the produced files according to the run ``location``.
    """
    cpu = 1
    gpu = 0
    io_charge = 5  # integer percentage
    priority = 30  # integer percentage, 100 means highest priority
    ram = 4  # RAM needed to run (Go)
    one = None  # one instance (optional)
    level = 0
    outputs = None  # output files of the last run: list, single Path, [] or None
    time_elapsed_secs = None
    time_out_secs = None
    version = version.ibllib()
    log = ''  # captured log of the last run (was declared twice in the original; kept once)
    signature = {'input_files': (), 'output_files': ()}  # tuple (filename, collection, required_flag)

    def __init__(self, session_path, parents=None, taskid=None, one=None,
                 machine=None, clobber=True, aws=None, location='server'):
        """
        :param session_path: path of the session this task operates on
        :param parents: list of parent Task instances, defaults to []
        :param taskid: Alyx task id, required for status updates on Alyx
        :param one: ONE instance, required for any Alyx interaction
        :param machine: string identifying the machine the task is run on
        :param clobber: if True (default) overwrite any existing log on re-run
        :param aws: AWS data handler, only used when location == 'AWS'
        :param location: one of 'server', 'remote', 'SDSC', 'AWS'
        """
        self.taskid = taskid
        self.one = one
        self.session_path = session_path
        self.register_kwargs = {}
        self.parents = parents if parents else []
        self.machine = machine
        self.clobber = clobber
        self.location = location
        self.aws = aws

    @property
    def name(self):
        """Class name of the task."""
        return self.__class__.__name__

    def run(self, **kwargs):
        """
        --- do not overload, see _run() below---
        wraps the _run() method with
        - error management
        - logging to variable
        """
        # if taskid of one properties are not available, local run only without alyx
        use_alyx = self.one is not None and self.taskid is not None
        if use_alyx:
            tdict = self.one.alyx.rest('tasks', 'partial_update', id=self.taskid,
                                       data={'status': 'Started'})
            self.log = ('' if not tdict['log'] else tdict['log'] +
                        '\n\n=============================RERUN=============================\n')
        # setup
        self.setUp()
        # Setup the console handler with a StringIO object so the run log can be captured
        log_capture_string = io.StringIO()
        ch = logging.StreamHandler(log_capture_string)
        str_format = '%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
        ch.setFormatter(logging.Formatter(str_format))
        _logger.addHandler(ch)
        _logger.info(f"Starting job {self.__class__}")
        if self.machine:
            _logger.info(f"Running on machine: {self.machine}")
        _logger.info(f"running ibllib version {version.ibllib()}")
        # run
        start_time = time.time()
        self.status = 0
        try:
            self.outputs = self._run(**kwargs)
            _logger.info(f"Job {self.__class__} complete")
        except BaseException:
            _logger.error(traceback.format_exc())
            _logger.info(f"Job {self.__class__} errored")
            self.status = -1
        self.time_elapsed_secs = time.time() - start_time
        # log the outputs
        if isinstance(self.outputs, list):
            nout = len(self.outputs)
        elif self.outputs is None:
            nout = 0
        else:
            nout = 1
        _logger.info(f"N outputs: {nout}")
        _logger.info(f"--- {self.time_elapsed_secs} seconds run-time ---")
        # after the run, capture the log output, amend to any existing logs if not overwrite
        new_log = log_capture_string.getvalue()
        self.log = new_log if self.clobber else self.log + new_log
        log_capture_string.close()
        _logger.removeHandler(ch)
        # tear down
        self.tearDown()
        return self.status

    def register_datasets(self, one=None, **kwargs):
        """
        Register output datasets from the task to Alyx
        :param one: ONE instance
        :param kwargs: directly passed to the register_dataset function
        :return: the registered datasets (backend dependent)
        """
        assert one
        if self.location == 'server':
            return self._register_datasets_server(one=one, **kwargs)
        elif self.location == 'remote':
            return self._register_datasets_remote(one=one, **kwargs)
        elif self.location == 'SDSC':
            return self._register_datasets_SDSC(one=one, **kwargs)
        elif self.location == 'AWS':
            return self._register_datasets_AWS(one=one, **kwargs)

    def _versions(self):
        # One ibllib version string per output file (previously duplicated in
        # each of the four _register_datasets_* methods).
        if isinstance(self.outputs, list):
            return [self.version for _ in self.outputs]
        return [self.version]

    def _register_datasets_server(self, one=None, **kwargs):
        # Register files directly from the local lab server.
        if self.outputs:
            return register_dataset(self.outputs, one=one, versions=self._versions(), **kwargs)

    def _register_datasets_remote(self, one=None, **kwargs):
        # Upload from a generic compute node through the FTP patcher.
        if self.outputs:
            ftp_patcher = FTPPatcher(one=one)
            return ftp_patcher.create_dataset(path=self.outputs, created_by=self.one.alyx.user,
                                              versions=self._versions(), **kwargs)

    def _register_datasets_SDSC(self, one=None, **kwargs):
        # Patch datasets in place on the SDSC / flatiron file system.
        if self.outputs:
            sdsc_patcher = SDSCPatcher(one=one)
            return sdsc_patcher.patch_datasets(self.outputs, dry=False, versions=self._versions(),
                                               **kwargs)

    def _register_datasets_AWS(self, one=None, **kwargs):
        # GO through FTP patcher
        if self.outputs:
            ftp_patcher = FTPPatcher(one=one)
            return ftp_patcher.create_dataset(path=self.outputs, created_by=self.one.alyx.user,
                                              versions=self._versions(), **kwargs)

    def rerun(self):
        """Re-run the task with overwrite enabled."""
        self.run(overwrite=True)

    @abc.abstractmethod
    def _run(self, overwrite=False):
        """
        This is the method to implement
        :param overwrite: (bool) if the output already exists,
        :return: out_files: files to be registered. Could be a list of files (pathlib.Path),
        a single file (pathlib.Path) an empty list [] or None.
        Within the pipeline, there is a distinction between a job that returns an empty list
        and a job that returns None. If the function returns None, the job will be labeled as
        "empty" status in the database, otherwise, the job has an expected behaviour of not
        returning any dataset.
        """

    def setUp(self):
        """
        Function to optionally overload to check inputs.
        Dispatches to the location-specific setup method.
        :return:
        """
        # if on local server don't do anything
        if self.location == 'server':
            self._setUp_server()
        elif self.location == 'remote':
            self._setUp_remote()
        elif self.location == 'SDSC':
            self._setUp_SDSC()
        elif self.location == 'AWS':
            self._setUp_AWS()

    def _setUp_server(self):
        # Data are already in place on the local lab server.
        pass

    def _setUp_remote(self):
        # Download the input datasets through ONE.
        assert self.one
        df = self._getData()
        self.one._download_datasets(df)

    def _setUp_SDSC(self):
        # Symlink the flatiron files into a per-task temporary patch folder.
        assert self.one
        df = self._getData()
        SDSC_TMP = Path(SDSC_PATCH_PATH.joinpath(self.__class__.__name__))
        for _, d in df.iterrows():
            file_path = Path(d['session_path']).joinpath(d['rel_path'])
            file_uuid = add_uuid_string(file_path, np2str(np.r_[d.name[0], d.name[1]]))
            file_link = SDSC_TMP.joinpath(file_path)
            file_link.parent.mkdir(exist_ok=True, parents=True)
            file_link.symlink_to(
                Path(SDSC_ROOT_PATH.joinpath(file_uuid)))
        # NOTE(review): uses the last row's session_path; assumes all input
        # datasets belong to the same session — confirm.
        self.session_path = SDSC_TMP.joinpath(d['session_path'])

    def _setUp_AWS(self):
        # Download the input datasets from the AWS S3 store.
        assert self.aws
        assert self.one
        df = self._getData()
        self.aws._download_datasets(df)

    def tearDown(self):
        """
        Function to optionally overload to check results
        """
        pass

    def _getData(self):
        """
        Function to optionally overload to download/ create links to data
        Important when running tasks in remote or SDSC locations
        :return: pandas DataFrame of the datasets matching the input-file signature
        """
        assert self.one
        session_datasets = self.one.list_datasets(self.one.path2eid(self.session_path), details=True)
        df = pd.DataFrame(columns=self.one._cache.datasets.columns)
        for file in self.signature['input_files']:
            matches = filter_datasets(session_datasets, filename=file[0], collection=file[1],
                                      wildcards=True, assert_unique=False)
            # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
            df = pd.concat([df, matches])
        return df

    def cleanUp(self):
        """
        Function to optionally overload to clean up
        :return:
        """
        if self.location == 'SDSC':
            self._cleanUp_SDSC()

    def _cleanUp_SDSC(self):
        # Double check we are dealing with the SDSC temp folder
        assert SDSC_PATCH_PATH.parts[0:4] == self.session_path.parts[0:4]
        shutil.rmtree(self.session_path)

    def assert_expected_outputs(self):
        """
        After a run, asserts that all signature files are present at least once in the output files
        Mainly useful for integration tests
        :raises FileNotFoundError: if any expected output is missing
        """
        assert self.status == 0
        everything_is_fine = True
        for expected_file in self.signature['output_files']:
            actual_files = list(self.session_path.rglob(str(Path(expected_file[1]).joinpath(expected_file[0]))))
            if len(actual_files) == 0:
                everything_is_fine = False
                _logger.error(f"Signature file expected {expected_file} not found in the output")
        if not everything_is_fine:
            # outputs may be None (empty job) — guard against iterating None
            for out in self.outputs or []:
                _logger.error(f"{out}")
            raise FileNotFoundError("Missing outputs after task completion")
class Pipeline(abc.ABC):
    """
    Pipeline class: collection of related and potentially interdependent tasks
    """
    tasks = OrderedDict()  # ordered mapping of task name -> Task instance
    one = None  # ONE instance used for all Alyx REST calls
    def __init__(self, session_path=None, one=None, eid=None):
        # Either a local session path or an Alyx experiment id is required.
        assert session_path or eid
        self.one = one
        if one and one.alyx.cache_mode and one.alyx.default_expiry.seconds > 1:
            _logger.warning('Alyx client REST cache active; this may cause issues with jobs')
        self.eid = eid
        if session_path:
            self.session_path = session_path
            if not self.eid:
                # eID for newer sessions may not be in cache so use remote query
                self.eid = one.path2eid(session_path, query_type='remote') if self.one else None
        self.label = self.__module__ + '.' + type(self).__name__
    def make_graph(self, out_dir=None, show=True):
        # Render the task dependency graph with graphviz; tasks without parents
        # hang off a synthetic 'root' node labelled with the pipeline name.
        if not out_dir:
            out_dir = self.one.alyx.cache_dir if self.one else one.params.get().CACHE_DIR
        m = Digraph('G', filename=str(Path(out_dir).joinpath(self.__module__ + '_graphs.gv')))
        m.attr(rankdir='TD')
        e = Digraph(name='cluster_' + self.label)
        e.attr('node', shape='box')
        e.node('root', label=self.label)
        e.attr('node', shape='ellipse')
        for k in self.tasks:
            j = self.tasks[k]
            if len(j.parents) == 0:
                e.edge('root', j.name)
            else:
                [e.edge(p.name, j.name) for p in j.parents]
        m.subgraph(e)
        # NOTE(review): the stray backslash before 'Pre-processing' looks like a
        # typo in the graph label — confirm intended text.
        m.attr(label=r'\n\Pre-processing\n')
        m.attr(fontsize='20')
        if show:
            m.view()
        return m
    def create_alyx_tasks(self, rerun__status__in=None):
        """
        Instantiate the pipeline and create the tasks in Alyx, then create the jobs for the session
        If the jobs already exist, they are left untouched. The re-run parameter will re-init the
        job by emptying the log and set the status to Waiting
        :param rerun__status__in: by default no re-run. To re-run tasks if they already exist,
        specify a list of statuses string that will be re-run, those are the possible choices:
        ['Waiting', 'Started', 'Errored', 'Empty', 'Complete']
        to always patch, the string '__all__' can also be provided
        :return: list of alyx tasks dictionaries (existing and or created)
        """
        rerun__status__in = rerun__status__in or []
        if rerun__status__in == '__all__':
            rerun__status__in = ['Waiting', 'Started', 'Errored', 'Empty', 'Complete']
        assert self.eid
        if self.one is None:
            _logger.warning("No ONE instance found for Alyx connection, set the one property")
            return
        tasks_alyx_pre = self.one.alyx.rest('tasks', 'list',
                                            session=self.eid, graph=self.name, no_cache=True)
        tasks_alyx = []
        # creates all the tasks by iterating through the ordered dict
        for k, t in self.tasks.items():
            # get the parents alyx ids to reference in the database
            if len(t.parents):
                pnames = [p.name for p in t.parents]
                parents_ids = [ta['id'] for ta in tasks_alyx if ta['name'] in pnames]
            else:
                parents_ids = []
            task_dict = {'executable': f"{t.__module__}.{t.name}", 'priority': t.priority,
                         'io_charge': t.io_charge, 'gpu': t.gpu, 'cpu': t.cpu,
                         'ram': t.ram, 'module': self.label, 'parents': parents_ids,
                         'level': t.level, 'time_out_sec': t.time_out_secs, 'session': self.eid,
                         'status': 'Waiting', 'log': None, 'name': t.name, 'graph': self.name}
            # if the task already exists, patch it otherwise, create it
            # (default [] makes len(talyx) == 0 when no matching task was found)
            talyx = next(filter(lambda x: x["name"] == t.name, tasks_alyx_pre), [])
            if len(talyx) == 0:
                talyx = self.one.alyx.rest('tasks', 'create', data=task_dict)
            elif rerun__status__in == '__all__' or talyx['status'] in rerun__status__in:
                talyx = self.one.alyx.rest(
                    'tasks', 'partial_update', id=talyx['id'], data=task_dict)
            tasks_alyx.append(talyx)
        return tasks_alyx
    def run(self, status__in=['Waiting'], machine=None, clobber=True, **kwargs):
        """
        Get all the session related jobs from alyx and run them
        :param status__in: lists of status strings to run in
        ['Waiting', 'Started', 'Errored', 'Empty', 'Complete']
        :param machine: string identifying the machine the task is run on, optional
        :param clobber: bool, if True any existing logs are overwritten, default is True
        :param kwargs: arguments passed downstream to run_alyx_task
        :return: jalyx: list of REST dictionaries of the job endpoints
        :return: job_deck: list of REST dictionaries of the jobs endpoints
        :return: all_datasets: list of REST dictionaries of the dataset endpoints
        """
        assert self.session_path, "Pipeline object has to be declared with a session path to run"
        if self.one is None:
            _logger.warning("No ONE instance found for Alyx connection, set the one property")
            return
        task_deck = self.one.alyx.rest('tasks', 'list', session=self.eid, no_cache=True)
        # [(t['name'], t['level']) for t in task_deck]
        all_datasets = []
        for i, j in enumerate(task_deck):
            if j['status'] not in status__in:
                continue
            # here we update the status in-place to avoid another hit to the database
            task_deck[i], dsets = run_alyx_task(tdict=j, session_path=self.session_path,
                                                one=self.one, job_deck=task_deck,
                                                machine=machine, clobber=clobber)
            if dsets is not None:
                all_datasets.extend(dsets)
        return task_deck, all_datasets
    def rerun_failed(self, **kwargs):
        """Run again all tasks that did not complete (Waiting/Held/Started/Errored/Empty)."""
        return self.run(status__in=['Waiting', 'Held', 'Started', 'Errored', 'Empty'], **kwargs)
    def rerun(self, **kwargs):
        """Run again all tasks regardless of status, including Complete ones."""
        return self.run(status__in=['Waiting', 'Held', 'Started', 'Errored', 'Empty', 'Complete'],
                        **kwargs)
    @property
    def name(self):
        # Class name of the pipeline, used as the Alyx 'graph' identifier.
        return self.__class__.__name__
def run_alyx_task(tdict=None, session_path=None, one=None, job_deck=None,
                  max_md5_size=None, machine=None, clobber=True, location='server'):
    """
    Runs a single Alyx job and registers output datasets
    :param tdict: task dictionary as returned by the Alyx 'tasks' endpoint
    :param session_path:
    :param one:
    :param job_deck: optional list of job dictionaries belonging to the session. Needed
    to check dependency status if the jdict has a parent field. If jdict has a parent and
    job_deck is not entered, will query the database
    :param max_md5_size: in bytes, if specified, will not compute the md5 checksum above a given
    filesize to save time
    :param machine: string identifying the machine the task is run on, optional
    :param clobber: bool, if True any existing logs are overwritten, default is True
    :param location: where you are running the task, 'server' - local lab server, 'remote' - any
    compute node/ computer, 'SDSC' - flatiron compute node, 'AWS' - using data from aws s3
    :return: the updated task dict and the list of registered datasets
    """
    registered_dsets = []
    if len(tdict['parents']):
        # here we need to check parents status, get the job_deck if not available
        if not job_deck:
            job_deck = one.alyx.rest('tasks', 'list', session=tdict['session'], no_cache=True)
        # check the dependencies
        parent_tasks = filter(lambda x: x['id'] in tdict['parents'], job_deck)
        parent_statuses = [j['status'] for j in parent_tasks]
        # if any of the parent tasks is not complete, throw a warning
        if any(map(lambda s: s != 'Complete', parent_statuses)):
            _logger.warning(f"{tdict['name']} has unmet dependencies")
            # if parents are just waiting, don't do anything, but if they have a failed status
            # set the current task status to Held
            if any(map(lambda s: s in ['Errored', 'Held', 'Empty'], parent_statuses)):
                tdict = one.alyx.rest('tasks', 'partial_update', id=tdict['id'],
                                      data={'status': 'Held'})
            return tdict, registered_dsets
    # creates the job from the module name in the database
    exec_name = tdict['executable']
    strmodule, strclass = exec_name.rsplit('.', 1)
    classe = getattr(importlib.import_module(strmodule), strclass)
    task = classe(session_path, one=one, taskid=tdict['id'], machine=machine, clobber=clobber,
                  location=location)
    # sets the status flag to started before running
    one.alyx.rest('tasks', 'partial_update', id=tdict['id'], data={'status': 'Started'})
    status = task.run()
    patch_data = {'time_elapsed_secs': task.time_elapsed_secs, 'log': task.log,
                  'version': task.version}
    # if there is no data to register, set status to Empty
    if task.outputs is None:
        patch_data['status'] = 'Empty'
    # otherwise register data and set status to Complete on success
    else:
        try:
            registered_dsets = task.register_datasets(one=one, max_md5_size=max_md5_size)
        except BaseException:
            # BUGFIX: the original set 'Errored' here and then unconditionally
            # overwrote it with 'Complete' on the next line; keep the failure
            # status and log the traceback so registration errors are visible.
            _logger.error(traceback.format_exc())
            patch_data['status'] = 'Errored'
        else:
            patch_data['status'] = 'Complete'
    # overwrite status to errored when the task run itself failed
    if status == -1:
        patch_data['status'] = 'Errored'
    # update task status on Alyx
    t = one.alyx.rest('tasks', 'partial_update', id=tdict['id'], data=patch_data)
    task.cleanUp()
    return t, registered_dsets
|
141446
|
from odoo import fields, models
class Tag(models.Model):
    """To-do tag; tags form a tree via parent_id with a parent-store index."""
    _name = 'todo.task.tag'
    _description = 'To-do Tag'
    name = fields.Char('Name', translate=True)
    # Many2many inverse relationship
    task_ids = fields.Many2many(
        'todo.task',
        string='Tasks')
    # Hierarchic relationships:
    _parent_store = True
    _parent_name = 'parent_id'  # the default
    # NOTE(review): the parent_left/parent_right pair matches Odoo <= 11;
    # Odoo 12+ replaced them with a single `parent_path` Char field — confirm
    # the targeted Odoo version.
    parent_id = fields.Many2one(
        'todo.task.tag',
        'Parent Tag',
        ondelete='restrict')
    parent_left = fields.Integer('Parent Left', index=True)
    parent_right = fields.Integer('Parent Right', index=True)
    child_ids = fields.One2many(
        'todo.task.tag',
        'parent_id',
        'Child Tags')
|
141471
|
from streamlink.plugins.ard_mediathek import ARDMediathek
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlARDMediathek(PluginCanHandleUrl):
    """URL-matching tests for the ARDMediathek plugin (driven by PluginCanHandleUrl)."""
    __plugin__ = ARDMediathek
    # URLs the plugin must claim
    should_match = [
        'http://mediathek.daserste.de/live',
        'http://www.ardmediathek.de/tv/Sportschau/'
    ]
    # URLs the plugin must not claim
    should_not_match = [
        'https://daserste.de/live/index.html',
        'https://www.daserste.de/live/index.html',
    ]
|
141504
|
import unittest
from orderbook.src.common.order import Order
from orderbook.src.common.priceTree import PriceTree
from orderbook.src.common.ptreeIterator import ComplexIterator
class TestPriceTree(unittest.TestCase):
    """Unit tests for PriceTree insertion, iteration and deletion."""

    def test_PriceTreeInsert(self):
        """Inserting n prices populates price_map and tracks min/max."""
        num_elements = 5
        price_tree = TestPriceTree.populate_tree(num_elements)
        # assertEqual reports both values on failure, unlike assertTrue(a == b)
        self.assertEqual(len(price_tree.price_map), num_elements)
        self.assertEqual(price_tree.max, num_elements - 1)
        self.assertEqual(price_tree.min, 0)

    def test_PriceTreeIterator(self):
        """The iterator visits every order, within and across price nodes."""
        ptree = PriceTree('test')
        ptree.insert_price_order(Order('B', 1, 5000, 7500))
        ptree.insert_price_order(Order('B', 2, 5000, 7500))
        # Same TreeNode iteration
        self.assertEqual(self._count_orders(ptree), 2)
        # Adding a separate TreeNode (different price)
        ptree.insert_price_order(Order('B', 3, 6000, 7500))
        ptree.insert_price_order(Order('B', 4, 6000, 7500))
        self.assertEqual(self._count_orders(ptree), 4)

    @staticmethod
    def _count_orders(ptree):
        """Consume a ComplexIterator over the tree and return the element count."""
        it = ComplexIterator(ptree.tree.values())
        count = 0
        while it.hasnext():
            next(it)
            count += 1
        return count

    def test_PriceTreeDelete(self):
        """Removing a price drops its node and every order booked at it."""
        ptree = PriceTree('test')
        ptree.insert_price_order(Order('B', 1, 5000, 7500))
        ptree.insert_price_order(Order('B', 2, 6000, 7500))
        ptree.insert_price_order(Order('B', 3, 5000, 7500))
        ptree.insert_price_order(Order('B', 4, 6000, 7500))
        # Two different prices
        self.assertEqual(len(ptree.price_map), 2)
        # Four different orders
        self.assertEqual(len(ptree.order_map), 4)
        # Removing one price leaves the other price's two orders
        ptree.remove_price(6000)
        self.assertEqual(len(ptree.price_map), 1)
        self.assertEqual(len(ptree.order_map), 2)
        # Remove the rest
        ptree.remove_price(5000)
        self.assertEqual(len(ptree.price_map), 0)
        self.assertEqual(len(ptree.order_map), 0)

    @staticmethod
    def populate_tree(num_elements):
        """Build a PriceTree holding prices 0 .. num_elements-1."""
        price_tree = PriceTree('test')
        for i in range(0, num_elements):
            price_tree.insert_price(i)
        return price_tree
|
141532
|
import functools
import statistics
# New in Python 3.8.
# functools.cached_property transforms a method into a property whose value is computed once,
# then cached as an ordinary attribute for the life of the instance. Like property(), but cached.
# class DataSet:
# def __init__(self, sequence_of_numbers):
# self._data = sequence_of_numbers
#
# @functools.cached_property
# def stdev(self):
# return statistics.stdev(self._data)
#
# @functools.cached_property
# def variance(self):
# return statistics.variance(self._data)
#
#
# ds = DataSet([1, 2, 3, 4, 5, 6])
# print(ds.stdev) #1.8708286933869707
from functools import cmp_to_key
class Solution:
    # @param {integer[]} nums
    # @return {string}
    def largestNumber(self, nums):
        """Join the numbers, as strings, in descending numeric order, comma-separated.

        Returns '0' for an empty input.
        """
        def numeric_desc(a, b):
            # Negative result orders a before b, so larger values come first.
            return int(b) - int(a)

        ordered = sorted((str(n) for n in nums), key=cmp_to_key(numeric_desc))
        return ', '.join(ordered) or '0'
# Demo: largestNumber on a list, then on a set literal.
nums = [-1, -2, 3, 4, 9, 2, 3, 4, 5]
s = Solution()
print(map(str, nums))  # lists keep duplicates (note: this prints the map object, not its contents)
print(s.largestNumber(nums))
nums = {1, 2, 3, 4, 9, 2, 3, 4, 5}
print(map(str, nums))  # a set literal deduplicates its elements ({...} of values is a set, not a dict)
print(s.largestNumber(nums))
# functools.lru_cache: a decorator that memoizes up to maxsize argument tuples and returns
# the cached result on repeated calls — saves time for expensive or I/O-bound functions.
@functools.lru_cache(maxsize=None)
def fib(n):
    """Return the n-th Fibonacci number; memoized so the recursion is linear."""
    return n if n < 2 else fib(n - 1) + fib(n - 2)
print([fib(n) for n in range(16)])
# [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
print(fib.cache_info())
# CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)
# @functools.total_ordering
# Given a class that defines one or more rich comparison methods, this class decorator
# supplies the rest, sparing you from writing every comparison operator by hand.
# The class must define one of __lt__(), __le__(), __gt__() or __ge__(), plus __eq__().
@functools.total_ordering
class Student:
    """Students order by (lastname, firstname), compared case-insensitively.

    Only ``__eq__`` and ``__lt__`` are written out; functools.total_ordering
    derives the remaining rich comparisons from them.
    """

    @staticmethod
    def _key(obj):
        # Case-insensitive sort key; works for any object exposing the two attributes.
        return (obj.lastname.lower(), obj.firstname.lower())

    def _is_valid_operand(self, other):
        return hasattr(other, "lastname") and hasattr(other, "firstname")

    def __eq__(self, other):
        if not self._is_valid_operand(other):
            return NotImplemented
        return self._key(self) == self._key(other)

    def __lt__(self, other):
        if not self._is_valid_operand(other):
            return NotImplemented
        return self._key(self) < self._key(other)
# functools.partial returns a new partial object which, when called, behaves like func
# invoked with the positional arguments args and keyword arguments keywords.
# Additional call-time positional arguments are appended to args; additional keyword
# arguments extend and override keywords.
from functools import partial
basetwo = partial(int, base=2)
basetwo.__doc__ = 'Convert base 2 string to an int.'
print(basetwo('10010'))  # 18
print(int('10010', base=2))  # 18
class Cell(object):
    """Demo for functools.partialmethod: liveness toggled via pre-bound setters."""

    def __init__(self):
        self._alive = False

    def set_state(self, state):
        # Normalize any truthy/falsy value to a proper bool.
        self._alive = bool(state)

    # Pre-bound variants of set_state for the two usual transitions.
    set_alive = functools.partialmethod(set_state, True)
    set_dead = functools.partialmethod(set_state, False)

    @property
    def alive(self):
        """Whether the cell is currently alive."""
        return self._alive
c = Cell()
print(c.alive) # False
c.set_alive()
print(c.alive) # True
c.set_dead()
print(c.alive) # False
# 将两个参数的 function 从左至右积累地应用到 iterable 的条目,以便将该可迭代对象缩减为单一的值。
print(functools.reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])) # 15 ((((1+2)+3)+4)+5) 的值
"""
@functools.singledispatch
将一个函数转换为 单分派 generic function。
要定义一个泛型函数,应使用 @singledispatch 装饰器进行装饰。 请注意分派是作用于第一个参数的类型,要相应地创建你的函数:
"""
from functools import singledispatch
@singledispatch
def fun(arg, verbose=False):
if verbose:
print("Let me just say,", end=" ")
print(arg)
@fun.register
def _(arg: int, verbose=False):
if verbose:
print("Strength in numbers, eh?", end=" ")
print(arg)
@fun.register
def _(arg: list, verbose=False):
if verbose:
print("Enumerate this:")
for i, elem in enumerate(arg):
print(i, elem)
@fun.register(complex)
def _(arg, verbose=False):
if verbose:
print("Better than complicated.", end=" ")
print(arg.real, arg.imag)
fun(132, True)
print(fun.registry.keys())
print(fun.registry[int])
# class functools.singledispatchmethod(func)
# 将一个方法转换为 单分派 generic function。
# 3.8 新版功能.
# class Negator:
# @functools.singledispatchmethod
# def neg(self, arg):
# raise NotImplementedError("Cannot negate a")
#
# @neg.register
# def _(self, arg: int):
# return -arg
#
# @neg.register
# def _(self, arg: bool):
# return not arg
#
# print(Negator().neg(1))
"""
functools.update_wrapper(wrapper, wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES)
更新一个 wrapper 函数以使其类似于 wrapped 函数。
可选参数为指明原函数的哪些属性要直接被赋值给 wrapper 函数的匹配属性的元组,并且这些 wrapper 函数的属性将使用原函数的对应属性来更新。
这些参数的默认值是模块级常量 WRAPPER_ASSIGNMENTS (它将被赋值给 wrapper 函数的 __module__, __name__, __qualname__, __annotations__ 和 __doc__ 即文档字符串) 以及 WRAPPER_UPDATES (它将更新 wrapper 函数的 __dict__ 即实例字典)。
"""
def wrap(func):
def call_it(*args, **kwargs):
"""wrap func: call_it"""
print('before call')
return func(*args, **kwargs)
return call_it
@wrap
def hello():
"""say hello"""
print("hello world")
from functools import update_wrapper
def wrap2(func):
def call_it(*args, **kwargs):
"""wrap func: call_it2"""
print('before call')
return func(*args, **kwargs)
return update_wrapper(call_it, func)
@wrap2
def hello2():
"""test hello"""
print('hello world2')
if __name__ == '__main__':
hello()
print(hello.__name__) # call_it 而不是输出hello
print(hello.__doc__) # wrap func: call_it 而不是 say hello
print()
hello2()
print(hello2.__name__) # hello2 这才是想要的
print(hello2.__doc__) # test hello 这才是想要的
"""
@functools.wraps(wrapped, assigned=WRAPPER_ASSIGNMENTS, updated=WRAPPER_UPDATES)
这是一个便捷函数,用于在定义包装器函数时发起调用 update_wrapper() 作为函数装饰器。
它等价于 partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated)。
"""
from functools import wraps
def my_decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
print('Calling decorated function')
return f(*args, **kwds)
return wrapper
@my_decorator
def example():
"""Docstring"""
print('Called example function')
example()
# Calling decorated function
# Called example function
print(example.__name__) # example
print(example.__doc__) # Docstring
"""
partial 对象¶
partial 对象是由 partial() 创建的可调用对象。 它们具有三个只读属性:
partial.func
一个可调用对象或函数。 对 partial 对象的调用将被转发给 func 并附带新的参数和关键字。
partial.args
最左边的位置参数将放置在提供给 partial 对象调用的位置参数之前。
partial.keywords
当调用 partial 对象时将要提供的关键字参数。
partial 对象与 function 对象的类似之处在于它们都是可调用、可弱引用的对象并可拥有属性。
但两者也存在一些重要的区别。 例如前者不会自动创建 __name__ 和 __doc__ 属性。 而且,在类中定义的 partial 对象的行为类似于静态方法,并且不会在实例属性查找期间转换为绑定方法。
"""
|
141542
|
import math
import os
import re
import numpy as np
from BluPrintTriboSys import TriboSys
from Constants import PltOpts, SubDir, TexTempl, UnitTex, Unit, PrintOpts, \
PreSol
from cartesian_plot_functions import plt_profile, plt_contact, plt_3d, \
plt_2d_scatt_line, \
plt_energy_ring_on_ring, plt_profile_approx
from generate_latex_output import get_calc_specific_latex_template
from hertz_equations import hertz_displ
from influ_matrix_management import load_influ_mat, cache_influ_mat
from solve_half_space import solve_half_space, pre_solve_half_space
from system_functions import print_it, to_preci, exit_program, save_to_matlab
class RingOnRing(TriboSys):
    """Tribosystem of a sun ring in contact with one or more planet rings.

    Orchestrates load distribution, half-space contact pressure, kinematics,
    friction-energy measures (pv_rel, e_a,kin), plotting and LaTeX reporting.
    """
    def __init__(self, num_planets, sun, planet, global_force,
                 setup_name='ring-on-ring'):
        """Store the contact bodies and pre-declare result attributes.

        The attributes initialised to None below are populated later by the
        calc_* methods; they are declared here to document the instance layout.
        """
        super().__init__(setup_name, global_force, None)
        self.num_planets = num_planets
        self.sun = sun
        self.planet = planet
        self.sun.norm_forces = None
        self.init_force = None
        self.planet_slip = None
        self.rot_velocity = None
        self.rot_velocity2 = None
        self.sun_rot_vel = None
        self.planet_rot_vel = None
        self.sliding_vel = None
        self.sun.press = None
        self.sun.max_press = None
        self.sun.rot_vel = None
        self.planet.rot_vel = None
        self.sun.omega = None
        self.planet.omega = None
        self.sun.vel = None
        self.planet.vel = None
        self.slip = None
        self.rel_vel = None
        self.pv = None
        self.influ_mat_db_1 = None
        self.press_zone_len = None
    def calc_load_distribution(self, ui=None, res_dir=None):
        """Calculate load distribution"""
        print_it("calculating load distribution")
        # The global force is shared equally among all planets.
        self.sun.norm_forces = np.multiply(np.ones(self.num_planets),
                                           self.global_force / self.num_planets)
        self.init_force = self.global_force
    def get_grid_size(self, ui, res_dir):
        """Determine grid size by running (quick) simulation with simplified
        contact bodies"""
        print_it('determining grid size', PrintOpts.lvl1.value)
        self.sun.simple_clone()
        self.sun.clone.make_profile(PreSol.res_x.value, PreSol.res_y.value,
                                    self.init_force)
        self.planet.simple_clone()
        self.planet.clone.make_slave_to(self.sun.clone)
        # Hertzian displacement of the simplified clones seeds the iteration.
        init_displ = hertz_displ(self.sun.clone.e, self.planet.e,
                                 self.sun.clone.ny, self.planet.ny,
                                 self.sun.clone.r_hertz_x,
                                 self.sun.clone.r_hertz_y,
                                 self.planet.clone.r_hertz_x,
                                 self.planet.clone.r_hertz_y,
                                 self.sun.norm_forces[0])
        too_many_els_in_y = 1
        too_many_els_in_x = 1
        contact_width_y = 0.05
        contact_width_x = 0.05
        # Shrink/grow the trial contact patch until the pressurised zone fills
        # the grid minus a 2-element margin in each direction.
        while too_many_els_in_y != 0 or \
                too_many_els_in_x != 0:
            self.sun.clone.make_profile(self.sun.clone.res_x,
                                        self.sun.clone.res_y, self.init_force,
                                        contact_width=contact_width_y,
                                        contact_length=contact_width_x)
            self.planet.clone.make_slave_to(self.sun.clone)
            pressure, init_displ = \
                pre_solve_half_space(self.sun.clone.profile,
                                     self.planet.clone.profile,
                                     self.sun.clone.x_axis,
                                     self.sun.clone.y_axis,
                                     self.sun.clone.res_x, self.sun.clone.res_y,
                                     self.sun.clone.delta_x,
                                     self.sun.clone.delta_y, self.sun.clone.e,
                                     self.planet.clone.e, self.sun.clone.ny,
                                     self.planet.clone.ny,
                                     self.sun.norm_forces[0],
                                     init_displ=init_displ, print_prog=False)
            # Count pressurised elements along the centre row/column.
            pressure_els_y = sum(
                pressure[math.floor(self.sun.clone.res_y / 2), :] > 0)
            too_many_els_in_y = self.sun.clone.res_y - pressure_els_y - 2
            if too_many_els_in_y:
                contact_width_y += -np.sign(
                    too_many_els_in_y) * contact_width_y / 25
            pressure_els_x = sum(
                pressure[:, math.floor(self.sun.clone.res_x / 2)] > 0)
            too_many_els_in_x = self.sun.clone.res_x - pressure_els_x - 2
            if too_many_els_in_x:
                contact_width_x += -np.sign(
                    too_many_els_in_x) * contact_width_x / 25
        # Apply the converged patch size to the real (non-clone) bodies.
        self.sun.make_profile(self.sun.res_x, self.sun.res_y, self.init_force,
                              contact_width=contact_width_y,
                              contact_length=contact_width_x)
        self.planet.make_slave_to(self.sun)
        return init_displ
    def calc_contact_pressure(self, ui=None, res_dir=None):
        """Calculate contact pressure distribution between sun and planet
        ring(s)"""
        print_it('calculating 1 pressure distribution')
        init_displ = self.get_grid_size(ui, res_dir)
        # Influence matrices are cached on disk between runs.
        [self.influ_mat_db_1] = load_influ_mat(ui, res_dir, 1)
        print_it('solving first half space', PrintOpts.lvl1.value)
        self.sun.press, self.influ_mat_db_1 = \
            solve_half_space(self.sun.profile, self.planet.profile,
                             self.sun.x_axis, self.sun.y_axis, self.sun.res_x,
                             self.sun.res_y, self.sun.delta_x, self.sun.delta_y,
                             self.sun.e, self.planet.e,
                             self.sun.ny, self.planet.ny,
                             self.sun.norm_forces[0], res_dir,
                             init_displ=init_displ,
                             influ_mat_db=self.influ_mat_db_1)
        cache_influ_mat(ui, [self.influ_mat_db_1], res_dir)
        # Per-row pressure maxima, used later for pv_rel.
        self.sun.max_press = np.amax(self.sun.press, axis=1)
        dat_dict = dict(x_axis=self.sun.x_axis,
                        y_axis=self.sun.y_axis,
                        contact_pressure=self.sun.press)
        save_to_matlab(dat_dict, res_dir, 'pressure_field')
    def calc_kinematics(self, rot_vel1, rot_vel2, ui=None, res_dir=None):
        """Calculate tribosystem kinematics based on rotational velocities of
        sun and planet(s)"""
        print_it("calculating kinematics")
        # NOTE(review): rot_vel appears to be in rev/min (divided by 60 to get
        # rev/s below) — confirm units against the callers.
        self.sun.rot_vel = rot_vel1
        self.planet.rot_vel = rot_vel2
        self.sun.omega = self.sun.rot_vel / 60
        self.planet.omega = self.planet.rot_vel / 60
        self.sun.vel = self.sun.diameter * math.pi * self.sun.omega
        self.planet.vel = self.planet.diameter * math.pi * self.planet.omega
        self.slip = (self.sun.vel - self.planet.vel) / self.sun.vel
        self.rel_vel = np.ones(self.sun.res_x) * (
            self.sun.vel - self.planet.vel)
        self.sun.footpr_vel = \
            2 * math.pi * self.sun.diameter / 2 * self.sun.omega
        self.planet.footpr_vel = \
            2 * math.pi * self.planet.diameter / 2 * self.planet.omega
        try:
            # Time for the contact footprint to traverse one grid element.
            self.sun.overroll_t_incr = self.sun.delta_y / self.sun.footpr_vel
            self.planet.overroll_t_incr = \
                self.planet.delta_y / self.planet.footpr_vel
        except ZeroDivisionError:
            exit_program(
                'rotational velocities of sun and planet must not be 0')
        # Length of the pressurised zone per row of the pressure field.
        self.press_zone_len = (self.sun.press > 0).sum(1) * self.sun.delta_y
        self.sun.overroll_t = np.divide(self.press_zone_len,
                                        self.sun.footpr_vel)
        self.planet.overroll_t = np.divide(self.press_zone_len,
                                           self.planet.footpr_vel)
        # Dwell time between successive over-rollings; the sun sees
        # num_planets contacts per revolution, each planet sees one.
        self.sun.no_overroll_t = np.divide(
            (2 * math.pi * (self.sun.diameter / 2) - self.num_planets *
             self.press_zone_len) / self.num_planets, self.sun.footpr_vel)
        self.planet.no_overroll_t = np.divide(
            (2 * math.pi * (self.planet.diameter / 2) - self.press_zone_len),
            self.planet.footpr_vel)
    def calc_pv(self, ui=None, res_dir=None):
        """Calculate product of local maximum pressure and local maximum
        relative velocity"""
        print_it("calculating pv_rel")
        # Division by 1000 converts to the report unit (see Unit.pvrel).
        self.pv = np.multiply(abs(self.rel_vel), self.sun.max_press) / 1000
        dat_dict = dict(x_axis=self.sun.x_axis,
                        pv_rel=self.pv)
        save_to_matlab(dat_dict, res_dir, 'pv-rel')
    def calc_e_akin(self, ui=None, res_dir=None):
        """"Calculate the kinetic friction energy accumulation in W per m^2"""
        print_it("calculating e_a,kin")
        pv_local = np.multiply(self.sun.press.sum(1), self.rel_vel)
        # Energy accumulated during one over-rolling, averaged over the
        # body-specific dwell time between over-rollings.
        self.sun.e_akin = np.absolute(
            np.divide(np.multiply(pv_local, self.sun.overroll_t_incr),
                      self.sun.no_overroll_t)) / 1000
        self.planet.e_akin = np.absolute(
            np.divide(np.multiply(pv_local, self.planet.overroll_t_incr),
                      self.planet.no_overroll_t)) / 1000
    def plot_it(self, ui=None, res_dir=None):
        """Orchestrate output plot generation"""
        print_it("plotting results")
        plt_profile(self.sun, PltOpts.DD.value, res_dir, SubDir.profiles.value)
        plt_profile(self.sun, PltOpts.DDD.value, res_dir, SubDir.profiles.value)
        plt_profile(self.planet, PltOpts.DD.value, res_dir,
                    SubDir.profiles.value)
        plt_profile(self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.profiles.value)
        plt_profile_approx(res_dir, SubDir.profiles.value)
        plt_contact(self.sun, self.planet, PltOpts.DD.value, res_dir,
                    SubDir.contacts.value)
        plt_contact(self.sun, self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.contacts.value)
        plt_3d(self.sun.x_axis, self.sun.y_axis, self.sun.press,
               self.sun.x_label, self.sun.y_label, 'pressure in MPa',
               'contact_pressure_sun', res_dir, SubDir.pressures.value,
               'contact_pressure_sun')
        plt_2d_scatt_line(self.sun.x_axis, self.pv, self.sun.x_axis, self.pv,
                          self.sun.x_label,
                          'pv_rel in {}'.format(Unit.pvrel.value), 'pv_rel',
                          res_dir, SubDir.energy.value, 'pv_rel')
        plt_2d_scatt_line(self.sun.x_axis, self.sun.e_akin, self.sun.x_axis,
                          self.sun.e_akin, self.sun.x_label,
                          'e_akin in {}'.format(Unit.eakin.value), 'e_akin',
                          res_dir, SubDir.energy.value, 'sun.e_akin')
        plt_2d_scatt_line(self.planet.x_axis, self.planet.e_akin,
                          self.planet.x_axis, self.planet.e_akin,
                          self.planet.x_label,
                          'e_akin in {}'.format(Unit.eakin.value), 'e_akin',
                          res_dir,
                          SubDir.energy.value, 'planet.e_akin')
        plt_energy_ring_on_ring(self, res_dir, SubDir.energy.value,
                                'e-akin-vs-pv-rel')
    def generate_latex_output(self, calc_spec_tex_file_handle, sim, ui=None,
                              res_dir=None):
        """Generate calculation-specific part of the LaTeX output file"""
        average_pressure = np.mean(self.sun.press[self.sun.press > 0])
        numeric_output_data = [
            ('pressure, max.', to_preci(np.amax(self.sun.press), 4),
             UnitTex.pressure.value, 'unverified'),
            ('pressure, av.', to_preci(average_pressure, 4),
             UnitTex.pressure.value, 'unverified'),
            ('e_a,kin sun, max.', to_preci(np.amax(self.sun.e_akin), 4),
             UnitTex.eakin.value, 'unverified'),
            ('e_a,kin planet, max.', to_preci(np.amax(self.planet.e_akin), 4),
             UnitTex.eakin.value, 'unverified'),
            ('pv_rel, max.', to_preci(np.amax(self.pv), 4),
             UnitTex.pvrel.value, 'unverified'),
            ('contact area', to_preci(self.sun.get_area(self.sun.press), 4),
             UnitTex.area.value, 'unverified')]
        table_calc_summary = []
        for key, value, unit, status in sorted(numeric_output_data):
            # Escape underscores for LaTeX output.
            # NOTE(review): '\_' in a plain string literal is an invalid escape
            # (SyntaxWarning on Python 3.12+); prefer r'\_'. The replacement
            # text itself is correct.
            table_calc_summary.append(
                (re.sub('_', '\_', key), value, unit, status))
        latex_variables = {'table_calc_summary': table_calc_summary,
                           'contact_plot1': '{}{}contact1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/'),
                           'pressure_plot1': '{}{}pressure1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/'),
                           'energy_plot1': '{}{}energy1.png'.format(
                               SubDir.tex_figs_rel_to_tex_file.value, '/')}
        template_calc_specific = get_calc_specific_latex_template(
            TexTempl.RingOnRing.value, sim)
        with open(calc_spec_tex_file_handle, 'w') as f:
            f.write(template_calc_specific.render(latex_variables))
    def generate_latex_figures(self, ui=None, res_dir=None):
        """Generate calculation-specific figures for LaTeX report"""
        plt_contact(self.sun, self.planet, PltOpts.DDD.value, res_dir,
                    SubDir.tex_figs.value, 'contact1')
        plt_profile_approx(res_dir, SubDir.tex_figs.value)
        plt_3d(self.sun.x_axis, self.sun.y_axis, self.sun.press,
               self.sun.x_label, self.sun.y_label, 'pressure in MPa',
               'contact_pressure_sun', res_dir, SubDir.tex_figs.value,
               'pressure1')
        plt_energy_ring_on_ring(self, res_dir, SubDir.tex_figs.value, 'energy1')
|
141567
|
import locale
# en_US locale so locale.atoi() can parse comma-grouped numbers like "1,234".
locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
import re
import dateparser
try:
    from local_extractor.utils import common_utils
    from local_extractor.utils.table_concatenation import concatenate_tables
except ImportError:
    # Fallback when run as a standalone script: put the package root on
    # sys.path and import the utils flat.
    import sys, os, pathlib
    path = pathlib.Path(__file__).absolute().parents[2]
    path = os.path.join(path, 'local_extractor')
    if path not in sys.path:
        sys.path.insert(0, path)
    from utils import common_utils
    from utils.table_concatenation import concatenate_tables
class KeralaExtractor(object):
    """Extracts COVID-19 statistics from Kerala government PDF bulletins.

    Each extract_* method locates a table in the parsed PDF (via keyword
    search) and returns a dict or list of dicts of integer counts.
    """
    def __init__(self, date, report_fpath):
        """Store the bulletin date (ISO string) and path to the PDF report."""
        super().__init__()
        self.date = date
        self.report_fpath = report_fpath
        self.list_of_districts = [
            "Thiruvananthapuram",
            "Kollam",
            "Pathanamthitta",
            "Alappuzha",
            "Kottayam",
            "Idukki",
            "Ernakulam",
            "Thrissur",
            "Palakkad",
            "Malappuram",
            "Kozhikode",
            "Wayanad",
            "Kannur",
            "Kasaragod"
        ]
    def __extract_district_tables(self, datatable, keyidxmap=None, major_key=0, find_total=False):
        """Build one row per district from *datatable*.

        keyidxmap maps output field names to column indices; major_key is the
        column holding the district name. With find_total=True a 'total' row
        is additionally read from the table's last line. Returns None when no
        table was found.
        """
        if datatable is not None:
            result = []
            for district in self.list_of_districts:
                keymap = {district: [district.lower()]}
                new_result = {
                    'date' : self.date,
                    'district' : district,
                }
                for key in keyidxmap:
                    minor_key = keyidxmap[key]
                    try:
                        df_dict = common_utils.convert_df_to_dict(datatable, key_idx=major_key, val_idx=minor_key)
                        new_result[key] = common_utils.extract_info_from_table_by_keywords(df_dict, keymap).get(district, None)
                        new_result[key] = locale.atoi(new_result[key])
                    # best-effort: a missing or non-numeric cell leaves the
                    # field out rather than failing the whole table
                    except: pass
                result.append(new_result)
            if find_total:
                total_key='total'
                keymap = {total_key: [total_key]}
                # The 'Total' row is assumed to be the last line of the table.
                datatable = datatable.iloc[-1:]
                new_result = {
                    'date' : self.date,
                    'district' : total_key,
                }
                for key in keyidxmap:
                    minor_key = keyidxmap[key]
                    try:
                        df_dict = common_utils.convert_df_to_dict(datatable, key_idx=major_key, val_idx=minor_key)
                        new_result[key] = common_utils.extract_info_from_table_by_keywords(df_dict, keymap).get(total_key, None)
                        new_result[key] = locale.atoi(new_result[key])
                    except: pass
                result.append(new_result)
            return result
    def __extract_generic_datatable(self, datatable, keymap, transpose=False):
        """Extract a keyword-mapped dict of integer counts from a two-column
        table; returns None when no table was found."""
        if datatable is not None:
            if transpose:
                datatable = datatable.transpose()
            df_dict = common_utils.convert_df_to_dict(datatable, key_idx=0, val_idx=1)
            result = common_utils.extract_info_from_table_by_keywords(df_dict, keymap)
            for key in result.keys():
                result[key] = locale.atoi(result[key])
            result['date'] = self.date
            return result
    def __extract_generic_datatables(self, datatables, key, keymap, transpose=False):
        """Apply __extract_generic_datatable to datatables[key] when any
        matching tables were found."""
        if datatables:
            return self.__extract_generic_datatable(datatables[key], keymap, transpose)
    def extract_cumulative_summary_t_minus_one(self, tables):
        """Summary figures reported for the previous day (first match)."""
        keywords = {'positive', 'case', 'recovered', 'quarantine', 'isolation', 'home', 'hospital', 'death'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'positive_cases': ['positive', 'cases'],
            'recovered': ['recovered'],
            'new_persons_in_surveillance': ['new', 'person', 'quarantine', 'isolation'],
            'new_persons_in_home_ins_isolation': ['new', 'person', 'home', 'quarantine'],
            'new_persons_in_hospital_isolation': ['new', 'person', 'hospital', 'isolation'],
            # 'daily_deaths': ['deaths'],
            'deaths_declared_as_per_appeal': ['deaths declared as per appeal'],
            'pending_deaths': ['pending deaths']
        }
        result = self.__extract_generic_datatables(datatables, 0, keymap, transpose=True)
        # TODO: Cheap fix. Need to modify
        # 'deaths' keyword is ambiguous via keymap, so read it positionally
        # from row 5 of the table instead.
        result['daily_deaths'] = None
        tbl = datatables[0]
        daily_deaths_header = tbl[5][0]
        daily_deaths_val = tbl[5][1]
        if 'deaths' in daily_deaths_header.lower():
            result['daily_deaths'] = locale.atoi(daily_deaths_val)
        return result
    def extract_daily_summary(self, tables):
        """Summary figures for the bulletin day (second matching table)."""
        keywords = {'positive', 'case', 'recovered', 'quarantine', 'isolation', 'home', 'hospital', 'death'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'positive_cases': ['positive', 'cases'],
            'recovered': ['recovered'],
            'new_persons_in_surveillance': ['new', 'person', 'quarantine', 'isolation'],
            'new_persons_in_home_ins_isolation': ['new', 'person', 'home', 'quarantine'],
            'new_persons_in_hospital_isolation': ['new', 'person', 'hospital', 'isolation'],
            # 'daily_deaths': ['deaths'],
            'deaths_declared_as_per_appeal': ['deaths declared as per appeal'],
            'pending_deaths': ['pending deaths']
        }
        result = self.__extract_generic_datatables(datatables, 1, keymap, transpose=True)
        # TODO: Cheap fix. Need to modify
        # positional read of the ambiguous 'deaths' row (see above).
        result['daily_deaths'] = None
        tbl = datatables[1]
        daily_deaths_header = tbl[5][0]
        daily_deaths_val = tbl[5][1]
        if 'deaths' in daily_deaths_header.lower():
            result['daily_deaths'] = locale.atoi(daily_deaths_val)
        return result
    def extract_cumulative_summary(self, tables):
        """Cumulative totals to date (third matching table)."""
        keywords = {'positive', 'case', 'recovered', 'quarantine', 'isolation', 'home', 'hospital', 'death'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'total_positive_cases': ['positive', 'cases'],
            'active_cases': ['active', 'cases'],
            'total_recovered': ['recovered'],
            'total_persons_in_surveillance': ['persons', 'quarantine', 'isolation'],
            'total_persons_in_home_ins_isolation': ['persons', 'home', 'institution', 'quarantine'],
            'total_persons_in_hospital_isolation': ['persons', 'hospital'],
            # 'total_deaths': ['deaths'],
            'total_deaths_declared_as_per_appeal': ['deaths declared as per appeal'],
            'total_pending_deaths': ['pending deaths']
        }
        result = self.__extract_generic_datatables(datatables, 2, keymap, transpose=True)
        # TODO: Cheap fix. Need to modify
        # positional read of the ambiguous 'deaths' row (row 6 here).
        result['total_deaths'] = None
        tbl = datatables[2]
        daily_deaths_header = tbl[6][0]
        daily_deaths_val = tbl[6][1]
        if 'deaths' in daily_deaths_header.lower():
            result['total_deaths'] = locale.atoi(daily_deaths_val)
        return result
    def extract_district_case_info(self, tables):
        """Per-district declared positive/negative/admitted counts."""
        keywords = {'positive cases declared today', 'declared negative today'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keyidxmap = {
            'declared_positive': 1,
            'declared_negative': 2,
            'positive_cases_admitted': 3,
            'other_districts': 4
        }
        return self.__extract_district_tables(datatable, keyidxmap, major_key=0, find_total=True)
    def extract_district_death_info(self, tables):
        """Per-district death counts (not present in all bulletins)."""
        keywords = {'no of deaths reported daily', 'district'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keyidxmap = {
            'deaths_reported': 1,
            'death_through_appeal': 2,
            'pending_deaths': 3,
            'death_cases_approved': 4
        }
        return self.__extract_district_tables(datatable, keyidxmap, major_key=0, find_total=True)
    def extract_contact_travel_cumulative(self, tables):
        """Cumulative travel-/contact-history breakdown (first match)."""
        keywords = {'international', 'interstate', 'travel', 'contact', 'history'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'total_cases': ['total cases'],
            'history_of_travel': ['history', 'international/interstate', 'travel'],
            'history_of_contact': ['history', 'contact']
        }
        return self.__extract_generic_datatables(datatables, 0, keymap)
    def extract_contact_travel_new(self, tables):
        """Daily travel-/contact-history breakdown (second match)."""
        keywords = {'international', 'interstate', 'travel', 'contact', 'history'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'total_cases': ['total cases'],
            'history_of_travel': ['history', 'international/interstate', 'travel'],
            'history_of_contact': ['history', 'contact'],
            'no_history': ['no', 'history', 'travel']
        }
        return self.__extract_generic_datatables(datatables, 1, keymap)
    def extract_individual_death_info(self, tables):
        """Per-person death records (district, name, age, gender, date)."""
        def convert_header_text_to_colname(datadict, keymap):
            # Map free-text table headers onto canonical column names; the
            # first matching header wins per column name.
            datadict_new = {}
            processed_colnames = []
            for text, val in datadict.items():
                for colname, keys in keymap:
                    if False in [key in text.lower() for key in keys] or colname in processed_colnames:
                        continue
                    datadict_new[colname] = val
                    processed_colnames.append(colname)
            return datadict_new
        keywords = {'district', 'age', 'date', 'death'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        if datatable is None:
            return None
        # convert dataframe into a dictionary
        datalist = [list(row) for _, row in datatable.iterrows()]
        datadict = {}
        cols = datalist[0]
        for rownum in range(1, len(datalist)):
            for colnum, col in enumerate(cols):
                if col not in datadict:
                    datadict[col] = []
                datadict[col].append(datalist[rownum][colnum])
        header_keymap = [
            ('district', ['district']),
            ('name', ['name']),
            ('place', ['place']),
            ('age', ['age']),
            ('gender', ['gender']),
            ('gender', ['sex']),
            ('death_date', ['date', 'death'])
        ]
        datadict = convert_header_text_to_colname(datadict, header_keymap)
        result = []
        cols = list(datadict.keys())
        n = len(datadict[cols[0]])
        for i in range(n):
            try:
                row = {col: datadict[col][i] for col in cols}
                row['date'] = self.date
                if 'death_date' in row:
                    date = dateparser.parse(row['death_date'].strip(), ['%d-%m-%Y'])
                    row['death_date'] = f'{date.year}-{date.month:02d}-{date.day:02d}'
                if 'age' in row:
                    row['age'] = locale.atoi(row['age'].strip())
            except:
                # best-effort: silently drop rows with unparseable fields
                pass
            else:
                result.append(row)
        return result
    def extract_critical_patients(self, tables):
        """Counts of patients in ICUs and on ventilator support."""
        keywords = {'icus', 'ventilator', 'support', 'patient'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keymap = {
            'patients_in_icu': ['icus'],
            'patients_on_ventillation': ['ventilator', 'support']
        }
        return self.__extract_generic_datatable(datatable, keymap)
    def extract_cumulative_tests(self, tables):
        """Cumulative testing figures by test type (first match)."""
        keywords = {'samples', 'sent', 'naat', 'antigen'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'samples_sent': ['samples sent'],
            'routine_sentinel_samples_pcr': ['sentinel'],
            'airport_surveillance': ['surveillance'],
            'CB_NAAT': ['cb', 'naat'],
            'True_NAT': ['true', 'nat'],
            'POCT_PCR': ['poct', 'pcr'],
            'RT_LAMP': ['lamp'],
            'Antigen_Assay': ['assay']
        }
        return self.__extract_generic_datatables(datatables, 0, keymap, transpose=True)
    def extract_new_tests(self, tables):
        """Daily testing figures by test type (second match)."""
        keywords = {'samples', 'sent', 'naat', 'antigen'}
        datatables = common_utils.find_all_tables_by_keywords(tables, keywords)
        keymap = {
            'samples_sent': ['samples sent'],
            'routine_sentinel_samples_pcr': ['sentinel'],
            'airport_surveillance': ['surveillance'],
            'CB_NAAT': ['cb', 'naat'],
            'True_NAT': ['true', 'nat'],
            'POCT_PCR': ['poct', 'pcr'],
            'RT_LAMP': ['lamp'],
            'Antigen_Assay': ['assay']
        }
        return self.__extract_generic_datatables(datatables, 1, keymap, transpose=True)
    def extract_surveillance_info(self, tables):
        """Per-district surveillance/observation/hospitalization counts."""
        keywords = {'quarantine', 'observation', 'isolation', 'district'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keyidxmap = {
            'cumulative_under_observation': 1,
            'cumulative_under_home_isolation': 2,
            'cumulative_hospitalized': 3,
            'new_hospitalized': 4
        }
        return self.__extract_district_tables(datatable, keyidxmap, major_key=0, find_total=True)
    def extract_travel_surveillance(self, tables):
        """Arrivals under surveillance by travel mode."""
        keywords = {'travel', 'mode'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keymap = {
            'international_cumulative': ['international'],
            'domestic_cumulative': ['domestic'],
            'total': ['total']
        }
        return self.__extract_generic_datatable(datatable, keymap)
    def extract_psychosocial_support(self, tables):
        """Psychosocial support call statistics."""
        keywords = {'psychosocial', 'children', 'alone'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        keymap = {
            'psychosocial_workers': ['psychosocial', 'workers', 'no'],
            'calls_to_persons_in_surveillance': ['quarantine', 'isolation'],
            'followup_calls': ['follow', 'up', 'calls'],
            'post_covid_calls': ['post', 'covid', 'calls'],
            'calls_special': ['migrant', 'alone', 'mental', 'illness', 'different', 'able'],
            'calls_to_school_children': ['school', 'children'],
            'calls_to_health_care_workers': ['health', 'care', 'workers'],
            'calls_received_helpline': ['calls', 'received', 'helpline'],
            'calls_total': ['all categories']
        }
        return self.__extract_generic_datatable(datatable, keymap)
    def extract_district_abstract(self, tables):
        """Per-district LSG/ward abstract (WIPR table)."""
        keywords = {'wipr (> 10)'}
        datatable = common_utils.find_table_by_keywords(tables, keywords)
        if datatable is not None:
            # The first two rows are a multi-line header; skip them.
            datatable = datatable.iloc[2:]
        keyidxmap = {
            'LSG': 2,
            'Wards': 3
        }
        return self.__extract_district_tables(datatable, keyidxmap, major_key=1, find_total=True)
    def extract(self):
        """Parse the bulletin PDF and return all extracted datasets keyed by
        dataset name. Bulletins before 2020-06-01 are skipped."""
        # do not parse bulletins prior to June 1st, 2020
        if self.date < "2020-06-01":
            return dict()
        all_tables_camelot = common_utils.get_tables_from_pdf(library='camelot', pdf_fpath=self.report_fpath, split_text=False)
        # Tables of identical width spanning page breaks are re-joined.
        all_tables_camelot_joined = concatenate_tables.concatenate_tables(all_tables_camelot, 'same-table-width')
        result = {
            'contact-travel-cumulative': self.extract_contact_travel_cumulative(all_tables_camelot),
            'contact-travel-new': self.extract_contact_travel_new(all_tables_camelot),
            'critical-patients': self.extract_critical_patients(all_tables_camelot),
            'cumulative-summary-t-minus-one': self.extract_cumulative_summary_t_minus_one(all_tables_camelot),
            'cumulative-summary': self.extract_cumulative_summary(all_tables_camelot),
            'daily-summary': self.extract_daily_summary(all_tables_camelot),
            'district-abstract': self.extract_district_abstract(all_tables_camelot_joined),
            'district-case-info': self.extract_district_case_info(all_tables_camelot),
            'district-death-info': self.extract_district_death_info(all_tables_camelot), # not available in all bulletins (29-Oct-2021)
            'individual-death-info': self.extract_individual_death_info(all_tables_camelot_joined),
            'psychosocial-support': self.extract_psychosocial_support(all_tables_camelot),
            'surveillance-info': self.extract_surveillance_info(all_tables_camelot),
            'testing-cumulative': self.extract_cumulative_tests(all_tables_camelot),
            'testing-new': self.extract_new_tests(all_tables_camelot),
            'travel-surveillance': self.extract_travel_surveillance(all_tables_camelot),
        }
        return result
if __name__ == '__main__':
    # Ad-hoc manual smoke test against a local bulletin PDF.
    # NOTE(review): the `date` value does not match the date in the PDF
    # filename (2021-12-20) — confirm which is intended.
    date = '2021-10-29'
    path = "/home/mayankag/covid19-india-data/localstore/bulletins/KL/KL-Bulletin-2021-12-20.pdf"
    obj = KeralaExtractor(date, path)
    from pprint import pprint
    pprint(obj.extract())
|
141624
|
from typing import Dict
import numpy as np
from gym import spaces
from stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper
class ObsDictWrapper(VecEnvWrapper):
    """
    Wrapper for a VecEnv which overrides the observation space for Hindsight Experience Replay to support dict observations.
    :param env: The vectorized environment to wrap.
    """
    def __init__(self, venv: VecEnv):
        super(ObsDictWrapper, self).__init__(venv, venv.observation_space, venv.action_space)
        self.venv = venv
        self.spaces = list(venv.observation_space.spaces.values())
        # get dimensions of observation and goal
        if isinstance(self.spaces[0], spaces.Discrete):
            # A Discrete space is a single scalar.
            self.obs_dim = 1
            self.goal_dim = 1
        else:
            self.obs_dim = venv.observation_space.spaces["observation"].shape[0]
            self.goal_dim = venv.observation_space.spaces["achieved_goal"].shape[0]
        # new observation space with concatenated observation and (desired) goal
        # for the different types of spaces
        if isinstance(self.spaces[0], spaces.Box):
            low_values = np.concatenate(
                [venv.observation_space.spaces["observation"].low, venv.observation_space.spaces["desired_goal"].low]
            )
            high_values = np.concatenate(
                [venv.observation_space.spaces["observation"].high, venv.observation_space.spaces["desired_goal"].high]
            )
            self.observation_space = spaces.Box(low_values, high_values, dtype=np.float32)
        elif isinstance(self.spaces[0], spaces.MultiBinary):
            total_dim = self.obs_dim + self.goal_dim
            self.observation_space = spaces.MultiBinary(total_dim)
        elif isinstance(self.spaces[0], spaces.Discrete):
            # Two discrete components become one MultiDiscrete space.
            dimensions = [venv.observation_space.spaces["observation"].n, venv.observation_space.spaces["desired_goal"].n]
            self.observation_space = spaces.MultiDiscrete(dimensions)
        else:
            raise NotImplementedError(f"{type(self.spaces[0])} space is not supported")
    def reset(self):
        # Delegate directly to the wrapped environment.
        return self.venv.reset()
    def step_wait(self):
        return self.venv.step_wait()
    @staticmethod
    def convert_dict(
        observation_dict: Dict[str, np.ndarray], observation_key: str = "observation", goal_key: str = "desired_goal"
    ) -> np.ndarray:
        """
        Concatenate observation and (desired) goal of observation dict.
        :param observation_dict: Dictionary with observation.
        :param observation_key: Key of observation in dictionary.
        :param goal_key: Key of (desired) goal in dictionary.
        :return: Concatenated observation.
        """
        return np.concatenate([observation_dict[observation_key], observation_dict[goal_key]], axis=-1)
|
141649
|
import time
import sys
import board
import busio
import digitalio
from adafruit_mcp9600 import MCP9600
SENSOR_ADDR = 0X67  # I2C address of the MCP9600 thermocouple amplifier
i2c = busio.I2C(board.SCL, board.SDA,frequency=200000)
try:
    sensor = MCP9600(i2c,SENSOR_ADDR,"K")  # K-type thermocouple
except ValueError as e:
    print(e)
    print("Unable to connect to the thermocouple sensor.")
    sys.exit(1)
# Digital output on D4 switches the oven on/off (presumably via a relay/SSR —
# confirm wiring).
oven = digitalio.DigitalInOut(board.D4)
oven.direction = digitalio.Direction.OUTPUT
def oven_control(enable=False):
    """Switch the oven heater output on (True) or off (False)."""
    #board.D4
    oven.value = enable
check_temp = 100  # target calibration temperature in degrees C
print("This program will determine calibration settings ")
print("for your oven to use with the EZ Make Oven.\n\n")
for i in range(10):
    print("Calibration will start in %d seconds..." % (10-i))
    time.sleep(1)
print("Starting...")
print("Calibrating oven temperature to %d C" % check_temp)
# Phase 1: heat until the sensor first reaches check_temp, then cut power.
finish = False
oven_control(True)
maxloop=300  # safety timeout: give up after ~300 s of heating
counter = 0
while not finish:
    time.sleep(1)
    counter += 1
    current_temp = sensor.temperature
    print("%.02f C" % current_temp)
    if current_temp >= check_temp:
        finish = True
        oven_control(False)
    if counter >= maxloop:
        raise Exception("Oven not working or bad sensor")
# Phase 2: with power off, measure how far and how long the temperature
# keeps rising (thermal lag) until it starts to fall.
print("checking oven lag time and temperature")
finish = False
start_time = time.monotonic()
start_temp = sensor.temperature
last_temp = start_temp
while not finish:
    time.sleep(1)
    current_temp = sensor.temperature
    print(current_temp)
    if current_temp <= last_temp:
        finish = True
    last_temp = current_temp
lag_temp = last_temp - check_temp  # overshoot above the target, in C
lag_time = int(time.monotonic() - start_time)  # seconds of continued rise
print("** Calibration Results **")
print("Modify config.json with these values for your oven:")
print("calibrate_temp:", lag_temp)
print("calibrate_seconds:",lag_time)
|
141657
|
import time
# Print a greeting, then echo the copypasta one word at a time with a
# 0.1-second pause between words.
print("OwO What's This!!!")
print("Here's my favorite activity in Japoneeese: プログラミング (yes google translate is good)")
copypasta = "Rawr x3 nuzzles how are you pounces on you you're so warm o3o notices you have a bulge o: someone's happy ;) nuzzles your necky wecky~ murr~ hehehe rubbies your bulgy wolgy you're so big :oooo rubbies more on your bulgy wolgy it doesn't stop growing -///- kisses you and lickies you"
for word in copypasta.split():
    time.sleep(0.1)
    print(word)
|
141723
|
from helium._impl import TextImpl
from helium._impl.selenium_wrappers import WebDriverWrapper
from tests.api import BrowserAT
class TextImplTest(BrowserAT):
    """Checks TextImpl's generic search-text XPath against a fixture page."""
    def get_page(self):
        return 'test_text_impl.html'
    def test_empty_search_text_xpath(self):
        # Build the XPath TextImpl uses when no search text is supplied,
        # then collect the innerHTML of every element it matches.
        impl = TextImpl(WebDriverWrapper(self.driver))
        search_xpath = impl._get_search_text_xpath()
        matched = self.driver.find_elements_by_xpath(search_xpath)
        found = sorted(el.get_attribute('innerHTML') for el in matched)
        self.assertEqual(
            ["A paragraph", "A paragraph inside a div",
             "Another paragraph inside the div"],
            found
        )
|
141758
|
from hijri_converter import helpers
def test_julian_to_ordinal():
    # Known Julian Day Number / proleptic-Gregorian-ordinal pair.
    assert helpers.jdn_to_ordinal(2447977) == 726552
def test_ordinal_to_julian():
    # Inverse of the conversion above must round-trip.
    assert helpers.ordinal_to_jdn(726552) == 2447977
def test_julian_to_reduced_julian():
    # Reduced Julian Day = JDN - 2400000.
    assert helpers.jdn_to_rjd(2456087) == 56087
def test_reduced_julian_to_julian():
    # Inverse: RJD + 2400000 = JDN.
    assert helpers.rjd_to_jdn(56087) == 2456087
|
141759
|
import ctypes
import pytest
import pyradamsa
import sys
import unittest
def test_lib_present():
    # The bundled radamsa shared library must be discoverable.
    assert len(pyradamsa.Radamsa.lib_path()) > 0, 'library not found'
def test_lib_symbols():
    # The shared library must export the expected C entry points.
    lib = ctypes.CDLL(pyradamsa.Radamsa.lib_path())
    assert hasattr(lib, 'init')
    assert hasattr(lib, 'radamsa')
    assert hasattr(lib, 'radamsa_inplace')
def test_default_attrs():
    # Constructor defaults and positional/keyword argument handling.
    assert pyradamsa.Radamsa().mut_offset == 4096
    r = pyradamsa.Radamsa(17, 2048)
    assert r.seed == 17
    assert r.mut_offset == 2048
    r = pyradamsa.Radamsa(mut_offset=19)
    assert r.mut_offset == 19
    # fix: compare to None with identity, not equality (PEP 8 / E711) —
    # '== None' invokes __eq__ and can misfire on overloading types
    assert r.seed is None
@pytest.fixture
def data():
    # Sample fuzzing input shared by the seed-related tests below.
    return b'GET /auth?pass=<PASSWORD> HTTP1.1'
def test_seed_arg(data):
    # A fixed seed must produce a deterministic mutation.
    assert pyradamsa.Radamsa().fuzz(
        data, seed=1337) == b'GET /auth?pass=<PASSWORD> HTTP\xc0\xb1.1'
def test_seed_wraparound(data):
    # Seeds wrap modulo 2**64: -1 and sys.maxsize * 2 + 1 are equivalent.
    r = pyradamsa.Radamsa()
    assert r.fuzz(data, -1) == r.fuzz(data, sys.maxsize * 2 + 1)
def test_seed_static(data):
    # A Radamsa constructed with a seed is deterministic across calls.
    r = pyradamsa.Radamsa(1337)
    assert r.fuzz(data) == r.fuzz(data)
def test_returned_len():
    # Output length for this input/seed pair is known and stable.
    data = b"\xaa\x00"*100
    assert len(pyradamsa.Radamsa(seed=1337).fuzz(data)) == 201
|
141762
|
class Solution(object):
    def canVisitAllRooms(self, rooms):
        """
        Return True when every room is reachable from room 0.

        :type rooms: List[List[int]]  (rooms[i] holds the keys found in room i)
        :rtype: bool
        """
        # Iterative depth-first search over the key graph, starting in room 0.
        visited = {0}
        pending = [0]
        while pending:
            current = pending.pop()
            for found_key in rooms[current]:
                if found_key not in visited:
                    visited.add(found_key)
                    pending.append(found_key)
        # All rooms reachable iff every index was visited.
        return len(visited) == len(rooms)
|
141773
|
import os
import argparse
import time
import gc
# spark imports
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import col, lower
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
class AlsRecommender:
    """
    This a collaborative filtering recommender with Alternating Least Square
    Matrix Factorization, which is implemented by Spark
    """
    def __init__(self, spark_session, path_movies, path_ratings):
        # Keep the session/context and load both csv files as DataFrames;
        # the ALS estimator is configured here but only fitted on demand.
        self.spark = spark_session
        self.sc = spark_session.sparkContext
        self.moviesDF = self._load_file(path_movies) \
            .select(['movieId', 'title'])
        self.ratingsDF = self._load_file(path_ratings) \
            .select(['userId', 'movieId', 'rating'])
        self.model = ALS(
            userCol='userId',
            itemCol='movieId',
            ratingCol='rating',
            coldStartStrategy="drop")
    def _load_file(self, filepath):
        """
        load csv file into memory as spark DF
        """
        return self.spark.read.load(filepath, format='csv',
                                    header=True, inferSchema=True)
    def tune_model(self, maxIter, regParams, ranks, split_ratio=(6, 2, 2)):
        """
        Hyperparameter tuning for ALS model
        Parameters
        ----------
        maxIter: int, max number of learning iterations
        regParams: list of float, regularization parameter
        ranks: list of float, number of latent factors
        split_ratio: tuple, (train, validation, test)
        """
        # split data
        train, val, test = self.ratingsDF.randomSplit(split_ratio)
        # holdout tuning
        self.model = tune_ALS(self.model, train, val,
                              maxIter, regParams, ranks)
        # test model
        predictions = self.model.transform(test)
        evaluator = RegressionEvaluator(metricName="rmse",
                                        labelCol="rating",
                                        predictionCol="prediction")
        rmse = evaluator.evaluate(predictions)
        print('The out-of-sample RMSE of the best tuned model is:', rmse)
        # clean up
        del train, val, test, predictions, evaluator
        gc.collect()
    def set_model_params(self, maxIter, regParam, rank):
        """
        set model params for pyspark.ml.recommendation.ALS
        Parameters
        ----------
        maxIter: int, max number of learning iterations
        regParams: float, regularization parameter
        ranks: float, number of latent factors
        """
        self.model = self.model \
            .setMaxIter(maxIter) \
            .setRank(rank) \
            .setRegParam(regParam)
    def _regex_matching(self, fav_movie):
        """
        return the closest matches via SQL regex.
        If no match found, return None
        Parameters
        ----------
        fav_movie: str, name of user input movie
        Return
        ------
        list of indices of the matching movies
        """
        print('You have input movie:', fav_movie)
        # case-insensitive substring match against movie titles
        matchesDF = self.moviesDF \
            .filter(
                lower(
                    col('title')
                ).like('%{}%'.format(fav_movie.lower()))
            ) \
            .select('movieId', 'title')
        if not len(matchesDF.take(1)):
            # NOTE(review): this branch falls through with no return, so the
            # caller receives None (not a list); _append_ratings would then
            # raise when iterating it — confirm intended behavior.
            print('Oops! No match is found')
        else:
            movieIds = matchesDF.rdd.map(lambda r: r[0]).collect()
            titles = matchesDF.rdd.map(lambda r: r[1]).collect()
            print('Found possible matches in our database: '
                  '{0}\n'.format([x for x in titles]))
            return movieIds
    def _append_ratings(self, userId, movieIds):
        """
        append a user's movie ratings to ratingsDF
        Parameter
        ---------
        userId: int, userId of a user
        movieIds: int, movieIds of user's favorite movies
        """
        # create new user rdd
        # every favorite movie is given the maximum rating (5.0)
        user_rdd = self.sc.parallelize(
            [(userId, movieId, 5.0) for movieId in movieIds])
        # transform to user rows
        user_rows = user_rdd.map(
            lambda x: Row(
                userId=int(x[0]),
                movieId=int(x[1]),
                rating=float(x[2])
            )
        )
        # transform rows to spark DF
        userDF = self.spark.createDataFrame(user_rows) \
            .select(self.ratingsDF.columns)
        # append to ratingsDF
        self.ratingsDF = self.ratingsDF.union(userDF)
    def _create_inference_data(self, userId, movieIds):
        """
        create a user with all movies except ones were rated for inferencing
        """
        # filter movies
        other_movieIds = self.moviesDF \
            .filter(~col('movieId').isin(movieIds)) \
            .select(['movieId']) \
            .rdd.map(lambda r: r[0]) \
            .collect()
        # create inference rdd
        inferenceRDD = self.sc.parallelize(
            [(userId, movieId) for movieId in other_movieIds]
        ).map(
            lambda x: Row(
                userId=int(x[0]),
                movieId=int(x[1]),
            )
        )
        # transform to inference DF
        inferenceDF = self.spark.createDataFrame(inferenceRDD) \
            .select(['userId', 'movieId'])
        return inferenceDF
    def _inference(self, model, fav_movie, n_recommendations):
        """
        return top n movie recommendations based on user's input movie
        Parameters
        ----------
        model: spark ALS model
        fav_movie: str, name of user input movie
        n_recommendations: int, top n recommendations
        Return
        ------
        list of top n similar movie recommendations
        """
        # create a userId
        # new synthetic user gets an id one past the current maximum
        userId = self.ratingsDF.agg({"userId": "max"}).collect()[0][0] + 1
        # get movieIds of favorite movies
        movieIds = self._regex_matching(fav_movie)
        # append new user with his/her ratings into data
        self._append_ratings(userId, movieIds)
        # matrix factorization
        model = model.fit(self.ratingsDF)
        # get data for inferencing
        inferenceDF = self._create_inference_data(userId, movieIds)
        # make inference
        return model.transform(inferenceDF) \
            .select(['movieId', 'prediction']) \
            .orderBy('prediction', ascending=False) \
            .rdd.map(lambda r: (r[0], r[1])) \
            .take(n_recommendations)
    def make_recommendations(self, fav_movie, n_recommendations):
        """
        make top n movie recommendations
        Parameters
        ----------
        fav_movie: str, name of user input movie
        n_recommendations: int, top n recommendations
        """
        # make inference and get raw recommendations
        print('Recommendation system start to make inference ...')
        t0 = time.time()
        raw_recommends = \
            self._inference(self.model, fav_movie, n_recommendations)
        movieIds = [r[0] for r in raw_recommends]
        scores = [r[1] for r in raw_recommends]
        print('It took my system {:.2f}s to make inference \n\
              '.format(time.time() - t0))
        # get movie titles
        movie_titles = self.moviesDF \
            .filter(col('movieId').isin(movieIds)) \
            .select('title') \
            .rdd.map(lambda r: r[0]) \
            .collect()
        # print recommendations
        print('Recommendations for {}:'.format(fav_movie))
        for i in range(len(movie_titles)):
            print('{0}: {1}, with rating '
                  'of {2}'.format(i+1, movie_titles[i], scores[i]))
class Dataset:
    """
    data object make loading raw files easier
    """
    def __init__(self, spark_session, filepath):
        """
        spark dataset constructor
        Parameters
        ----------
        spark_session: active SparkSession used to build DataFrames
        filepath: str, path to a ratings csv (header: userId,movieId,rating,...)
        """
        self.spark = spark_session
        self.sc = spark_session.sparkContext
        self.filepath = filepath
        # build spark data object
        self.RDD = self.load_file_as_RDD(self.filepath)
        self.DF = self.load_file_as_DF(self.filepath)
    def load_file_as_RDD(self, filepath):
        """
        load the csv as an RDD of (userId, movieId, rating) tuples,
        skipping the header line
        """
        ratings_RDD = self.sc.textFile(filepath)
        header = ratings_RDD.take(1)[0]
        return ratings_RDD \
            .filter(lambda line: line != header) \
            .map(lambda line: line.split(",")) \
            .map(lambda tokens: (int(tokens[0]), int(tokens[1]), float(tokens[2]))) # noqa
    def load_file_as_DF(self, filepath):
        """
        load the csv as a spark DataFrame with userId/movieId/rating columns
        """
        # bug fix: the original called self.load_file_as_rdd (wrong case),
        # which raised AttributeError during __init__
        ratings_RDD = self.load_file_as_RDD(filepath)
        ratingsRDD = ratings_RDD.map(lambda tokens: Row(
            userId=int(tokens[0]), movieId=int(tokens[1]), rating=float(tokens[2]))) # noqa
        return self.spark.createDataFrame(ratingsRDD)
def tune_ALS(model, train_data, validation_data, maxIter, regParams, ranks):
    """
    grid search function to select the best model based on RMSE of
    validation data
    Parameters
    ----------
    model: spark ML estimator, ALS
    train_data: spark DF with columns ['userId', 'movieId', 'rating']
    validation_data: spark DF with columns ['userId', 'movieId', 'rating']
    maxIter: int, max number of learning iterations
    regParams: list of float, one dimension of hyper-param tuning grid
    ranks: list of float, one dimension of hyper-param tuning grid
    Return
    ------
    The best fitted ALS model with lowest RMSE score on validation data
    """
    # initial
    min_error = float('inf')
    best_rank = -1
    best_regularization = 0
    best_model = None
    # the evaluator is configuration-only; build it once outside the grid
    evaluator = RegressionEvaluator(metricName="rmse",
                                    labelCol="rating",
                                    predictionCol="prediction")
    for rank in ranks:
        for reg in regParams:
            # configure the ALS estimator for this grid point
            als = model.setMaxIter(maxIter).setRank(rank).setRegParam(reg)
            # train ALS model; keep the fitted transformer in its own name —
            # the original rebound ``model``, so the second grid iteration
            # called setMaxIter() on a fitted ALSModel and crashed
            fitted_model = als.fit(train_data)
            # evaluate the model by computing the RMSE on the validation data
            predictions = fitted_model.transform(validation_data)
            rmse = evaluator.evaluate(predictions)
            print('{} latent factors and regularization = {}: '
                  'validation RMSE is {}'.format(rank, reg, rmse))
            if rmse < min_error:
                min_error = rmse
                best_rank = rank
                best_regularization = reg
                best_model = fitted_model
    print('\nThe best model has {} latent factors and '
          'regularization = {}'.format(best_rank, best_regularization))
    return best_model
def parse_args():
    """Parse command-line options for the ALS movie recommender.

    Returns an argparse.Namespace with attributes: path, movies_filename,
    ratings_filename, movie_name and top_n.
    """
    arg_parser = argparse.ArgumentParser(
        prog="Movie Recommender",
        description="Run ALS Movie Recommender")
    # string-valued options share the same nargs='?' shape
    string_options = [
        ('--path', '../data/MovieLens', 'input data path'),
        ('--movies_filename', 'movies.csv', 'provide movies filename'),
        ('--ratings_filename', 'ratings.csv', 'provide ratings filename'),
        ('--movie_name', '', 'provide your favoriate movie name'),
    ]
    for flag, default, help_text in string_options:
        arg_parser.add_argument(flag, nargs='?', default=default,
                                help=help_text)
    arg_parser.add_argument('--top_n', type=int, default=10,
                            help='top n movie recommendations')
    return arg_parser.parse_args()
# Script entry point: parse CLI args, start a SparkSession, build the
# recommender with fixed hyperparameters, and print top-n recommendations.
if __name__ == '__main__':
    # get args
    args = parse_args()
    data_path = args.path
    movies_filename = args.movies_filename
    ratings_filename = args.ratings_filename
    movie_name = args.movie_name
    top_n = args.top_n
    # initial spark
    spark = SparkSession \
        .builder \
        .appName("movie recommender") \
        .getOrCreate()
    # initial recommender system
    recommender = AlsRecommender(
        spark,
        os.path.join(data_path, movies_filename),
        os.path.join(data_path, ratings_filename))
    # set params
    # maxIter=10, regParam=0.05, rank=20 (hard-coded; no tuning run here)
    recommender.set_model_params(10, 0.05, 20)
    # make recommendations
    recommender.make_recommendations(movie_name, top_n)
    # stop
    spark.stop()
|
141777
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer that validates ECAL barrel Geant4 SimHits: reads the
# 'EcalHitsEB' collection produced by the 'g4SimHits' module.
ecalBarrelSimHitsValidation = DQMEDAnalyzer("EcalBarrelSimHitsValidation",
    moduleLabelG4 = cms.string('g4SimHits'),
    verbose = cms.untracked.bool(False),
    ValidationCollection = cms.string('EcalValidInfo'),
    EBHitsCollection = cms.string('EcalHitsEB')
)
|
141781
|
import fileinput
import math
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import re
from scipy import interpolate # strait up linear interpolation, nothing fancy
import scipy.signal as signal
# Module-level linear interpolators (flight time in seconds -> error value).
# load_horiz() populates roll/pitch only; load_old() populates all six.
yaw_interp = None
pitch_interp = None
roll_interp = None
north_interp = None
east_interp = None
down_interp = None
def load_horiz(filename, plot=False):
    """Load EKF roll/pitch error traces from a csv export, low-pass filter
    them, and build the module-level interpolators.

    Parameters
    ----------
    filename: path to a csv with columns 'flight time (sec)',
        'ekf roll error (rad)' and 'ekf pitch error (rad)'.
    plot: when True, show a debug plot of the smoothed signals.  (This
        replaces the original unreachable ``if False:`` debug block;
        default False preserves the old behavior.)

    Populates the module globals roll_interp / pitch_interp; queries
    outside the recorded time span return 0.0.
    """
    global roll_interp
    global pitch_interp
    data = pd.read_csv(filename)
    data.set_index('flight time (sec)', inplace=True, drop=False)
    # time range / hz
    tmin = data['flight time (sec)'].min()
    tmax = data['flight time (sec)'].max()
    span_sec = tmax - tmin
    feat_count = len(data['flight time (sec)'])
    print("number of video records:", feat_count)
    # approximate sample rate, assuming roughly uniform sampling
    hz = int(round((feat_count / span_sec)))
    # smooth with a zero-phase (filtfilt) 2nd-order Butterworth low-pass
    cutoff_hz = 1
    b, a = signal.butter(2, cutoff_hz, fs=hz)
    data['ekf roll error (rad)'] = \
        signal.filtfilt(b, a, data['ekf roll error (rad)'])
    data['ekf pitch error (rad)'] = \
        signal.filtfilt(b, a, data['ekf pitch error (rad)'])
    if plot:
        plt.figure()
        plt.plot(data['ekf roll error (rad)'], label="roll error")
        plt.plot(data['ekf pitch error (rad)'], label="pitch error")
        plt.xlabel("Flight time (sec)")
        plt.ylabel("Rad")
        plt.legend()
        plt.show()
    # interpolators
    roll_interp = interpolate.interp1d(data['flight time (sec)'], data['ekf roll error (rad)'], bounds_error=False, fill_value=0.0)
    pitch_interp = interpolate.interp1d(data['flight time (sec)'], data['ekf pitch error (rad)'], bounds_error=False, fill_value=0.0)
def load_old(filename):
    """Parse a whitespace/comma separated error log and build linear
    interpolators (flight time -> error) for every channel.

    Expected columns per line: time, yaw, pitch, roll, north, east,
    down errors.  Populates the module-level *_interp globals; queries
    outside the recorded time span return 0.0.
    """
    global yaw_interp
    global pitch_interp
    global roll_interp
    global north_interp
    global east_interp
    global down_interp
    rows = []
    for line in fileinput.input(filename):
        tokens = re.split('[,\s]+', line.rstrip())
        # only the first seven fields are meaningful
        rows.append([float(tok) for tok in tokens[:7]])
    arr = np.array(rows)
    t = arr[:, 0]
    def make_interp(column):
        # zero outside the recorded span, matching the original fill_value
        return interpolate.interp1d(t, arr[:, column], bounds_error=False,
                                    fill_value=0.0)
    yaw_interp = make_interp(1)
    pitch_interp = make_interp(2)
    roll_interp = make_interp(3)
    north_interp = make_interp(4)
    east_interp = make_interp(5)
    down_interp = make_interp(6)
|
141802
|
import os
import tempfile
import subprocess
import logging
import uuid
import time
import socket
import numpy as np
import cclib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import PeriodicTable
from rdkit.Chem.rdMolTransforms import GetBondLength
logging.getLogger("cclib").setLevel(30)
class GaussianRunner(object):
    # Pipeline: MMFF conformer search (RDKit) -> CREST conformer refinement
    # -> Gaussian optimization/frequency run -> cclib parse -> archive logs.
    def __init__(self, smiles, cid, type_, max_conformers=1000,
                 min_conformers=100, nprocs=18, mem='40GB',
                 scratchdir='/tmp/scratch',
                 projectdir='/projects/rlmolecule/svss/home/Projects-python/crest-bde/',
                 crest='~/bin/crest/crest',
                 crest_timeout=86400,
                 gaussian_timeout=86400):
        """ Class to handle the overall temporary directory management for
        running Gaussian on Eagle """
        self.smiles = smiles
        self.cid = cid
        # 'fragment' selects the radical (stable=opt + link1) input recipe
        self.type_ = type_
        self.max_conformers = max_conformers
        self.min_conformers = min_conformers
        self.nprocs = nprocs
        self.mem = mem
        self.scratchdir = scratchdir
        self.projectdir = projectdir
        self.crest = crest
        self.crest_timeout = crest_timeout
        self.gaussian_timeout = gaussian_timeout
    def process(self):
        # Run the full pipeline inside a scratch tempdir that is removed on
        # exit; returns (molblock, enthalpy, free energy, scf energy, log path).
        with tempfile.TemporaryDirectory(dir=self.scratchdir) as tmpdirname:
            print("starting SMILES {0} on host {1}".format(self.smiles, socket.gethostname()))
            mol, confId = self.optimize_molecule_mmff()
            # running crest
            self.run_crest(mol, confId,tmpdirname)
            self.write_gaussian_input_file(tmpdirname)
            # Run gaussian and time calculation
            gauss_start_time = time.time()
            self.run_gaussian(tmpdirname)
            gauss_run_time = time.time() - gauss_start_time
            print("python walltime for SMILES {0} on host {1}: {2}".format(
                self.smiles, socket.gethostname(), gauss_run_time))
            mol, enthalpy, freeenergy, scfenergy = self.parse_log_file()
            log = self.cleanup()
            molstr = Chem.MolToMolBlock(mol)
            return molstr, enthalpy, freeenergy, scfenergy, log
    def optimize_molecule_mmff(self):
        """ Embed a molecule in 3D space, optimizing a number of conformers and
        selecting the most stable
        """
        mol = Chem.MolFromSmiles(self.smiles)
        mol = Chem.rdmolops.AddHs(mol)
        # If the molecule is a radical; add a hydrogen so MMFF converges to a
        # reasonable structure
        is_radical = False
        radical_index = None
        for i, atom in enumerate(mol.GetAtoms()):
            if atom.GetNumRadicalElectrons() != 0:
                is_radical = True
                radical_index = i
                atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
                atom.SetNumRadicalElectrons(0)
        # Use min < 3^n < max conformers, where n is the number of rotatable bonds
        NumRotatableBonds = AllChem.CalcNumRotatableBonds(mol)
        NumConformers = np.clip(3**NumRotatableBonds, self.min_conformers,
                                self.max_conformers)
        conformers = AllChem.EmbedMultipleConfs(
            mol, numConfs=int(NumConformers), pruneRmsThresh=0.2, randomSeed=1,
            useExpTorsionAnglePrefs=True, useBasicKnowledge=True)
        def optimize_conformer(conformer):
            # MMFF94s minimization; returns the minimized energy for ranking
            prop = AllChem.MMFFGetMoleculeProperties(mol, mmffVariant="MMFF94s")
            ff = AllChem.MMFFGetMoleculeForceField(mol, prop, confId=conformer)
            ff.Minimize()
            return float(ff.CalcEnergy())
        assert conformers, "Conformer embedding failed"
        if len(conformers) == 1:
            logging.critical(
                'Only 1 conformer for SMILES {}'.format(self.smiles))
            most_stable_conformer = conformers[0]
        else:
            conformer_energies = np.array(
                [optimize_conformer(conformer) for conformer in conformers])
            most_stable_conformer = conformer_energies.argmin()
        # If hydrogen was added; remove it before returning the final mol
        if is_radical:
            radical_atom = mol.GetAtomWithIdx(radical_index)
            radical_atom.SetNumExplicitHs(int(radical_atom.GetNumExplicitHs()) - 1)
            radical_atom.SetNumRadicalElectrons(1)
        return mol, int(most_stable_conformer)
    def run_crest(self, mol, confId, tmpdirname):
        """ Given an rdkit.Mol object with an optimized, minimum energy conformer
        ID, converting to .xyz and running crest in the scratch folder """
        # random run suffix so parallel jobs for the same cid don't collide
        self.run_hex = uuid.uuid4().hex[:6]
        self.xyz = tmpdirname + '/{0}_{1}.xyz'.format(self.cid, self.run_hex)
        self.crest_out = tmpdirname + '/{0}_{1}.crest.out'.format(self.cid, self.run_hex)
        # NOTE(review): filename ends in 'best.xyz.' (trailing dot) — likely
        # a typo, but it is used consistently below so the pipeline works.
        self.best_xyz = tmpdirname + '/{0}_{1}_best.xyz.'.format(self.cid, self.run_hex)
        #writing to xyzfile
        rdkit.Chem.rdmolfiles.MolToXYZFile(mol,self.xyz,confId=confId)
        #running calc in temporary TemporaryDirectory : tmpdirname
        env = os.environ.copy()
        crest_cmd = "{0} {1} > {2}".format(self.crest,
                                           self.xyz, self.crest_out)
        subprocess.run(crest_cmd, shell=True, env=env,
                       timeout=self.crest_timeout)
        #crest outputs common name files such as crest_best.xyz. We have to move it
        #such that it can be accessed again to creat gjf file for gaussian
        subprocess.run(['mv', 'crest_best.xyz', self.best_xyz])
    def write_gaussian_input_file(self, tmpdirname):
        """ Given an best conformer after crest,
        write a gaussian input file using openbabel to the scratch folder """
        self.gjf = tmpdirname + '/{0}_{1}.gjf'.format(self.cid, self.run_hex)
        checkpoint_file = tmpdirname + '/{0}_{1}.chk'.format(self.cid, self.run_hex)
        if self.type_ == 'fragment':
            # Run stable=opt
            # radicals: first a wavefunction stability run, then a linked
            # opt+freq job that reuses the checkpointed guess/geometry
            header1 = [
                '%chk={0}'.format(checkpoint_file),
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# stable=opt M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400)'
                ' nosymm guess=mix']
            subprocess.run(
                ['obabel', '-ixyz',self.best_xyz, '-O', self.gjf, '-xk',
                 '\n'.join(header1)])
            with open(self.gjf, 'r') as f:
                inputs = f.read().split('\n')
            # NOTE(review): indices 5 and 7 assume obabel's gjf layout
            # (title and charge/multiplicity lines) — verify against the
            # obabel version in use.
            inputs[5] = f'{self.cid}_{self.run_hex}'
            chg_mul = inputs[7]
            inputs += [
                '--link1--',
                '%chk={0}'.format(checkpoint_file),
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# opt freq M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400)'
                ' nosymm guess=read geom=check\n',
                ' {0}_{1}\n'.format(self.cid, self.run_hex),
                chg_mul
            ]
        else:
            # closed-shell molecules: single opt+freq job
            header1 = [
                '%MEM={}'.format(self.mem),
                '%nprocshared={}'.format(self.nprocs),
                '# opt freq M062X/Def2TZVP scf=(xqc,maxconventionalcycles=400) nosymm']
            subprocess.run(
                ['obabel', '-ixyz',self.best_xyz, '-O', self.gjf, '-xk',
                 '\n'.join(header1)])
            with open(self.gjf, 'r') as f:
                inputs = f.read().split('\n')
            inputs[4] = f'{self.cid}_{self.run_hex}'
        with open(self.gjf, 'wt') as f:
            f.write('\n'.join(inputs))
        # debug -- keep a copy before running gaussian
        gjf_basename = os.path.basename(self.gjf)
        newgjf = self.projectdir + 'gjf_errors/' + gjf_basename
        subprocess.run(['cp', self.gjf, newgjf])
    def run_gaussian(self, tmpdirname):
        """ Run the given Guassian input file (with associated mol ID) """
        self.log = tmpdirname + '/{0}_{1}.log'.format(self.cid, self.run_hex)
        gaussian_cmd = "module load gaussian/G16C && g16 < {0} > {1}".format(
            self.gjf, self.log)
        # Gaussian scratch files go to their own tempdir, cleaned on exit
        with tempfile.TemporaryDirectory(dir=tmpdirname) as gausstmp:
            env = os.environ.copy()
            env['GAUSS_SCRDIR'] = gausstmp
            subprocess.run(gaussian_cmd, shell=True, env=env,
                           timeout=self.gaussian_timeout)
    def parse_log_file(self):
        """ Parse the gaussian log file using cclib, return the optimized mol and
        enthalpy. """
        # Parse the output log with cclib, assert the optimization completed
        data = cclib.io.ccread(self.log)
        assert data.optdone, "Optimization not converged"
        if hasattr(data, 'vibfreqs'):  # single atoms don't have this property
            assert min(data.vibfreqs) >= 0, "Imaginary Frequency"
        # Create an RDKit Molecule from the SMILES string
        mol = Chem.MolFromSmiles(self.smiles)
        mol = AllChem.AddHs(mol)
        AllChem.EmbedMolecule(mol)
        conf = mol.GetConformer()
        assert np.allclose(
            np.array([a.GetAtomicNum() for a in mol.GetAtoms()]),
            data.atomnos), "Stoichiometry check failed"
        # Correct the 3D positions of the atoms using the optimized geometry
        for i in range(conf.GetNumAtoms()):
            conf.SetAtomPosition(i, data.atomcoords[-1][i])
        pt = Chem.GetPeriodicTable()
        covalent_radii = lambda x: PeriodicTable.GetRcovalent(pt, x)
        # Check bond lengths
        # sanity check: no bond longer than the sum of covalent radii + 0.4 A
        for bond in mol.GetBonds():
            length = GetBondLength(
                mol.GetConformer(), bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())
            max_length = (covalent_radii(bond.GetBeginAtom().GetSymbol()) +
                          covalent_radii(bond.GetEndAtom().GetSymbol()) + 0.4)
            assert length <= max_length, "bond greater than maximum covalent length"
        # Set property fields of the molecule for final SDF export
        mol.SetProp("_Name", str(self.cid))
        mol.SetProp('SMILES', self.smiles)
        mol.SetDoubleProp('Enthalpy', data.enthalpy)
        # 27.2114 presumably converts cclib's eV scf energy to Hartree — confirm
        return mol, data.enthalpy, data.freeenergy, data.scfenergies[-1] / 27.2114
    def cleanup(self):
        """ Compress files and store in /projects """
        log_basename = os.path.basename(self.log)
        gjf_basename = os.path.basename(self.gjf)
        newlog = self.projectdir + 'log/' + log_basename + '.gz'
        newgjf = self.projectdir + 'gjf/' + gjf_basename + '.gz'
        subprocess.run(['gzip', self.log, self.gjf])
        subprocess.run(['mv', self.log + '.gz', newlog])
        subprocess.run(['mv', self.gjf + '.gz', newgjf])
        return newlog
|
141810
|
import doctest
import json
from django.core.management import call_command
from django.contrib.auth.models import User
from django.test import TestCase
from django_comments_xtd.models import XtdComment
from wagtail.core.models import Page
import responses
from .models import (
BlogPage,
BlogTag,
BlogPageTag,
BlogIndexPage,
BlogCategory,
BlogCategoryBlogPage,
)
from .management.commands.wordpress_to_wagtail import Command
from . import wp_xml_parser
from .wordpress_import import WordpressImport
def load_tests(loader, tests, ignore):
    """unittest load_tests hook: attach wp_xml_parser's doctests to the suite."""
    doctest_suite = doctest.DocTestSuite(wp_xml_parser)
    tests.addTests(doctest_suite)
    return tests
from django.urls import reverse
from django.contrib.auth.models import Group
class BlogTests(TestCase):
def setUp(self):
home = Page.objects.get(slug="home")
self.user = User.objects.create_user("test", "<EMAIL>", "pass")
self.xml_path = "example_export.xml"
self.blog_index = home.add_child(
instance=BlogIndexPage(
title="Blog Index", slug="blog", search_description="x", owner=self.user
)
)
def test_index(self):
url = self.blog_index.url
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
blog_page = self.blog_index.add_child(
instance=BlogPage(
title="Blog Page",
slug="blog_page1",
search_description="x",
owner=self.user,
)
)
url = blog_page.url
res = self.client.get(url)
self.assertContains(res, "Blog Page")
def test_author(self):
# make super to access admin
self.user.is_superuser = True
self.user.save()
self.assertTrue(self.client.login(username="test", password="<PASSWORD>"))
# make an is_staff admin
staff_user = User.objects.create_user("mr.staff", "<EMAIL>", "pass")
staff_user.is_staff = True
staff_user.save()
# make some groups
bloggers = "Bloggers"
Group.objects.create(name=bloggers)
others = "Others"
Group.objects.create(name=others)
# make a non-admin Blogger author
author_user = User.objects.create_user("mr.author", "<EMAIL>", "<PASSWORD>")
author_user.groups.add(Group.objects.get(name=bloggers))
author_user.save()
# make a blog page
blog_page = self.blog_index.add_child(
instance=BlogPage(
title="Blog Page",
slug="blog_page1",
search_description="x",
owner=self.user,
)
)
with self.settings(
BLOG_LIMIT_AUTHOR_CHOICES_GROUP=None, BLOG_LIMIT_AUTHOR_CHOICES_ADMIN=False
):
response = self.client.get(
reverse("wagtailadmin_pages:edit", args=(blog_page.id,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "mr.staff")
self.assertNotContains(response, "mr.author")
with self.settings(
BLOG_LIMIT_AUTHOR_CHOICES_GROUP=bloggers,
BLOG_LIMIT_AUTHOR_CHOICES_ADMIN=False,
):
response = self.client.get(
reverse("wagtailadmin_pages:edit", args=(blog_page.id,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "mr.staff")
self.assertContains(response, "mr.author")
with self.settings(
BLOG_LIMIT_AUTHOR_CHOICES_GROUP=bloggers,
BLOG_LIMIT_AUTHOR_CHOICES_ADMIN=True,
):
response = self.client.get(
reverse("wagtailadmin_pages:edit", args=(blog_page.id,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "mr.staff")
self.assertContains(response, "mr.author")
with self.settings(
BLOG_LIMIT_AUTHOR_CHOICES_GROUP=[bloggers, others],
BLOG_LIMIT_AUTHOR_CHOICES_ADMIN=False,
):
response = self.client.get(
reverse("wagtailadmin_pages:edit", args=(blog_page.id,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "mr.staff")
self.assertContains(response, "mr.author")
with self.settings(
BLOG_LIMIT_AUTHOR_CHOICES_GROUP=[bloggers, others],
BLOG_LIMIT_AUTHOR_CHOICES_ADMIN=True,
):
response = self.client.get(
reverse("wagtailadmin_pages:edit", args=(blog_page.id,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "mr.staff")
self.assertContains(response, "mr.author")
def test_latest_entries_feed(self):
self.blog_index.add_child(
instance=BlogPage(
title="Blog Page",
slug="blog_page1",
search_description="x",
owner=self.user,
)
)
res = self.client.get(
"{0}{1}/rss/".format(self.blog_index.url, self.blog_index.slug)
)
self.assertContains(res, "Blog Page")
self.assertContains(res, "<rss")
self.assertContains(res, 'version="2.0"')
self.assertContains(res, "</rss>")
def test_latest_entries_feed_atom(self):
self.blog_index.add_child(
instance=BlogPage(
title="Blog Page",
slug="blog_page1",
search_description="x",
owner=self.user,
)
)
res = self.client.get(
"{0}{1}/atom/".format(self.blog_index.url, self.blog_index.slug)
)
self.assertContains(res, "Blog Page")
self.assertContains(res, "<feed")
self.assertContains(res, 'xmlns="http://' 'www.w3.org/2005/Atom"')
self.assertContains(res, "</feed>")
def test_import_url(self):
"""
Tests migrate_wordpress command -
the command should do the following:
1. create BlogPage objects from a given BlogIndex
2. create category and tag objects as BlogCategory,
BlogTag, BlogPageBlogCategory and BlogPageTag objects
The test imports from test-data.json which includes one wordpress blog
post with 11 tags and 2 categories
"""
command = Command()
command.username = None
command.password = <PASSWORD>
command.should_import_comments = True
command.url = "just_testing"
with open("test-data.json") as test_json:
posts = json.load(test_json)
command.create_blog_pages(posts, self.blog_index)
self.assertEquals(Page.objects.all().count(), 4)
self.assertEquals(BlogPage.objects.all().count(), 1)
page = BlogPage.objects.get()
self.assertEqual(page.title, "My wordpress title")
self.assertInHTML("<strong>Bold here</strong>", page.body)
self.assertEqual(page.categories.count(), 2)
self.assertEqual(page.tags.count(), 11)
self.assertEqual(page.owner.id, 2)
self.assertEqual(BlogCategory.objects.all().count(), 2)
self.assertEqual(BlogTag.objects.all().count(), 11)
self.assertEqual(BlogCategoryBlogPage.objects.all().count(), 2)
self.assertEqual(BlogPageTag.objects.all().count(), 11)
parent_category = BlogCategory.objects.get(slug="writing-wisdom")
child_category = BlogCategory.objects.get(slug="swoon-reads")
self.assertTrue(child_category.parent is not None)
self.assertEqual(child_category.parent, parent_category)
self.assertEqual(child_category.slug, "swoon-reads")
self.assertEqual(parent_category.slug, "writing-wisdom")
comments = XtdComment.objects.all()
self.assertEqual(comments.count(), 2)
parent_comment = XtdComment.objects.get(level=0)
child_comment = XtdComment.objects.get(level=1)
self.assertEqual(parent_comment.id, child_comment.parent_id)
def test_import_xml(self):
"""
Tests migrate_wordpress command -
the command should do the following:
1. create BlogPage objects from a given BlogIndex
2. create category and tag objects as BlogCategory,
BlogTag, BlogPageBlogCategory and BlogPageTag objects
The test imports from example_export.xml which includes a wordpress blog
"""
command = Command()
command.handle(xml=self.xml_path, blog_index="blog")
self.assertEquals(Page.objects.all().count(), 6)
self.assertEquals(BlogPage.objects.all().count(), 3)
page = BlogPage.objects.filter(
slug="10-things-super-successful-people-do-during-lunch"
).get()
self.assertEqual(
page.title, "10 Things Super Successful People Do During Lunch"
)
self.assertEqual(
page.body,
"<p>Before you spend another lunch scarfing down food at your desk with your eyes glued to your computer screen, here's some food for thought.</p>",
)
self.assertEqual(page.categories.count(), 2)
self.assertEqual(page.tags.count(), 1)
self.assertEqual(page.owner.id, 2)
self.assertEqual(BlogCategory.objects.all().count(), 2)
self.assertEqual(BlogTag.objects.all().count(), 1)
self.assertEqual(BlogCategoryBlogPage.objects.all().count(), 2)
self.assertEqual(BlogPageTag.objects.all().count(), 1)
parent_category = BlogCategory.objects.get(slug="marketing-2")
child_category = BlogCategory.objects.get(slug="cheat-sheets")
self.assertTrue(child_category.parent is not None)
self.assertEqual(child_category.parent, parent_category)
self.assertEqual(child_category.slug, "cheat-sheets")
self.assertEqual(parent_category.slug, "marketing-2")
# Assert that <p> tags were added to the post that didn't contain them
page = BlogPage.objects.filter(
slug="asa-releases-2013-economic-analysis-of-staffing-industry-trends"
).get()
self.assertEqual(
page.body,
'<p>The American Staffing Association has released its 2013 economic analysis,"Navigating the 1% Economy." Written by ASA chief operating officer <NAME>, CSP, the report takes an in-depth look at recent staffing employment trends and what these suggest about the current economic environment and future labor market conditions.</p>',
)
    def test_import_xml_comments(self):
        """
        Comment data in XML should be inserted and threaded correctly
        """
        # Run the import with comment handling enabled.
        call_command(
            "wordpress_to_wagtail", "blog", xml=self.xml_path, import_comments=True
        )
        comments = XtdComment.objects.all()
        self.assertEqual(comments.count(), 2)
        # level 0 is the root comment, level 1 its reply; the reply must
        # point back at the root via parent_id (i.e. correct threading).
        parent_comment = XtdComment.objects.get(level=0)
        child_comment = XtdComment.objects.get(level=1)
        self.assertEqual(parent_comment.id, child_comment.parent_id)
    def test_unique_category_slug(self):
        """ Ensure unique slugs are generated without erroring """
        # All three names normalise to the same base slug ("one"); creating
        # them must deduplicate the slugs rather than raise an IntegrityError.
        BlogCategory.objects.create(name="one")
        BlogCategory.objects.create(name="one#")
        BlogCategory.objects.create(name="one!")
class BlogAPIImportTests(TestCase):
    """Tests importing posts through the WordPress REST (wp/v2) API."""

    @responses.activate
    def test_import(self):
        """Import one post from a mocked wp/v2 endpoint and verify categories/tags."""
        url = "https://public-api.wordpress.com/wp/v2/sites/davidmburke.com"
        # Canned API payload captured from a real site.
        with open("test_v2_resp.json") as json_file:
            data = json.load(json_file)
        # NOTE(review): status=404 looks odd for a success-path fixture —
        # presumably the importer ignores the HTTP status; confirm.
        responses.add(
            responses.GET,
            url + "/posts?per_page=50&_embed=1",
            json=data,
            status=404,
            headers={"X-WP-TotalPages": "1"},
        )
        home = Page.objects.get(slug="home")
        self.user = User.objects.create_user("test", "<EMAIL>", "<PASSWORD>")
        # The importer needs a blog index page to attach posts to.
        blog_index = home.add_child(
            instance=BlogIndexPage(
                title="Blog Index", slug="blog", search_description="x", owner=self.user
            )
        )
        importer = WordpressImport(url, create_users=True)
        importer.convert_images = True
        importer.get_posts()
        posts = BlogPage.objects.all()
        self.assertEqual(len(posts), 1)
        self.assertEqual(posts[0].blog_categories.all().count(), 2)
        self.assertEqual(posts[0].tags.all().count(), 2)
|
141833
|
import os
import unittest
from uavcan.dsdl import common
class TestCRC16FromBytes(unittest.TestCase):
    """crc16_from_bytes must yield the CRC-16/CCITT check value for the
    standard "123456789" test vector, regardless of the input type."""

    CHECK = 0x29B1

    def test_str(self):
        self.assertEqual(common.crc16_from_bytes('123456789'), self.CHECK)

    def test_bytes(self):
        self.assertEqual(common.crc16_from_bytes(b'123456789'), self.CHECK)

    def test_bytearray(self):
        payload = bytearray('123456789', 'utf-8')
        self.assertEqual(common.crc16_from_bytes(payload), self.CHECK)
class TestBytesFromCRC64(unittest.TestCase):
    """bytes_from_crc64 must serialise the 64-bit CRC least-significant byte first."""

    def test_zero(self):
        expected = b"\x00" * 8
        self.assertEqual(common.bytes_from_crc64(0), expected)

    def test_check_val(self):
        # 0x62EC59E3F1A4F00A rendered little-endian.
        expected = b"\x0A\xF0\xA4\xF1\xE3\x59\xEC\x62"
        self.assertEqual(common.bytes_from_crc64(0x62EC59E3F1A4F00A), expected)


if __name__ == '__main__':
    unittest.main()
|
141843
|
import json
from functools import wraps
from django.contrib.auth import SESSION_KEY
from django.contrib.messages import api, constants
from django.db import models
from django.utils import timezone
from django.utils.functional import SimpleLazyObject
def _positional(count):
"""
Only allows ``count`` positional arguments to the decorated callable
Will be removed as soon as we drop support for Python 2.
"""
def _dec(fn):
@wraps(fn)
def _fn(*args, **kwargs):
if len(args) > count: # pragma: no cover
raise TypeError(
"Only %s positional argument%s allowed"
% (count, "" if count == 1 else "s")
)
return fn(*args, **kwargs)
return _fn
return _dec
@_positional(4)
def add_message(user, level, message, extra_tags="", deliver_once=True, meta=None):
    """Persist a message for *user* (a user instance or a raw user id)."""
    from user_messages.models import Message

    create_kwargs = {
        "level": level or 20,  # default to INFO
        "message": message,
        "extra_tags": extra_tags,
        "_metadata": json.dumps(meta or {}),
        "deliver_once": deliver_once,
    }
    if isinstance(user, models.Model):
        create_kwargs["user"] = user
    else:
        create_kwargs["user_id"] = user
    Message.objects.create(**create_kwargs)
@_positional(0)
def get_messages(request=None, user=None):
    """Lazily collect messages for either a request or a user (exactly one).

    Returns a SimpleLazyObject so no database access happens until the
    message list is actually iterated (e.g. by a template).
    """
    assert bool(request) != bool(user), "Pass exactly one of request or user"
    # Tuple-wrapping emulates ``nonlocal`` so fetch() can rebind ``user``
    # while this module still supports Python 2.
    _nonlocal = (user,)
    def fetch():
        messages = []
        (user,) = _nonlocal
        if request is not None:
            # Start with the regular django.contrib.messages for the request.
            messages.extend(api.get_messages(request))
            if request.session.get(SESSION_KEY) and request.user.is_authenticated:
                user = request.user
        if user is not None:
            from user_messages.models import Message
            user_messages = Message.objects.filter(
                user=user, delivered_at__isnull=True
            ).order_by("pk")
            messages.extend(user_messages)
            # Side effect: mark one-shot messages as delivered so they are
            # not shown again on the next fetch.
            if any(m.deliver_once for m in user_messages):
                user_messages.filter(deliver_once=True).update(
                    delivered_at=timezone.now()
                )
        return messages
    return SimpleLazyObject(fetch)
def _create_shortcut(level):
    """Build a module-level helper that posts a message at a fixed *level*."""
    @_positional(3)
    def shortcut(user, message, extra_tags="", deliver_once=True, meta=None):
        add_message(
            user,
            level,
            message,
            extra_tags=extra_tags,
            deliver_once=deliver_once,
            meta=meta,
        )
    return shortcut
# Convenience wrappers mirroring django.contrib.messages' module-level API,
# one per standard message level.
debug = _create_shortcut(constants.DEBUG)
info = _create_shortcut(constants.INFO)
success = _create_shortcut(constants.SUCCESS)
warning = _create_shortcut(constants.WARNING)
error = _create_shortcut(constants.ERROR)
|
141913
|
from django import forms
class PaymentHiddenInputsPostForm(forms.Form):
def __init__(self, fields, *args, **kwargs):
super().__init__(*args, **kwargs)
for key in fields:
self.fields[key] = forms.CharField(
initial=fields[key], widget=forms.HiddenInput
)
|
141935
|
import FWCore.ParameterSet.Config as cms
from HLTriggerOffline.Higgs.hltHiggsValidator_cfi import *
# DQM validation sequence for the HLT Higgs paths; currently just the
# validator module. The FastSim variant is kept below for reference.
HiggsValidationSequence = cms.Sequence(
    hltHiggsValidator
    )
#HLTHiggsVal_FastSim = cms.Sequence(
#    recoHiggsValidationHLTFastSim_seq +
#    hltHiggsValidator
#    )
|
141940
|
from mltoolkit.mldp.steps.transformers import BaseTransformer
import numpy as np
from logging import getLogger
import os
# Module-scoped logger named after this source file.
logger_name = os.path.basename(__file__)
logger = getLogger(logger_name)
class RatingProp(BaseTransformer):
    """Adds a rating-deviation field to each entry of a data-chunk.

    Assumes that each batch contains data related to one group only.
    """

    def __init__(self, rating_fname, new_fname, **kwargs):
        super(RatingProp, self).__init__(**kwargs)
        self.rating_fname = rating_fname
        self.new_fname = new_fname

    def _transform(self, data_chunk):
        ratings = data_chunk[self.rating_fname]
        deviations = []
        for pos, hyp in enumerate(ratings):
            # Deviation of this review's rating from all the *other* ratings.
            refs = [r for i, r in enumerate(ratings) if i != pos]
            deviations.append(_comp_rating_dev(hyp, refs))
        data_chunk[self.new_fname] = deviations
        return data_chunk
def _comp_rating_dev(hyp_rating, refs_rating):
res = hyp_rating - np.mean(refs_rating)
return res
|
141947
|
import random
import torch
from torch.autograd import Variable
class TensorPool():
    """Buffer of previously queried tensors (an image history pool).

    Once the pool is full, each incoming tensor is, with probability 0.5,
    swapped for a randomly chosen stored one; otherwise it passes through.
    A pool_size of 0 disables pooling entirely.
    """

    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, tensors):
        if self.pool_size == 0:
            # Pooling disabled: hand the input straight back.
            return tensors
        selected = []
        for item in tensors.data:
            item = torch.unsqueeze(item, 0)
            if self.num_imgs < self.pool_size:
                # Pool not yet full: store the tensor and pass it through.
                self.num_imgs = self.num_imgs + 1
                self.images.append(item)
                selected.append(item)
            elif random.uniform(0, 1) > 0.5:
                # Swap a random stored tensor for the incoming one.
                swap_idx = random.randint(0, self.pool_size - 1)
                stored = self.images[swap_idx].clone()
                self.images[swap_idx] = item
                selected.append(stored)
            else:
                selected.append(item)
        return Variable(torch.cat(selected, 0))
|
141956
|
from random import choice
from CybORG.Shared import Observation
from .Monitor import Monitor
from CybORG.Shared.Actions import Action
from CybORG.Shared.Actions.ConcreteActions.StopProcess import StopProcess
from CybORG.Simulator.Session import VelociraptorServer
from CybORG.Simulator.State import State
class Remove(Action):
    """Blue-team action: kill suspicious processes on one host.

    Relies on the parent VelociraptorServer session's record of suspicious
    PIDs (``sus_pids``) gathered by earlier monitoring.
    """

    def __init__(self, session: int, agent: str, hostname: str):
        super().__init__()
        self.agent = agent
        self.session = session
        self.hostname = hostname

    def sim_execute(self, state: State) -> Observation:
        # perform monitor at start of action
        #monitor = Monitor(session=self.session, agent=self.agent)
        #obs = monitor.sim_execute(state)
        parent_session: VelociraptorServer = state.sessions[self.agent][self.session]
        # find relevant session on the chosen host
        sessions = [s for s in state.sessions[self.agent].values() if s.host == self.hostname]
        if len(sessions) > 0:
            # Any session on the host will do for issuing the kills.
            session = choice(sessions)
            obs = Observation(True)
            # remove suspicious processes
            if self.hostname in parent_session.sus_pids:
                for sus_pid in parent_session.sus_pids[self.hostname]:
                    action = StopProcess(session=self.session, agent=self.agent, target_session=session.ident, pid=sus_pid)
                    action.sim_execute(state)
            # remove suspicious files
            return obs
        else:
            # No session on that host: the action fails.
            return Observation(False)

    def __str__(self):
        return f"{self.__class__.__name__} {self.hostname}"
|
142013
|
import json
__all__ = (
"QuillParseError",
"Quill",
)
class QuillParseError(Exception):
    """Raised when a Quill payload cannot be parsed.

    Fix: the original never called ``Exception.__init__``, leaving ``args``
    empty (which breaks pickling and generic exception introspection). The
    value is now passed to the base class as well as stored on ``value``.
    """

    def __init__(self, value):
        super().__init__(value)
        self.value = value

    def __str__(self):
        return "Failed to parse value(%s)" % self.value
class Quill:
    """Wrapper around a Quill editor JSON payload.

    Parses *json_string* and exposes the required ``delta`` field and the
    optional ``html`` field (defaults to ``""``).

    Raises:
        QuillParseError: if the payload is not valid JSON, is not an
            object, or lacks the ``delta`` key. The original parse error
            is chained so the root cause stays visible in tracebacks.
    """

    def __init__(self, json_string):
        self.json_string = json_string
        try:
            json_data = json.loads(json_string)
            self.delta = json_data["delta"]
            self.html = json_data.get("html", "")
        except (json.JSONDecodeError, KeyError, TypeError) as exc:
            raise QuillParseError(json_string) from exc
|
142029
|
import pytest
from certbot_dns_loopia import LoopiaAuthenticator, DnsRecord, split_domain
from unittest.mock import MagicMock
# This config just sets all parameters to some value. It's just to make sure
# that the DNSAuthenticator constructor has all the parameters it might need
class PluginConfig:
    """Minimal stand-in for certbot's configuration object used in tests."""
    verb = "certonly"
    config_dir = "/tmp/cfg"
    work_dir = "/tmp/work"
    logs_dir = "tmp/log"  # NOTE(review): missing leading "/" — intentional? confirm
    cert_path = "./cert.pem"
    fullchain_path = "./chain.pem"
    chain_path = "./chain.pem"
    server = "https://acme-v02.api.letsencrypt.org/directory"
class LoopiaTestAuthenticator(LoopiaAuthenticator):
    """Authenticator wired to a supplied (mock) Loopia client for tests."""

    def __init__(self, client):
        super().__init__(config=PluginConfig, name="dns-loopia")
        self._test_client = client

    def _get_loopia_client(self):
        # Return the injected client instead of building a real XML-RPC one.
        return self._test_client
def test_perform_cleanup_cycle():
    """One full _perform/_cleanup round-trip against a mocked Loopia client."""
    domain = "*.runfalk.se" # Unused
    validation_domain = "_acme-challenge.runfalk.se"
    validation_key = "thisgoesinthetetxtrecord"
    # (subdomain, zone) split of the validation domain.
    domain_parts = split_domain(validation_domain)
    dns_record = DnsRecord("TXT", ttl=LoopiaAuthenticator.ttl,
                           data=validation_key)
    loopia_mock = MagicMock()
    auth = LoopiaTestAuthenticator(loopia_mock)
    # _perform must create the TXT challenge record under (subdomain, zone).
    auth._perform(domain, validation_domain, validation_key)
    loopia_mock.add_zone_record.assert_called_with(
        dns_record,
        domain_parts[0],
        domain_parts[1]
    )
    record_id = 20200305
    # Cleanup first lists the existing records, then removes by record id.
    loopia_mock.get_zone_records.return_value = [
        DnsRecord("TXT", id=record_id, ttl=auth.ttl, data=validation_key),
    ]
    auth._cleanup(domain, validation_domain, validation_key)
    loopia_mock.remove_zone_record.assert_called_with(
        record_id,
        domain_parts[0],
        domain_parts[1],
    )
    # The now-empty challenge subdomain is removed as well.
    loopia_mock.remove_subdomain.assert_called_with(
        domain_parts[0],
        domain_parts[1],
    )
|
142074
|
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.contrib.gis.db.backends.mysql.creation import MySQLCreation
from django.contrib.gis.db.backends.mysql.introspection import MySQLIntrospection
from django.contrib.gis.db.backends.mysql.operations import MySQLOperations
class DatabaseWrapper(MySQLDatabaseWrapper):
    """MySQL backend wrapper wired up with the GIS-aware helper classes."""

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Swap in the spatial variants of the ops/introspection/creation helpers.
        self.ops = MySQLOperations(self)
        self.introspection = MySQLIntrospection(self)
        self.creation = MySQLCreation(self)
|
142089
|
import vcellapi
import sys
def main(argv):
    """Smoke-test the VCell REST API: list biomodels, then start and stop
    the first simulation of the first biomodel owned by the user.

    argv: [prog, host, port, clientID, userid]; the password is prompted for.

    Fixes: ``sleep`` was used without ever being imported (NameError),
    ``raw_input`` is Python-2 only (and echoed the password), and
    ``vcellApiClient`` at the end was an undefined name (should be ``api``).
    """
    if (len(argv)!=5):
        print("usage: testapi host port clientID userid")
        sys.exit(1)
    host = argv[1]  # "vcellapi.cam.uchc.edu"
    port = argv[2]  # 8080
    clientID = argv[3]  # "85133f8d-26f7-4247-8356-d175399fc2e6"
    userid = argv[4]  # schaff
    import getpass
    from time import sleep
    password = getpass.getpass("password ")
    api = vcellapi.VCellApi(host, port, clientID)
    api.authenticate(userid, password)
    biomodels = api.getBiomodels(vcellapi.BiomodelsQuerySpec())
    for model in biomodels:
        print("model name = "+model.name)
        for app in model.applications:
            print(" app name = "+app.name)
        for sim in model.simulations:
            print(" sim name = "+sim.name)
    bFirstSimulationToStartStop = True
    if (len(biomodels)>0):
        # test /biomodel/[bmkey]
        print(" ... re-fetching first biomodel ...")
        biomodelsQuerySpec = vcellapi.BiomodelsQuerySpec()
        biomodelsQuerySpec.owner = userid
        firstBiomodel = api.getBiomodels(biomodelsQuerySpec)[0]
        print("biomodel : "+firstBiomodel.bmKey+" : "+firstBiomodel.name)
        for app in firstBiomodel.applications:
            print(" app : "+app.name)
        for sim in firstBiomodel.simulations:
            print(" sim (returned with BioModel) : "+sim.key+" : "+sim.name)
            # test /biomodel/[bmkey]/simulation/simkey
            simulation = api.getSimulation(firstBiomodel.bmKey, sim.key)
            print(" sim (retrieved separately) : "+simulation.key+" : "+simulation.name)
            if (bFirstSimulationToStartStop):
                # Only exercise start/stop on the very first simulation found.
                bFirstSimulationToStartStop = False
                # test /biomodel/[bmkey]/simulation/[simkey]/startSimulation
                simTasksQuerySpec = vcellapi.SimulationTasksQuerySpec()
                simTasksQuerySpec.simId = sim.key
                beforeStartSimTasks = api.getSimulationTasks(simTasksQuerySpec)
                print("SENDING START SIMULATION")
                justAfterStartSimTasks = api.startSimulation(firstBiomodel.bmKey, sim.key)
                print("SENT START SIMULATION")
                print("WAITING 5 seconds")
                sleep(5)
                longAfterStartSimTasks = api.getSimulationTasks(simTasksQuerySpec)
                print("SENDING STOP SIMULATION")
                justAfterStopSimTasks = api.stopSimulation(firstBiomodel.bmKey, sim.key)
                print("SENT STOP SIMULATION")
                print("WAITING 5 seconds")
                sleep(5)
                longAfterStopSimTasks = api.getSimulationTasks(simTasksQuerySpec)
                print("\n\nsimulation status:")
                for simTask in beforeStartSimTasks:
                    print(" BEFORE START Job = "+simTask.jobIndex+", Task = "+simTask.taskId+", Status = "+simTask.status)
                for simTask in justAfterStartSimTasks:
                    print(" JUST AFTER START Job = "+simTask.jobIndex+", Task = "+simTask.taskId+", Status = "+simTask.status)
                for simTask in longAfterStartSimTasks:
                    print(" LONG AFTER START Job = "+simTask.jobIndex+", Task = "+simTask.taskId+", Status = "+simTask.status)
                for simTask in justAfterStopSimTasks:
                    print(" JUST AFTER STOP Job = "+simTask.jobIndex+", Task = "+simTask.taskId+", Status = "+simTask.status)
                for simTask in longAfterStopSimTasks:
                    print(" LONG AFTER STOP Job = "+simTask.jobIndex+", Task = "+simTask.taskId+", Status = "+simTask.status)
                print("\n\n")
        print("\n")
    # test /simtask
    simTasks = api.getSimulationTasks(vcellapi.SimulationTasksQuerySpec())
    for simTask in simTasks:
        print("simTask : "+simTask.simKey+" : "+simTask.simName)
    print("done")


if __name__ == "__main__":
    main(sys.argv)
|
142147
|
import sys
import random
import FormulaSolidityPort
import FormulaNativePython
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 80 # 78 digits for a maximum of 2^256-1, and 2 more digits for after the decimal point
def singleHopTestFixed(balance1, weight1, balance2, weight2, amount):
    """Cross-connector conversion via the fixed-point port; -1 on any failure.

    Fix: a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``Exception``.
    """
    try:
        return FormulaSolidityPort.calculateCrossConnectorReturn(balance1, weight1, balance2, weight2, amount)
    except Exception:
        return -1
def doubleHopTestFixed(supply, balance1, weight1, balance2, weight2, amount):
    """Purchase-then-sale round trip via the fixed-point port; -1 on any failure.

    Fix: a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``Exception``.
    """
    try:
        amount = FormulaSolidityPort.calculatePurchaseReturn(supply, balance1, weight1, amount)
        return FormulaSolidityPort.calculateSaleReturn(supply + amount, balance2, weight2, amount)
    except Exception:
        return -1
def doubleHopTestFloat(supply, balance1, weight1, balance2, weight2, amount):
    """Purchase-then-sale round trip via the float reference; -1 on any failure.

    Fix: a bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``Exception``.
    """
    try:
        amount = FormulaNativePython.calculatePurchaseReturn(supply, balance1, weight1, amount)
        return FormulaNativePython.calculateSaleReturn(supply + amount, balance2, weight2, amount)
    except Exception:
        return -1
# Number of random test cases: first CLI argument, else prompt interactively.
size = int(sys.argv[1]) if len(sys.argv) > 1 else 0
if size == 0:
    size = int(input('How many test-cases would you like to execute? '))
# Track the worst (smallest) fixed/float payout ratio seen across all cases.
minRatio = Decimal('+inf')
for n in range(size):
    # Random but valid pool parameters and a conversion amount.
    supply = random.randrange(2, 10 ** 26)
    balance1 = random.randrange(1, 10 ** 23)
    weight1 = random.randrange(1, 1000000)
    balance2 = random.randrange(1, 10 ** 23)
    weight2 = random.randrange(1, 1000000)
    amount = random.randrange(1, supply)
    singleHopFixed = singleHopTestFixed(balance1, weight1, balance2, weight2, amount)
    doubleHopFixed = doubleHopTestFixed(supply, balance1, weight1, balance2, weight2, amount)
    doubleHopFloat = doubleHopTestFloat(supply, balance1, weight1, balance2, weight2, amount)
    # Invariant: fixed-point results never exceed the float reference, and
    # the double hop never beats the single hop.
    if 0 <= doubleHopFixed <= singleHopFixed <= doubleHopFloat:
        ratio = Decimal(singleHopFixed) / Decimal(doubleHopFloat)
        minRatio = min(minRatio, ratio)
        print('Test #{}: ratio = {:.24f}, minRatio = {:.24f}'.format(n, ratio, minRatio))
    elif singleHopFixed < 0 and doubleHopFixed < 0:
        # Both fixed-point paths reverted (-1): treated as a benign case.
        ratio = Decimal(0)
        print('Test #{}: ratio = {:.24f}, minRatio = {:.24f}'.format(n, ratio, minRatio))
    else:
        # Ordering violated: dump the inputs for reproduction and stop.
        print('Implementation Error:')
        print('supply = {}'.format(supply))
        print('balance1 = {}'.format(balance1))
        print('weight1 = {}'.format(weight1))
        print('balance2 = {}'.format(balance2))
        print('weight2 = {}'.format(weight2))
        print('amount = {}'.format(amount))
        print('singleHopFixed = {}'.format(singleHopFixed))
        print('doubleHopFixed = {}'.format(doubleHopFixed))
        print('doubleHopFloat = {}'.format(doubleHopFloat))
        break
|
142156
|
import htfe as ht
import pickle as pic
import math
import random
############################## HTFE Init ##############################
# Set up the GPU compute system and load the HTFE OpenCL kernels.
cs = ht.ComputeSystem()
cs.create(ht._gpu)
prog = ht.ComputeProgram()
if not prog.loadFromFile("htfe.cl", cs):
    print("Could not load program!")
h = ht.HTFE()
# MIDI note range covered by the input layer.
minNote = 21 # Inclusive
maxNote = 109 # Exclusive
numNotes = maxNote - minNote
# The input layer is the smallest square grid that fits all notes.
numNotesRoot = int(math.ceil(math.sqrt(numNotes)))
inputSize = numNotesRoot * numNotesRoot
inputWidth = numNotesRoot
inputHeight = numNotesRoot
minInitWeight = -0.1
maxInitWeight = 0.1
# Four hierarchy layers, shrinking in spatial size toward the top.
layerDescs = []
l1 = ht.LayerDesc()
l1._width = 64
l1._height = 64
l2 = ht.LayerDesc()
l2._width = 44
l2._height = 44
l3 = ht.LayerDesc()
l3._width = 32
l3._height = 32
l4 = ht.LayerDesc()
l4._width = 22
l4._height = 22
layerDescs.append(l1)
layerDescs.append(l2)
layerDescs.append(l3)
layerDescs.append(l4)
h.createRandom(cs, prog, inputWidth, inputHeight, layerDescs, minInitWeight, maxInitWeight)
############################## Training ##############################
# NOTE(review): the pickle file handle `f` is never closed.
f = open("Piano-midi.de.pickle", "rb")
dataset = pic.load(f)
numSequencesUse = min(4, len(dataset["train"]))
trainIterations = 1
for i in range(0, trainIterations):
    for seq in range(0, numSequencesUse):
        for j in range(0, len(dataset["train"][seq])):
            # One-hot encode the active notes of this timestep.
            for k in range(0, numNotes):
                h.setInput(k, 0.0)
            for k in dataset["train"][seq][j]:
                h.setInput(int(k) - minNote, 1.0)
            h.activate(cs)
            h.learn(cs)
            h.stepEnd()
        # Reset temporal state between sequences.
        h.clearMemory(cs)
        print("Training sequence " + str(seq + 1) + " out of " + str(numSequencesUse) + " completed.")
    print("Training iteration " + str(i + 1) + " out of " + str(trainIterations) + " completed.")
############################## Testing Predictions ##############################
# Re-run the training sequences and score next-step note predictions.
errorCount = 0.0
totalCount = 0.0
for seq in range(0, numSequencesUse):
    prediction = []
    for j in range(0, len(dataset["train"][seq])):
        currentInput = []
        for k in range(0, numNotes):
            h.setInput(k, 0.0)
            currentInput.append(0.0)
        for k in dataset["train"][seq][j]:
            h.setInput(int(k) - minNote, 1.0)
            currentInput[int(k) - minNote] = 1.0
        if j > 0:
            # Compare prediction to input
            for k in range(0, numNotes):
                if (prediction[k] > 0.5) != (currentInput[k] > 0.5):
                    errorCount += 1
                totalCount += 1
        h.activate(cs)
        h.stepEnd()
        # Capture this step's prediction for comparison at the next step.
        prediction = []
        for k in range(0, numNotes):
            prediction.append(h.getPrediction(k))
    h.clearMemory(cs)
    print("Test sequence " + str(seq + 1) + " out of " + str(numSequencesUse) + " tested.")
print("Error percent: " + str(errorCount / totalCount * 100) + "%")
|
142169
|
import unittest
from pathlib import Path
from cif.cif_file_io import CifContainer
from tests.test_utils import current_file_path
class CifFileCRCTestCase(unittest.TestCase):
    """Checksum of the embedded hkl data in a small example CIF."""

    def setUp(self) -> None:
        # Ensure relative paths resolve from the expected directory.
        current_file_path()
        self.cif = CifContainer(Path('tests/examples/1979688.cif'))

    def test_calc_crc(self):
        self.assertEqual(20714, self.cif.calc_checksum(self.cif['_shelx_hkl_file']))
class CifFileCRClargerTestCase(unittest.TestCase):
    """Same hkl checksum test against a larger real-world CIF."""

    def setUp(self) -> None:
        current_file_path()
        self.cif = CifContainer(Path('test-data/DK_Zucker2_0m.cif'))

    def test_calc_crc(self):
        self.assertEqual(26780, self.cif.calc_checksum(self.cif['_shelx_hkl_file']))
class CifFileTestCase(unittest.TestCase):
    """Checksums, key access and crystallographic metadata of an example CIF."""

    def setUp(self) -> None:
        current_file_path()
        self.cif = CifContainer(Path('tests/examples/1979688.cif'))

    def test_calc_crc(self):
        self.assertEqual(3583, self.cif.calc_checksum('hello world'))

    def test_res_crc(self):
        self.assertEqual(17612, self.cif.res_checksum_calcd)

    def test_hkl_crc(self):
        self.assertEqual(20714, self.cif.hkl_checksum_calcd)

    def test_res_crc_without_res(self):
        # A CIF without embedded .res data reports checksum 0.
        self.assertEqual(0, CifContainer(Path('test-data/1000006.cif')).res_checksum_calcd)

    def test_get_unknown_value_from_key(self):
        # Missing keys yield an empty string rather than raising.
        self.assertEqual('', self.cif['_chemical_melting_point'])

    def test_get_known_value_from_key(self):
        self.assertEqual('702.70', self.cif['_chemical_formula_weight'])

    def test_get_spgr(self):
        self.assertEqual('P 21 21 2', self.cif.space_group)

    def test_symmops(self):
        self.assertEqual(['x, y, z', '-x, -y, z', '-x+1/2, y+1/2, -z', 'x+1/2, -y+1/2, -z'], self.cif.symmops)

    def test_symmops_from_spgr(self):
        self.assertEqual(['x,y,z', '-x,-y,z', 'x+1/2,-y+1/2,-z', '-x+1/2,y+1/2,-z'], self.cif.symmops_from_spgr)

    def test_centrosymm(self):
        self.assertEqual(False, self.cif.is_centrosymm)
        c = CifContainer(Path('test-data/DK_ML7-66-final.cif'))
        self.assertEqual(True, c.is_centrosymm)

    def test_ishydrogen(self):
        # Both capitalisations of the same atom name give the same answer.
        self.assertEqual(True, self.cif.ishydrogen('H18a'))
        self.assertEqual(True, self.cif.ishydrogen('H18A'))
        self.assertEqual(False, self.cif.ishydrogen('C2'))
        self.assertEqual(False, self.cif.ishydrogen('c2'))

    def test_cell(self):
        # Compare rounded to 8 decimals to dodge float representation noise.
        expected = [round(x, 8) for x in (19.678, 37.02290000000001, 4.772, 90.0, 90.0, 90.0, 3476.576780226401)]
        actual = [round(y, 8) for y in self.cif.cell]
        self.assertEqual(expected, actual)

    def test_natoms(self):
        self.assertEqual(94, self.cif.natoms())
        self.assertEqual(52, self.cif.natoms(without_h=True))

    def test_checksum_tests(self):
        self.assertEqual(True, self.cif.test_hkl_checksum())
        self.assertEqual(True, self.cif.test_res_checksum())

    def test_checksum_test_without_checksum(self):
        # Files without stored checksums are treated as passing.
        self.assertEqual(True, CifContainer('test-data/1000006.cif').test_res_checksum())
        self.assertEqual(True, CifContainer('test-data/1000006.cif').test_hkl_checksum())


if __name__ == '__main__':
    unittest.main()
142210
|
import math
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from memory_replay import Transition
# Prefer CUDA tensor types when a GPU is available; ``Tensor`` aliases the
# default float type so the rest of the module is device-agnostic.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
class DQN(nn.Module):
    """
    Deep neural network with represents an agent.

    Three conv+batch-norm stages followed by a linear head that maps the
    flattened features (assumed 200 wide) to one value per action.
    """

    def __init__(self, num_actions):
        super(DQN, self).__init__()
        # Channel progression 1 -> 5 -> 10 -> 10.
        self.conv1 = nn.Conv2d(1, 5, kernel_size=2)
        self.bn1 = nn.BatchNorm2d(5)
        self.conv2 = nn.Conv2d(5, 10, kernel_size=3)
        self.bn2 = nn.BatchNorm2d(10)
        self.conv3 = nn.Conv2d(10, 10, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(10)
        self.head = nn.Linear(200, num_actions)

    def forward(self, x):
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3))
        for conv, bn in stages:
            x = F.leaky_relu(bn(conv(x)))
        flattened = x.view(x.size(0), -1)
        return self.head(flattened)
# class DQN(nn.Module):
# """
# Deep neural network with represents an agent.
# """
# def __init__(self, num_actions):
# super(DQN, self).__init__()
# self.conv1 = nn.Conv2d(1, 10, kernel_size=2)
# self.max_pool = nn.MaxPool2d((2,2))
# self.bn1 = nn.BatchNorm2d(10)
# self.conv2 = nn.Conv2d(10, 20, kernel_size=3)
# self.bn2 = nn.BatchNorm2d(20)
# self.linear = nn.Linear(80, 20)
# # self.bn3 = nn.BatchNorm1d(50)
# self.head = nn.Linear(20, num_actions)
# def forward(self, x):
# x = F.leaky_relu(self.max_pool(self.bn1(self.conv1(x))))
# x = F.leaky_relu(self.bn2(self.conv2(x)))
# x = F.leaky_relu(self.linear(x.view(x.size(0), -1)))
# return self.head(x)
def select_action(state, model, num_actions,
                  EPS_START, EPS_END, EPS_DECAY, steps_done):
    """Epsilon-greedy action selection.

    With probability eps (annealed from EPS_START toward EPS_END over
    EPS_DECAY steps) a uniformly random action index is returned; otherwise
    the greedy action according to *model*. Returns a 1x1 LongTensor.
    """
    decay = math.exp(-1. * steps_done / EPS_DECAY)
    eps_threshold = EPS_END + (EPS_START - EPS_END) * decay
    if random.random() <= eps_threshold:
        # Explore: uniform random action.
        return LongTensor([[random.randrange(num_actions)]])
    # Exploit: argmax over the model's action values.
    scores = model(Variable(state, volatile=True).type(FloatTensor))
    return scores.data.max(1)[1].view(1, 1)
def optimize_model(model, optimizer, memory, BATCH_SIZE, GAMMA, BETA):
    """One optimisation step over a replay-memory minibatch.

    Uses a log-sum-exp (soft) backup scaled by BETA for the next-state
    value instead of a hard max.
    NOTE(review): relies on the pre-0.4 PyTorch ``Tensor.volatile``
    protocol; on modern PyTorch the ``.volatile = False`` assignment
    below raises — confirm the targeted PyTorch version.
    """
    global last_sync
    # Need at least a full batch before learning can start.
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
    # detailed explanation).
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    non_final_mask = ByteTensor(tuple(map(lambda s: s is not None,
                                          batch.next_state)))
    # We don't want to backprop through the expected action values and volatile
    # will save us on temporarily changing the model parameters'
    # requires_grad to False!
    non_final_next_states = Variable(torch.cat([s for s in batch.next_state
                                                if s is not None]),
                                     volatile=True)
    state_batch = Variable(torch.cat(batch.state))
    action_batch = Variable(torch.cat(batch.action))
    reward_batch = Variable(torch.cat(batch.reward))
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken
    state_action_values = model(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states.
    # Soft (log-sum-exp) backup over next-state action values, temperature 1/BETA.
    next_state_values = Variable(torch.zeros(BATCH_SIZE).type(Tensor))
    next_state_values[non_final_mask] = torch.log( torch.exp(
        BETA * model(non_final_next_states)).sum(1)) / BETA
    # Now, we don't want to mess up the loss with a volatile flag, so let's
    # clear it. After this, we'll just end up with a Variable that has
    # requires_grad=False
    next_state_values.volatile = False
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute the MSE loss (the comment used to say "Huber", but F.mse_loss
    # is what is actually used here).
    loss = F.mse_loss(state_action_values, expected_state_action_values)
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clip gradients element-wise to [-1, 1] for stability.
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
|
142242
|
import pylab as plt
import matplotlib
import numpy as np
import pylab as plt
import matplotlib
from itertools import product
import numpy as np
import pandas as pd
import os
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib.ticker import ScalarFormatter, FuncFormatter
matplotlib.style.use('bmh')
# Line-style/marker pairs and a colour cycle shared by all plots below.
markers = [("-", "o"), ("-", "p"), ("-", "D"), ("-", "^"), ("-", "s"),
           ("-", "8"), ("-", "o"), ("-", "o"), ("-", "o"), ("-", "o"),
           ("-", "o"), ("-", "o")]
colors = ['#741111', "#000000", '#3a49ba','#7634c9',
          "#4C9950", "#CC29A3", '#ba3a3a', "#0f7265",
          "#7A7841", "#00C5CD", "#6e26d9"]
bright_colors = ["#00C5CD"]
def myticks(x, pos):
    """Matplotlib tick formatter rendering x in LaTeX scientific notation."""
    if x == 0:
        return "$0$"
    exponent = int(np.log10(x))
    mantissa = x / 10 ** exponent
    return r"${:0.1f} \times 10^{{ {:2d} }}$".format(mantissa, exponent)
def myticks_new(x, pos, exponent=1e5):
    """Tick formatter showing only the mantissa of x.

    Fix: the original formatted a float with a *string* format spec
    ("{:0s}"), which raised ValueError for every non-zero x; it also
    immediately shadowed the ``exponent`` parameter. The mantissa is now
    rendered with a general-purpose numeric format.
    NOTE(review): the original divided the mantissa by the integer
    exponent — that looked like part of the same bug; confirm intent.
    """
    if x == 0:
        return "$0$"
    exponent = int(np.log10(x))
    coeff = x / 10 ** exponent
    return r"${:g}$".format(coeff)
class FixedOrderFormatter(ScalarFormatter):
    """Formats axis ticks using scientific notation with a constant order of
    magnitude"""
    def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
        # Remember the fixed exponent before initialising the base class.
        self._order_of_mag = order_of_mag
        ScalarFormatter.__init__(self, useOffset=useOffset,
                                 useMathText=useMathText)
    def _set_orderOfMagnitude(self, range):
        """Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
        self.orderOfMagnitude = self._order_of_mag
class PrettyPlot:
    def __init__(self, title=None, ylabel=None, xlabel=None,
                 fontsize=14, line_width=2.5, markersize=12,
                 ratio=1.0,axFontSize=18,
                 figsize=(13, 10), legend_type="line",
                 yscale="log", subplots=(1,1),
                 shareRowLabel=True, axTickSize=14):
        """Multi-curve plot helper with inline ("on-line") legends.

        ``ratio`` uniformly scales all font/marker/line sizes; ``subplots``
        is (nrows, ncols). NOTE(review): the ylabel/xlabel parameters are
        not used in this constructor.
        """
        # All sizes are scaled by ``ratio`` so whole figures shrink uniformly.
        self.axTickSize = int(axTickSize * ratio)
        self.fontsize = int(fontsize * ratio)
        self.shareRowLabel = shareRowLabel
        self.lim_set = False
        self.ylim = None
        self.legend_type = legend_type
        self.yscale = yscale
        self.line_width = int(line_width * ratio)
        self.markersize = int(markersize * ratio)
        self.axFontSize = int(axFontSize * ratio)
        self.ratio = ratio
        if self.yscale=="log":
            plt.yscale("log")
            #ax.set_yscale('logit')
        # Per-call curve queues, consumed and cleared by plot().
        self.labels = []
        self.y_list = []
        self.x_list = []
        self.converged = []
        fig = plt.figure(figsize=figsize)
        if title is not None:
            fig.suptitle(title, fontsize=self.axFontSize)
        self.fig = fig
        subplots = list(subplots)
        self.nrows = subplots[0]
        self.ncols = subplots[1]
        # pIndex tracks the next subplot slot (1-based, as matplotlib expects).
        self.pIndex = 1
        self.axList = []
def subplot():
pass
def add_yxList(self, y_vals, x_vals, label, converged = False):
if isinstance(y_vals, list):
y_vals = np.array(y_vals)
if isinstance(x_vals, list):
x_vals = np.array(x_vals)
self.y_list += [y_vals]
self.x_list += [x_vals]
self.labels += [label]
self.converged += [converged]
    def show(self):
        """Display the current figure interactively."""
        plt.show()
    def save(self, path, iformat="png"):
        """Write the figure to ``path + "." + iformat``.

        ``create_dirs`` presumably creates missing parent directories —
        defined elsewhere in this module.
        """
        create_dirs(path)
        fname = path + ".%s" % iformat
        self.fig.savefig(fname, bbox_inches='tight')
        print(("Figure saved in %s" % (fname)))
def plot_DataFrame(self, results):
n_points, n_labels = results.shape
x_vals = np.arange(n_points)
labels = results.columns
y_array = np.array(results)
y_list = []
x_list = []
for j in range(n_labels):
x_list += [x_vals]
y_list += [y_array[:, j]]
self.plot(y_list, x_list, labels)
def set_lim(self, ylim, xlim):
self.lim_set = True
self.ylim = ylim
self.ax.set_ylim(ylim)
self.ax.set_xlim(xlim)
def set_tickSize(self, labelsize=8):
[tick.label.set_fontsize(labelsize) for tick in self.ax.yaxis.get_major_ticks()]
[tick.label.set_fontsize(labelsize) for tick in self.ax.xaxis.get_major_ticks()]
    def set_title(self, title):
        """Set the figure-level title (placed slightly above the axes row)."""
        self.fig.suptitle(title, fontsize=self.axFontSize, y=1.08)
    def plot(self, y_list=None, x_list=None, labels=None, ax=None,
             ylabel="", xlabel="", yscale=False):
        """Render the queued (or explicitly passed) curves on the next subplot.

        Consumes self.y_list/x_list/labels/converged when no data is passed,
        draws inline on-curve labels (legend_type == "line") or a standard
        legend, then clears the queues and advances the subplot index.
        Returns (fig, ax).
        """
        fig = self.fig
        if y_list == None and x_list == None:
            # Fall back to the curves queued via add_yxList().
            y_list = self.y_list
            x_list = self.x_list
        if yscale == "log":
            # Makse sure everything is non-negative
            # for yi in y_list:
            #     assert np.all(yi >= 0)
            # Set zeros to eps
            for i in range(len(y_list)):
                y_list[i] = np.maximum(y_list[i], np.finfo(float).eps)
            # Truncate each curve at its first eps value (treated as "converged").
            for i in range(len(y_list)):
                opt_ind = np.where(y_list[i] == np.finfo(float).eps)[0]
                if opt_ind.size > 0:
                    opt_ind = opt_ind[0]
                    y_list[i] = y_list[i][:opt_ind+1]
                    x_list[i] = x_list[i][:opt_ind+1]
        n_labels = len(y_list)
        if ax is None:
            ax = self.fig.add_subplot(self.nrows,
                                      self.ncols, self.pIndex)
            ax.set_facecolor('white')
            ax.set_yscale("log", nonposy='clip')
        if labels is None and self.labels is None:
            labels = list(map(str, np.arange(n_labels)))
        elif labels is None:
            labels = self.labels
        # Collect convergence reference points for label placement.
        ref_points = []
        for i in range(len(self.converged)):
            if self.converged[i] is not None:
                ref_points += [[self.converged[i]["X"],
                                self.converged[i]["Y"]]]
        label_positions, label_indices = get_labelPositions(y_list,
                                                            x_list,
                                                            self.ylim,
                                                            labels=labels,
                                                            ref_points=np.array(ref_points))
        ls_markers = markers
        if not self.lim_set:
            y_min, y_max = get_min_max(y_list)
            x_min, x_max = get_min_max(x_list)
            #y_min = max(y_min, 1e-8)
            ax.set_ylim([y_min, y_max])
            ax.set_xlim([x_min, x_max])
        for i in range(n_labels):
            color = colors[i]
            ls, marker = ls_markers[i]
            y_vals = y_list[i]
            x_vals = x_list[i]
            n_points = len(y_vals)
            label = labels[i]
            # Space markers logarithmically with curve length.
            markerFreq = n_points / (int(np.log(n_points)) + 1)
            ## SCATTER PLOT OPTIMAL
            # ind_opt = np.where(y_vals == np.finfo(float).eps)[0]
            # if ind_opt.size > 0:
            #     x_opt = x_vals[np.where(y_vals == np.finfo(float).eps)[0][0]]
            #     y_opt = np.finfo(float).eps
            if self.converged[i] is not None:
                # Mark the convergence point with a large star.
                ax.scatter(self.converged[i]["X"],
                           self.converged[i]["Y"], s=300, marker="*", color=color, clip_on=False, zorder=100)
            ##
            line, = ax.plot(x_vals, y_vals, markevery=int(markerFreq),
                            markersize=int(self.markersize), color=color,
                            lw=self.line_width, alpha=1.0,
                            label=label, ls=ls, marker=marker)
            if self.legend_type == "line":
                # Inline label: a rotated, boxed text placed on the curve.
                x_point, y_point = label_positions[i]
                angle = get_label_angle(x_vals, y_vals, label_indices[i], ax, color='0.5', size=12)
                box = dict(facecolor="white",
                           edgecolor=color, linestyle=ls,
                           #hatch=marker,
                           linewidth=int(2*self.ratio), boxstyle="round")
                ax.text(x_point , y_point, label, va='center',ha='center',
                        rotation=angle,
                        color=color,
                        bbox=box,
                        fontsize=self.fontsize)
            else:
                plt.legend(loc="best")
        # Only the first column of a shared-label grid gets a y label.
        if self.shareRowLabel and (((self.pIndex-1) % (self.ncols)) == 0):
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        if not self.shareRowLabel:
            ax.set_ylabel(ylabel, fontsize=self.axFontSize)
        ax.set_xlabel(xlabel, fontsize=self.axFontSize)
        ax.tick_params(labelsize=self.axTickSize)
        ax.tick_params(axis='y', labelsize=int(self.axTickSize*1.5))
        # Clear the queues for the next plot() call and advance the subplot slot.
        self.y_list = []
        self.x_list = []
        self.labels = []
        self.converged = []
        self.pIndex += 1
        self.axList += [ax]
        ax.minorticks_off()
        # NOTE(review): y_min/y_max only exist when lim_set is False; calling
        # set_lim() before plot() would raise NameError here — confirm.
        vals = np.logspace(np.log10(y_min),np.log10(y_max), 5)
        ax.set_yticks(vals)
        ax.yaxis.set_major_formatter(FuncFormatter(myticks))
        return fig, ax
def plot_old(self, y_list=None, x_list=None, labels=None, ax=None,
ylabel="", xlabel="", yscale=False):
if y_list == None and x_list == None:
y_list = self.y_list
x_list = self.x_list
n_labels = len(y_list)
if ax is None:
ax = self.fig.add_subplot(self.nrows,
self.ncols, self.pIndex)
ax.set_facecolor('white')
if yscale == "log":
#pFunc = ax.semilogy
pFunc = ax.plot
#plt.yscale('log')
else:
pFunc = ax.plot
ax.set_yscale("log", nonposy='clip')
if labels is None and self.labels is None:
labels = list(map(str, np.arange(n_labels)))
elif labels is None:
labels = self.labels
fig = self.fig
label_positions, label_indices = get_labelPositions(y_list,
x_list,
self.ylim,
scale=yscale)
ls_markers = markers
if not self.lim_set:
y_min, y_max = get_min_max(y_list)
x_min, x_max = get_min_max(x_list)
ax.set_ylim([y_min, y_max])
ax.set_xlim([x_min, x_max])
for i in range(n_labels):
color = colors[i]
ls, marker = ls_markers[i]
y_vals = y_list[i]
x_vals = x_list[i]
n_points = len(y_vals)
label = labels[i]
# if i > 0:
# percentage = get_overlapPercentage(i, y_list)
# if percentage > 0.6:
# ls = "--"
# color = bright_colors[0]
markerFreq = n_points / (int(np.log(n_points)) + 1)
#
#ax.spines['left']._adjust_location()
# if self.yscale == "log":
# line, = ax.semilogy(x_vals, y_vals, markevery=markerFreq,
# markersize=12, color=color, lw=lw, alpha=0.9,
# label=label, ls=ls, marker=marker)
# else:
line, = pFunc(x_vals, y_vals, markevery=markerFreq,
markersize=self.markersize, color=color,
lw=self.line_width, alpha=0.9,
label=label, ls=ls, marker=marker)
if self.legend_type == "line":
x_point, y_point = label_positions[i]
angle = get_label_angle(x_vals, y_vals, label_indices[i], ax, color='0.5', size=12)
box = dict(facecolor="white",
edgecolor=color, linestyle=ls,
#hatch=marker,
linewidth=int(2*self.ratio), boxstyle="round")
ax.text(x_point , y_point, label, va='center',ha='center',
rotation=angle,
color=color,
bbox=box,
fontsize=self.fontsize)
else:
plt.legend(loc="best")
if self.shareRowLabel and (((self.pIndex-1) % (self.ncols)) == 0):
ax.set_ylabel(ylabel, fontsize=self.axFontSize)
if not self.shareRowLabel:
ax.set_ylabel(ylabel, fontsize=self.axFontSize)
fmt= matplotlib.ticker.ScalarFormatter(useOffset=True)
fmt.set_scientific(True)
#ax.yaxis.set_major_formatter(fmt)
ax.set_xlabel(xlabel, fontsize=self.axFontSize)
ax.tick_params(labelsize=self.axTickSize)
ax.tick_params(axis='y', labelsize=int(self.axTickSize*1.5))
self.y_list = []
self.x_list = []
self.labels = []
self.pIndex += 1
self.axList += [ax]
#ax.tick_params(axis='y', which='minor')
#ax.locator_params(axis='y', numticks=5)
# y_minor_ticks = ax.yaxis.get_minor_ticks()
# y_minor_ticks[0].label.set_visible(False)
# y_minor_ticks = ax.yaxis.get_major_ticks()
# y_minor_ticks[0].label.set_visible(False)
#
# y_formatter = ticker.ScalarFormatter(useOffset=True)
# ax.yaxis.set_major_formatter(y_formatter)
#
ax.minorticks_off()
vals = np.logspace(np.log10(y_min),np.log10(y_max), 5)
#vals = np.linspace(y_min,y_max, 5)
ax.set_yticks(vals)
ax.yaxis.set_major_formatter(FuncFormatter(myticks))
#ax.yaxis.set_major_formatter(FixedOrderFormatter(5))
# __vals = np.unique(10**(np.floor(np.log10(np.logspace(np.log10(y_min),
# np.log10(y_max), num=10)))))
# v = __vals[0]
# powers10 = [v]
# while v < __vals[-1]:
# v = v * 10
# powers10 += [v]
# powers10 += [__vals[-1]]
# powers10 = np.unique(powers10)
# if len(powers10) <= 3:
# ax.set_yticks(np.linspace(y_min, y_max, num=5))
# #ax.set_yticks([3.1*10**3], minor=False)
# ax.yaxis.set_major_formatter(ticker.FuncFormatter(myticks))
# else:
# ax.set_yticks(powers10)
# #ax.yaxis.set_major_locator(MaxNLocator(nbins=9))
# #ax.get_yaxis().get_major_formatter().labelOnlyBase = False
# #ax.get_xaxis().get_major_formatter().set_useOffset(False)
# #ax.get_xaxis().get_major_formatter().set_scientific(False)
return fig, ax
def plot_csv(results, fig, ax, line_width=2):
    """Plot each column of *results* (a pandas DataFrame) as one line.

    Parameters
    ----------
    results : pandas.DataFrame
        One curve is drawn per column, labelled with the column name.
    fig, ax : matplotlib figure / axis to draw on.
    line_width : int, optional
        Line width for every curve.  BUGFIX: the original referenced
        ``self.line_width`` although this is a plain function (no self),
        which raised NameError at runtime; the new default-valued
        parameter keeps the call signature backward compatible.

    Returns
    -------
    (fig, ax)
    """
    for rank, column in enumerate(results.columns):
        color = colors[2 * rank]
        ls, marker = markers[rank]
        n_points = results.shape[0]
        # BUGFIX: markevery must be an int; true division yields a float.
        freq = int(n_points / (int(np.log(n_points)) + 1))
        ax.plot(results[column], markevery=freq,
                markersize=8,
                color=color, lw=line_width, label=column, ls=ls,
                marker=marker)
    # Legend above the axis, stretched across its full width.
    ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, prop={'size': 10},
              ncol=2, mode="expand", borderaxespad=0., fancybox=True,
              shadow=True)
    plt.tight_layout(pad=0)
    return fig, ax
#########
# HELPERS
#########
def get_overlapPercentage(index, y_list):
    """Fraction of points of curve ``index`` that, after normalizing each
    curve to unit length over their common prefix, coincide (within 1e-6)
    with at least one earlier curve in ``y_list``."""
    # Compare only over the prefix shared by all curves up to ``index``.
    common = min(y_list[k].size for k in range(index + 1))
    current = y_list[index][:common, np.newaxis]
    earlier = np.zeros((common, index))
    for k in range(index):
        earlier[:, k] = y_list[k][:common]
    # Normalize every column to unit length (epsilon avoids divide-by-zero).
    earlier /= (np.linalg.norm(earlier, axis=0) + 1e-10)
    unit = current / np.linalg.norm(current, axis=0)
    # Distance to the closest earlier curve, per point.
    closest = np.abs(earlier - unit).min(axis=1)
    return np.sum(closest < 1e-6) / float(common)
def create_dirs(fname):
    """Ensure the directory part of *fname* exists.

    No-op for bare file names (empty directory part).  Uses
    ``os.makedirs(..., exist_ok=True)``, which tolerates concurrent
    creators — the original check-then-create had a race that was only
    papered over by silently swallowing every OSError (so real failures
    such as permission errors were hidden; they now propagate).
    """
    dirname = os.path.dirname(fname)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def normalize(xy_points, ref_points, y_min, y_max, x_min, x_max):
    """Map curve points and reference points into the unit square.

    The y column is first converted to log10 (clamped at 1e-15) to match
    the log-scaled axis, then both arrays are shifted and scaled by the
    data range.  NOTE: mutates column 1 of both input arrays in place
    (callers pass copies).
    """
    log10 = np.log(10)
    xy_points[:, 1] = np.log(np.maximum(1e-15, xy_points[:, 1])) / log10
    ref_points[:, 1] = np.log(np.maximum(ref_points[:, 1], 1e-15)) / log10
    lo = np.array([x_min, np.log(y_min) / log10])
    span = np.array([x_max - x_min,
                     np.log(y_max) / log10 - np.log(y_min) / log10])
    return (xy_points - lo) / span, (ref_points - lo) / span
# LABEL POSITIONS
def get_labelPositions(y_list, x_list, ylim=None, labels=None, ref_points=None):
    """Get label positions greedily.

    For every curve, choose the data point whose normalized L1 distance to
    all "reference" points is largest; references are the plot borders,
    evenly spaced edge anchors, every curve's first/last point, any points
    passed in via ``ref_points``, and the labels already placed.

    ``labels`` is accepted for interface compatibility but unused here.

    Returns
    -------
    label_positions : (n_labels, 2) array of data coordinates.
    label_indices : (n_labels,) int array — index of the chosen point
        within each curve.
    """
    if ref_points is None:
        ref_points = []
    n_labels = len(y_list)
    # GET BORDER POINTS
    x_min, x_max = get_min_max(x_list)
    if ylim is not None:
        y_min, y_max = ylim
    else:
        y_min, y_max = get_min_max(y_list)
    xdiff = (x_max - x_min)
    ydiff = (y_max - y_min)
    # Border points.
    # NOTE(review): the first two ``bp1`` assignments are dead code — only
    # the last one (a single point at (8, 0)) survives.  Confirm intent.
    bp1 = np.array(list(product([x_min, x_max, xdiff * 0.5],
                                [y_min, y_max, ydiff * 0.5])))[:-1]
    bp1 = np.array(list(product([x_max],
                                [y_max])))[:-1]
    bp1 = np.array(list(product([8],
                                [0])))
    # Evenly spaced anchor points along the left and right plot edges so
    # labels keep away from the borders.
    addedPoints = []
    for yPoint in np.linspace(y_min, y_max, 6):
        addedPoints += [(x_min, yPoint)]
        addedPoints += [(x_max, yPoint)]
    # Every curve's first and last point also repel labels.
    sPoints = [(xx[0], yy[0]) for xx, yy in zip(x_list, y_list)]
    ePoints = [(xx[-1], yy[-1]) for xx, yy in zip(x_list, y_list)]
    bp2 = np.array(addedPoints + sPoints + ePoints)
    if len(ref_points) == 0:
        border_points = np.vstack([bp1, bp2])
    else:
        border_points = np.vstack([bp1, bp2, ref_points])
    n_border = border_points.shape[0]
    # Initialize placeholders; each chosen label position is appended to
    # the reference set so later labels keep their distance from it.
    ref_points = np.zeros((n_border + n_labels, 2))
    label_positions = np.zeros((n_labels, 2))
    label_indices = np.zeros(n_labels, int)
    ref_points[:n_border] = border_points
    for i in range(n_labels):
        # GET POSITIONS: restrict candidates to points inside ylim.
        if ylim is not None:
            ind = (y_list[i] < y_max + 1e-4) & (y_list[i] > y_min - 1e-4)
            n_points = x_list[i][ind].size
            xy_points = np.zeros((n_points, 2))
            xy_points[:, 0] = x_list[i][ind]
            xy_points[:, 1] = y_list[i][ind]
        else:
            n_points = x_list[i].size
            xy_points = np.zeros((n_points, 2))
            xy_points[:, 0] = x_list[i]
            xy_points[:, 1] = y_list[i]
        # NORMALIZE into the unit square (log10 on y) before measuring.
        # Copies are passed because normalize() mutates its inputs.
        xy_normed, ref_normed = normalize(xy_points.copy(),
                                          ref_points[:n_border + i].copy(),
                                          y_min, y_max, x_min, x_max)
        # Distance of every candidate point to every reference point.
        # NOTE(review): ``pairwise_distances`` is presumably
        # sklearn.metrics.pairwise_distances, imported above this chunk —
        # confirm.
        dist = pairwise_distances(xy_normed, ref_normed,
                                  metric="l1")
        # GET MINIMUM DISTANCES: nearest reference per candidate point...
        min_dist = dist.min(axis=1)
        # ...and keep the candidate farthest from everything placed so far.
        label_index = np.argmax(min_dist)
        label_pos = xy_points[label_index]
        ref_points[n_border + i] = label_pos
        label_positions[i] = label_pos
        label_indices[i] = label_index
    return label_positions, label_indices
def get_min_max(v_list):
    """Return the global (min, max) over a list of arrays."""
    minima = [np.min(vec) for vec in v_list]
    maxima = [np.max(vec) for vec in v_list]
    return min(minima), max(maxima)
def get_label_angle(xdata, ydata, index, ax, color='0.5', size=12, window=3):
    """Average on-screen slope (in degrees) of the curve around ``index``.

    Slopes are measured in display coordinates (via ``ax.transData``) so
    the returned angle matches what the eye sees regardless of the axis
    scaling.  ``color`` and ``size`` are accepted for interface
    compatibility but unused.
    """
    total = xdata.size
    anchor = ax.transData.transform_point((xdata[index], ydata[index]))
    degrees = 0.
    samples = 0.
    # Points after the anchor contribute their forward slope...
    for j in range(index + 1, min(index + window, total)):
        px, py = ax.transData.transform_point((xdata[j], ydata[j]))
        degrees += np.degrees(np.arctan2(py - anchor[1], px - anchor[0]))
        samples += 1.
    # ...points before it have both deltas negated, which folds them onto
    # the same direction as the forward samples.
    for j in range(index - 1, max(index - window, 0), -1):
        px, py = ax.transData.transform_point((xdata[j], ydata[j]))
        degrees += np.degrees(np.arctan2(anchor[1] - py, anchor[0] - px))
        samples += 1.
    return degrees / samples
def box_color(edgecolor, linestyle, marker):
    """Return kwargs for a rounded white label box (``marker`` is accepted
    for interface compatibility but unused)."""
    style = {"facecolor": "white", "boxstyle": "round", "linewidth": 2}
    style["edgecolor"] = edgecolor
    style["linestyle"] = linestyle
    return style
# def get_pairwise_distances(A, B):
# # GET EUCLIDEAN DISTANCES
# n_A = A.shape[0]
# n_B = B.shape[0]
# A_square = np.dot(A ** 2, np.ones((2, n_B)))
# B_square = np.dot(np.ones((n_A, 2)), B.T) ** 2
# dist = A_square + B_square - 2 * np.dot(A, B.T)
# return np.sqrt(dist)
|
142287
|
import numpy as np
import tvm
from tvm.contrib import graph_runtime
import topi
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing.config import ctx_list
def helper(symbol, inputs, dtype,
           np_forward, np_backward=None, need_input=True, need_head_grads=True):
    """Compile ``symbol`` for every available target/context, run it, and
    compare against the NumPy reference ``np_forward``.

    When ``np_backward`` is given, the graph is additionally differentiated
    (via nnvm's "Gradient" pass) and each input gradient is checked against
    the reference.

    Parameters
    ----------
    symbol : nnvm.Symbol
        Output symbol of the graph under test.
    inputs : list of (name, shape, Symbol)
        Input placeholders; random uniform data is generated per input.
    dtype : str
        NumPy/TVM dtype used for all inputs.
    np_forward : callable
        Reference forward, called with the inputs as keyword arguments.
    np_backward : callable or None
        Reference backward, called with ``head_grads`` plus the inputs;
        must return one array per input symbol, in input order.
    need_input : bool
        Whether the backward graph needs the original inputs fed in.
    need_head_grads : bool
        Whether the backward graph needs the "head_grads" placeholder fed.
    """
    ishapes = {}
    input_syms = []
    np_inputs = {}
    for (name, shape, s) in inputs:
        ishapes.update({name: shape})
        np_inputs.update({name: np.random.uniform(size=shape).astype(dtype)})
        input_syms.append(s)
    for target, ctx in ctx_list():
        # Forward pass: build, run, and compare with the NumPy reference.
        graph, lib, _ = nnvm.compiler.build(symbol, target, ishapes)
        m = graph_runtime.create(graph, lib, ctx)
        m.run(**np_inputs)
        y_np = np_forward(**np_inputs)
        out = m.get_output(0, tvm.nd.empty(y_np.shape, dtype))
        np.testing.assert_allclose(out.asnumpy(), y_np, atol=1e-5, rtol=1e-5)
        # backward
        if np_backward:
            # Attach gradient metadata and apply the Gradient pass to
            # obtain d(symbol)/d(input) for every input symbol.
            graph._set_symbol_list_attr("grad_ys", symbol)
            graph._set_symbol_list_attr("grad_xs", input_syms)
            graph._set_symbol_list_attr("grad_ys_out_grad", sym.Variable("head_grads", shape=y_np.shape))
            graph = graph.apply("Gradient")
            ishapes.update({"head_grads": y_np.shape})
            graph, lib, _ = nnvm.compiler.build(graph, target, ishapes)
            m = graph_runtime.create(graph, lib, ctx)
            head_grads = np.random.uniform(size=y_np.shape).astype(dtype)
            # ``y_np`` is reused to hold the list of reference gradients.
            y_np = np_backward(head_grads=head_grads, **np_inputs)
            b_inputs = {}
            if need_input:
                b_inputs.update(np_inputs)
            if need_head_grads:
                b_inputs.update({"head_grads":head_grads})
            m.run(**b_inputs)
            # One runtime output per input gradient, in input order.
            for i in range(len(y_np)):
                out = m.get_output(i, tvm.nd.empty(y_np[i].shape, dtype))
                np.testing.assert_allclose(out.asnumpy(), y_np[i], atol=1e-5, rtol=1e-5)
def verify_transpose(dshape, axes):
    """Check sym.transpose (+1) against np.transpose on every context."""
    var = sym.Variable("x")
    out_sym = sym.transpose(var, axes=axes) if axes else sym.transpose(var)
    out_sym = out_sym + 1
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out_sym, target, {"x": dshape})
        module = graph_runtime.create(graph, lib, ctx)
        # Feed random input and compare with the NumPy reference.
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        module.run(x=data)
        expected = np.transpose(data.asnumpy(), axes=axes) + 1
        result = module.get_output(0, tvm.nd.empty(expected.shape))
        np.testing.assert_allclose(result.asnumpy(), expected,
                                   atol=1e-5, rtol=1e-5)
def verify_reduce(dshape, fnp, fsym, **kwargs):
    """Check an NNVM reduction ``fsym`` against its NumPy twin ``fnp``."""
    var = sym.Variable("x")
    out_sym = fsym(var + 1, **kwargs)
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out_sym, target, {"x": dshape})
        module = graph_runtime.create(graph, lib, ctx)
        # Feed random input and compare with the NumPy reference.
        data = np.random.uniform(size=dshape).astype(dtype)
        expected = fnp(data + 1, **kwargs)
        module.run(x=data)
        result = module.get_output(0, tvm.nd.empty(expected.shape))
        np.testing.assert_allclose(result.asnumpy(), expected,
                                   atol=1e-5, rtol=1e-5)
def test_tranpose():
    """Transpose smoke tests (name typo kept: referenced in __main__)."""
    for shape, axes in (((2, 3, 4), (0, 2, 1)), ((2, 3, 4), None)):
        verify_transpose(shape, axes)
def test_reduce():
    """Reduction smoke tests for max / min / sum."""
    cases = [((2, 3, 4), np.max, sym.max, {"axis": 1, "keepdims": True}),
             ((4, 4, 3), np.min, sym.min, {"keepdims": True}),
             ((4, 4, 3), np.sum, sym.sum, {"axis": (0, 2)})]
    for shape, fnp, fsym, kwargs in cases:
        verify_reduce(shape, fnp, fsym, **kwargs)
def verify_flip(ishape, axis):
    """Check sym.flip (+1) against np.flip on every context."""
    var = sym.Variable("x")
    out_sym = sym.flip(var, axis=axis) + 1
    dtype = "float32"
    x_np = np.random.uniform(size=ishape).astype(dtype)
    expected = np.flip(x_np, axis) + 1
    for target, ctx in ctx_list():
        # Build, run, and compare with the NumPy reference.
        graph, lib, _ = nnvm.compiler.build(out_sym, target, {"x": ishape})
        module = graph_runtime.create(graph, lib, ctx)
        module.run(x=x_np)
        result = module.get_output(0, tvm.nd.empty(expected.shape))
        np.testing.assert_allclose(result.asnumpy(), expected,
                                   atol=1e-5, rtol=1e-5)
def test_flip():
    """Flip smoke tests over positive and negative axes."""
    for axis in (1, 0, 2, -1, -3, -2):
        verify_flip((3, 4, 3), axis)
def verify_reshape(dshape, oshape):
    """Check sym.reshape (+1) against ndarray.reshape on every context."""
    var = sym.Variable("x")
    out_sym = sym.reshape(var, shape=oshape) + 1
    dtype = "float32"
    for target, ctx in ctx_list():
        graph, lib, _ = nnvm.compiler.build(out_sym, target, {"x": dshape})
        module = graph_runtime.create(graph, lib, ctx)
        # Feed random input and compare with the NumPy reference.
        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
        module.run(x=data)
        expected = data.asnumpy().reshape(oshape) + 1
        result = module.get_output(0, tvm.nd.empty(expected.shape))
        np.testing.assert_allclose(result.asnumpy(), expected,
                                   atol=1e-5, rtol=1e-5)
def test_reshape():
    """Reshape smoke tests, including a -1 (inferred) dimension."""
    for dshape, oshape in (((2, 3, 4), (-1, 2, 1)),
                           ((2, 3, 4), (8, 3)),
                           ((4, 7), (2, 7, 2))):
        verify_reshape(dshape, oshape)
def test_clip():
    """Clip forward/backward: gradient is 1 inside [a_min, a_max], else 0."""
    x = sym.Variable("x")
    a_min = 0.2
    a_max = 0.75
    y = sym.clip(x, a_min=a_min, a_max=a_max)

    def forward(x):
        return np.clip(x, a_min=a_min, a_max=a_max)

    def backward(head_grads, x):
        # Gradient flows only where the input was not clipped.
        inside = (np.greater_equal(x, a_min).astype("float")
                  * np.less_equal(x, a_max).astype("float"))
        return [head_grads * inside]

    helper(y, [('x', (3, 4, 5), x)], "float32", forward, backward)
def test_greater():
    """Elementwise greater: forward only; gradient is identically zero."""
    lhs = sym.Variable("l")
    rhs = sym.Variable("r")
    out = sym.greater(lhs, rhs)

    def forward(l, r):
        return np.greater(l, r).astype("float32")

    def backward(head_grads, l, r):
        return [np.zeros_like(l)]

    specs = [('l', (3, 4, 5), lhs), ('r', (3, 4, 5), rhs)]
    helper(out, specs, "float32", forward, backward, need_head_grads=False)
def test_less():
    """Elementwise less: forward only; gradient is identically zero."""
    lhs = sym.Variable("l")
    rhs = sym.Variable("r")
    out = sym.less(lhs, rhs)

    def forward(l, r):
        return np.less(l, r).astype("float32")

    def backward(head_grads, l, r):
        return [np.zeros_like(l)]

    specs = [('l', (3, 4, 5), lhs), ('r', (3, 4, 5), rhs)]
    helper(out, specs, "float32", forward, backward, need_head_grads=False)
def test_reshape_like():
    """reshape_like: x takes y's shape; gradient reshapes back to x."""
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.reshape_like(x, y)

    def forward(x, y):
        return x.reshape(y.shape)

    def backward(head_grads, x, y):
        return [head_grads.reshape(x.shape),
                np.zeros_like(y)]

    helper(z, [('x', (3, 4, 5), x), ('y', (5, 4, 3), y)],
           "float32", forward, backward)
def verify_expand_like(in_shape, out_shape, axis, exclude):
    """expand_like: broadcast x along ``axis`` to y's shape; the gradient
    sums back over the same axes."""
    x = sym.Variable("x")
    y = sym.Variable("y")
    z = sym.expand_like(x, y, axis=axis, exclude=exclude)

    def _resolved_axes():
        # Normalize negative axes, sort, and optionally complement.
        ndim = len(out_shape)
        axes = sorted(a if a >= 0 else a + ndim for a in axis)
        if exclude:
            axes = list(set(range(ndim)) - set(axes))
        return axes

    def forward(x, y):
        # Insert the new axes, then tile each to the target extent.
        for a in _resolved_axes():
            x = np.expand_dims(x, a).astype(x.dtype)
        for a in _resolved_axes():
            x = np.concatenate([x] * out_shape[a], axis=a).astype(x.dtype)
        return x

    def backward(head_grads, x, y):
        return [np.sum(head_grads, axis=tuple(_resolved_axes())),
                np.zeros_like(y)]

    helper(z, [('x', in_shape, x), ('y', out_shape, y)],
           "float32", forward, backward, need_input=False)
def test_expand_like():
    """expand_like smoke tests, including a multi-axis excluded case."""
    cases = [((3,), (3, 2), [1], False),
             ((2,), (2, 3), [1], False),
             ((3, 4), (3, 5, 4), [1], False),
             ((5, 7), (5, 6, 7, 8), [0, 2], True)]
    for in_shape, out_shape, axis, exclude in cases:
        verify_expand_like(in_shape, out_shape, axis, exclude)
def verify_elemwise_sum(num_args):
    """elemwise_sum over ``num_args`` inputs; each grad equals head_grads."""
    variables = [sym.Variable("input" + str(i)) for i in range(num_args)]
    out = sym.elemwise_sum(*variables, num_args=num_args)

    def forward(**inputs):
        return np.sum(np.array(list(inputs.values())), axis=0)

    def backward(head_grads, **inputs):
        return [head_grads for _ in range(num_args)]

    specs = [("input" + str(i), (3, 4, 5), variables[i])
             for i in range(num_args)]
    helper(out, specs, "float32", forward, backward, need_input=False)
def test_elemwise_sum():
    """elemwise_sum smoke tests for 1, 5, and 7 inputs."""
    for count in (1, 5, 7):
        verify_elemwise_sum(count)
def test_block_grad():
    """block_grad: identity forward, zero gradient."""
    x = sym.Variable("x")
    y = sym.block_grad(x)

    def forward(x):
        return x

    def backward(head_grads, x):
        return [np.zeros_like(head_grads)]

    helper(y, [('x', (3, 4, 5), x)], "float32", forward, backward,
           need_head_grads=False)
def test_full():
    """Constant-fill operators — full/full_like, ones/ones_like,
    zeros/zeros_like — checked against np.full on every context."""
    shape = (3, 4, 5)
    value = 7
    dtype = "float32"
    for target, ctx in ctx_list():
        data = sym.Variable("data", dtype=dtype)
        # full_like: fill a tensor shaped like ``data`` with ``value``.
        s = sym.full_like(data=data, fill_value=value, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target, {"data": shape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=np.random.uniform(size=shape).astype(dtype))
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=value, dtype=dtype),
            atol=1e-5, rtol=1e-5)
        # ones_like
        # NOTE(review): ones_like/zeros_like are also handed ``fill_value``
        # although their fill is fixed at 1/0 — presumably ignored by the
        # operator; confirm and drop the argument if so.
        s = sym.ones_like(data=data, fill_value=value, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target, {"data": shape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=np.random.uniform(size=shape).astype(dtype))
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=1, dtype=dtype),
            atol=1e-5, rtol=1e-5)
        # zeros_like
        s = sym.zeros_like(data=data, fill_value=value, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target, {"data": shape})
        m = graph_runtime.create(graph, lib, ctx)
        m.run(data=np.random.uniform(size=shape).astype(dtype))
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=0, dtype=dtype),
            atol=1e-5, rtol=1e-5)
        # full: explicit shape/dtype variant — no input tensor needed.
        s = sym.full(shape=shape, dtype=dtype, fill_value=value, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target)
        m = graph_runtime.create(graph, lib, ctx)
        m.run()
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=value, dtype=dtype),
            atol=1e-5, rtol=1e-5)
        # ones
        s = sym.ones(shape=shape, dtype=dtype, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target)
        m = graph_runtime.create(graph, lib, ctx)
        m.run()
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=1, dtype=dtype),
            atol=1e-5, rtol=1e-5)
        # zeros
        s = sym.zeros(shape=shape, dtype=dtype, name="s")
        graph, lib, _ = nnvm.compiler.build(s, target)
        m = graph_runtime.create(graph, lib, ctx)
        m.run()
        out = m.get_output(0, tvm.nd.empty(shape, dtype=dtype))
        np.testing.assert_allclose(
            out.asnumpy(),
            np.full(shape, fill_value=0, dtype=dtype),
            atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run every test in the original order, then dump compiler cache stats.
    for case in (test_reshape, test_reduce, test_tranpose, test_clip,
                 test_greater, test_less, test_reshape_like,
                 test_expand_like, test_elemwise_sum, test_block_grad,
                 test_full, test_flip):
        case()
    print(nnvm.compiler.engine.dump())
|
142291
|
import requests as rq
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup
import warnings
def get_dom(url):
    """Fetch *url* and return the parsed document as a BeautifulSoup DOM.

    Raises requests' HTTPError for non-2xx responses.
    """
    page = rq.get(url)
    page.raise_for_status()
    dom = BeautifulSoup(page.content, "html.parser")
    return dom
def _abs_link(link, base='https://en.wikipedia.org'):
return base + link
def _table_to_dict(table):
result = {}
for row in table.find_all('tr'):
result[row.th.text] = row.td.get_text().strip()
return result
def _get_main_info(table):
    """Return the key data points of the "main" info table as a dict.

    The main section is the first top-level row of the infobox whose text
    mentions "Location" (raises IndexError when absent, matching the
    original behavior).
    """
    candidates = [
        row
        for row in table.tbody.find_all("tr", recursive=False)
        if "Location" in row.get_text()
    ]
    return _table_to_dict(candidates[0])
def _parse_row(row, names=("allies", "axis", "third party")):
"""parse secondory info row
as dict of info points
"""
cells = row.find_all("td", recursive=False)
if len(cells) == 1:
return {"total": cells[0].get_text(separator=" ").strip()}
return {
name: cell.get_text(separator=" ").strip() for name, cell in zip(names, cells)
}
def _find_row_by_header(table, string):
"""find a header row in the table,
and return NEXT element if finds"""
header = table.tbody.find("tr", text=string)
if header:
return header.next_sibling
def _additional(table):
    """Collect the secondary infobox sections (belligerents, commanders,
    strength, casualties): find each section's header row and parse the
    row beneath it.  Failures are re-raised wrapped with the offending
    keyword for context."""
    sections = (
        "Belligerents",
        "Commanders and leaders",
        "Strength",
        "Casualties and losses",
    )
    parsed = {}
    for section in sections:
        try:
            data = _find_row_by_header(table, section)
            if data:
                parsed[section] = _parse_row(data)
        except Exception as err:
            raise Exception(section, err)
    return parsed
def parse_battle_page(url):
    """Parse one Wikipedia battle page into a flat info dict.

    Returns {} when the page cannot be fetched (a warning is emitted
    instead of raising) or when it carries no "infobox vevent" table —
    some campaign pages don't have one.
    """
    try:
        dom = get_dom(url)
    except Exception as err:
        warnings.warn(str(err))
        return {}
    info_table = dom.find("table", "infobox vevent")
    if info_table is None:
        return {}
    record = _get_main_info(info_table)
    record["url"] = url
    record.update(_additional(info_table))
    return record
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.