code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import time
import numpy as np
import copy
import sys
sys.path.append(".")
import ai.parameters
import ai.actionplanner
import ai.energyplanner
# get/set/update/check/
# random.choice(d.keys())
class BehaviourPlanner:
    """Select the robot's next behaviour from sensor, voice, vision or
    random input, and throttle how quickly timed behaviours are replaced.

    NOTE(review): the layout of ai.parameters.MODELS / TIME_MIN_CONS /
    BEHAVIOURS and of ai.actionplanner.ActionPlanner is assumed from usage
    here — confirm against those modules.
    """
    def __init__(self):
        # Energy bookkeeping helper (not referenced elsewhere in this class).
        self.energy = ai.energyplanner.EnergyPlanner()
        # Name of the behaviour chosen on the previous update.
        self.last_behaviour = ''
        # Number of consecutive updates that picked the same behaviour.
        self.last_behaviour_count = 0
        self.last_behaviour_time_spend = 0
        # Seconds remaining before the current behaviour may be replaced.
        self.last_behaviour_time_left = 0
        # Reference timestamp used by get_time_gap().
        self.last_time = self.get_time()
    def get_time(self):
        """Return the current wall-clock time in seconds."""
        return time.time()
    def get_time_gap(self):
        """Return seconds elapsed since the last recorded timestamp."""
        return self.get_time() - self.last_time
    def get_time_cons(self, behaviour):
        """Return the configured minimum duration (seconds) for *behaviour*,
        or 0 when the behaviour has no configured duration."""
        if behaviour in ai.parameters.TIME_MIN_CONS:
            return ai.parameters.TIME_MIN_CONS[behaviour]
        else:
            return 0
    def update_last_time(self):
        """Record 'now' as the reference point for the next time gap."""
        self.last_time = self.get_time()
    def update_last_time_left(self):
        """Consume the elapsed time from the current behaviour's budget."""
        print (self.last_behaviour_time_left)
        self.last_behaviour_time_left -= self.get_time_gap()
        print (self.last_behaviour_time_left)
        self.update_last_time()
    def update_last_behaviour(self, this_behaviour):
        """Switch to *this_behaviour*: reset its time budget and track how
        many times in a row the same behaviour was chosen."""
        self.last_behaviour_time_left = self.get_time_cons(this_behaviour)
        if self.last_behaviour == this_behaviour:
            self.last_behaviour_count += 1
        else:
            self.last_behaviour_count = 0
            # A different behaviour was chosen: signal the action planner to
            # stop whatever action is currently running.
            ai.actionplanner.ActionPlanner.need_stop = True
        self.check_behaviour_times()
        self.last_behaviour = this_behaviour
    def check_behaviour_times(self):
        """Warn when the same behaviour repeated more than 10 times in a row."""
        if self.last_behaviour_count > 10:
            print ("too many times")
    def update_behaviour(self, input_mode, input_data):
        """Map an input event to a behaviour, honouring the time budget of
        relax/move/play behaviours so they are not replaced too early.

        Returns a (behaviour, processed_data) tuple.
        NOTE(review): the first print assumes processed_data is a string —
        non-string vision data would raise here; confirm with callers.
        """
        behaviour, processed_data = self.get_behaviour_from_mode(input_mode, input_data)
        print('BP(behaviour=' + behaviour + ', data=' + processed_data + ')')
        if self.check_behaviour_in_behaviours(behaviour, ['relax', 'move', 'play']):
            #print ('2.1 in relax, move play')
            if self.last_behaviour_time_left > 0:
                #print ('2.11 time left ' + str(self.last_behaviour_time_left))
                # Budget not exhausted: keep the previous behaviour running.
                self.update_last_time_left()
                behaviour = self.last_behaviour
            else:
                #print ('2.12 new behaviour ')
                self.update_last_behaviour(behaviour)
        else:
            #print ('2.2 other move')
            # Non-timed behaviours always take over immediately.
            self.update_last_behaviour(behaviour)
        print('BP -> behaviour=' + behaviour + ', time_left=' + str(self.last_behaviour_time_left))
        return behaviour, processed_data
    def set_last_behaviour(self, behaviour):
        """Record *behaviour* as the last one chosen, updating the repeat
        counter (without touching the time budget)."""
        if (self.last_behaviour == behaviour):
            self.last_behaviour_count += 1
        else:
            self.last_behaviour_count = 0
        self.last_behaviour = copy.deepcopy(behaviour)
    def get_behaviour_from_mode(self, input_mode, input_data):
        """Dispatch on the input mode (distance sensor / touch / voice /
        vision / none) and return (behaviour, data).

        NOTE(review): MODELS is assumed to be an ordered sequence of the
        mode identifiers commented below — confirm in ai/parameters.py.
        """
        _behaviour = None
        _data = input_data
        if input_mode == ai.parameters.MODELS[0]: # ds
            _behaviour = "run_away"
        elif input_mode == ai.parameters.MODELS[1]: #tc
            # Touch sensor bitmap: indices 0/2/4 map to head/jaw/back.
            if input_data[0] == 1:
                _behaviour = "touch_head"
            elif input_data[2] == 1:
                _behaviour = "touch_jaw"
            elif input_data[4] == 1:
                _behaviour = "touch_back"
        elif input_mode == ai.parameters.MODELS[2]: #voice
            _behaviour = self.process_voice(input_data)
        elif input_mode == ai.parameters.MODELS[3]: #vision
            _behaviour,_data = self.process_vision(input_data)
        else:
            _behaviour = self.process_random_behaviour()
        return _behaviour, _data
    def process_voice(self, input_data):
        '''
        Map a recognised voice command to a behaviour name.

        Supported commands:
        1. kitten
        2. mars
        3. cat
        4. mimi
        5. hello
        6. how are you
        7 be quiet
        8 look at me
        9 sit
        10 run
        11 walk
        12 turn
        13 relax
        14 stop
        15 come here
        Unknown commands fall back to 'lower_sound'.
        '''
        command = input_data
        _behaviour = None
        if command == "MARS" or command == "KITTEN" or command == "CAT": # call
            _behaviour = 'start_listen'
            pass
        elif command == "MIMI" or command == "HELLO" or command == "HOW ARE YOU": # hello
            _behaviour = 'make_sound'
            pass
        elif command == "BE QUIET": # be quiet
            _behaviour = 'lower_sound'
            # be quiet
            pass
        elif command == "LOOK AT ME": # look at me
            _behaviour = 'stare_at'
            # look at me
            pass
        elif command == "SIT": # sit down
            _behaviour = 'sit'
            pass
        elif command == "RUN": # run
            _behaviour = 'run'
            pass
        elif command == "WALK": # walk
            _behaviour = 'walk'
            pass
        elif command == "TURN": # turn around
            _behaviour = 'turn'
            pass
        elif command == "RELAX": # lie down
            _behaviour = 'lie_down'
            pass
        elif command == "STOP": # stop
            _behaviour = 'stop'
            pass
        elif command == "COME HERE": # approach the speaker
            _behaviour = 'walk_towards'
            pass
        else:
            # Unrecognised command: quiet down rather than act.
            _behaviour = "lower_sound"
            pass
        return _behaviour
    def process_vision(self, input_data):
        """Map a single-entry vision detection dict {label: payload} to a
        (behaviour, data) pair."""
        if type(input_data) != dict or len(input_data)!= 1:
            return "error process vision", 0
        command = ''
        # Extract the single detection label (dict has exactly one key).
        for i in input_data:
            command = i
        _behaviour = None
        _data = None
        if command == 'human':
            _behaviour = ai.parameters.BEHAVIOURS['ita_human'][self.get_rand(4,2)]
            _data = input_data[command][0][0] # coords
        elif command == 'qrcode':
            _behaviour = 'qrcode'
            # NOTE(review): command is the key string 'qrcode', so
            # command[1] is the letter 'r' — this likely should be
            # input_data[command][...]; confirm with the caller.
            _data = command[1]
        elif command == 'obj':
            obj_id = input_data[command][0]
            if obj_id == 0: #high and teaser
                _behaviour = 'flap_obj'
            else:
                _behaviour = 'pre_attack'
            _data = input_data[command][1] #coord
        return _behaviour,_data
    def process_random_behaviour(self):
        """Pick a behaviour at random, weighted toward relaxing
        (~85% relax, ~10% move, ~5% play)."""
        _behaviour = None
        _rad = self.get_rand()
        _rad_2 = self.get_rand()
        if _rad > 0.15: # relax
            if _rad_2 > 0.75: # lie down
                _behaviour = 'lie_down'
            elif _rad_2 < 0.7: # sit
                _behaviour = 'sit'
            else: # stand
                _behaviour = 'stand'
        elif _rad < 0.1: # move
            if _rad_2 > 0.3: # walk
                _behaviour = 'walk'
            elif _rad_2 <0.1:
                _behaviour = 'turn'
            else:
                _behaviour = 'run'
        else: # play
            _behaviour = ai.parameters.BEHAVIOURS['play'][self.get_rand(4,3)]
        return _behaviour
    def get_rand(self, num_type = 0, _data=0):
        """Return a random number; the distribution is selected by num_type.

        0: uniform in [0, 1); 1: |N(0,1)|; 2: N(0,1); 3: gamma(1,1);
        4: uniform integer in [0, _data).
        """
        # 0 is for random
        # 1 is for normal
        # 4 is for choice / switch
        if num_type == 0:
            return np.random.random() # 0~1 percentage
        elif num_type == 1:
            return abs(np.random.normal(0,1)) # 68% in 1; 95% in 2
        elif num_type == 2:
            return np.random.normal(0,1) # 68% in 1; 95% in 2
        elif num_type == 3:
            return np.random.gamma(1,1) # 90% 0~2; 10% bigger than 2
        elif num_type == 4:
            return int(np.random.random()*_data) # 0,1,2 .. data-1
        else:
            return 0
    def check_behaviour_in_behaviours(self,_behaviour, _behaviour_group):
        """Return True when _behaviour belongs to the named behaviour group
        (str) or to any of the named groups (list of str)."""
        if type(_behaviour_group) is str:
            if _behaviour in ai.parameters.BEHAVIOURS[_behaviour_group]:
                return True
            else:
                return False
        elif type(_behaviour_group) is list:
            for i in _behaviour_group:
                if _behaviour in ai.parameters.BEHAVIOURS[i]:
                    return True
            return False
        else:
            print ('Not in the group')
            return False
    pass
|
[
"sys.path.append",
"copy.deepcopy",
"time.time",
"numpy.random.gamma",
"numpy.random.random",
"numpy.random.normal"
] |
[((55, 75), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (70, 75), False, 'import sys\n'), ((538, 549), 'time.time', 'time.time', ([], {}), '()\n', (547, 549), False, 'import time\n'), ((2811, 2835), 'copy.deepcopy', 'copy.deepcopy', (['behaviour'], {}), '(behaviour)\n', (2824, 2835), False, 'import copy\n'), ((7335, 7353), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7351, 7353), True, 'import numpy as np\n'), ((7425, 7447), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7441, 7447), True, 'import numpy as np\n'), ((7519, 7541), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7535, 7541), True, 'import numpy as np\n'), ((7612, 7633), 'numpy.random.gamma', 'np.random.gamma', (['(1)', '(1)'], {}), '(1, 1)\n', (7627, 7633), True, 'import numpy as np\n'), ((7721, 7739), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7737, 7739), True, 'import numpy as np\n')]
|
import json
import os
from typing import Dict, Iterator, Optional
import ndjson
from ps2_census import Query
from .queries import fire_group_query_factory
DATA_FILENAME = "fire-groups.ndjson"
QUERY_BATCH_SIZE: int = 10
def update_data_files(
    service_id: str, directory: str, force_update: bool = False,
):
    """Download every fire group from the Census API into the NDJSON file.

    Pages through the API in QUERY_BATCH_SIZE chunks, appending one JSON
    record per line. Existing files are kept unless force_update is True.
    """
    filepath: str = "/".join((directory, DATA_FILENAME))
    print(f"Updating {filepath}")
    if os.path.exists(filepath):
        if force_update is True:
            print(f"Removing previous file at {filepath}")
            os.remove(filepath)
        else:
            print(f"File already exists at {filepath}")
            return
    total_items: int = 0
    with open(filepath, "a") as output:
        previously_returned: Optional[int] = None
        offset: int = 0
        # Keep paging until the API returns an empty batch.
        while previously_returned is None or previously_returned > 0:
            query: Query = (
                fire_group_query_factory()
                .set_service_id(service_id)
                .start(offset)
                .limit(QUERY_BATCH_SIZE)
            )
            result: dict = query.get()
            try:
                returned: int = result["returned"]
            except KeyError:
                # Surface the unexpected payload before re-raising.
                print(result)
                raise
            local: int = 0
            for item in result["fire_group_list"]:
                local += 1
                output.write(f"{json.dumps(item)}\n")
            total_items += local
            offset += QUERY_BATCH_SIZE
            print(f"Got {local} items, total {total_items}")
            previously_returned = returned
    print(f"Saved {total_items} items")
def load_data_files(directory: str) -> Iterator[Dict]:
    """Yield each record stored in the NDJSON data file under *directory*."""
    filepath: str = "/".join((directory, DATA_FILENAME))
    with open(filepath) as handle:
        for record in ndjson.reader(handle):
            yield record
|
[
"os.remove",
"os.path.exists",
"ndjson.reader",
"json.dumps"
] |
[((416, 440), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (430, 440), False, 'import os\n'), ((1737, 1753), 'ndjson.reader', 'ndjson.reader', (['f'], {}), '(f)\n', (1750, 1753), False, 'import ndjson\n'), ((548, 567), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (557, 567), False, 'import os\n'), ((1336, 1352), 'json.dumps', 'json.dumps', (['item'], {}), '(item)\n', (1346, 1352), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
import sys
import os
import pdb
import json
class CreateParams(object):
    """Expand the parameter-sweep rules in rule.json into every concrete
    parameter combination and write them to <strategy>_param.json."""
    def __init__(self, strategy_name):
        # Strategy name is only used to name the output file.
        self._strategy_name = strategy_name
        # Maps rule name -> {param key: list of candidate values}.
        self._params_dict = {}
        self.read_rule()
    def set_params_scope(self, key, start, end, scope, params_dict):
        """Fill params_dict[key] with values from start to end (inclusive)
        stepped by scope; non-integer steps are rounded to 2 decimals to
        limit float drift."""
        params_list = []
        while start <= end:
            if not isinstance(scope, int):
                start = round(start,2)
            params_list.append(start)
            start += scope
        params_dict[key] = params_list
        return params_dict
    def set_params_array(self, key, arrays, params_dict):
        """Use an explicit list of candidate values for key."""
        params_dict[key] = arrays
        return params_dict
    def set_params(self, key, value, params_dict):
        """Use a single fixed value (stored as a one-element list) for key."""
        vlist = []
        vlist.append(value)
        params_dict[key] = vlist
        return params_dict
    def merge_params(self, params_list, key, params):
        """Cartesian-product step: extend each partial combination in
        params_list with every candidate value of key."""
        result = []
        if len(params_list) == 0:
            is_new = 1
        else:
            is_new = 0
        for pm in params:
            if is_new:
                result.append({key:pm})
            else:
                for p in params_list:
                    # Copy so each combination stays independent.
                    pt = p.copy()
                    pt[key] = pm
                    result.append(pt)
        return result
    def param_rule(self, job, params_dict):
        """Apply one rule entry ('scope', 'array' or fixed value) to
        params_dict and return it."""
        jtype = job.get('type')
        jkey = job.get('key')
        if jtype == "scope":
            jstart = job.get('start')
            jend = job.get('end')
            jsection = job.get('section')
            params_dict = self.set_params_scope(jkey, jstart, jend, jsection, params_dict)
        elif jtype == "array":
            jarray = job.get('array')
            params_dict = self.set_params_array(jkey, jarray, params_dict)
        else:
            params_dict = self.set_params(jkey, job.get('value'), params_dict)
        return params_dict
    def read_rule(self):
        """Load rule.json from the working directory and build the
        per-rule candidate-value tables."""
        rule_file = 'rule.json'#str(self._strategy_name) + '/' + 'rule.json'
        with open(rule_file,'rb') as f:
            content = f.read()
        json_ob = json.loads(content)
        for obt in json_ob:
            params_dict = {}
            params = obt['params']
            for ob in params:
                params_dict = self.param_rule(ob, params_dict)
            self._params_dict[obt['name']] = params_dict
    def create_params(self):
        """Expand every rule into its full list of parameter combinations
        and write the result to <strategy>_param.json (replacing any
        previous file)."""
        params_all = {}
        for name, params_dict in self._params_dict.items():
            params_sets = []
            for key in params_dict:
                params_sets = self.merge_params(params_sets, key, params_dict[key])
            params_all[name] = params_sets
        all_file = str(self._strategy_name) + '_' + 'param.json'
        if os.path.exists(str(all_file)):
            os.remove(str(all_file))
        with open(all_file, 'w') as f:
            f.write(json.dumps(params_all))
if __name__ == "__main__":
    # Fixed: removed a leftover pdb.set_trace() debugger breakpoint that
    # halted every command-line run.
    params = CreateParams('Alpha191')
    params.create_params()
|
[
"pdb.set_trace",
"json.dumps",
"json.loads"
] |
[((3078, 3093), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3091, 3093), False, 'import pdb\n'), ((2162, 2181), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (2172, 2181), False, 'import json\n'), ((3012, 3034), 'json.dumps', 'json.dumps', (['params_all'], {}), '(params_all)\n', (3022, 3034), False, 'import json\n')]
|
"""Pytest configuration, fixtures, and plugins."""
# pylint: disable=redefined-outer-name
import shutil
import sys
import pytest
if sys.version_info.major > 2: # TODO remove after droping python 2
from pathlib import Path # pylint: disable=E
else:
from pathlib2 import Path # pylint: disable=E
TEST_ROOT = Path(__file__).parent
def pytest_ignore_collect(path, config):  # pylint: disable=unused-argument
    """Determine if this directory should have its tests collected.

    Functional runs ignore everything here; otherwise collection happens
    only when integration (or integration-only) mode is requested.
    """
    opts = config.option
    if opts.functional:
        return True
    return not (opts.integration or opts.integration_only)
@pytest.fixture
def configs():
    """Directory containing the Runway config fixture files."""
    fixtures_root = TEST_ROOT.parent / "fixtures"
    return fixtures_root / "configs"
@pytest.fixture
def cp_config(configs):
    """Copy a config file."""

    def copy_config(config_name, dest_path):
        """Copy a config file by name to a destination directory.

        The resulting config will be named runway.yml.
        """
        runway_yml = dest_path / "runway.yml"
        # Fixed: the original checked startswith(".yml"), which is almost
        # never true, so names already ending in ".yml" gained a second
        # ".yml" suffix; endswith is the intended check.
        if not config_name.endswith(".yml"):
            config_name += ".yml"
        shutil.copy(str(configs / config_name), str(runway_yml))
        return runway_yml

    return copy_config
|
[
"pathlib2.Path"
] |
[((320, 334), 'pathlib2.Path', 'Path', (['__file__'], {}), '(__file__)\n', (324, 334), False, 'from pathlib2 import Path\n')]
|
# Generated by Django 2.2.6 on 2019-10-14 17:06
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add Keyword.date_created and
    Keyword.source, and update the help text of Source.source."""
    dependencies = [
        ('project_core', '0031_import_organisations'),
    ]
    operations = [
        migrations.AddField(
            model_name='keyword',
            name='date_created',
            # Defaults to the creation timestamp (timezone-aware "now").
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which the keyword was created'),
        ),
        migrations.AddField(
            model_name='keyword',
            name='source',
            # Temporary '' default lets the column be added to existing
            # rows; preserve_default=False drops it afterwards.
            field=models.CharField(default='', help_text='Source from which the keyword originated', max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='source',
            name='source',
            field=models.CharField(help_text='Source from which a UID may originate', max_length=200),
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.DateTimeField"
] |
[((377, 497), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'help_text': '"""Date and time at which the keyword was created"""'}), "(default=django.utils.timezone.now, help_text=\n 'Date and time at which the keyword was created')\n", (397, 497), False, 'from django.db import migrations, models\n'), ((613, 716), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Source from which the keyword originated"""', 'max_length': '(200)'}), "(default='', help_text=\n 'Source from which the keyword originated', max_length=200)\n", (629, 716), False, 'from django.db import migrations, models\n'), ((869, 956), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Source from which a UID may originate"""', 'max_length': '(200)'}), "(help_text='Source from which a UID may originate',\n max_length=200)\n", (885, 956), False, 'from django.db import migrations, models\n')]
|
import logging
import torch.nn as nn
import torch.nn.functional as F
from ..initializer import initialize_from_cfg
from ...extensions import DeformableConvInOne
from ...utils.bn_helper import setup_bn, rollback_bn, FREEZE
logger = logging.getLogger('global')
__all__ = [
'resnext_101_32x4d', 'resnext_101_32x8d', 'resnext_101_64x4d', 'resnext_101_64x8d', 'resnext_152_32x4d',
'resnext_152_32x8d', 'resnext_152_64x4d', 'resnext_152_64x8d'
]
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def init_weights(module, std=0.01):
for m in module.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, std)
class ResNeXtBottleneck(nn.Module):
    """RexNeXt bottleneck type C: 1x1 reduce -> grouped 3x3 -> 1x1 expand,
    with an optional 1x1 projection on the shortcut."""
    expansion = 2

    def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor, deformable=False):
        """
        Arguments:
            in_channels: input channel dimensionality
            out_channels: output channel dimensionality
            stride: conv stride (replaces a pooling layer)
            cardinality: number of convolution groups
            base_width: base number of channels in each group
            widen_factor: factor to reduce the input dimensionality before convolution
            deformable: use a deformable conv for the grouped 3x3 stage
        """
        super(ResNeXtBottleneck, self).__init__()
        conv_cls = DeformableConvInOne if deformable else nn.Conv2d
        ratio = out_channels / (widen_factor * 64.)
        group_width = cardinality * int(base_width * ratio)
        self.conv_reduce = nn.Conv2d(in_channels, group_width, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(group_width)
        self.conv_conv = conv_cls(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(group_width)
        self.conv_expand = nn.Conv2d(group_width, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(out_channels)
        # Identity shortcut unless the channel count changes.
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.add_module(
                'shortcut_conv',
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
            self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))

    def forward(self, x):
        out = F.relu(self.bn_reduce(self.conv_reduce(x)), inplace=True)
        out = F.relu(self.bn(self.conv_conv(out)), inplace=True)
        out = self.bn_expand(self.conv_expand(out))
        residual = self.shortcut(x)
        return F.relu(residual + out, inplace=True)
class ResNeXt(nn.Module):
    """ResNeXt backbone returning a configurable subset of stage features.

    Arguments:
        block: residual block class (e.g. ResNeXtBottleneck)
        layers: number of blocks in each of the four stages
        cardinality: number of convolution groups
        base_width: base number of channels per group
        out_layers: indices (0-4) of the feature maps to return
        out_strides: strides of the returned feature maps
        bn: batch-norm config forwarded to setup_bn; None freezes BN
            (fixes the original mutable-dict default argument)
        frozen_layers: indices (0-4) of layers to freeze
        deformable: deprecated — use layer_deform
        layer_deform: per-stage deformable setting, 5 entries
        initializer: weight initialisation config
    """

    def __init__(self,
                 block,
                 layers,
                 cardinality,
                 base_width,
                 out_layers,
                 out_strides,
                 bn=None,
                 frozen_layers=None,
                 deformable=None,
                 layer_deform=None,
                 initializer=None):
        # Avoid a shared mutable default; None keeps the old behaviour.
        if bn is None:
            bn = {FREEZE: True}
        # setup bn before building model
        setup_bn(bn)
        super(ResNeXt, self).__init__()
        frozen_layers = [] if frozen_layers is None else frozen_layers
        # frozen_layers is guaranteed non-None here (redundant check removed).
        if len(frozen_layers) > 0:
            assert min(frozen_layers) >= 0, frozen_layers
            assert max(frozen_layers) <= 4, frozen_layers
        assert min(out_layers) >= 0, out_layers
        assert max(out_layers) <= 4, out_layers
        self.out_layers = out_layers
        self.out_strides = out_strides
        self.frozen_layers = frozen_layers
        midplanes = [64, 256, 512, 1024, 2048]
        self.out_planes = [midplanes[i] for i in self.out_layers]
        if layer_deform is None:
            # Fixed: the two adjacent string literals used to concatenate
            # into "...deprecatedpls use..."; added the missing separator.
            logger.warning("Argument `deformable` will be deprecated; "
                           "please use layer_deform instead")
            if deformable:
                layer_deform = [None, None, 'last', 'last', 'last']
            else:
                layer_deform = [None] * 5
        assert len(layer_deform) == 5, layer_deform
        # The stem and stage 1 never use deformable convolutions.
        assert not layer_deform[0] and not layer_deform[1]
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # layer1 should not use deformable
        self.layer1 = self._make_layer(block, 64, 256, layers[0], 1, cardinality, base_width)
        self.layer2 = self._make_layer(
            block, 256, 512, layers[1], 2, cardinality, base_width, deformable=layer_deform[2])
        self.layer3 = self._make_layer(
            block, 512, 1024, layers[2], 2, cardinality, base_width, deformable=layer_deform[3])
        self.layer4 = self._make_layer(
            block, 1024, 2048, layers[3], 2, cardinality, base_width, deformable=layer_deform[4])
        if initializer is not None:
            initialize_from_cfg(self, initializer)
        # It's IMPORTANT when you want to freeze part of your backbone.
        # ALWAYS remember freeze layers in __init__ to avoid passing freezed params
        # to optimizer
        self.freeze_layer()
        # rollback bn after model builded
        rollback_bn()

    def forward(self, input):
        """Run the backbone on input['image'] and return the requested
        feature maps plus their strides."""
        x = input['image']
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        c1 = self.maxpool(x)
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        outs = [c1, c2, c3, c4, c5]
        features = [outs[i] for i in self.out_layers]
        return {'features': features, 'strides': self.out_strides}

    def _make_layer(self, block, inplanes, outplanes, blocks, stride=1, cardinality=32, base_width=4, deformable=None):
        """Build one stage of `blocks` residual blocks.

        `deformable` may be None, 'last' (last block only), 'all', or an
        int N (the last N blocks are deformable).
        """
        block_deform = [False] * blocks
        if deformable == 'last':
            block_deform[-1] = True
        elif deformable == 'all':
            block_deform = [True] * blocks
        elif isinstance(deformable, int):
            block_deform = [False] * (blocks - deformable) + [True] * deformable
        layers = []
        layers.append(block(inplanes, outplanes, stride, cardinality, base_width, 4, deformable=block_deform[0]))
        for i in range(1, blocks):
            layers.append(block(outplanes, outplanes, 1, cardinality, base_width, 4, deformable=block_deform[i]))
        return nn.Sequential(*layers)

    def get_outplanes(self):
        """Return the channel counts of the returned feature maps."""
        return self.out_planes

    def get_outstrides(self):
        """Return the strides of the returned feature maps."""
        return self.out_strides

    def train(self, mode=True):
        """
        Sets the module in training mode.
        This has any effect only on modules such as Dropout or BatchNorm.
        Returns:
            Module: self
        """
        self.training = mode
        for module in self.children():
            module.train(mode)
        # Re-freeze after switching modes so frozen stages stay in eval.
        self.freeze_layer()
        return self

    def freeze_layer(self):
        """Put every frozen stage into eval mode and stop its gradients."""
        layers = [
            nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool), self.layer1, self.layer2, self.layer3,
            self.layer4
        ]
        for layer_idx in self.frozen_layers:
            layer = layers[layer_idx]
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False
def resnext_101_32x4d(**kwargs):
    """Construct a ResNeXt-101 (32x4d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], 32, 4, **kwargs)
def resnext_101_32x8d(**kwargs):
    """Construct a ResNeXt-101 (32x8d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], 32, 8, **kwargs)
def resnext_101_64x4d(**kwargs):
    """Construct a ResNeXt-101 (64x4d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], 64, 4, **kwargs)
def resnext_101_64x8d(**kwargs):
    """Construct a ResNeXt-101 (64x8d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], 64, 8, **kwargs)
def resnext_152_32x4d(**kwargs):
    """Construct a ResNeXt-152 (32x4d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], 32, 4, **kwargs)
def resnext_152_32x8d(**kwargs):
    """Construct a ResNeXt-152 (32x8d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], 32, 8, **kwargs)
def resnext_152_64x4d(**kwargs):
    """Construct a ResNeXt-152 (64x4d) model."""
    return ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], 64, 4, **kwargs)
def resnext_152_64x8d(**kwargs):
    """Construct a ResNeXt-152 (64x8d) model (docstring typo '151' fixed)."""
    return ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], 64, 8, **kwargs)
|
[
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.relu",
"torch.nn.MaxPool2d",
"logging.getLogger"
] |
[((234, 261), 'logging.getLogger', 'logging.getLogger', (['"""global"""'], {}), "('global')\n", (251, 261), False, 'import logging\n'), ((550, 639), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (559, 639), True, 'import torch.nn as nn\n'), ((1695, 1768), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'D'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n', (1704, 1768), True, 'import torch.nn as nn\n'), ((1794, 1811), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['D'], {}), '(D)\n', (1808, 1811), True, 'import torch.nn as nn\n'), ((1943, 1960), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['D'], {}), '(D)\n', (1957, 1960), True, 'import torch.nn as nn\n'), ((1988, 2062), 'torch.nn.Conv2d', 'nn.Conv2d', (['D', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n', (1997, 2062), True, 'import torch.nn as nn\n'), ((2088, 2116), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2102, 2116), True, 'import torch.nn as nn\n'), ((2142, 2157), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2155, 2157), True, 'import torch.nn as nn\n'), ((2856, 2899), 'torch.nn.functional.relu', 'F.relu', (['(residual + bottleneck)'], {'inplace': '(True)'}), '(residual + bottleneck, inplace=True)\n', (2862, 2899), True, 'import torch.nn.functional as F\n'), ((4406, 4470), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(False)'}), '(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n', (4415, 4470), True, 'import torch.nn as nn\n'), ((4490, 4508), 'torch.nn.BatchNorm2d', 
'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4504, 4508), True, 'import torch.nn as nn\n'), ((4529, 4550), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4536, 4550), True, 'import torch.nn as nn\n'), ((4574, 4622), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (4586, 4622), True, 'import torch.nn as nn\n'), ((6697, 6719), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6710, 6719), True, 'import torch.nn as nn\n'), ((7267, 7327), 'torch.nn.Sequential', 'nn.Sequential', (['self.conv1', 'self.bn1', 'self.relu', 'self.maxpool'], {}), '(self.conv1, self.bn1, self.relu, self.maxpool)\n', (7280, 7327), True, 'import torch.nn as nn\n'), ((2285, 2379), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, stride=stride, padding=\n 0, bias=False)\n', (2294, 2379), True, 'import torch.nn as nn\n'), ((2428, 2456), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (2442, 2456), True, 'import torch.nn as nn\n')]
|
"""
This module contains mathematically focused functions
"""
import numpy as np
from math import sin, cos
def normalise(vector):
    """Return *vector* scaled to unit Euclidean length."""
    length = np.linalg.norm(vector)
    return vector / length
def rotZ(theta):
    """Return the 3x3 rotation matrix about the z axis by angle theta
    (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
def rotedEpsilon(Epsilon, theta):
    """Return the permittivity tensor of a material rotated by theta
    about the z axis: R(theta) . Epsilon . R(-theta)."""
    forward = rotZ(theta)
    backward = rotZ(-theta)
    return forward.dot(Epsilon.dot(backward))
def rotVTheta(v, theta):
    """Return the rotation matrix for angle theta about the axis v
    (Rodrigues' rotation formula); v is normalised first."""
    v = normalise(v)
    # Skew-symmetric cross-product matrix of v.
    cross_mat = np.array([[0, -v[2], v[1]],
                          [v[2], 0, -v[0]],
                          [-v[1], v[0], 0]])
    c, s = np.cos(theta), np.sin(theta)
    return c * np.identity(3) + s * cross_mat + (1 - c) * np.outer(v, v)
def stackDot(array):
    """
    Calculate the overall transfer matrix of a stack of matrices in the
    increasing z direction, e.g. for a stack starting at z=0:
    Psi(zb) = P_(zb, z_{N-1}) * ... * P(z1,zf) * Psi(zf)
            = P(zb,zf) * Psi(zf)
    """
    product = np.identity(len(array[0]))
    for layer in array:
        product = product.dot(layer)
    return product
def buildDeltaMatrix(eps, Kx):
    """Returns Delta matrix for given relative permitivity and reduced wave number.
    'Kx' : reduced wave number, Kx = kx/k0
    'eps' : relative permitivity tensor (3x3, indexed eps[row, col])
    Returns : Delta 4x4 matrix, generator of infinitesimal translations
    """
    # Rows correspond to the (Ex, Hy, Ey, Hx)-style field ordering of the
    # Berreman 4x4 formalism; each entry mixes tensor components via the
    # eps[2, 2] (zz) element. NOTE(review): the exact field ordering is
    # assumed from the Berreman formalism — confirm against callers.
    return np.array([[
        -Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0,
        1 - Kx**2 / eps[2, 2]
    ], [0, 0, -1, 0], [
        eps[1, 2] * eps[2, 0] / eps[2, 2] - eps[1, 0],
        Kx**2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0,
        Kx * eps[1, 2] / eps[2, 2]
    ], [
        eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2, 2],
        eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0,
        -Kx * eps[0, 2] / eps[2, 2]
    ]])
def rotXY(theta):
    """Return a 2D rotation matrix for angle theta. Used in Jones calculus."""
    c = cos(theta)
    s = sin(theta)
    return np.array([[c, s], [-s, c]])
def polariserJ(theta):
    """Return the Jones matrix of a linear polariser at angle theta."""
    # Horizontal polariser rotated into the requested frame.
    horizontal = np.array([[1, 0], [0, 0]])
    return rotXY(-theta).dot(horizontal.dot(rotXY(theta)))
def vectorFromTheta(theta):
    """
    Return unit vectors in the XY plane for the given angles.
    'theta' : a 1-D sequence of angles (radians)
    Returns a 3xN array, one column per angle.
    """
    xs = np.cos(theta)
    ys = np.sin(theta)
    zs = np.zeros_like(xs)
    return np.array([xs, ys, zs])
###################### DEPRECATED ########################
# For the deprecated Yeh's methods
def construct_epsilon_heli(epsilon_diag,
                           pitch,
                           divisions,
                           thickness,
                           handness="left"):
    """
    Construct the dielectric matrices of all layers of a helical stack.
    Returns an Nx3x3 array where N == divisions.
    The pitch is the distance over which the rotation is 180 degrees,
    i.e. the apparent period in the z direction.
    """
    if pitch == thickness:
        angles = np.linspace(0, -np.pi, divisions, endpoint=False)
    elif pitch > thickness:
        # Partial pitch: only rotate through the covered fraction.
        angles = np.linspace(
            0, -np.pi * thickness / pitch, divisions, endpoint=False)
    else:
        raise NameError('Need thickness to be smaller than pitch')
    return np.array(
        [rotZ(angle).dot(epsilon_diag.dot(rotZ(-angle))) for angle in angles])
def calc_c(e, a, b, u=1): # Check units
    """
    calculate the z components of 4 partial waves in medium
    e: dielectric tensor
    a,b: components of wavevector in direction of x and y direction
    u: relative permeability scaling applied to e
    return a list containing 4 roots for the z components of the partial waves
    """
    # assign names
    x = e * u
    x11, x12, x13 = x[0]
    x21, x22, x23 = x[1]
    x31, x32, x33 = x[2]
    # Quartic coefficients generated from a symbolic expansion of the
    # dispersion relation det(...) = 0; kept verbatim.
    coef4 = x33
    coef3 = a * x13 + a * x31 + b * x23 + b * x32
    coef2 = a**2*x11 + a**2*x33 + a*b*x12 + a*b*x21 + b**2*x22 + b**2*x33 - \
        x11*x33 + x13*x31 - x22*x33 + x23*x32
    coef1 = a**3*x13 + a**3*x31 + a**2*b*x23 + a**2*b*x32 + a*b**2*x13 + \
        a*b**2*x31 + a*x12*x23 - a*x13*x22 + a*x21*x32 - a*x22*x31 + b**3*x23 \
        + b**3*x32 - b*x11*x23 - b*x11*x32 + b*x12*x31 + b*x13*x21
    coef0 = a**4*x11 + a**3*b*x12 + a**3*b*x21 + a**2*b**2*x11 + a**2*b**2*x22 \
        - a**2*x11*x22 - a**2*x11*x33 + a**2*x12*x21 + a**2*x13*x31 + a*b**3*x12 + \
        a*b**3*x21 - a*b*x12*x33 + a*b*x13*x32 - a*b*x21*x33 + a*b*x23*x31 + \
        b**4*x22 - b**2*x11*x22 + b**2*x12*x21 - b**2*x22*x33 + b**2*x23*x32 + \
        x11*x22*x33 - x11*x23*x32 - x12*x21*x33 + x12*x23*x31 + x13*x21*x32 - \
        x13*x22*x31
    # calculate the roots of the quartic equation
    c = np.roots([coef4, coef3, coef2, coef1, coef0])
    # Degenerate case: when leading coefficients vanish np.roots returns
    # fewer roots; duplicate them so callers always receive 4 entries.
    if len(c) == 2:
        return np.append(c, c)
    return c
def calc_k(e, a, b, u=1):
    """
    Wrapper around calc_c: return the 4 partial-wave k vectors as a
    4x3 array [[a, b, c_i], ...].
    """
    roots_z = calc_c(e, a, b, u)
    return np.array([[a, b, roots_z[i]] for i in range(4)])
def calc_p(e, k, u=1): #something is wrong with this function. Not giving
    #correct directions
    """
    Calculate the polarisation vector based on the calculated wavevector and frequency
    equation(9.7-5)
    e: dielectric tensor
    k: 4x3 array of 4 k vectors
    u: relative permeability scaling applied to e
    Returns a 4xM array of polarisation vectors (one per k vector).
    """
    x = e * u
    p = []
    x11, x12, x13 = x[0]
    x21, x22, x23 = x[1]
    x31, x32, x33 = x[2]
    for i in k:
        a = i[0]
        b = i[1]
        c = i[2]
        # Wave-equation matrix whose null space is the polarisation
        # direction for this partial wave.
        coeff_m = np.array([[x11 - b**2 - c**2, x12 + a * b, x13 + a * c],
                            [x21 + a * b, x22 - a**2 - c**2, x23 + b * c],
                            [x31 + a * c, x32 + b * c, x33 - a**2 - b**2]])
        # The function seems to return the normalised null space vector
        p.append(null(coeff_m))
    return np.array(p)
def calc_q(k, p, u=1):
    """
    Calculate the magnetic-field direction vectors from k and p.
    k: a 4x3 array of 4 k vectors
    p: a 4x3 array of 4 p vectors
    Returns a 4x3 array of 4 q vectors (not normalised).
    Uses units such that c/2pi/mu_0 = 1.
    """
    cross_products = np.cross(k, p)
    return cross_products / u
def calc_D(p, q):
    """Assemble the dynamic matrix rows (px, qy, py, qx) from the
    polarisation (p) and magnetic (q) vector stacks."""
    rows = [p[:, 0], q[:, 1], p[:, 1], q[:, 0]]
    return np.array(rows)
def calc_P(k, t):
    """Return the diagonal propagation matrix exp(i*t*kz) for the four
    partial waves over thickness t."""
    phase = np.exp(1j * t * k[:, 2])
    return np.diag(phase)
def construct_D(e, a, b, omega, u=1):
    """
    Construct the dynamic matrix for one layer with a known dielectric
    tensor.

    Fixed: the helpers were called with an extra `omega` argument although
    calc_k(e, a, b, u), calc_p(e, k, u) and calc_q(k, p, u) take no omega
    parameter, so every call raised TypeError. `omega` is kept in the
    signature for backward compatibility but is unused.
    """
    k = calc_k(e, a, b, u)
    p = calc_p(e, k, u)
    q = calc_q(k, p, u)
    return calc_D(p, q)
def null(A, eps=1e-14):
    """
    Return a null-space vector of matrix A via SVD; used to obtain the
    polarisation vector for a known k vector.

    'A'   : matrix whose (near-)null vector is wanted
    'eps' : singular-value threshold; relaxed by x10 recursively until at
            least one singular value qualifies, so the call always
            terminates with the direction of the smallest singular value.
    """
    u, s, vh = np.linalg.svd(A)
    null_mask = s <= eps
    # Fixed idiom: was `null_mask.any() == False` — compare truthiness,
    # not equality with False.
    if not null_mask.any():
        # relax the threshold if no null singularity is identified
        return null(A, eps * 10)
    null_space = np.compress(null_mask, vh, axis=0).flatten()
    return np.transpose(null_space)
def incident_p(k):
    """
    Calculate the four polarisation vectors for an incident wavevector k.

    The incident medium is assumed isotropic, so polarisation splits into
    s and p components.  k is a 3-vector; the return value is a 4x3 array
    ordered [s+, s-, p+, p-].

    For normal incidence the directions are fixed by convention: p along x
    and s along y, with the reflected p vector flipped so (p, s, k) stays a
    right-handed set.
    """
    if k[0] == 0 and k[1] == 0:
        return np.array([[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]])
    # Oblique incidence: derive s and p from the plane of incidence (see
    # the lab book for the defined geometries).  The surface normal is
    # [0, 0, -1] because the incident wave travels in +z.
    s_inc = normalise(np.cross(k, [0, 0, -1]))
    s_ref = s_inc
    p_inc = normalise(np.cross(s_inc, k))
    p_ref = normalise(np.cross(s_ref, [k[0], k[1], -k[2]]))
    return np.array([s_inc, s_ref, p_inc, p_ref])
def calc_coeff(T):
    """
    Derive the transmission and reflection coefficients from the 4x4
    transfer matrix T.  Returns a dict of the four reflection (r??) and
    four transmission (t??) amplitudes in the s/p basis.
    Not currently in use.
    """
    denominator = T[0, 0] * T[2, 2] - T[0, 2] * T[2, 0]
    return {
        "rss": (T[1, 0] * T[2, 2] - T[1, 2] * T[2, 0]) / denominator,
        "rsp": (T[3, 0] * T[2, 2] - T[3, 2] * T[2, 0]) / denominator,
        "rps": (T[0, 0] * T[1, 2] - T[1, 0] * T[0, 2]) / denominator,
        "rpp": (T[0, 0] * T[3, 2] - T[3, 0] * T[0, 2]) / denominator,
        "tss": T[2, 2] / denominator,
        "tsp": -T[2, 0] / denominator,
        "tps": -T[0, 2] / denominator,
        "tpp": T[0, 0] / denominator,
    }
def calc_coupling_matrices(T):
    """
    Calculate the coupling matrices between reflected/transmitted light and
    incident light.

    T is the overall 4x4 transfer matrix of the system.  Returns a dict of
    2x2 coupling matrices; indices are always ordered s,p (planar) or L,R
    (circular).  In the frame where the wave travels along z, p is aligned
    with x and s with y -- refer to the geometry guideline in the lab book.
    """
    # Coupling between transmitted light and incident / reflected light
    t_vs_incident = np.array([[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]])
    t_vs_reflected = np.array([[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]])
    # Relate reflected light to incident light through the transmitted field
    plane_r = np.linalg.solve(t_vs_incident, t_vs_reflected)
    plane_t = np.linalg.inv(t_vs_incident)
    # Change of basis to circular polarisation: T_cp * [L, R] = [S, P]
    to_circular = np.array([[1j, -1j], [1, 1]]) * np.sqrt(1 / 2)
    circular_r = np.linalg.solve(to_circular, plane_r.dot(to_circular))
    circular_t = np.linalg.solve(to_circular, plane_t.dot(to_circular))
    return {
        "Plane_r": plane_r,
        "Plane_t": plane_t,
        "Circular_r": circular_r,
        "Circular_t": circular_t,
    }
if __name__ == '__main__':
    # Smoke test: identity dielectric tensor; print the four quartic roots
    # returned by calc_c for a = b = u = 1.
    e = np.diag([1, 1, 1])
    print(calc_c(e, 1, 1, 1))
|
[
"numpy.roots",
"numpy.linalg.svd",
"numpy.sin",
"numpy.linalg.norm",
"numpy.exp",
"numpy.diag",
"numpy.linalg.solve",
"numpy.transpose",
"numpy.identity",
"numpy.append",
"math.cos",
"numpy.linspace",
"numpy.cross",
"math.sin",
"numpy.linalg.inv",
"numpy.cos",
"numpy.compress",
"numpy.outer",
"numpy.array",
"numpy.sqrt"
] |
[((833, 897), 'numpy.array', 'np.array', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (841, 897), True, 'import numpy as np\n'), ((1666, 2065), 'numpy.array', 'np.array', (['[[-Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0, 1 - Kx ** 2 /\n eps[2, 2]], [0, 0, -1, 0], [eps[1, 2] * eps[2, 0] / eps[2, 2] - eps[1, \n 0], Kx ** 2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0, Kx *\n eps[1, 2] / eps[2, 2]], [eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2, 2],\n eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0, -Kx * eps[0, 2] / eps\n [2, 2]]]'], {}), '([[-Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0, 1 -\n Kx ** 2 / eps[2, 2]], [0, 0, -1, 0], [eps[1, 2] * eps[2, 0] / eps[2, 2] -\n eps[1, 0], Kx ** 2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0, \n Kx * eps[1, 2] / eps[2, 2]], [eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2,\n 2], eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0, -Kx * eps[0, 2] /\n eps[2, 2]]])\n', (1674, 2065), True, 'import numpy as np\n'), ((2425, 2451), 'numpy.array', 'np.array', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (2433, 2451), True, 'import numpy as np\n'), ((2692, 2705), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2698, 2705), True, 'import numpy as np\n'), ((2714, 2727), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2720, 2727), True, 'import numpy as np\n'), ((2768, 2787), 'numpy.array', 'np.array', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (2776, 2787), True, 'import numpy as np\n'), ((5035, 5080), 'numpy.roots', 'np.roots', (['[coef4, coef3, coef2, coef1, coef0]'], {}), '([coef4, coef3, coef2, coef1, coef0])\n', (5043, 5080), True, 'import numpy as np\n'), ((5263, 5329), 'numpy.array', 'np.array', (['[[a, b, c[0]], [a, b, c[1]], [a, b, c[2]], [a, b, c[3]]]'], {}), '([[a, b, c[0]], [a, b, c[1]], [a, b, c[2]], [a, b, c[3]]])\n', (5271, 5329), True, 'import numpy as np\n'), ((6123, 6134), 'numpy.array', 
'np.array', (['p'], {}), '(p)\n', (6131, 6134), True, 'import numpy as np\n'), ((6539, 6585), 'numpy.array', 'np.array', (['[p[:, 0], q[:, 1], p[:, 1], q[:, 0]]'], {}), '([p[:, 0], q[:, 1], p[:, 1], q[:, 0]])\n', (6547, 6585), True, 'import numpy as np\n'), ((7058, 7074), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (7071, 7074), True, 'import numpy as np\n'), ((7305, 7329), 'numpy.transpose', 'np.transpose', (['null_space'], {}), '(null_space)\n', (7317, 7329), True, 'import numpy as np\n'), ((8297, 8323), 'numpy.array', 'np.array', (['[si, sr, pi, pr]'], {}), '([si, sr, pi, pr])\n', (8305, 8323), True, 'import numpy as np\n'), ((9580, 9630), 'numpy.array', 'np.array', (['[[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]]'], {}), '([[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]])\n', (9588, 9630), True, 'import numpy as np\n'), ((9642, 9692), 'numpy.array', 'np.array', (['[[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]]'], {}), '([[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]])\n', (9650, 9692), True, 'import numpy as np\n'), ((9792, 9819), 'numpy.linalg.solve', 'np.linalg.solve', (['T_ti', 'T_tr'], {}), '(T_ti, T_tr)\n', (9807, 9819), True, 'import numpy as np\n'), ((9831, 9850), 'numpy.linalg.inv', 'np.linalg.inv', (['T_ti'], {}), '(T_ti)\n', (9844, 9850), True, 'import numpy as np\n'), ((10344, 10362), 'numpy.diag', 'np.diag', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10351, 10362), True, 'import numpy as np\n'), ((190, 212), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (204, 212), True, 'import numpy as np\n'), ((3358, 3407), 'numpy.linspace', 'np.linspace', (['(0)', '(-np.pi)', 'divisions'], {'endpoint': '(False)'}), '(0, -np.pi, divisions, endpoint=False)\n', (3369, 3407), True, 'import numpy as np\n'), ((5116, 5131), 'numpy.append', 'np.append', (['c', 'c'], {}), '(c, c)\n', (5125, 5131), True, 'import numpy as np\n'), ((5798, 5970), 'numpy.array', 'np.array', (['[[x11 - b ** 2 - c ** 2, x12 + a * b, x13 + a * c], [x21 + a * b, x22 - a **\n 2 - c ** 2, 
x23 + b * c], [x31 + a * c, x32 + b * c, x33 - a ** 2 - b ** 2]\n ]'], {}), '([[x11 - b ** 2 - c ** 2, x12 + a * b, x13 + a * c], [x21 + a * b, \n x22 - a ** 2 - c ** 2, x23 + b * c], [x31 + a * c, x32 + b * c, x33 - a **\n 2 - b ** 2]])\n', (5806, 5970), True, 'import numpy as np\n'), ((6488, 6502), 'numpy.cross', 'np.cross', (['k', 'p'], {}), '(k, p)\n', (6496, 6502), True, 'import numpy as np\n'), ((6625, 6651), 'numpy.exp', 'np.exp', (['(1.0j * t * k[:, 2])'], {}), '(1.0j * t * k[:, 2])\n', (6631, 6651), True, 'import numpy as np\n'), ((7847, 7902), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]])\n', (7855, 7902), True, 'import numpy as np\n'), ((8101, 8124), 'numpy.cross', 'np.cross', (['k', '[0, 0, -1]'], {}), '(k, [0, 0, -1])\n', (8109, 8124), True, 'import numpy as np\n'), ((8157, 8172), 'numpy.cross', 'np.cross', (['si', 'k'], {}), '(si, k)\n', (8165, 8172), True, 'import numpy as np\n'), ((8193, 8226), 'numpy.cross', 'np.cross', (['sr', '[k[0], k[1], -k[2]]'], {}), '(sr, [k[0], k[1], -k[2]])\n', (8201, 8226), True, 'import numpy as np\n'), ((9987, 10020), 'numpy.array', 'np.array', (['[[1.0j, -1.0j], [1, 1]]'], {}), '([[1.0j, -1.0j], [1, 1]])\n', (9995, 10020), True, 'import numpy as np\n'), ((10019, 10033), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (10026, 10033), True, 'import numpy as np\n'), ((989, 1003), 'numpy.outer', 'np.outer', (['v', 'v'], {}), '(v, v)\n', (997, 1003), True, 'import numpy as np\n'), ((3453, 3522), 'numpy.linspace', 'np.linspace', (['(0)', '(-np.pi * thickness / pitch)', 'divisions'], {'endpoint': '(False)'}), '(0, -np.pi * thickness / pitch, divisions, endpoint=False)\n', (3464, 3522), True, 'import numpy as np\n'), ((7249, 7283), 'numpy.compress', 'np.compress', (['null_mask', 'vh'], {'axis': '(0)'}), '(null_mask, vh, axis=0)\n', (7260, 7283), True, 'import numpy as np\n'), ((350, 363), 'numpy.cos', 'np.cos', (['theta'], {}), 
'(theta)\n', (356, 363), True, 'import numpy as np\n'), ((406, 419), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (412, 419), True, 'import numpy as np\n'), ((421, 434), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (427, 434), True, 'import numpy as np\n'), ((909, 922), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (915, 922), True, 'import numpy as np\n'), ((925, 939), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (936, 939), True, 'import numpy as np\n'), ((942, 955), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (948, 955), True, 'import numpy as np\n'), ((967, 980), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (973, 980), True, 'import numpy as np\n'), ((2226, 2236), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2229, 2236), False, 'from math import sin, cos\n'), ((2238, 2248), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2241, 2248), False, 'from math import sin, cos\n'), ((2265, 2275), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2268, 2275), False, 'from math import sin, cos\n'), ((366, 379), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (372, 379), True, 'import numpy as np\n'), ((2253, 2263), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2256, 2263), False, 'from math import sin, cos\n')]
|
from color_detection import detect
from cube import Color
# Per-colour reference values handed to detect() as its colour bounds.
# NOTE(review): assumed to be (R, G, B) triples -- confirm against detect().
COLOR_BOUNDS = {
    Color.BLUE: (20, 105, 190),
    Color.GREEN: (70, 175, 100),
    Color.ORANGE: (220, 105, 90),
    Color.RED: (120, 30, 30),
    Color.WHITE: (175, 180, 200),
    Color.YELLOW: (170, 200, 120)
}
def test_green_detect():
    """All nine regions of the all-green face must be detected as GREEN."""
    bounds = [(1105, 1170, 1310, 1385),
              (1375, 1170, 1585, 1385),
              (1645, 1170, 1855, 1385),
              (1105, 1450, 1310, 1655),
              (1375, 1450, 1585, 1655),
              (1645, 1450, 1855, 1655),
              (1105, 1715, 1310, 1920),
              (1375, 1715, 1585, 1920),
              (1645, 1715, 1855, 1920)]
    result = detect("tests/color_detection/cube_green.jpg", bounds, COLOR_BOUNDS)
    assert result == [Color.GREEN] * 9
def test_orange_detect():
    """All nine regions of the all-orange face must be detected as ORANGE."""
    bounds = [(1090, 1070, 1295, 1290),
              (1350, 1070, 1565, 1290),
              (1630, 1070, 1840, 1290),
              (1090, 1350, 1295, 1570),
              (1350, 1350, 1565, 1570),
              (1630, 1350, 1840, 1570),
              (1090, 1630, 1295, 1835),
              (1350, 1630, 1565, 1835),
              (1630, 1630, 1840, 1835)]
    result = detect("tests/color_detection/cube_orange.jpg", bounds, COLOR_BOUNDS)
    assert result == [Color.ORANGE] * 9
def test_blue_detect():
    """All nine regions of the all-blue face must be detected as BLUE."""
    bounds = [(1090, 1025, 1300, 1240),
              (1365, 1025, 1575, 1240),
              (1640, 1025, 1850, 1240),
              (1095, 1310, 1305, 1520),
              (1370, 1310, 1575, 1520),
              (1640, 1310, 1845, 1520),
              (1105, 1585, 1310, 1785),
              (1375, 1585, 1575, 1785),
              (1640, 1585, 1840, 1785)]
    result = detect("tests/color_detection/cube_blue.jpg", bounds, COLOR_BOUNDS)
    assert result == [Color.BLUE] * 9
def test_white_detect():
    """All nine regions of the all-white face must be detected as WHITE."""
    bounds = [(1080, 890, 1295, 1110),
              (1355, 890, 1570, 1110),
              (1630, 890, 1845, 1110),
              (1080, 1175, 1295, 1390),
              (1355, 1175, 1570, 1390),
              (1630, 1175, 1845, 1390),
              (1080, 1450, 1295, 1660),
              (1355, 1450, 1570, 1660),
              (1630, 1450, 1845, 1660)]
    result = detect("tests/color_detection/cube_white.jpg", bounds, COLOR_BOUNDS)
    assert result == [Color.WHITE] * 9
|
[
"color_detection.detect"
] |
[((674, 742), 'color_detection.detect', 'detect', (['"""tests/color_detection/cube_green.jpg"""', 'bounds', 'COLOR_BOUNDS'], {}), "('tests/color_detection/cube_green.jpg', bounds, COLOR_BOUNDS)\n", (680, 742), False, 'from color_detection import detect\n'), ((1327, 1396), 'color_detection.detect', 'detect', (['"""tests/color_detection/cube_orange.jpg"""', 'bounds', 'COLOR_BOUNDS'], {}), "('tests/color_detection/cube_orange.jpg', bounds, COLOR_BOUNDS)\n", (1333, 1396), False, 'from color_detection import detect\n'), ((1988, 2055), 'color_detection.detect', 'detect', (['"""tests/color_detection/cube_blue.jpg"""', 'bounds', 'COLOR_BOUNDS'], {}), "('tests/color_detection/cube_blue.jpg', bounds, COLOR_BOUNDS)\n", (1994, 2055), False, 'from color_detection import detect\n'), ((2626, 2694), 'color_detection.detect', 'detect', (['"""tests/color_detection/cube_white.jpg"""', 'bounds', 'COLOR_BOUNDS'], {}), "('tests/color_detection/cube_white.jpg', bounds, COLOR_BOUNDS)\n", (2632, 2694), False, 'from color_detection import detect\n')]
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
import sys
n = int(sys.stdin.readline())
max_point = 0
restaurant = ''
for _ in range(n):
name, point = sys.stdin.readline().strip().split()
if int(point) > max_point:
max_point = int(point)
restaurant = name
elif max_point == int(point):
restaurant = min(restaurant, name)
print(restaurant)
|
[
"sys.stdin.readline"
] |
[((297, 317), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (315, 317), False, 'import sys\n'), ((386, 406), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (404, 406), False, 'import sys\n')]
|
# library: python-telegram-bot
import telegram
my_token = '' # bot token
chat_id = '' # telegram group or chat id
def sendTelegramMsg(msg, chat_id=chat_id, token=my_token):
    """Send *msg* to a Telegram chat through the bot API.

    chat_id and token default to the module-level values captured at
    import time.
    """
    bot = telegram.Bot(token=token)
    bot.sendMessage(chat_id=chat_id, text=msg)
|
[
"telegram.Bot"
] |
[((181, 206), 'telegram.Bot', 'telegram.Bot', ([], {'token': 'token'}), '(token=token)\n', (193, 206), False, 'import telegram\n')]
|
# Note: press Enter after the info is displayed to clear the screen and return to the menu
# The red() function may take a while to finish running
from threading import Semaphore, Thread
import os, threading
# Semaphore used to serialise the worker threads' console output
mutex = threading.Semaphore(1)
def switch_Op(opcion):
    """Dispatch the menu option *opcion* ('1'..'7').

    Options 1-4 and 6 run the matching info routine on its own thread,
    option 5 shows every section at once, option 7 exits the program.
    Any other value is ignored.
    """
    threaded = {
        '1': procesos,
        '2': memoria,
        '3': cpuInfo,
        '4': temperatura,
        '6': red,
    }
    if opcion in threaded:
        threading.Thread(target=threaded[opcion]).start()
    elif opcion == '5':
        hilo()
    elif opcion == '7':
        print("\nHasta luego!\n")
        exit()
# Functions
def main():
    """Interactive loop: clear the screen, show the menu, and dispatch the
    user's choice until option 7 exits the process."""
    global mutex, opcion
    while True:
        os.system("clear")
        print ("Seleccione una opcion: \n1) Procesos \n2) Memoria \n3) Cpuinfo \n4) Temperatura \n5) Todo \n6) Red \n7) Salir")
        opcion = input('Ingrese opcion: ')
        switch_Op(opcion)
def procesos():
    """Print the process tree (``ps -auf``) while holding the output lock."""
    global mutex
    # "with" guarantees the semaphore is released even if os.system raises
    with mutex:
        print("\n-----------------------------------Procesos----------------------------------\n\n")
        os.system("ps -auf")
def temperatura():
    """Print the sensor temperatures (``sensors``) while holding the output lock."""
    global mutex
    # "with" guarantees the semaphore is released even if os.system raises
    with mutex:
        print("\n----------------------------------Temperatura---------------------------------\n\n")
        os.system("sensors")
def cpuInfo():
    """Print the CPU details (``/proc/cpuinfo``) while holding the output lock."""
    global mutex
    # "with" guarantees the semaphore is released even if os.system raises
    with mutex:
        print("\n------------------------------------CPUinfo----------------------------------\n\n")
        os.system("cat /proc/cpuinfo")
def memoria():
    """Print memory usage (``free -h`` and ``/proc/meminfo``) while holding the output lock."""
    global mutex
    # "with" guarantees the semaphore is released even if os.system raises
    with mutex:
        print("\n------------------------------------Memoria-----------------------------------\n\n")
        os.system("free -h")
        os.system("cat /proc/meminfo")
def red():
    """Print the network sockets (``netstat -a``) while holding the output lock."""
    global mutex
    # "with" guarantees the semaphore is released even if os.system raises
    with mutex:
        print("\n---------------------------------------red------------------------------------\n\n")
        os.system("netstat -a")
def hilo():
    """Show every section at once by launching all five workers; the shared
    semaphore makes them print one after another."""
    for tarea in (procesos, memoria, temperatura, cpuInfo, red):
        threading.Thread(target=tarea).start()
main()
|
[
"threading.Thread",
"threading.Semaphore",
"os.system"
] |
[((307, 329), 'threading.Semaphore', 'threading.Semaphore', (['(1)'], {}), '(1)\n', (326, 329), False, 'import os, threading\n'), ((1298, 1318), 'os.system', 'os.system', (['"""ps -auf"""'], {}), "('ps -auf')\n", (1307, 1318), False, 'import os, threading\n'), ((1483, 1503), 'os.system', 'os.system', (['"""sensors"""'], {}), "('sensors')\n", (1492, 1503), False, 'import os, threading\n'), ((1663, 1693), 'os.system', 'os.system', (['"""cat /proc/cpuinfo"""'], {}), "('cat /proc/cpuinfo')\n", (1672, 1693), False, 'import os, threading\n'), ((1855, 1875), 'os.system', 'os.system', (['"""free -h"""'], {}), "('free -h')\n", (1864, 1875), False, 'import os, threading\n'), ((1877, 1907), 'os.system', 'os.system', (['"""cat /proc/meminfo"""'], {}), "('cat /proc/meminfo')\n", (1886, 1907), False, 'import os, threading\n'), ((2064, 2087), 'os.system', 'os.system', (['"""netstat -a"""'], {}), "('netstat -a')\n", (2073, 2087), False, 'import os, threading\n'), ((2127, 2160), 'threading.Thread', 'threading.Thread', ([], {'target': 'procesos'}), '(target=procesos)\n', (2143, 2160), False, 'import os, threading\n'), ((2181, 2213), 'threading.Thread', 'threading.Thread', ([], {'target': 'memoria'}), '(target=memoria)\n', (2197, 2213), False, 'import os, threading\n'), ((2234, 2270), 'threading.Thread', 'threading.Thread', ([], {'target': 'temperatura'}), '(target=temperatura)\n', (2250, 2270), False, 'import os, threading\n'), ((2291, 2323), 'threading.Thread', 'threading.Thread', ([], {'target': 'cpuInfo'}), '(target=cpuInfo)\n', (2307, 2323), False, 'import os, threading\n'), ((2344, 2372), 'threading.Thread', 'threading.Thread', ([], {'target': 'red'}), '(target=red)\n', (2360, 2372), False, 'import os, threading\n'), ((392, 425), 'threading.Thread', 'threading.Thread', ([], {'target': 'procesos'}), '(target=procesos)\n', (408, 425), False, 'import os, threading\n'), ((957, 975), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (966, 975), False, 'import os, 
threading\n'), ((480, 512), 'threading.Thread', 'threading.Thread', ([], {'target': 'memoria'}), '(target=memoria)\n', (496, 512), False, 'import os, threading\n'), ((566, 598), 'threading.Thread', 'threading.Thread', ([], {'target': 'cpuInfo'}), '(target=cpuInfo)\n', (582, 598), False, 'import os, threading\n'), ((656, 692), 'threading.Thread', 'threading.Thread', ([], {'target': 'temperatura'}), '(target=temperatura)\n', (672, 692), False, 'import os, threading\n'), ((782, 810), 'threading.Thread', 'threading.Thread', ([], {'target': 'red'}), '(target=red)\n', (798, 810), False, 'import os, threading\n')]
|
import io
import re
import numpy as np
class NamedPoints():
    """Labelled 3D points loaded from a whitespace-separated text file of
    ``name x y z`` rows."""

    def __init__(self, fl):
        # genfromtxt with dtype=None infers a structured array; the name
        # column comes back as bytes, hence the ascii decode below.
        # NOTE(review): newer numpy may return str names -- confirm version.
        data = np.genfromtxt(fl, dtype=None)
        # Nx3 coordinate array, row-aligned with self.names
        self.xyz = np.array([[l[1], l[2], l[3]] for l in data])
        self.names = [l[0].decode('ascii') for l in data]
        # Lookup from point name to its xyz coordinates
        self.name_to_xyz = dict(zip(self.names, self.xyz))
class Contacts(NamedPoints):
    """Electrode contact positions; groups contact indices by electrode name."""

    # "A12" -> electrode "A", contact 12 (electrode may carry a prime, "A'3")
    contact_single_regex = re.compile("^([A-Za-z]+[']?)([0-9]+)$")
    # "A1-2": contact pair with only the second index given
    contact_pair_regex_1 = re.compile("^([A-Za-z]+[']?)([0-9]+)-([0-9]+)$")
    # "A1-A2": contact pair with the electrode name repeated
    contact_pair_regex_2 = re.compile("^([A-Za-z]+[']?)([0-9]+)-([A-Za-z]+[']?)([0-9]+)$")

    def __init__(self, filename):
        """Load contacts from *filename* and build the electrode index map.

        Raises ValueError on any contact name not matching "<letters>[']<digits>".
        """
        super().__init__(filename)
        # Map electrode name -> list of contact indices into self.names/self.xyz
        self.electrodes = {}
        for i, name in enumerate(self.names):
            match = self.contact_single_regex.match(name)
            if match is None:
                raise ValueError("Unexpected contact name %s" % name)
            elec_name, _ = match.groups()
            # setdefault replaces the manual "create list if missing" dance
            self.electrodes.setdefault(elec_name, []).append(i)

    def get_elec(self, name):
        """Return the electrode name of a single contact, or None if *name*
        does not look like one."""
        match = self.contact_single_regex.match(name)
        if match is None:
            return None
        return match.groups()[0]

    def get_coords(self, name):
        """Get the coordinates of a specified contact or contact pair. Allowed formats are:
        A1            : Single contact.
        A1-2 or A1-A2 : Contact pair. The indices must be adjacent.
        Examples:
        >>> np.set_printoptions(formatter={'float': lambda x: "{0:0.1f}".format(x)})
        >>> contacts = Contacts(io.BytesIO("A1 0.0 0.0 1.0\\nA2 0.0 0.0 2.0".encode()))
        >>> contacts.get_coords("A1")
        array([0.0, 0.0, 1.0])
        >>> contacts.get_coords("A1-2")
        array([0.0, 0.0, 1.5])
        >>> contacts.get_coords("A2-A1")
        array([0.0, 0.0, 1.5])
        """
        match = self.contact_single_regex.match(name)
        if match is not None:
            return self.name_to_xyz[name]
        match = self.contact_pair_regex_1.match(name)
        if match is not None:
            assert abs(int(match.group(2)) - int(match.group(3))) == 1
            contact1 = match.group(1) + match.group(2)
            contact2 = match.group(1) + match.group(3)
            return (self.name_to_xyz[contact1] + self.name_to_xyz[contact2])/2.
        match = self.contact_pair_regex_2.match(name)
        if match is not None:
            assert match.group(1) == match.group(3)
            assert abs(int(match.group(2)) - int(match.group(4))) == 1
            contact1 = match.group(1) + match.group(2)
            contact2 = match.group(3) + match.group(4)
            return (self.name_to_xyz[contact1] + self.name_to_xyz[contact2])/2.
        raise ValueError("Given name '%s' does not follow any expected pattern." % name)
|
[
"numpy.array",
"numpy.genfromtxt",
"re.compile"
] |
[((374, 413), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)$")\n', (384, 413), False, 'import re\n'), ((441, 489), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)-([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)-([0-9]+)$")\n', (451, 489), False, 'import re\n'), ((517, 580), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)-([A-Za-z]+[\']?)([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)-([A-Za-z]+[\']?)([0-9]+)$")\n', (527, 580), False, 'import re\n'), ((106, 135), 'numpy.genfromtxt', 'np.genfromtxt', (['fl'], {'dtype': 'None'}), '(fl, dtype=None)\n', (119, 135), True, 'import numpy as np\n'), ((155, 199), 'numpy.array', 'np.array', (['[[l[1], l[2], l[3]] for l in data]'], {}), '([[l[1], l[2], l[3]] for l in data])\n', (163, 199), True, 'import numpy as np\n')]
|
#-*- coding: utf-8 -*-
from django.http import HttpResponse, Http404
from django.shortcuts import render, redirect
from clustering.form import ScreenOneForm
from clustering.modelStatic import Stats
from . import ecran1
import json
import csv
import re
def view_screen(request):
    """Screen-2 entry point: on POST, normalise the columns selected on
    screen 1 from the uploaded file and render screen 2; otherwise render
    screen 1.

    Bug fix: a non-POST request previously fell off the end of the function
    and returned None, which is not a valid Django response -- it now
    renders screen 1 like the invalid-form case.
    """
    if request.method == 'POST':
        form = ScreenOneForm(request.POST, request.FILES)
        if form.is_valid():
            fichier = request.FILES['fichierData']
            # Ids of the classification columns picked on screen 1
            idColumns = request.POST.getlist('champsClassification')
            tabNameColumns = matchNameColumn(idColumns)
            # Min-max normalise the requested columns of the uploaded file
            resultNormalize = normalizeMe(idColumns, fichier)
            # Stats objects used to fill in the screen-2 template
            tabObjectStats = matchStats(resultNormalize, tabNameColumns)
            return render(request, 'ecran2.html', {'id_screen': 2, 'objetsStats' : tabObjectStats, 'tabStatsRaw' : resultNormalize})
    # GET request or invalid form: go (back) to screen 1
    return render(request, 'ecran1.html', {'id_screen': 1})
#
# Field normalisation function
# @params : multidimensional array holding the ids of the classification fields to normalise
# @fichier : the file containing the fields to normalise
# @return : array containing the arrays of the normalised fields
#
def normalizeMe(params, fichier):
    """Min-max normalise the requested columns of an uploaded CSV file.

    params: list of column ids (strings) selected for classification
    fichier: uploaded file object whose lines are comma-separated values
    return: list of lists, one normalised column per requested id
    """
    linesWithDefaut = [] # raw line split, cells still carrying trailing \n
    matrixWithDefaut = []
    # Fill the matrix with one row per file line
    for line in fichier.readlines():
        linesWithDefaut = line.split(",")
        matrixWithDefaut.append(linesWithDefaut)
    # Strip the trailing newlines from every cell so values parse cleanly
    for linesWithDefaut in matrixWithDefaut:
        for index, number in enumerate(linesWithDefaut):
            linesWithDefaut[index] = number.rstrip()
    # matrixWithDefaut now holds the cleaned file rows.
    # Next, extract the columns requested in params (e.g. [u'2', u'3']
    # for fields 2 and 3).
    column = []
    columns = []
    indiceClassification = []
    normalizedColumn = []
    normalizedColumns = []
    size = len(params)
    for i in range (size) :
        indiceClassification.append(params[i])
    # Collect each requested column into its own list of floats
    for currentIndice in indiceClassification:
        for lines in matrixWithDefaut:
            for index, number in enumerate(lines):
                if int(currentIndice) == int(index):
                    column.append(float(lines[index-1])) # list indices start at 0, hence index-1
        columns.append(column)
        column = []
    # Min-max normalise each collected column
    for column in columns:
        minColumn = min(column)
        maxColumn = max(column)
        for index, row in enumerate(column):
            diviseur = maxColumn - minColumn
            if diviseur == 0:
                # a zero divisor means the whole column is constant, so row = 0
                row = 0.00
            else:
                numerateur = float(row) - minColumn
                row = numerateur / diviseur
            normalizedColumn.append(row)
        normalizedColumns.append(normalizedColumn)
        normalizedColumn = []
    #@todo
    #print(normalizedColumns)
    return normalizedColumns
#
# Maps column ids to the matching column names for display
#
def matchNameColumn(idColumns):
    """Return the display names of the columns whose ids appear in idColumns.

    The result keeps the order of the master tabChoice table (not the order
    of idColumns), matching the original nested-loop behaviour.
    """
    tabChoice = (
        (1, 'word_freq_make'),
        (2, 'word_freq_address'),
        (3, 'word_freq_all'),
        (4, 'word_freq_3d'),
        (5, 'word_freq_our'),
        (6, 'word_freq_over'),
        (7, 'word_freq_remove'),
        (8, 'word_freq_internet'),
        (9, 'word_freq_order'),
        (10, 'word_freq_mail'),
        (11, 'word_freq_receive'),
        (12, 'word_freq_will'),
        (13, 'word_freq_people'),
        (14, 'word_freq_report'),
        (15, 'word_freq_addresses'),
        (16, 'word_freq_free'),
        (17, 'word_freq_business'),
        (18, 'word_freq_email'),
        (19, 'word_freq_you'),
        (20, 'word_freq_credit'),
        (21, 'word_freq_your'),
        (22, 'word_freq_font'),
        (23, 'word_freq_000'),
        (24, 'word_freq_money'),
        (25, 'word_freq_hp'),
        (26, 'word_freq_hpl'),
        (27, 'word_freq_george'),
        (28, 'word_freq_650'),
        (29, 'word_freq_lab'),
        (30, 'word_freq_labs'),
        (31, 'word_freq_telnet'),
        (32, 'word_freq_857'),
        (33, 'word_freq_data'),
        (34, 'word_freq_415'),
        (35, 'word_freq_85'),
        (36, 'word_freq_technology'),
        (37, 'word_freq_1999'),
        (38, 'word_freq_parts'),
        (39, 'word_freq_pm'),
        (40, 'word_freq_direct'),
        (41, 'word_freq_cs'),
        (42, 'word_freq_meeting'),
        (43, 'word_freq_original'),
        (44, 'word_freq_project'),
        (45, 'word_freq_re'),
        (46, 'word_freq_edu'),
        (47, 'word_freq_table'),
        (48, 'word_freq_conference'),
        (49, 'char_freq_;'),
        (50, 'char_freq_'),
        (51, 'char_freq_['),
        (52, 'char_freq_!'),
        (53, 'char_freq_$'),
        (54, 'char_freq_#'),
        (55, 'capital_run_length_average'),
        (56, 'capital_run_length_longest'),
        (57, 'capital_run_length_total')
    )
    # Outer loop over tabChoice preserves the master-table ordering
    return [choice[1]
            for choice in tabChoice
            for idColumn in idColumns
            if int(idColumn) == int(choice[0])]
#
# Fills Stats objects with every statistic the view needs, based on the
# normalised fields
# @params : normalizedColumns, every column normalised by normalizeMe
# @return : a list of filled Stats objects
#
def matchStats(normalizedColumns, tabNameColumns):
    """Build one Stats object per normalised column and label each with the
    matching column name from tabNameColumns."""
    tabObjectStats = [Stats(position, colonne)
                      for position, colonne in enumerate(normalizedColumns)]
    for index, objectStat in enumerate(tabObjectStats):
        objectStat._set_nom(tabNameColumns[index])
    return tabObjectStats
#print "min S : "
#print(currentStat.minS)
#print "max s :"
#print(currentStat.maxS)
#print "moyenne : "
#print(currentStat.moyenne)
#print "ecart_type"
#print(currentStat.ecart_type)
|
[
"django.shortcuts.render",
"clustering.form.ScreenOneForm",
"clustering.modelStatic.Stats"
] |
[((350, 392), 'clustering.form.ScreenOneForm', 'ScreenOneForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (363, 392), False, 'from clustering.form import ScreenOneForm\n'), ((1257, 1305), 'django.shortcuts.render', 'render', (['request', '"""ecran1.html"""', "{'id_screen': 1}"], {}), "(request, 'ecran1.html', {'id_screen': 1})\n", (1263, 1305), False, 'from django.shortcuts import render, redirect\n'), ((5972, 6002), 'clustering.modelStatic.Stats', 'Stats', (['index', 'normalizedColumn'], {}), '(index, normalizedColumn)\n', (5977, 6002), False, 'from clustering.modelStatic import Stats\n'), ((1045, 1160), 'django.shortcuts.render', 'render', (['request', '"""ecran2.html"""', "{'id_screen': 2, 'objetsStats': tabObjectStats, 'tabStatsRaw': resultNormalize}"], {}), "(request, 'ecran2.html', {'id_screen': 2, 'objetsStats':\n tabObjectStats, 'tabStatsRaw': resultNormalize})\n", (1051, 1160), False, 'from django.shortcuts import render, redirect\n')]
|
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.http import HttpResponse, JsonResponse as DJsonResponse
from . import logic, models
import logging
import json
LOG = logging.getLogger()
def JsonResponse(*args, **kwargs):
    """Wrapper around Django's JsonResponse that pretty-prints the payload
    with a 4-space indent (overriding any caller-supplied dump params)."""
    kwargs["json_dumps_params"] = {"indent": 4}
    return DJsonResponse(*args, **kwargs)
def error(message, status=500, content_type="application/json"):
    """Build a JSON error response: ``{"error": message}`` with the given
    HTTP status and content type (defaults: 500, application/json)."""
    payload = {"error": message}
    return JsonResponse(payload, status=status, content_type=content_type)
@require_http_methods(["HEAD", "GET"])
def ping(request):
    """Liveness probe: always answers plain-text "pong"."""
    return HttpResponse("pong", content_type="text/plain")
@require_http_methods(["HEAD", "GET"])
def status(request):
    """Report data freshness as JSON: last update time and row count.

    Any unexpected failure is logged and mapped to a JSON 500 error.
    """
    try:
        resp = {"last-updated": logic.last_updated(), "row-count": logic.row_count()}
        return JsonResponse(resp, status=200)
    except Exception:
        LOG.exception("unhandled exception calling /status")
        return error("unexpected error")
@require_http_methods(["HEAD", "GET", "POST"])
def article(request, msid):
    """GET/HEAD: return the protocol data for article *msid* as JSON.
    POST: attach a JSON payload of results to the article.

    POST replies with per-item success/failure counts (HTTP 400 if any
    item failed); an unknown article yields 404 and unexpected errors 500.
    """
    try:
        if request.method != "POST": # GET, HEAD
            art_data = logic.protocol_data(msid)
            return JsonResponse(
                art_data, status=200, content_type=settings.ELIFE_CONTENT_TYPE
            )
        else: # POST
            # NOTE(review): despite the name, this is the request *content
            # type*, not a Content-Encoding header
            content_encoding = request.content_type.strip().lower()
            if (
                "application/json" not in content_encoding
                and settings.ELIFE_CONTENT_TYPE_GENERAL not in content_encoding
            ):
                return error("unable to negotiate a content encoding", 406)
            try:
                data = json.loads(request.body)
            except Exception:
                return error("failed to parse given JSON", 400)
            if not data:
                return error("empty request", 400)
            results = logic.add_result({"elifeID": msid, "data": data})
            response = {
                "msid": msid,
                "successful": len(results["successful"]),
                "failed": len(results["failed"]),
            }
            # Any failed item downgrades the whole request to 400
            status_code = 200 if not results["failed"] else 400
            return JsonResponse(
                response, status=status_code, content_type=settings.ELIFE_CONTENT_TYPE
            )
    except models.ArticleProtocol.DoesNotExist:
        return error("Not found", 404)
    except Exception:
        LOG.exception("unhandled exception calling /article")
        return error("Server error", 500)
|
[
"django.http.HttpResponse",
"json.loads",
"logging.getLogger",
"django.http.JsonResponse",
"django.views.decorators.http.require_http_methods"
] |
[((225, 244), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (242, 244), False, 'import logging\n'), ((528, 565), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['HEAD', 'GET']"], {}), "(['HEAD', 'GET'])\n", (548, 565), False, 'from django.views.decorators.http import require_http_methods\n'), ((647, 684), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['HEAD', 'GET']"], {}), "(['HEAD', 'GET'])\n", (667, 684), False, 'from django.views.decorators.http import require_http_methods\n'), ((974, 1019), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['HEAD', 'GET', 'POST']"], {}), "(['HEAD', 'GET', 'POST'])\n", (994, 1019), False, 'from django.views.decorators.http import require_http_methods\n'), ((341, 371), 'django.http.JsonResponse', 'DJsonResponse', (['*args'], {}), '(*args, **kwargs)\n', (354, 371), True, 'from django.http import HttpResponse, JsonResponse as DJsonResponse\n'), ((596, 643), 'django.http.HttpResponse', 'HttpResponse', (['"""pong"""'], {'content_type': '"""text/plain"""'}), "('pong', content_type='text/plain')\n", (608, 643), False, 'from django.http import HttpResponse, JsonResponse as DJsonResponse\n'), ((1662, 1686), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (1672, 1686), False, 'import json\n')]
|
import math
import numpy as np
from numerical_analysis.splines.bezier import Bezier
from numerical_analysis.dependencies import Polynomial
from numerical_analysis.root_finding import newton_raphson_2x2
from numerical_analysis.dependencies.geometry import StraightLine, Circle
from output_lib.csv_lib import ScvExporter
from output_lib.plot_lib import PlotExporter
from output_lib.screen_lib import ScreenPrinter
# Provides a more appropriate parameterization for the specific application
class CustomCircle(Circle):
    """Circle whose parameterization starts at angle pi and sweeps clockwise.

    Mapping t in [0, 1] to angle pi - 2*pi*t gives the application a more
    convenient traversal direction than the base class parameterization.
    """

    def x_t(self, t):
        angle = math.pi - 2 * math.pi * t
        return self.R * math.cos(angle) + self.C[0]

    def y_t(self, t):
        angle = math.pi - 2 * math.pi * t
        return self.R * math.sin(angle) + self.C[1]
class CircleApproacher:
    """Fit a closed, x-axis-symmetric quintic Bezier curve to a circle.

    The six Bezier control points are generated from five scalar parameters
    (a, b, c, d, e).  Each iteration pairs Bezier points with circle points
    and re-estimates the parameters via two linear least-squares solves.
    """
    def __init__(self, circle_parameters, initial_parameters, num_of_parameters=5):
        # circle_parameters: dict with "radius" and "center" entries.
        # initial_parameters: starting [a, b, c, d, e].
        # num_of_parameters: 5 fits a, b, d jointly; 3 fixes a = b = 0 and
        # fits only d (c and e are always fitted in a separate 2x2 solve).
        self.r = circle_parameters["radius"]
        self.c = circle_parameters["center"]
        self.circle = CustomCircle(self.c, self.r)
        # Pre-sampled circle polyline, reused for every exported plot.
        self.circle_graph = self.circle.graph(0.01)
        # Placeholder segment; endpoints are moved in calculate_point_pairs.
        self.line = StraightLine([[0, [0, 0]], [1, [1, 1]]])
        self.parameters = initial_parameters
        self.bezier = self.initialize_bezier()
        self.num_of_parameters = num_of_parameters
    def initialize_bezier(self):
        """Build a Bezier whose control points mirror about the x-axis."""
        a, b, c, d, e = self.parameters
        cp = np.array([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])
        return Bezier(cp)
    def refresh_bezier(self):
        """Push the current parameters back into the existing Bezier object."""
        a, b, c, d, e = self.parameters
        CP = np.array([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])
        self.bezier.refresh_control_points(CP)
    def refresh_parameters(self, new_parameters):
        """Replace the parameter vector [a, b, c, d, e]."""
        self.parameters = new_parameters
    def least_squares(self, point_pairs):
        """One least-squares re-estimation of the Bezier parameters.

        point_pairs: list of [t, [K, Q]] entries from calculate_point_pairs
        (K: target point on the circle, Q: current Bezier point).
        Returns a new [a, b, c, d, e], or None when num_of_parameters is
        neither 5 nor 3 (callers then crash in convergence — see solve()).
        """
        # Basis polynomials of the symmetric quintic Bezier: p0..p2 belong to
        # the x-coordinate fit, p3/p4 to the y-coordinate fit.
        def p0(t): return Polynomial(np.array([1, -5, 10, -10, 5])).value(t)
        def p1(t): return Polynomial(np.array([0, 1, -4, 6, -3])).value(t)
        def p2(t): return Polynomial(np.array([0, 0, 1, -2, 1])).value(t)
        def p3(t): return Polynomial(np.array([0, 1, -4, 6, -5, 2])).value(t)
        def p4(t): return Polynomial(np.array([0, 0, 1, -4, 5, -2])).value(t)
        # sigma1/sigma2 accumulate the normal-equation sums; table[i][1][0]
        # is the target point K, indexed 0 for x and 1 for y.
        def sigma1(f, g, table): return sum([f(table[i][0]) * g(table[i][0]) for i in range(len(table))])
        def sigma2(f, index, table): return sum([table[i][1][0][index] * f(table[i][0]) for i in range(len(table))])
        if self.num_of_parameters == 5:
            # 3x3 normal equations for the x-coordinate parameters (symmetric A).
            A11 = sigma1(p0, p0, point_pairs)
            A12 = sigma1(p0, p1, point_pairs)
            A13 = sigma1(p0, p2, point_pairs)
            A21 = A12
            A22 = sigma1(p1, p1, point_pairs)
            A23 = sigma1(p1, p2, point_pairs)
            A31 = A13
            A32 = A23
            A33 = sigma1(p2, p2, point_pairs)
            A = [[A11, A12, A13],
                 [A21, A22, A23],
                 [A31, A32, A33]]
            B11 = sigma2(p0, 0, point_pairs)
            B21 = sigma2(p1, 0, point_pairs)
            B31 = sigma2(p2, 0, point_pairs)
            B = [B11, B21, B31]
            solution_0 = np.linalg.solve(A, B)
            a_new = solution_0[0]
            # The solved unknowns carry Bezier basis factors 5 and 10.
            b_new = solution_0[1] / 5
            d_new = solution_0[2] / 10
        elif self.num_of_parameters == 3:
            # Reduced fit: a and b stay 0, only d is estimated (1x1 solve).
            a_new = 0
            b_new = 0
            d_new = 0.1 * sigma2(p2, 0, point_pairs) / sigma1(p2, p2, point_pairs)
        else: return
        # 2x2 normal equations for the y-coordinate parameters c, e.
        C11 = sigma1(p3, p3, point_pairs)
        C12 = sigma1(p3, p4, point_pairs)
        C21 = C12
        C22 = sigma1(p4, p4, point_pairs)
        C = [[C11, C12], [C21, C22]]
        D11 = sigma2(p3, 1, point_pairs)
        D21 = sigma2(p4, 1, point_pairs)
        D = [D11, D21]
        solution_1 = np.linalg.solve(C, D)
        c_new = solution_1[0] / 5
        e_new = solution_1[1] / 10
        return [a_new, b_new, c_new, d_new, e_new]
    def calculate_point_pairs(self, d_phi):
        """Pair sampled circle points with intersection points on the Bezier.

        For each sampled parameter t, the helper line is moved to run from
        (self.r, 0) to the circle point; newton_raphson_2x2 then solves
        bezier(tb) == line(tl).  Returns a list of [tb, [K, Q]] with K the
        line endpoint (circle point) and Q the Bezier point.
        """
        # Residuals and their partial derivatives for the 2x2 Newton solver.
        def dx(tb, tl): return self.bezier.x_t(tb) - self.line.x_t(tl)
        def dy(tb, tl): return self.bezier.y_t(tb) - self.line.y_t(tl)
        def dxb(tb, tl): return Polynomial(self.bezier.c[0]).derivative().value(tb)
        def dxl(tb, tl): return - Polynomial(self.line.c[0]).derivative().value(tl)
        def dyb(tb, tl): return Polynomial(self.bezier.c[1]).derivative().value(tb)
        def dyl(tb, tl): return - Polynomial(self.line.c[1]).derivative().value(tl)
        point_pairs = []
        # Angular step d_phi mapped onto the curve parameter range [0, 1).
        dt = d_phi / (2 * math.pi)
        for t in np.arange(0., 1., dt):
            self.line.modify_points([[0, [self.r, 0]], [1, self.circle.point_t(t)]])
            # Start the search at (t, 1); tolerance 1e-12.
            tb, tl = newton_raphson_2x2(dx, dy, dxb, dxl, dyb, dyl, t, 1, 1.e-12)
            K = self.line.point_t(1)
            Q = self.bezier.point_t(tb)
            point_pairs.append([tb, [K, Q]])
        return point_pairs
    @staticmethod
    def error_function(point_pairs, divisions):
        """Sum of Euclidean K-Q distances divided by the number of samples."""
        # 1st Approach
        # Ex = sum([(pair[1][0][0] - pair[1][1][0]) ** 2 for pair in point_pairs])
        # Ey = sum([(pair[1][0][1] - pair[1][1][1]) ** 2 for pair in point_pairs])
        # return math.sqrt(Ex + Ey)
        # 2nd Approach
        return sum([math.sqrt((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1][1]) ** 2)
                    for pair in point_pairs]) / divisions
    def solve(self, d_phi, iterations=3000, error=1e-12, csv_fname=None, plots_path=None):
        """Iterate least-squares updates until convergence or the limit.

        d_phi      -- angular sampling step on the circle (radians).
        iterations -- maximum number of fitting iterations.
        error      -- per-parameter absolute convergence tolerance.
        csv_fname  -- optional CSV file name for per-iteration results.
        plots_path -- optional directory for per-iteration comparison plots.
        """
        def convergence():
            # True when every parameter moved by at most `error`.
            nonlocal new_parameters
            for i in range(len(self.parameters)):
                if abs(self.parameters[i] - new_parameters[i]) > error:
                    return False
            return True
        def create_csv():
            nonlocal csv_exporter
            csv_exporter.create_csv()
            csv_exporter.write_headers("Iter", "Parameter a", "Parameter b", "Parameter c", "Parameter d",
                                       "Parameter e", "Error Function")
        def give_output(i, error_f):
            # Always print; CSV/plots only when requested.  Plots are thinned
            # to the first 20 iterations plus every 10th one after that.
            screen_printer.print_results(i, self.parameters, error_f)
            if csv_fname:
                csv_exporter.append_row(i, *self.parameters, error_f)
            if plots_path and (i < 20 or i % 10 == 0.):
                bezier_graph = self.bezier.graph(0.01)
                plot_exporter.create_plot(self.circle_graph, bezier_graph,
                                          title="Approach Circle w/ Bezier (Iteration {})".format(i), axes_equal=True)
                plot_exporter.export_plot("gen_{}".format(str(i).zfill(4)))
        divisions = 2 * math.pi / d_phi
        csv_exporter = None
        if csv_fname:
            csv_exporter = ScvExporter(csv_fname, r"results/")
            create_csv()
        screen_printer = ScreenPrinter()
        plot_exporter = None
        if plots_path:
            plot_exporter = PlotExporter(plots_path)
        for i in range(iterations):
            point_pairs = self.calculate_point_pairs(d_phi)
            error_f = self.error_function(point_pairs, divisions)
            give_output(i, error_f)
            new_parameters = self.least_squares(point_pairs)
            if convergence():
                # Converged: apply final parameters, report once more, stop.
                self.refresh_parameters(new_parameters)
                point_pairs = self.calculate_point_pairs(d_phi)
                error_f = self.error_function(point_pairs, divisions)
                give_output(i, error_f)
                break
            else:
                self.refresh_parameters(new_parameters)
                self.refresh_bezier()
|
[
"numerical_analysis.root_finding.newton_raphson_2x2",
"numerical_analysis.dependencies.Polynomial",
"output_lib.plot_lib.PlotExporter",
"output_lib.csv_lib.ScvExporter",
"math.sqrt",
"numerical_analysis.splines.bezier.Bezier",
"output_lib.screen_lib.ScreenPrinter",
"math.sin",
"numerical_analysis.dependencies.geometry.StraightLine",
"numpy.array",
"numpy.arange",
"math.cos",
"numpy.linalg.solve"
] |
[((1016, 1056), 'numerical_analysis.dependencies.geometry.StraightLine', 'StraightLine', (['[[0, [0, 0]], [1, [1, 1]]]'], {}), '([[0, [0, 0]], [1, [1, 1]]])\n', (1028, 1056), False, 'from numerical_analysis.dependencies.geometry import StraightLine, Circle\n'), ((1287, 1347), 'numpy.array', 'np.array', (['[[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]]'], {}), '([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])\n', (1295, 1347), True, 'import numpy as np\n'), ((1363, 1373), 'numerical_analysis.splines.bezier.Bezier', 'Bezier', (['cp'], {}), '(cp)\n', (1369, 1373), False, 'from numerical_analysis.splines.bezier import Bezier\n'), ((1458, 1518), 'numpy.array', 'np.array', (['[[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]]'], {}), '([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])\n', (1466, 1518), True, 'import numpy as np\n'), ((3629, 3650), 'numpy.linalg.solve', 'np.linalg.solve', (['C', 'D'], {}), '(C, D)\n', (3644, 3650), True, 'import numpy as np\n'), ((4376, 4399), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 'dt'], {}), '(0.0, 1.0, dt)\n', (4385, 4399), True, 'import numpy as np\n'), ((6601, 6616), 'output_lib.screen_lib.ScreenPrinter', 'ScreenPrinter', ([], {}), '()\n', (6614, 6616), False, 'from output_lib.screen_lib import ScreenPrinter\n'), ((2990, 3011), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (3005, 3011), True, 'import numpy as np\n'), ((4505, 4564), 'numerical_analysis.root_finding.newton_raphson_2x2', 'newton_raphson_2x2', (['dx', 'dy', 'dxb', 'dxl', 'dyb', 'dyl', 't', '(1)', '(1e-12)'], {}), '(dx, dy, dxb, dxl, dyb, dyl, t, 1, 1e-12)\n', (4523, 4564), False, 'from numerical_analysis.root_finding import newton_raphson_2x2\n'), ((6515, 6549), 'output_lib.csv_lib.ScvExporter', 'ScvExporter', (['csv_fname', '"""results/"""'], {}), "(csv_fname, 'results/')\n", (6526, 6549), False, 'from output_lib.csv_lib import ScvExporter\n'), ((6698, 6722), 'output_lib.plot_lib.PlotExporter', 'PlotExporter', 
(['plots_path'], {}), '(plots_path)\n', (6710, 6722), False, 'from output_lib.plot_lib import PlotExporter\n'), ((558, 593), 'math.cos', 'math.cos', (['(math.pi - 2 * math.pi * t)'], {}), '(math.pi - 2 * math.pi * t)\n', (566, 593), False, 'import math\n'), ((644, 679), 'math.sin', 'math.sin', (['(math.pi - 2 * math.pi * t)'], {}), '(math.pi - 2 * math.pi * t)\n', (652, 679), False, 'import math\n'), ((5053, 5144), 'math.sqrt', 'math.sqrt', (['((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1][1]) ** 2)'], {}), '((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1\n ][1]) ** 2)\n', (5062, 5144), False, 'import math\n'), ((1739, 1768), 'numpy.array', 'np.array', (['[1, -5, 10, -10, 5]'], {}), '([1, -5, 10, -10, 5])\n', (1747, 1768), True, 'import numpy as np\n'), ((1816, 1843), 'numpy.array', 'np.array', (['[0, 1, -4, 6, -3]'], {}), '([0, 1, -4, 6, -3])\n', (1824, 1843), True, 'import numpy as np\n'), ((1891, 1917), 'numpy.array', 'np.array', (['[0, 0, 1, -2, 1]'], {}), '([0, 0, 1, -2, 1])\n', (1899, 1917), True, 'import numpy as np\n'), ((1965, 1995), 'numpy.array', 'np.array', (['[0, 1, -4, 6, -5, 2]'], {}), '([0, 1, -4, 6, -5, 2])\n', (1973, 1995), True, 'import numpy as np\n'), ((2043, 2073), 'numpy.array', 'np.array', (['[0, 0, 1, -4, 5, -2]'], {}), '([0, 0, 1, -4, 5, -2])\n', (2051, 2073), True, 'import numpy as np\n'), ((3993, 4021), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.bezier.c[0]'], {}), '(self.bezier.c[0])\n', (4003, 4021), False, 'from numerical_analysis.dependencies import Polynomial\n'), ((4161, 4189), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.bezier.c[1]'], {}), '(self.bezier.c[1])\n', (4171, 4189), False, 'from numerical_analysis.dependencies import Polynomial\n'), ((4079, 4105), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.line.c[0]'], {}), '(self.line.c[0])\n', (4089, 4105), False, 'from numerical_analysis.dependencies import 
Polynomial\n'), ((4247, 4273), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.line.c[1]'], {}), '(self.line.c[1])\n', (4257, 4273), False, 'from numerical_analysis.dependencies import Polynomial\n')]
|
from django.contrib import admin
from categorie.views import categorie
from categorie.models import Category, MotCles, Theme
# Branding shown in the Django admin header and browser title.
admin.site.site_header = 'FASTSMART'
admin.site.site_title = "Interface d'administration"
# Make the category models editable in the admin.
for model in (Category, MotCles, Theme):
    admin.site.register(model)
|
[
"django.contrib.admin.site.register"
] |
[((247, 276), 'django.contrib.admin.site.register', 'admin.site.register', (['Category'], {}), '(Category)\n', (266, 276), False, 'from django.contrib import admin\n'), ((277, 305), 'django.contrib.admin.site.register', 'admin.site.register', (['MotCles'], {}), '(MotCles)\n', (296, 305), False, 'from django.contrib import admin\n'), ((306, 332), 'django.contrib.admin.site.register', 'admin.site.register', (['Theme'], {}), '(Theme)\n', (325, 332), False, 'from django.contrib import admin\n')]
|
# Generated by Django 2.2.1 on 2019-11-18 10:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the annotationweb app."""
    dependencies = [
        ('annotationweb', '0001_initial'),
    ]
    operations = [
        # New free-form field naming the post-processing method on Task.
        migrations.AddField(
            model_name='task',
            name='post_processing_method',
            field=models.CharField(default='', help_text='Name of post processing method to use', max_length=255),
        ),
        # Label RGB components become non-negative small integers;
        # defaults make the label color red (R=255, G=0, B=0).
        migrations.AlterField(
            model_name='label',
            name='color_blue',
            field=models.PositiveSmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='label',
            name='color_green',
            field=models.PositiveSmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='label',
            name='color_red',
            field=models.PositiveSmallIntegerField(default=255),
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.PositiveSmallIntegerField"
] |
[((343, 443), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Name of post processing method to use"""', 'max_length': '(255)'}), "(default='', help_text=\n 'Name of post processing method to use', max_length=255)\n", (359, 443), False, 'from django.db import migrations, models\n'), ((563, 606), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (595, 606), False, 'from django.db import migrations, models\n'), ((732, 775), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (764, 775), False, 'from django.db import migrations, models\n'), ((899, 944), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(255)'}), '(default=255)\n', (931, 944), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
"""
@FileName : lenet.py
@Description : None
@Author : 齐鲁桐
@Email : <EMAIL>
@Time : 2019-05-13 16:51
@Modify : None
"""
from __future__ import absolute_import, division, print_function
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """LeNet-style CNN for single-channel inputs (e.g. 28x28 images).

    Architecture: two conv+relu+maxpool stages followed by three fully
    connected layers, producing 10 output units per sample.
    """

    def __init__(self, ):
        super(LeNet, self).__init__()
        # Output of each conv is (N, C_out, H_out, W_out).
        self.conv1 = nn.Conv2d(
            1, 6, (5, 5))
        self.conv2 = nn.Conv2d(6, 16, (5, 5))
        # 256 = 16 channels * 4 * 4 spatial positions after two conv+pool
        # stages on a 28x28 input.
        self.fc1 = nn.Linear(256, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Run the network on input ``x`` of shape (N, 1, 28, 28) and return
        a (N, 10) tensor."""
        x = F.max_pool2d(F.relu(self.conv1(x)),
                         (2, 2))  # F.max_pool2d returns a tensor
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        # Flatten all feature maps per sample.
        x = x.view(x.size()[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        # Bug fix: the original forward dropped the result and implicitly
        # returned None, making the module unusable for training/inference.
        return x
|
[
"torch.nn.Conv2d",
"torch.nn.Linear"
] |
[((459, 482), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(6)', '(5, 5)'], {}), '(1, 6, (5, 5))\n', (468, 482), True, 'import torch.nn as nn\n'), ((559, 583), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5, 5)'], {}), '(6, 16, (5, 5))\n', (568, 583), True, 'import torch.nn as nn\n'), ((603, 622), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(120)'], {}), '(256, 120)\n', (612, 622), True, 'import torch.nn as nn\n'), ((642, 660), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (651, 660), True, 'import torch.nn as nn\n'), ((680, 697), 'torch.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (689, 697), True, 'import torch.nn as nn\n')]
|
from __future__ import division
import numpy as np
from sklearn import preprocessing as skpp
__all__ = ['pre', 'post', '_remove_constant', '_add_constant']
def pre(matrix):
    """
    Take the training data and put everything needed to undo this operation later into a dictionary.
    :param matrixTrain:
    :return: matrixTrainPost:
             preprocDict:
    """
    undo_info = {}
    # Drop zero-variance columns, remembering where they were and their values.
    matrix, index, constant = _remove_constant(matrix)
    undo_info['index'] = index
    undo_info['constant'] = constant
    # Standardise the remaining columns; keep the fitted scaler for post().
    scaler = skpp.StandardScaler().fit(matrix)
    matrix = scaler.transform(matrix)
    undo_info['scaler'] = scaler
    return matrix, undo_info
def post(matrix, preprocDict):
    """
    Given a reduced matrix, find the full and unnormalised matrix.
    :param matrixPost:
           preprocDict:
    :return:
    """
    # Undo the standardisation, then reinsert the constant columns.
    unscaled = preprocDict['scaler'].inverse_transform(matrix)
    return _add_constant(unscaled, preprocDict['index'], preprocDict['constant'])
def _remove_constant(matrix):
"""
Remove constant features.
:param matrix: matrix with constant features
:return: matrixR: matrix with constant features removed
index: vector of indexes where True == keep, False == constant and removed.
constants: matrix of constants removed for adding back later
"""
index = (np.var(matrix, 0) != 0)
matrix_r = np.zeros([np.shape(matrix)[0], sum(index)])
i = 0
for n in range(len(index)):
if index[n]:
matrix_r[:, i] = matrix[:, n]
i += 1
constants = matrix[0, np.invert(index)]
return matrix_r, index, constants
def _add_constant(matrix, index, constants):
"""
Add constant features back in.
:param matrix: matrix with constant features removed
index: vector of indexes where True == kept, False == constant and removed.
constants: vector of constants previously removed
:return: matrixF matrix with constant features added back in.
"""
# tile the constants for each data point
constants = np.matlib.repmat(constants, np.shape(matrix)[0], 1)
# add constants back in to a full matrix
matrixF = np.zeros((np.shape(matrix)[0], np.shape(index)[0]))
matrixF[:, index] = matrix
matrixF[:, np.invert(index)] = constants
return matrixF
|
[
"numpy.shape",
"numpy.var",
"sklearn.preprocessing.StandardScaler",
"numpy.invert"
] |
[((1521, 1538), 'numpy.var', 'np.var', (['matrix', '(0)'], {}), '(matrix, 0)\n', (1527, 1538), True, 'import numpy as np\n'), ((567, 588), 'sklearn.preprocessing.StandardScaler', 'skpp.StandardScaler', ([], {}), '()\n', (586, 588), True, 'from sklearn import preprocessing as skpp\n'), ((1756, 1772), 'numpy.invert', 'np.invert', (['index'], {}), '(index)\n', (1765, 1772), True, 'import numpy as np\n'), ((2325, 2341), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (2333, 2341), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.invert', 'np.invert', (['index'], {}), '(index)\n', (2516, 2523), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (1579, 1587), True, 'import numpy as np\n'), ((2419, 2435), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (2427, 2435), True, 'import numpy as np\n'), ((2440, 2455), 'numpy.shape', 'np.shape', (['index'], {}), '(index)\n', (2448, 2455), True, 'import numpy as np\n')]
|
# elasticsearch stuff that's completely separate from any models
import json
import requests
from letters.es_settings import ES_CLIENT, ES_ANALYZE, ES_MTERMVECTORS, ES_LETTER_URL, ES_SEARCH
from letters.models import Letter
def analyze_term(term, analyzer):
    """Run *term* through the given ES analyzer and return the tokens joined by spaces."""
    payload = json.dumps({
        "analyzer": analyzer,
        "text": term
    })
    result = do_es_analyze(payload)
    # Missing 'tokens' key yields an empty string, same as joining no tokens.
    tokens = result.get('tokens', [])
    return ' '.join(item['token'] for item in tokens)
def get_mtermvectors(ids, fields):
    """Request term vectors for several documents at once (no offsets/positions/stats)."""
    body = {
        "ids": ids,
        "parameters": {
            "fields": fields,
            "offsets": "false",
            "positions": "false",
            "field_statistics": "false"
        }
    }
    return do_es_mtermvectors(json.dumps(body))
def get_sentiment_termvector_for_text(text):
    """Term vector of *text* analyzed with the sentiment analyzer (with offsets/positions)."""
    query = build_termvector_query(
        text=text,
        analyzer='termvector_sentiment_analyzer',
        offsets='true',
        positions='true',
    )
    return do_es_termvectors_for_text(query)
def build_termvector_query(text, analyzer, offsets, positions):
    """Build the JSON body for a termvectors request on the 'contents' field.

    When *text* is non-empty, an artificial document is embedded so ES
    analyzes it instead of a stored document.
    """
    body = {
        "fields": ["contents"],
        "per_field_analyzer": {"contents": analyzer},
        "offsets": offsets,
        "positions": positions,
        "field_statistics": "false",
    }
    if text:
        body['doc'] = {"contents": text}
    return json.dumps(body)
def get_termvector_from_result(result):
    """Extract the per-term dict for the 'contents' field, or {} when any level is missing."""
    try:
        return result['term_vectors']['contents']['terms']
    except KeyError:
        return {}
def get_stored_fields_for_letter(letter_id, stored_fields):
    """Fetch only the given stored fields of one letter document from ES."""
    fields_param = ','.join(stored_fields)
    url = '{0}{1}?stored_fields={2}'.format(ES_LETTER_URL, str(letter_id), fields_param)
    response = requests.get(url)
    return json.loads(response.text)
def do_es_analyze(query):
    """Send *query* to the ES analyze endpoint and return the decoded JSON."""
    return json.loads(requests.get(ES_ANALYZE, data=query).text)
def do_es_mtermvectors(query):
    """Send *query* to the ES mtermvectors endpoint and return the decoded JSON."""
    return json.loads(requests.get(ES_MTERMVECTORS, data=query).text)
def do_es_termvectors_for_text(query):
    """Run a termvectors request and return just the extracted term dict."""
    url = str.format('{0}_termvectors', ES_LETTER_URL)
    raw = requests.get(url, data=query).text
    return get_termvector_from_result(json.loads(raw))
def do_es_search(query):
    """Send *query* to the ES search endpoint and return the decoded JSON."""
    return json.loads(requests.get(ES_SEARCH, data=query).text)
# Temporarily index a document to use elasticsearch to calculate
# custom sentiment score for a piece of arbitrary text
def index_temp_document(text):
    # Fixed id 'temp' so delete_temp_document can remove it again;
    # refresh=True makes the document visible to searches immediately.
    ES_CLIENT.index(
        index=Letter._meta.es_index_name,
        doc_type=Letter._meta.es_type_name,
        id='temp',
        refresh=True,
        body={'contents': text}
    )
def delete_temp_document():
    # Counterpart of index_temp_document: remove the temporary document.
    ES_CLIENT.delete(
        index=Letter._meta.es_index_name,
        doc_type=Letter._meta.es_type_name,
        id='temp',
        refresh=True,
    )
|
[
"json.loads",
"letters.es_settings.ES_CLIENT.delete",
"json.dumps",
"letters.es_settings.ES_CLIENT.index",
"requests.get"
] |
[((282, 330), 'json.dumps', 'json.dumps', (["{'analyzer': analyzer, 'text': term}"], {}), "({'analyzer': analyzer, 'text': term})\n", (292, 330), False, 'import json\n'), ((619, 752), 'json.dumps', 'json.dumps', (["{'ids': ids, 'parameters': {'fields': fields, 'offsets': 'false',\n 'positions': 'false', 'field_statistics': 'false'}}"], {}), "({'ids': ids, 'parameters': {'fields': fields, 'offsets': 'false',\n 'positions': 'false', 'field_statistics': 'false'}})\n", (629, 752), False, 'import json\n'), ((1610, 1627), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (1620, 1627), False, 'import json\n'), ((2149, 2166), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2161, 2166), False, 'import requests\n'), ((2179, 2204), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2189, 2204), False, 'import json\n'), ((2252, 2288), 'requests.get', 'requests.get', (['ES_ANALYZE'], {'data': 'query'}), '(ES_ANALYZE, data=query)\n', (2264, 2288), False, 'import requests\n'), ((2301, 2326), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2311, 2326), False, 'import json\n'), ((2379, 2420), 'requests.get', 'requests.get', (['ES_MTERMVECTORS'], {'data': 'query'}), '(ES_MTERMVECTORS, data=query)\n', (2391, 2420), False, 'import requests\n'), ((2433, 2458), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2443, 2458), False, 'import json\n'), ((2587, 2628), 'requests.get', 'requests.get', (['termvectors_url'], {'data': 'query'}), '(termvectors_url, data=query)\n', (2599, 2628), False, 'import requests\n'), ((2643, 2668), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2653, 2668), False, 'import json\n'), ((2762, 2797), 'requests.get', 'requests.get', (['ES_SEARCH'], {'data': 'query'}), '(ES_SEARCH, data=query)\n', (2774, 2797), False, 'import requests\n'), ((2810, 2835), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2820, 2835), 
False, 'import json\n'), ((2999, 3139), 'letters.es_settings.ES_CLIENT.index', 'ES_CLIENT.index', ([], {'index': 'Letter._meta.es_index_name', 'doc_type': 'Letter._meta.es_type_name', 'id': '"""temp"""', 'refresh': '(True)', 'body': "{'contents': text}"}), "(index=Letter._meta.es_index_name, doc_type=Letter._meta.\n es_type_name, id='temp', refresh=True, body={'contents': text})\n", (3014, 3139), False, 'from letters.es_settings import ES_CLIENT, ES_ANALYZE, ES_MTERMVECTORS, ES_LETTER_URL, ES_SEARCH\n'), ((3225, 3341), 'letters.es_settings.ES_CLIENT.delete', 'ES_CLIENT.delete', ([], {'index': 'Letter._meta.es_index_name', 'doc_type': 'Letter._meta.es_type_name', 'id': '"""temp"""', 'refresh': '(True)'}), "(index=Letter._meta.es_index_name, doc_type=Letter._meta.\n es_type_name, id='temp', refresh=True)\n", (3241, 3341), False, 'from letters.es_settings import ES_CLIENT, ES_ANALYZE, ES_MTERMVECTORS, ES_LETTER_URL, ES_SEARCH\n')]
|
import unittest
import numpy as np
from small_text.utils.data import list_length
class DataUtilsTest(unittest.TestCase):

    def test_list_length(self):
        # Plain Python list: length is len().
        plain = list(range(10))
        self.assertEqual(10, list_length(plain))
        # 2-d numpy array: length is the size of the first dimension.
        arr = np.random.rand(10, 2)
        self.assertEqual(10, list_length(arr))
|
[
"numpy.random.rand"
] |
[((257, 278), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (271, 278), True, 'import numpy as np\n')]
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the network_util module that require the full network.yaml."""
import os
import unittest
import makani
from makani.avionics.network import network_config
from makani.avionics.network import network_util
class NetworkYamlTest(unittest.TestCase):
  """Sanity checks that run over the complete network.yaml configuration."""

  def setUp(self):
    yaml_path = os.path.join(makani.HOME, 'avionics/network/network.yaml')
    self._network_config = network_config.NetworkConfig(yaml_path)

  def _message_graphs(self):
    # Yield (message, MessageGraph) for every message type over the full
    # switch topology; shared by both tests below.
    config = self._network_config
    messages = config.all_messages
    finder = network_util.PathFinder(config.GetSwitches(), messages)
    for message in messages:
      yield message, network_util.MessageGraph(finder, message)

  def testCheckForLoopRoutes(self):
    for message, graph in self._message_graphs():
      visitor = network_util.MessageGraphVisitor()
      graph.VisitSenders(visitor, message.all_senders)

  def testCheckForUnintendedRecipients(self):
    for _, graph in self._message_graphs():
      network_util.CheckForUnintendedRecipients(graph)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
|
[
"unittest.main",
"makani.avionics.network.network_util.MessageGraphVisitor",
"makani.avionics.network.network_util.CheckForUnintendedRecipients",
"makani.avionics.network.network_config.NetworkConfig",
"makani.avionics.network.network_util.MessageGraph",
"os.path.join"
] |
[((1785, 1800), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1798, 1800), False, 'import unittest\n'), ((886, 944), 'os.path.join', 'os.path.join', (['makani.HOME', '"""avionics/network/network.yaml"""'], {}), "(makani.HOME, 'avionics/network/network.yaml')\n", (898, 944), False, 'import os\n'), ((972, 1010), 'makani.avionics.network.network_config.NetworkConfig', 'network_config.NetworkConfig', (['filename'], {}), '(filename)\n', (1000, 1010), False, 'from makani.avionics.network import network_config\n'), ((1249, 1296), 'makani.avionics.network.network_util.MessageGraph', 'network_util.MessageGraph', (['path_finder', 'message'], {}), '(path_finder, message)\n', (1274, 1296), False, 'from makani.avionics.network import network_util\n'), ((1313, 1347), 'makani.avionics.network.network_util.MessageGraphVisitor', 'network_util.MessageGraphVisitor', ([], {}), '()\n', (1345, 1347), False, 'from makani.avionics.network import network_util\n'), ((1651, 1698), 'makani.avionics.network.network_util.MessageGraph', 'network_util.MessageGraph', (['path_finder', 'message'], {}), '(path_finder, message)\n', (1676, 1698), False, 'from makani.avionics.network import network_util\n'), ((1705, 1753), 'makani.avionics.network.network_util.CheckForUnintendedRecipients', 'network_util.CheckForUnintendedRecipients', (['graph'], {}), '(graph)\n', (1746, 1753), False, 'from makani.avionics.network import network_util\n')]
|
from typing import List
from hibpcli.password import Password
from pykeepass import PyKeePass # type: ignore
def check_passwords_from_db(path: str, master_password: str) -> List[str]:
    """Open the KeePass database at *path* and return every entry whose
    password is reported as leaked by the HIBP check."""
    database = PyKeePass(path, password=master_password)
    leaked_entries = []
    for entry in database.entries:
        if Password(password=entry.password).is_leaked():
            leaked_entries.append(entry)
    return leaked_entries
|
[
"pykeepass.PyKeePass",
"hibpcli.password.Password"
] |
[((211, 252), 'pykeepass.PyKeePass', 'PyKeePass', (['path'], {'password': 'master_password'}), '(path, password=master_password)\n', (220, 252), False, 'from pykeepass import PyKeePass\n'), ((307, 340), 'hibpcli.password.Password', 'Password', ([], {'password': 'entry.password'}), '(password=entry.password)\n', (315, 340), False, 'from hibpcli.password import Password\n')]
|
from fastapi import APIRouter
import json
router = APIRouter()
@router.get('/receita/{produtorId}')
def buscar_receita(produtorId: int):
    """Return the revenue record for one producer, adding the yearly total."""
    with open('api/controllers/dados/receita.json') as arquivo:
        registros = json.load(arquivo)
    correspondentes = [r['data'] for r in registros
                       if r['produtorId'] == produtorId]
    dados = correspondentes[0]
    dados['receita_total'] = sum(dados['grafico_dados']['receita_mensal'])
    return dados
@router.get('/distribuicao/{produtorId}')
def buscar_distribuicao(produtorId: int):
    """Return the sales-distribution record for one producer."""
    with open('api/controllers/dados/distribuicao_vendas.json') as arquivo:
        registros = json.load(arquivo)
    correspondentes = [r['data'] for r in registros
                       if r['produtorId'] == produtorId]
    return correspondentes[0]
@router.get('/vendas-mensais/{produtorId}')
def buscar_vendas_mensais(produtorId: int):
    """Return the monthly-sales record for one producer."""
    with open('api/controllers/dados/vendas_mensais.json') as arquivo:
        registros = json.load(arquivo)
    correspondentes = [r['dados'] for r in registros
                       if r['produtorId'] == produtorId]
    return correspondentes[0]
|
[
"json.load",
"fastapi.APIRouter"
] |
[((51, 62), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (60, 62), False, 'from fastapi import APIRouter\n'), ((236, 251), 'json.load', 'json.load', (['file'], {}), '(file)\n', (245, 251), False, 'import json\n'), ((721, 736), 'json.load', 'json.load', (['file'], {}), '(file)\n', (730, 736), False, 'import json\n'), ((1106, 1121), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1115, 1121), False, 'import json\n')]
|
# https://zenpack-sdk.zenoss.com/en/2.0.0/changes.html
from ZenPacks.zenoss.ZenPackLib import zenpacklib
# Parse this ZenPack's zenpack.yaml and build its spec/model classes.
CFG = zenpacklib.load_yaml()
# Schema module from which classes elsewhere in the ZenPack derive.
schema = CFG.zenpack_module.schema
|
[
"ZenPacks.zenoss.ZenPackLib.zenpacklib.load_yaml"
] |
[((111, 133), 'ZenPacks.zenoss.ZenPackLib.zenpacklib.load_yaml', 'zenpacklib.load_yaml', ([], {}), '()\n', (131, 133), False, 'from ZenPacks.zenoss.ZenPackLib import zenpacklib\n')]
|
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
from matplotlib import rcParams
from sklearn import metrics
from sklearn import tree
# Global matplotlib fonts used by all figures in this module.
rcParams["font.serif"] = "Times New Roman"
rcParams["font.family"] = "serif"
# Project directory layout (absolute Windows paths).
dirs = dict(main="F:\\Masterarbeit\\DLR\\project\\1_truck_detection")
dirs["plots"] = os.path.join("F:" + os.sep + "Masterarbeit", "THESIS", "general", "plots")
dirs["truth"] = os.path.join(dirs["main"], "truth")
rf_file = os.path.join(dirs["main"], "code", "detect_trucks", "rf_model.pickle")
# NOTE(review): pickle.load assumes these files were written by a trusted run
# of this project — pickle is unsafe on untrusted data.
rf = pickle.load(open(rf_file, "rb"))
# read test variables and labels in order to calculate metrics
variables_list = pickle.load(open(os.path.join(dirs["truth"], "validation_variables.pickle"), "rb"))
labels_list = pickle.load(open(os.path.join(dirs["truth"], "validation_labels.pickle"), "rb"))
def plot_random_forest(rf_model, test_variables, test_labels):
test_pred = rf._predict(test_variables)
plot_confusion_matrix(metrics.confusion_matrix(test_labels, test_pred, labels=[2, 3, 4, 1]))
accuracy = metrics.accuracy_score(test_labels, test_pred)
report = metrics.classification_report(test_labels, test_pred)
labels = np.unique(test_labels)
summary = np.zeros((len(labels) + 3, 4), dtype=np.float16)
for i, label in enumerate(labels):
for j, fun in enumerate([metrics.precision_score, metrics.recall_score, metrics.f1_score]):
summary[i, j] = fun(test_labels, test_pred, average="micro", labels=[label])
summary[-3, j] = fun(test_labels, test_pred, average="macro")
summary[-2, j] = fun(test_labels, test_pred, average="weighted")
summary[i, 3] = np.count_nonzero(np.int8(test_labels) == label)
summary[-3, 3] = len(test_labels)
summary[-2, 3] = len(test_labels)
summary[-1, 3] = len(test_labels)
summary[-1, 2] = metrics.accuracy_score(test_labels, test_pred)
columns = ["Precision", "Recall", "F1-score", "Support"]
shape = summary.shape
fig, ax = plt.subplots()
summary_altered = summary.copy() # copy in order to set n label column to 0 for imshow
summary_altered[:, -1] = 0 # np.min(summary[0:-1, 0:3]) - 0.1
summary_altered[summary_altered == 0] = np.nan
cmap = cm.Greens.__copy__()
im = ax.imshow(summary_altered.astype(np.float32), cmap=cmap, aspect=0.3)
ax.set_xticks(np.arange(shape[1]))
ax.set_yticks(np.arange(shape[0]))
ax.set_yticklabels(["Background", "Blue", "Green", "Red", "Macro avg.", "Weighted avg.", "Accuracy"])
ax.set_xticklabels(columns)
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
plt.subplots_adjust(bottom=0.2)
for i in range(shape[0]):
for j in range(shape[1]):
value = summary[i, j]
value = np.round(value, 2) if value <= 1 else np.int32(value)
if value != 0:
text = ax.text(j, i, value, ha="center", va="center", color="black")
fig.tight_layout()
plt.savefig(os.path.join(dirs["plots"], "rf_classification_summary_heatmap.png"), dpi=500)
def plot_confusion_matrix(conf_matrix):
labels = ["blue", "green", "red", "background"]
fig, ax = plt.subplots(figsize=(3.5, 3.5))
cmap = cm.YlGn.__copy__()
im = plt.imshow(conf_matrix, cmap=cmap)
shape = conf_matrix.shape
ax.xaxis.tick_top()
ax.set_xticks(np.arange(shape[1]))
ax.set_yticks(np.arange(shape[0]))
ax.set_yticklabels(labels)
ax.set_xticklabels(labels)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.subplots_adjust(bottom=0.25, left=0.25)
# add numeric labels inside plot
for i in range(shape[0]):
for j in range(shape[1]):
value = str(conf_matrix[i, j])
if len(value) == 2:
value = " %s" % value
elif len(value) == 1:
value = " %s" % value
plt.text(i - 0.2, j + 0.11, value, fontsize=11)
plt.text(1.2, -1.2, "True", fontsize=12, fontweight="bold")
plt.text(-2.8, 2, "Predicted", fontsize=12, fontweight="bold", rotation=90)
plt.tight_layout()
plt.savefig(os.path.join(dirs["plots"], "confusion_matrix.png"), dpi=500)
plt.close()
def plot_feature_importance(rf_model):
fig, ax = plt.subplots(figsize=(10, 1))
left = 0
feature_importances = np.round(rf_model.feature_importances_, 2)
argsort = np.argsort(feature_importances)[::-1]
labels = np.array(["reflectance_variance", "B04_B02_ratio", "B03_B02_ratio", "B04_centered", "B03_centered",
"B02_centered", "B08_centered"])[argsort]
colors = np.array(["#757575", "#dc4ff0", "#39e7ad", "#ff0000", "#00ff00", "#0000ff", "#7c0912"])[argsort]
feature_importances = feature_importances[argsort]
offsets = [0.18, 0.12, 0, -0.1, -0.1, -0.5, -0.3]
for c, importance, label, idx in zip(colors, feature_importances, labels, range(len(labels))):
ax.barh(0, importance, height=0.2, color=c, left=left, edgecolor="black", label="label")
text = ax.text(left + importance * 0.5, -0.01, "%s" % importance, ha="center",
va="center", color="w", weight="bold", fontsize=16)
text = ax.text(left + importance * offsets[idx], [-0.24, 0.16][int(int(idx / 2) == idx / 2)], label, fontsize=16)
left += importance
text = ax.text(-0.015, -0.05, "0", fontsize=16)
text = ax.text(1.005, -0.05, "1", fontsize=16)
ax.set_xlabel("")
plt.ylabel("")
plt.subplots_adjust(bottom=0.8)
plt.subplots_adjust(top=0.9)
plt.xlim(0, left)
positions = feature_importances.copy()
for i in range(len(feature_importances)):
positions[i] = np.sum(feature_importances[:i])
ax.set_xticks([])
ax.set_yticks([])
ax.set_yticklabels("")
plt.tight_layout()
plt.subplots_adjust(left=0.05, bottom=0.3)
plt.savefig(os.path.join(dirs["plots"], "rf_feature_importances_barplot.png"), dpi=500)
plt.close()
if __name__ == "__main__":
#plot_random_forest(rf, variables_list, labels_list)
plot_feature_importance(rf)
|
[
"numpy.sum",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"os.path.join",
"numpy.unique",
"numpy.int8",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"numpy.int32",
"matplotlib.pyplot.subplots",
"matplotlib.cm.YlGn.__copy__",
"matplotlib.cm.Greens.__copy__",
"matplotlib.pyplot.text",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"numpy.array",
"sklearn.metrics.confusion_matrix"
] |
[((393, 467), 'os.path.join', 'os.path.join', (["('F:' + os.sep + 'Masterarbeit')", '"""THESIS"""', '"""general"""', '"""plots"""'], {}), "('F:' + os.sep + 'Masterarbeit', 'THESIS', 'general', 'plots')\n", (405, 467), False, 'import os\n'), ((484, 519), 'os.path.join', 'os.path.join', (["dirs['main']", '"""truth"""'], {}), "(dirs['main'], 'truth')\n", (496, 519), False, 'import os\n'), ((531, 601), 'os.path.join', 'os.path.join', (["dirs['main']", '"""code"""', '"""detect_trucks"""', '"""rf_model.pickle"""'], {}), "(dirs['main'], 'code', 'detect_trucks', 'rf_model.pickle')\n", (543, 601), False, 'import os\n'), ((1120, 1166), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1142, 1166), False, 'from sklearn import metrics\n'), ((1180, 1233), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1209, 1233), False, 'from sklearn import metrics\n'), ((1247, 1269), 'numpy.unique', 'np.unique', (['test_labels'], {}), '(test_labels)\n', (1256, 1269), True, 'import numpy as np\n'), ((1919, 1965), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1941, 1965), False, 'from sklearn import metrics\n'), ((2067, 2081), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2079, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2323), 'matplotlib.cm.Greens.__copy__', 'cm.Greens.__copy__', ([], {}), '()\n', (2321, 2323), False, 'from matplotlib import cm\n'), ((2792, 2823), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (2811, 2823), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3366), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.5, 3.5)'}), '(figsize=(3.5, 3.5))\n', (3346, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3378, 
3396), 'matplotlib.cm.YlGn.__copy__', 'cm.YlGn.__copy__', ([], {}), '()\n', (3394, 3396), False, 'from matplotlib import cm\n'), ((3406, 3440), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conf_matrix'], {'cmap': 'cmap'}), '(conf_matrix, cmap=cmap)\n', (3416, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3768), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.25)', 'left': '(0.25)'}), '(bottom=0.25, left=0.25)\n', (3744, 3768), True, 'import matplotlib.pyplot as plt\n'), ((4120, 4179), 'matplotlib.pyplot.text', 'plt.text', (['(1.2)', '(-1.2)', '"""True"""'], {'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.2, -1.2, 'True', fontsize=12, fontweight='bold')\n", (4128, 4179), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4259), 'matplotlib.pyplot.text', 'plt.text', (['(-2.8)', '(2)', '"""Predicted"""'], {'fontsize': '(12)', 'fontweight': '"""bold"""', 'rotation': '(90)'}), "(-2.8, 2, 'Predicted', fontsize=12, fontweight='bold', rotation=90)\n", (4192, 4259), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4282), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4280, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4365, 4376), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4374, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4432, 4461), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 1)'}), '(figsize=(10, 1))\n', (4444, 4461), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4543), 'numpy.round', 'np.round', (['rf_model.feature_importances_', '(2)'], {}), '(rf_model.feature_importances_, 2)\n', (4509, 4543), True, 'import numpy as np\n'), ((5629, 5643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (5639, 5643), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5679), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.8)'}), '(bottom=0.8)\n', (5667, 5679), True, 'import 
matplotlib.pyplot as plt\n'), ((5684, 5712), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (5703, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5734), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'left'], {}), '(0, left)\n', (5725, 5734), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5972), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5970, 5972), True, 'import matplotlib.pyplot as plt\n'), ((5977, 6019), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'bottom': '(0.3)'}), '(left=0.05, bottom=0.3)\n', (5996, 6019), True, 'import matplotlib.pyplot as plt\n'), ((6116, 6127), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6125, 6127), True, 'import matplotlib.pyplot as plt\n'), ((737, 795), 'os.path.join', 'os.path.join', (["dirs['truth']", '"""validation_variables.pickle"""'], {}), "(dirs['truth'], 'validation_variables.pickle')\n", (749, 795), False, 'import os\n'), ((835, 890), 'os.path.join', 'os.path.join', (["dirs['truth']", '"""validation_labels.pickle"""'], {}), "(dirs['truth'], 'validation_labels.pickle')\n", (847, 890), False, 'import os\n'), ((1034, 1103), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_labels', 'test_pred'], {'labels': '[2, 3, 4, 1]'}), '(test_labels, test_pred, labels=[2, 3, 4, 1])\n', (1058, 1103), False, 'from sklearn import metrics\n'), ((2420, 2439), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (2429, 2439), True, 'import numpy as np\n'), ((2459, 2478), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (2468, 2478), True, 'import numpy as np\n'), ((3147, 3215), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""rf_classification_summary_heatmap.png"""'], {}), "(dirs['plots'], 'rf_classification_summary_heatmap.png')\n", (3159, 3215), False, 'import os\n'), ((3513, 3532), 'numpy.arange', 'np.arange', (['shape[1]'], {}), 
'(shape[1])\n', (3522, 3532), True, 'import numpy as np\n'), ((3552, 3571), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (3561, 3571), True, 'import numpy as np\n'), ((4299, 4350), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""confusion_matrix.png"""'], {}), "(dirs['plots'], 'confusion_matrix.png')\n", (4311, 4350), False, 'import os\n'), ((4558, 4589), 'numpy.argsort', 'np.argsort', (['feature_importances'], {}), '(feature_importances)\n', (4568, 4589), True, 'import numpy as np\n'), ((4609, 4745), 'numpy.array', 'np.array', (["['reflectance_variance', 'B04_B02_ratio', 'B03_B02_ratio', 'B04_centered',\n 'B03_centered', 'B02_centered', 'B08_centered']"], {}), "(['reflectance_variance', 'B04_B02_ratio', 'B03_B02_ratio',\n 'B04_centered', 'B03_centered', 'B02_centered', 'B08_centered'])\n", (4617, 4745), True, 'import numpy as np\n'), ((4787, 4878), 'numpy.array', 'np.array', (["['#757575', '#dc4ff0', '#39e7ad', '#ff0000', '#00ff00', '#0000ff', '#7c0912']"], {}), "(['#757575', '#dc4ff0', '#39e7ad', '#ff0000', '#00ff00', '#0000ff',\n '#7c0912'])\n", (4795, 4878), True, 'import numpy as np\n'), ((5847, 5878), 'numpy.sum', 'np.sum', (['feature_importances[:i]'], {}), '(feature_importances[:i])\n', (5853, 5878), True, 'import numpy as np\n'), ((6036, 6101), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""rf_feature_importances_barplot.png"""'], {}), "(dirs['plots'], 'rf_feature_importances_barplot.png')\n", (6048, 6101), False, 'import os\n'), ((4068, 4115), 'matplotlib.pyplot.text', 'plt.text', (['(i - 0.2)', '(j + 0.11)', 'value'], {'fontsize': '(11)'}), '(i - 0.2, j + 0.11, value, fontsize=11)\n', (4076, 4115), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1773), 'numpy.int8', 'np.int8', (['test_labels'], {}), '(test_labels)\n', (1760, 1773), True, 'import numpy as np\n'), ((2942, 2960), 'numpy.round', 'np.round', (['value', '(2)'], {}), '(value, 2)\n', (2950, 2960), True, 'import numpy as np\n'), ((2980, 2995), 'numpy.int32', 
'np.int32', (['value'], {}), '(value)\n', (2988, 2995), True, 'import numpy as np\n')]
|
import random
print("-----------------------------------")
print("-------Rock, paper, scissors-------")
print("Welcome to the game!")
print("The game consists of three rounds.")
print("The winner is the one who scores more points.")
print("\t[r] - rock\n\t[s] - scissors\n\t[p] - paper")
player_score = 0
player_select = 0
comp_score = 0
comp_select = 0
print("---------------------------------------")
print("------------[ START GAME ]-------------")
for i in range(3):
print("\t---------> ROUND № " + str(i + 1) + " <---------")
comp_select = random.choice("rps")
while True:
player_select = input("\tYour choice: ")
if (player_select == "r") or (player_select == "s") or (player_select == "p"):
break
else:
print("\tError")
print("\tComputer: " + comp_select)
if player_select == comp_select:
print("\tDraw!")
elif player_select == "r" and comp_select == "s":
player_score = player_score + 1
print("\tYou win!")
elif player_select == "r" and comp_select == "p":
comp_score = comp_score + 1
print("\tThe computer wins!")
elif player_select == "p" and comp_select == "r":
player_score = player_score + 1
print("\tYou win!")
elif player_select == "p" and comp_select == "s":
comp_score = comp_score + 1
print("\tThe computer wins!")
elif player_select == "s" and comp_select == "p":
player_score = player_score + 1
print("\tYou win!")
elif player_select == "s" and comp_select == "r":
comp_score = comp_score + 1
print("\tThe computer wins!")
print("-----------------------------------")
print("------------Game Result------------")
if player_score > comp_score:
print("Congratulations! You win!")
elif player_score < comp_score:
print("Sorry... The computer wins!")
else:
print("Draw!")
|
[
"random.choice"
] |
[((555, 575), 'random.choice', 'random.choice', (['"""rps"""'], {}), "('rps')\n", (568, 575), False, 'import random\n')]
|
from bluesky.magics import BlueskyMagics
import bluesky.plans as bp
import bluesky.plan_stubs as bps
import os
import pytest
import signal
from types import SimpleNamespace
class FakeIPython:
def __init__(self, user_ns):
self.user_ns = user_ns
def compare_msgs(actual, expected):
for a, e in zip(actual, expected):
# Strip off randomized stuff that cannot be compared.
a.kwargs.pop('group', None)
e.kwargs.pop('group', None)
assert a == e
@pytest.mark.parametrize('pln,plnargs,magic,line,detectors_factory', [
(bps.mv, lambda hw: (hw.motor1, 2),
'mov', 'motor1 2', lambda hw: []),
(bps.mv, lambda hw: (hw.motor1, 2, hw.motor2, 3),
'mov', 'motor1 2 motor2 3', lambda hw: []),
(bps.mvr, lambda hw: (hw.motor1, 2),
'movr', 'motor1 2', lambda hw: []),
(bps.mvr, lambda hw: (hw.motor1, 2, hw.motor2, 3),
'movr', 'motor1 2 motor2 3', lambda hw: []),
(bp.count, lambda hw: ([hw.invariant1],),
'ct', 'favorite_detectors', lambda hw: []),
(bp.count, lambda hw: ([hw.invariant1, hw.invariant2],),
'ct', '', lambda hw: []),
(bp.count, lambda hw: ([hw.invariant1],),
'ct', 'dets', lambda hw: [hw.invariant1, hw.invariant2]),
(bp.count, lambda hw: ([hw.invariant1, hw.invariant2],),
'ct', '', lambda hw: [hw.invariant1, hw.invariant2]),
])
def test_bluesky_magics(pln, plnargs, magic, line, detectors_factory,
RE, hw):
# Build a FakeIPython instance to use the magics with.
dets = [hw.invariant1]
hw.invariant1._ophyd_labels_ = set(['detectors', 'favorite_detectors'])
hw.invariant2._ophyd_labels_ = set(['detectors'])
ip = FakeIPython({'motor1': hw.motor1, 'motor2': hw.motor2,
'invariant1': hw.invariant1, 'invariant2': hw.invariant2,
'dets': dets}
)
sm = BlueskyMagics(ip)
detectors = detectors_factory(hw)
if detectors:
# Test deprecated usage of %ct.
with pytest.warns(UserWarning):
BlueskyMagics.detectors = detectors
# Spy on all msgs processed by RE.
msgs = []
def collect(msg):
msgs.append(msg)
RE.msg_hook = collect
BlueskyMagics.RE.msg_hook = collect
# Test magics cause the RunEngine to execute the messages we expect.
RE(bps.mv(hw.motor1, 10, hw.motor2, 10)) # ensure known initial state
msgs.clear()
RE(pln(*plnargs(hw)))
expected = msgs.copy()
RE(bps.mv(hw.motor1, 10, hw.motor2, 10)) # ensure known initial state
msgs.clear()
if detectors:
# Test deprecated usage of %ct. Must catch warning.
with pytest.warns(UserWarning):
getattr(sm, magic)(line)
else:
# Normal usage, no warning.
getattr(sm, magic)(line)
actual = msgs.copy()
msgs.clear()
compare_msgs(actual, expected)
if detectors:
with pytest.warns(UserWarning):
BlueskyMagics.detectors.clear()
def test_wa(hw):
motor = hw.motor
det = hw.motor
ip = FakeIPython({'motor': motor, 'det': det})
sm = BlueskyMagics(ip)
# Test an empty list with no labels set.
sm.wa('')
# Test again with labeled objects.
motor._ophyd_labels_ = ['motors']
motor._ophyd_labels_ = ['detectors']
# Test an empty list.
sm.wa('')
# Test with a label whitelist
sm.wa('motors')
sm.wa('motors detectors')
sm.wa('motors typo')
with pytest.raises(ValueError):
sm.wa('[motors, detectors]')
# The %wa magic doesn't use a RunEngine or a plan.
def test_wa_legacy(hw):
motor = hw.motor
ip = FakeIPython({'motor': motor})
sm = BlueskyMagics(ip)
BlueskyMagics.positioners.extend([motor])
with pytest.warns(UserWarning):
sm.wa('')
# Make motor support more attributes.
motor.limits = (-1, 1)
with pytest.warns(UserWarning):
sm.wa('')
motor.user_offset = SimpleNamespace(get=lambda: 0)
with pytest.warns(UserWarning):
sm.wa('[motor]')
with pytest.warns(UserWarning):
BlueskyMagics.positioners.clear()
def test_magics_missing_ns_key(RE, hw):
ip = FakeIPython({})
sm = BlueskyMagics(ip)
with pytest.raises(NameError):
sm.mov('motor1 5')
ip.user_ns['motor1'] = hw.motor1
sm.mov('motor1 5')
def test_interrupted(RE, hw):
motor = hw.motor
motor.delay = 10
ip = FakeIPython({})
sm = BlueskyMagics(ip)
ip.user_ns['motor'] = motor
pid = os.getpid()
def sim_kill(n=1):
for j in range(n):
print('KILL')
os.kill(pid, signal.SIGINT)
motor.loop = sm.RE.loop
sm.RE.loop.call_later(1, sim_kill, 2)
sm.mov('motor 1')
assert sm.RE.state == 'idle'
|
[
"bluesky.magics.BlueskyMagics.detectors.clear",
"os.getpid",
"bluesky.magics.BlueskyMagics.positioners.extend",
"pytest.warns",
"bluesky.magics.BlueskyMagics.positioners.clear",
"os.kill",
"pytest.raises",
"bluesky.plan_stubs.mv",
"pytest.mark.parametrize",
"types.SimpleNamespace",
"bluesky.magics.BlueskyMagics"
] |
[((494, 1323), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pln,plnargs,magic,line,detectors_factory"""', "[(bps.mv, lambda hw: (hw.motor1, 2), 'mov', 'motor1 2', lambda hw: []), (\n bps.mv, lambda hw: (hw.motor1, 2, hw.motor2, 3), 'mov',\n 'motor1 2 motor2 3', lambda hw: []), (bps.mvr, lambda hw: (hw.motor1, 2\n ), 'movr', 'motor1 2', lambda hw: []), (bps.mvr, lambda hw: (hw.motor1,\n 2, hw.motor2, 3), 'movr', 'motor1 2 motor2 3', lambda hw: []), (bp.\n count, lambda hw: ([hw.invariant1],), 'ct', 'favorite_detectors', lambda\n hw: []), (bp.count, lambda hw: ([hw.invariant1, hw.invariant2],), 'ct',\n '', lambda hw: []), (bp.count, lambda hw: ([hw.invariant1],), 'ct',\n 'dets', lambda hw: [hw.invariant1, hw.invariant2]), (bp.count, lambda\n hw: ([hw.invariant1, hw.invariant2],), 'ct', '', lambda hw: [hw.\n invariant1, hw.invariant2])]"], {}), "('pln,plnargs,magic,line,detectors_factory', [(bps.\n mv, lambda hw: (hw.motor1, 2), 'mov', 'motor1 2', lambda hw: []), (bps.\n mv, lambda hw: (hw.motor1, 2, hw.motor2, 3), 'mov', 'motor1 2 motor2 3',\n lambda hw: []), (bps.mvr, lambda hw: (hw.motor1, 2), 'movr', 'motor1 2',\n lambda hw: []), (bps.mvr, lambda hw: (hw.motor1, 2, hw.motor2, 3),\n 'movr', 'motor1 2 motor2 3', lambda hw: []), (bp.count, lambda hw: ([hw\n .invariant1],), 'ct', 'favorite_detectors', lambda hw: []), (bp.count, \n lambda hw: ([hw.invariant1, hw.invariant2],), 'ct', '', lambda hw: []),\n (bp.count, lambda hw: ([hw.invariant1],), 'ct', 'dets', lambda hw: [hw.\n invariant1, hw.invariant2]), (bp.count, lambda hw: ([hw.invariant1, hw.\n invariant2],), 'ct', '', lambda hw: [hw.invariant1, hw.invariant2])])\n", (517, 1323), False, 'import pytest\n'), ((1888, 1905), 'bluesky.magics.BlueskyMagics', 'BlueskyMagics', (['ip'], {}), '(ip)\n', (1901, 1905), False, 'from bluesky.magics import BlueskyMagics\n'), ((3102, 3119), 'bluesky.magics.BlueskyMagics', 'BlueskyMagics', (['ip'], {}), '(ip)\n', (3115, 3119), False, 'from bluesky.magics import 
BlueskyMagics\n'), ((3669, 3686), 'bluesky.magics.BlueskyMagics', 'BlueskyMagics', (['ip'], {}), '(ip)\n', (3682, 3686), False, 'from bluesky.magics import BlueskyMagics\n'), ((3691, 3732), 'bluesky.magics.BlueskyMagics.positioners.extend', 'BlueskyMagics.positioners.extend', (['[motor]'], {}), '([motor])\n', (3723, 3732), False, 'from bluesky.magics import BlueskyMagics\n'), ((3935, 3966), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'get': '(lambda : 0)'}), '(get=lambda : 0)\n', (3950, 3966), False, 'from types import SimpleNamespace\n'), ((4183, 4200), 'bluesky.magics.BlueskyMagics', 'BlueskyMagics', (['ip'], {}), '(ip)\n', (4196, 4200), False, 'from bluesky.magics import BlueskyMagics\n'), ((4432, 4449), 'bluesky.magics.BlueskyMagics', 'BlueskyMagics', (['ip'], {}), '(ip)\n', (4445, 4449), False, 'from bluesky.magics import BlueskyMagics\n'), ((4493, 4504), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4502, 4504), False, 'import os\n'), ((2340, 2376), 'bluesky.plan_stubs.mv', 'bps.mv', (['hw.motor1', '(10)', 'hw.motor2', '(10)'], {}), '(hw.motor1, 10, hw.motor2, 10)\n', (2346, 2376), True, 'import bluesky.plan_stubs as bps\n'), ((2485, 2521), 'bluesky.plan_stubs.mv', 'bps.mv', (['hw.motor1', '(10)', 'hw.motor2', '(10)'], {}), '(hw.motor1, 10, hw.motor2, 10)\n', (2491, 2521), True, 'import bluesky.plan_stubs as bps\n'), ((3459, 3484), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3472, 3484), False, 'import pytest\n'), ((3742, 3767), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3754, 3767), False, 'import pytest\n'), ((3866, 3891), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3878, 3891), False, 'import pytest\n'), ((3976, 4001), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3988, 4001), False, 'import pytest\n'), ((4038, 4063), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (4050, 4063), False, 'import pytest\n'), 
((4073, 4106), 'bluesky.magics.BlueskyMagics.positioners.clear', 'BlueskyMagics.positioners.clear', ([], {}), '()\n', (4104, 4106), False, 'from bluesky.magics import BlueskyMagics\n'), ((4210, 4234), 'pytest.raises', 'pytest.raises', (['NameError'], {}), '(NameError)\n', (4223, 4234), False, 'import pytest\n'), ((2015, 2040), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2027, 2040), False, 'import pytest\n'), ((2661, 2686), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2673, 2686), False, 'import pytest\n'), ((2912, 2937), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2924, 2937), False, 'import pytest\n'), ((2951, 2982), 'bluesky.magics.BlueskyMagics.detectors.clear', 'BlueskyMagics.detectors.clear', ([], {}), '()\n', (2980, 2982), False, 'from bluesky.magics import BlueskyMagics\n'), ((4594, 4621), 'os.kill', 'os.kill', (['pid', 'signal.SIGINT'], {}), '(pid, signal.SIGINT)\n', (4601, 4621), False, 'import os\n')]
|
import string
import numpy as np
import sys
import random
import os
from shutil import copyfile
import subprocess
from rpt_ele import rpt_ele
import update_process_model_input_file as up
import swmm_mpc as sm
def get_flood_cost_from_dict(rpt, node_flood_weight_dict):
node_flood_costs = []
for nodeid, weight in node_flood_weight_dict.iteritems():
# if user put "Node J3" for nodeid instead of just "J3" make \
# nodeid "J3"
if len(nodeid.split()) > 0:
nodeid = nodeid.split()[-1]
# try/except used here in case there is no flooding for one or \
# more of the nodes
if nodeid not in rpt.node_ids:
print("warning node {} is not in model".format(nodeid))
try:
# flood volume is in column, 5
node_flood_volume = float(rpt.flooding_df.loc[nodeid, 5])
node_flood_cost = (weight*node_flood_volume)
node_flood_costs.append(node_flood_cost)
except:
pass
return sum(node_flood_costs)
def get_flood_cost(rpt, node_flood_weight_dict):
if rpt.total_flooding > 0 and node_flood_weight_dict:
return get_flood_cost_from_dict(rpt, node_flood_weight_dict)
else:
return rpt.total_flooding
def get_deviation_cost(rpt, target_depth_dict):
node_deviation_costs = []
if target_depth_dict:
for nodeid, data in target_depth_dict.iteritems():
depth = rpt.get_ele_df(nodeid)['Depth']
depth_dev = abs(depth - data['target'])
avg_dev = depth_dev.sum()/len(depth_dev)
weighted_deviation = avg_dev*data['weight']
node_deviation_costs.append(weighted_deviation)
return sum(node_deviation_costs)
def get_cost(rpt_file, node_flood_weight_dict, flood_weight, target_depth_dict,
dev_weight):
# read the output file
rpt = rpt_ele('{}'.format(rpt_file))
# get flooding costs
node_fld_cost = get_flood_cost(rpt, node_flood_weight_dict)
# get deviation costs
deviation_cost = get_deviation_cost(rpt, target_depth_dict)
# convert the contents of the output file into a cost
cost = flood_weight*node_fld_cost + dev_weight*deviation_cost
return cost
def bits_to_decimal(bits):
bits_as_string = "".join(str(i) for i in bits)
return float(int(bits_as_string, 2))
def bits_max_val(bit_len):
bit_ones = [1 for i in range(bit_len)]
return bits_to_decimal(bit_ones)
def bits_to_perc(bits):
bit_dec = bits_to_decimal(bits)
max_bits = bits_max_val(len(bits))
return round(bit_dec/max_bits, 3)
def bit_to_on_off(bit):
"""
convert single bit to "ON" or "OFF"
bit: [int] or [list]
"""
if type(bit) == list:
if len(bit) > 1:
raise ValueError('you passed more than one bit to this fxn')
else:
bit = bit[0]
if bit == 1:
return "ON"
elif bit == 0:
return "OFF"
else:
raise ValueError('was expecting 1 or 0 and got {}'.format(bit))
def split_gene_by_ctl_ts(gene, control_str_ids, n_steps):
"""
split a list of bits representing a gene into the bits that correspond with
each control id according to the control type for each time step
ASSUMPTION: 3 bits for ORIFICE or WEIR, 1 for PUMP
gene: [list] bits for a gene (e.g., [1, 0, 1, 1, 1, 0, 0, 1])
control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1'])
n_steps: [int] number of control steps (e.g., 2)
returns: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]]
"""
split_gene = []
for control_id in control_str_ids:
# get the control type (i.e. PUMP, WEIR, ORIFICE)
control_type = control_id.split()[0]
if control_type == 'ORIFICE' or control_type == 'WEIR':
bits_per_type = 3
# get the number of control elements that are for the current ctl
elif control_type == 'PUMP':
bits_per_type = 1
# the number of bits per control structure
n_bits = bits_per_type*n_steps
# get the segment for the control
gene_seg = gene[:n_bits]
# split to get the different time steps
gene_seg_per_ts = split_list(gene_seg, n_steps)
# add the gene segment to the overall list
split_gene.append(gene_seg_per_ts)
# move the beginning of the gene to the end of the current ctl segment
gene = gene[n_bits:]
return split_gene
def split_list(a_list, n):
"""
split one list into n lists of equal size. In this case, we are splitting
the list that represents the policy of a single each control structure
so that each time step is separate
"""
portions = len(a_list)/n
split_lists = []
for i in range(n):
split_lists.append(a_list[i*portions: (i+1)*portions])
return split_lists
def gene_to_policy_dict(gene, control_str_ids, n_control_steps):
"""
converts a gene to a policy dictionary that with the format specified in
up.update_controls_and_hotstart
format a policy given the control_str_ids and splitted_gene
control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1'])
splitted_gene: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]]
returns: [dict] (e.g., {'ORIFICE r1'}
"""
fmted_policies = dict()
splitted_gene = split_gene_by_ctl_ts(gene, control_str_ids,
n_control_steps)
for i, control_id in enumerate(control_str_ids):
control_type = control_id.split()[0]
seg = splitted_gene[i]
if control_type == 'ORIFICE' or control_type == 'WEIR':
# change the lists of bits into percent openings
fmtd_seg = [bits_to_perc(setting) for setting in seg]
elif control_type == 'PUMP':
# change the lists of bits into on/off
fmtd_seg = [bit_to_on_off(bit[0]) for bit in seg]
fmted_policies[control_id] = fmtd_seg
return fmted_policies
def list_to_policy(policy, control_str_ids, n_control_steps):
"""
ASSUMPTION: round decimal number to BOOLEAN
"""
split_policies = split_list(policy, len(control_str_ids))
fmted_policies = dict()
for i, control_id in enumerate(control_str_ids):
control_type = control_id.split()[0]
if control_type == 'ORIFICE' or control_type == 'WEIR':
fmted_policies[control_id] = split_policies[i]
elif control_type == 'PUMP':
on_off = [bit_to_on_off(round(p)) for p in split_policies[i]]
fmted_policies[control_id] = on_off
return fmted_policies
def format_policies(policy, control_str_ids, n_control_steps, opt_method):
if opt_method == 'genetic_algorithm':
return gene_to_policy_dict(policy, control_str_ids, n_control_steps)
elif opt_method == 'bayesian_opt':
return list_to_policy(policy, control_str_ids, n_control_steps)
def prep_tmp_files(proc_inp, work_dir):
# make process model tmp file
rand_string = ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(9))
# make a copy of the process model input file
tmp_proc_base = proc_inp.replace('.inp',
'_tmp_{}'.format(rand_string))
tmp_proc_inp = tmp_proc_base + '.inp'
tmp_proc_rpt = tmp_proc_base + '.rpt'
copyfile(proc_inp, tmp_proc_inp)
# make copy of hs file
hs_file_path = up.read_hs_filename(proc_inp)
hs_file_name = os.path.split(hs_file_path)[-1]
tmp_hs_file_name = hs_file_name.replace('.hsf',
'_{}.hsf'.format(rand_string))
tmp_hs_file_path = os.path.join(sm.run.work_dir, tmp_hs_file_name)
copyfile(hs_file_path, tmp_hs_file_path)
return tmp_proc_inp, tmp_proc_rpt, tmp_hs_file_path
def evaluate(*individual):
    """
    Evaluate the performance of an individual given the inp file of the process
    model, the individual, the control params (ctl_str_ids, horizon, step),
    and the cost function params (dev_weight/dict, flood weight/dict).

    All run configuration is read from the global `sm.run` object.

    :param individual: candidate policy (GA tuple or Bayesian-opt array)
    :return: scalar cost from running the policy through the SWMM model
    """
    # prep files: unique temp copies so parallel evaluations don't collide
    tmp_inp, tmp_rpt, tmp_hs = prep_tmp_files(sm.run.inp_process_file_path,
                                              sm.run.work_dir)
    # format policies
    if sm.run.opt_method == 'genetic_algorithm':
        individual = individual[0]
    elif sm.run.opt_method == 'bayesian_opt':
        individual = np.squeeze(individual)
    fmted_policies = format_policies(individual, sm.run.ctl_str_ids,
                                     sm.run.n_ctl_steps, sm.run.opt_method)
    # update controls
    up.update_controls_and_hotstart(tmp_inp,
                                    sm.run.ctl_time_step,
                                    fmted_policies,
                                    tmp_hs)
    # run the swmm model
    if os.name == 'nt':
        swmm_exe_cmd = 'swmm5.exe'
    elif sys.platform.startswith('linux'):
        swmm_exe_cmd = 'swmm5'
    else:
        # BUG FIX: previously an unsupported platform left swmm_exe_cmd
        # unbound and crashed with UnboundLocalError two lines later
        raise OSError('unsupported platform for swmm5: {}'.format(sys.platform))
    cmd = '{} {} {}'.format(swmm_exe_cmd, tmp_inp,
                            tmp_rpt)
    # BUG FIX: subprocess.DEVNULL replaces a FNULL = open(os.devnull, 'w')
    # handle that was opened but never closed (file-handle leak per call)
    subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL,
                    stderr=subprocess.STDOUT)
    # get cost
    cost = get_cost(tmp_rpt,
                    sm.run.node_flood_weight_dict,
                    sm.run.flood_weight,
                    sm.run.target_depth_dict,
                    sm.run.dev_weight)
    # clean up the temporary files
    os.remove(tmp_inp)
    os.remove(tmp_rpt)
    os.remove(tmp_hs)
    return cost
|
[
"sys.platform.startswith",
"os.remove",
"update_process_model_input_file.update_controls_and_hotstart",
"update_process_model_input_file.read_hs_filename",
"random.choice",
"subprocess.call",
"shutil.copyfile",
"numpy.squeeze",
"os.path.split",
"os.path.join"
] |
[((7428, 7460), 'shutil.copyfile', 'copyfile', (['proc_inp', 'tmp_proc_inp'], {}), '(proc_inp, tmp_proc_inp)\n', (7436, 7460), False, 'from shutil import copyfile\n'), ((7508, 7537), 'update_process_model_input_file.read_hs_filename', 'up.read_hs_filename', (['proc_inp'], {}), '(proc_inp)\n', (7527, 7537), True, 'import update_process_model_input_file as up\n'), ((7739, 7786), 'os.path.join', 'os.path.join', (['sm.run.work_dir', 'tmp_hs_file_name'], {}), '(sm.run.work_dir, tmp_hs_file_name)\n', (7751, 7786), False, 'import os\n'), ((7791, 7831), 'shutil.copyfile', 'copyfile', (['hs_file_path', 'tmp_hs_file_path'], {}), '(hs_file_path, tmp_hs_file_path)\n', (7799, 7831), False, 'from shutil import copyfile\n'), ((8719, 8809), 'update_process_model_input_file.update_controls_and_hotstart', 'up.update_controls_and_hotstart', (['tmp_inp', 'sm.run.ctl_time_step', 'fmted_policies', 'tmp_hs'], {}), '(tmp_inp, sm.run.ctl_time_step,\n fmted_policies, tmp_hs)\n', (8750, 8809), True, 'import update_process_model_input_file as up\n'), ((9165, 9237), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)', 'stdout': 'FNULL', 'stderr': 'subprocess.STDOUT'}), '(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)\n', (9180, 9237), False, 'import subprocess\n'), ((9465, 9483), 'os.remove', 'os.remove', (['tmp_inp'], {}), '(tmp_inp)\n', (9474, 9483), False, 'import os\n'), ((9488, 9506), 'os.remove', 'os.remove', (['tmp_rpt'], {}), '(tmp_rpt)\n', (9497, 9506), False, 'import os\n'), ((9511, 9528), 'os.remove', 'os.remove', (['tmp_hs'], {}), '(tmp_hs)\n', (9520, 9528), False, 'import os\n'), ((7557, 7584), 'os.path.split', 'os.path.split', (['hs_file_path'], {}), '(hs_file_path)\n', (7570, 7584), False, 'import os\n'), ((9008, 9040), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (9031, 9040), False, 'import sys\n'), ((7094, 7147), 'random.choice', 'random.choice', (['(string.ascii_lowercase + string.digits)'], {}), 
'(string.ascii_lowercase + string.digits)\n', (7107, 7147), False, 'import random\n'), ((8523, 8545), 'numpy.squeeze', 'np.squeeze', (['individual'], {}), '(individual)\n', (8533, 8545), True, 'import numpy as np\n')]
|
import descarteslabs as dl
# The bounding box geometry of Haiti
# Bounding box of Haiti as a GeoJSON Feature (lon/lat, closed polygon ring).
haiti = {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Polygon",
        "coordinates": [[
            [-74.520263671875, 17.98918266463051],
            [-71.685791015625, 17.98918266463051],
            [-71.685791015625, 19.94236918954201],
            [-74.520263671875, 19.94236918954201],
            [-74.520263671875, 17.98918266463051],
        ]],
    },
}

# Search for mostly-cloud-free Sentinel-2 scenes over Haiti
# in a two-day window, capped at five results.
scenes, ctx = dl.scenes.search(
    haiti["geometry"],
    products=["sentinel-2:L1C"],
    start_datetime="2018-05-01",
    end_datetime="2018-05-03",
    cloud_fraction=0.7,
    limit=5,
)
print("There are {} scenes in the collection".format(len(scenes)))

# Mosaic the returned scenes into one RGB image and display it.
mosaic = scenes.mosaic(bands="red green blue", ctx=ctx)
dl.scenes.display(mosaic, title="Haiti Mosaic")
|
[
"descarteslabs.scenes.display",
"descarteslabs.scenes.search"
] |
[((755, 913), 'descarteslabs.scenes.search', 'dl.scenes.search', (["haiti['geometry']"], {'products': "['sentinel-2:L1C']", 'start_datetime': '"""2018-05-01"""', 'end_datetime': '"""2018-05-03"""', 'cloud_fraction': '(0.7)', 'limit': '(5)'}), "(haiti['geometry'], products=['sentinel-2:L1C'],\n start_datetime='2018-05-01', end_datetime='2018-05-03', cloud_fraction=\n 0.7, limit=5)\n", (771, 913), True, 'import descarteslabs as dl\n'), ((1171, 1218), 'descarteslabs.scenes.display', 'dl.scenes.display', (['mosaic'], {'title': '"""Haiti Mosaic"""'}), "(mosaic, title='Haiti Mosaic')\n", (1188, 1218), True, 'import descarteslabs as dl\n')]
|
import xml.etree.ElementTree as ET
import requests
from bs4 import BeautifulSoup
from discord import Embed
from utils.classes.Hero import Hero
from utils.library import files
from utils.classes.Const import config
def get_last_update(url, embed=None):
    """
    Append a "latest hero change" field, scraped from the patch page at
    *url*, to *embed* (a default embed is created when none is given).

    Best effort: any scraping/network failure is swallowed and the embed
    is returned unmodified.
    """
    try:
        if embed is None:
            embed = Embed(
                title="Последние изменения",
                color=config.info
            )
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246'
        page = requests.get(url, headers={"User-Agent": f"{user_agent}"})
        soup = BeautifulSoup(page.text, 'html.parser')
        panels = soup.findAll("div", {"class": "panel panel-primary"})
        header = panels[0].h3  # first panel == most recent patch
        link = header.findAll('a', class_='pull-right')[0].get('href')
        embed.add_field(
            name="Последнее изменение героя",
            value=f"[{header.text}]({link})",
            inline=True
        )
    except Exception:
        # deliberately silent: caller always gets a usable embed back
        pass
    return embed
def last_pn(hero=None, author=''):
    """
    Build a Discord embed summarising the latest Heroes patch notes.

    Fetches the heroespatchnotes.com Atom feed plus the patch summary page,
    then lists the link to the latest patch and the heroes changed in it.

    :param hero: optional Hero whose names go into the embed title
    :param author: user name shown in the embed footer
    :return: discord.Embed ready to send
    """
    patch_summary = 'https://heroespatchnotes.com/feed/patch-summary.xml'
    patchlink = 'https://heroespatchnotes.com/patch/summary.html'
    response = requests.get(patch_summary)
    tree = ET.fromstring(response.text)
    # title varies depending on whether a specific hero was requested
    if hero is not None:
        embed = Embed(
            title="{} / {} : Последний патч".format(hero.en, hero.ru),
            color=config.info
        )
    else:
        embed = Embed(
            title="Патчноут",
            color=config.info
        )
    # first list item on the summary page links to the newest patch
    response = requests.get('https://heroespatchnotes.com/patch/summary.html')
    soup = BeautifulSoup(response.text, 'html.parser')
    embed.add_field(
        name="Последний патч",
        value=f"[{soup.ol.li.a.text}]({soup.ol.li.a['href']})",
        inline=False
    )
    #print(soup.ol.li.a)
    # walk the first Atom entry: its title holds the patch date/number,
    # its HTML content links every hero changed in that patch
    for child in tree.find('{http://www.w3.org/2005/Atom}entry'):
        if child.tag == '{http://www.w3.org/2005/Atom}title':
            title = child.text
            date, patch_number = title.split(' ', maxsplit=1)
        if child.tag == '{http://www.w3.org/2005/Atom}content':
            # print(child.text)
            soup = BeautifulSoup(child.text, 'html.parser')
            herolinks = ''
            for link in soup.findAll('a'):
                hero_url = link.get('href')
                # NOTE: rebinds the 'hero' parameter from here on
                hero = Hero(link.text)
                if hero is not None:
                    # herolinks = herolinks + '[' + hero['name_ru'] + '](' + hero_url + '), '
                    herolinks += hero.ru + ', '
                    # print('Герой: {} \nПоследние изменения: {}'.format(hero['name_ru'], hero_url))
            # trim the trailing ', ' separator
            herolinks = herolinks[:-2]
            embed.add_field(
                name=f"Последние измененные герои ({date})",
                value=f"{herolinks}",
                inline=False
            )
    embed.set_footer(
        text=f"Информация для: {author}"
    )
    return embed
|
[
"utils.classes.Hero.Hero",
"discord.Embed",
"xml.etree.ElementTree.fromstring",
"requests.get",
"bs4.BeautifulSoup"
] |
[((1361, 1388), 'requests.get', 'requests.get', (['patch_summary'], {}), '(patch_summary)\n', (1373, 1388), False, 'import requests\n'), ((1400, 1428), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['response.text'], {}), '(response.text)\n', (1413, 1428), True, 'import xml.etree.ElementTree as ET\n'), ((1706, 1769), 'requests.get', 'requests.get', (['"""https://heroespatchnotes.com/patch/summary.html"""'], {}), "('https://heroespatchnotes.com/patch/summary.html')\n", (1718, 1769), False, 'import requests\n'), ((1781, 1824), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (1794, 1824), False, 'from bs4 import BeautifulSoup\n'), ((580, 638), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent': f'{user_agent}'}"}), "(url, headers={'User-Agent': f'{user_agent}'})\n", (592, 638), False, 'import requests\n'), ((654, 697), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (667, 697), False, 'from bs4 import BeautifulSoup\n'), ((1614, 1656), 'discord.Embed', 'Embed', ([], {'title': '"""Патчноут"""', 'color': 'config.info'}), "(title='Патчноут', color=config.info)\n", (1619, 1656), False, 'from discord import Embed\n'), ((310, 363), 'discord.Embed', 'Embed', ([], {'title': '"""Последние изменения"""', 'color': 'config.info'}), "(title='Последние изменения', color=config.info)\n", (315, 363), False, 'from discord import Embed\n'), ((2329, 2369), 'bs4.BeautifulSoup', 'BeautifulSoup', (['child.text', '"""html.parser"""'], {}), "(child.text, 'html.parser')\n", (2342, 2369), False, 'from bs4 import BeautifulSoup\n'), ((2507, 2522), 'utils.classes.Hero.Hero', 'Hero', (['link.text'], {}), '(link.text)\n', (2511, 2522), False, 'from utils.classes.Hero import Hero\n')]
|
import logarithmoforecast
import pandas as pd
from pathlib import Path
def create_ml_dataframe(station_name, phase, pickle_dir=Path('pickles')):
    """
    Load the per-phase ML dataframe pickled for *station_name*.

    :param station_name: station directory name under *pickle_dir*
    :param phase: phase number; selects the 'h_phase<N>' pickle file
    :param pickle_dir: base directory containing one folder per station
    :return: the loaded DataFrame without the 'ServiceDeliveryPoint' column
    """
    path = pickle_dir / station_name
    df_ml = pd.read_pickle(path / ("h_phase" + str(phase)))
    # BUG FIX: drop() is not in-place — the original discarded its result,
    # so the column was never actually removed
    df_ml = df_ml.drop(columns=['ServiceDeliveryPoint'])
    print(df_ml)
    return df_ml
def main():
    """Smoke-test create_ml_dataframe against the test pickle directory."""
    station_name = 'NW000000000000000000000NBSNST0888'
    test_dir = Path('testPickles')
    create_ml_dataframe(station_name, 0, test_dir)


if __name__ == '__main__':
    # BUG FIX: guard the entry point so merely importing this module
    # no longer triggers the smoke test (and its file I/O)
    main()
|
[
"pathlib.Path"
] |
[((129, 144), 'pathlib.Path', 'Path', (['"""pickles"""'], {}), "('pickles')\n", (133, 144), False, 'from pathlib import Path\n'), ((392, 411), 'pathlib.Path', 'Path', (['"""testPickles"""'], {}), "('testPickles')\n", (396, 411), False, 'from pathlib import Path\n')]
|
from _context import sparse
from sparse import util
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as dist
import numpy as np
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import random, tqdm, sys, math
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from util import d
# import warnings
# warnings.simplefilter("error")
# warnings.simplefilter("ignore", DeprecationWarning)
# from util import tic, toc
# NB, the enwik8 data contains tokens from 9 to 240
NUM_TOKENS = 256
LOG2E = math.log2(math.e)
MARGIN = 0.1
def sample(lnprobs, temperature=1.0):
    """
    Draw a token index from a vector of logits.

    Temperature 0.0 means greedy decoding (argmax). Otherwise the logits
    are divided by the temperature, softmaxed, and a categorical sample
    is taken.

    :param lnprobs: 1-D tensor of unnormalized log-probabilities
    :param temperature: sampling temperature; 0.0 selects the argmax
    :return: scalar tensor holding the sampled index
    """
    if temperature == 0.0:
        return lnprobs.argmax()
    scaled = lnprobs / temperature
    return dist.Categorical(F.softmax(scaled, dim=0)).sample()
def mask_(matrices, maskval=0.0, mask_diagonal=True):
    """
    Overwrite, in place, the upper triangle of every matrix in the batch
    with *maskval* — i.e. all cells where i <= j (or i < j when
    *mask_diagonal* is False, keeping the diagonal).

    :param matrices: tensor of shape (batch, height, width)
    :param maskval: value written into the masked cells
    :param mask_diagonal: when False, the diagonal itself is left intact
    :return: None (operation is in place)
    """
    _, height, width = matrices.size()
    offset = 0 if mask_diagonal else 1
    rows, cols = torch.triu_indices(height, width, offset=offset)
    matrices[:, rows, cols] = maskval
class MSparseSelfAttention(nn.Module):
    """
    Masked sparse self attention (two degrees of freedom).

    The k attention connections are global learned parameters (`self.means`,
    `self.sigmas`) shared by every position: each connection is a continuous
    2-D coordinate in the (t, t) attention matrix, flipped below the
    diagonal so attention only looks backwards.
    """
    def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05, sigma_scale=1.0):
        """
        :param emb: embedding dimension of the input tokens
        :param k: Number of connections to the input in total
        :param gadditional: extra integer points sampled globally per
            connection (passed to sparse.ngenerate)
        :param radditional: extra integer points sampled within `region`
            around each connection
        :param region: size of the local sampling region (both axes)
        :param heads: number of attention heads
        :param mask: stored but not read in this class — the means are
            always flipped below the diagonal in hyper()
        :param min_sigma: lower bound applied when transforming sigmas
        :param sigma_scale: multiplier applied to the transformed sigmas
        """
        super().__init__()
        self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
        # per-head projections; heads are folded into the batch in forward()
        self.tokeys = nn.Linear(emb, emb * heads, bias=False)
        self.toqueries = nn.Linear(emb, emb * heads, bias=False)
        self.tovalues = nn.Linear(emb, emb * heads, bias=False)
        self.unifyheads = nn.Linear(heads * emb, emb)
        self.gadditional = gadditional
        self.radditional = radditional
        self.region = region
        self.k = k
        # learned continuous coordinates and spreads of the k connections
        self.means = nn.Parameter(torch.randn((k, 2)))
        self.sigmas = nn.Parameter(torch.randn((k, )))
        # fixed (non-learned) connection values of 1.0
        self.register_buffer('mvalues', torch.ones((k, )))
    def hyper(self, x):
        """
        Produce the continuous attention-matrix coordinates (means), their
        spreads (sigmas) and connection values for input x of shape (b, t, e).
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        # generate the continuous parameters
        means = self.means[None, None, :, :].expand(b, 1, k, 2)
        sigmas = self.sigmas[None, None, :].expand(b, 1, k)
        values = self.mvalues[None, None, :].expand(b, 1, k)
        means = util.flip(means.contiguous()) # flip everything to below the diagonal of the matrix
        s = (t, t)
        means, sigmas = sparse.transform_means(means, s), \
                        sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
        return means, sigmas, values
    def forward(self, x):
        """
        Sparse masked self-attention over x of shape (b, t, emb);
        returns a tensor of the same shape.
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        s = (t, t)
        assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
        means, sigmas, mvalues = self.hyper(x)
        # sample integer indices and values
        indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t, t),
                                   relative_range=(self.region, self.region), cuda=x.is_cuda)
        indices = util.flip(indices)
        indfl = indices.float()
        # 4 = the two integer neighbours per axis of each continuous point
        vs = k * (4 + self.radditional + self.gadditional)
        assert indices.size() == (b, 1, vs, 2), f'{indices.size()}, {(b, 1, vs, 2)}'
        # Mask for duplicate indices
        dups = util.nduplicates(indices).to(torch.bool)
        # compute (unnormalized) densities under the given MVNs (proportions)
        props = sparse.densities(indfl, means, sigmas).clone()
        props[dups, :] = 0
        props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
        # weight the values by the proportions
        weights = mvalues[:, :, None, :].expand_as(props)
        # - add a dim for the MVNs
        weights = props * weights
        weights = weights.sum(dim=3) # - sum out the MVNs
        assert indices.size() == (b, 1, vs, 2), f'{indices.size()}, {(b, 1, vs, 2)}'
        assert weights.size() == (b, 1, vs), f'{weights.size()}, {(b, 1, vs)}'
        # expand for heads, fold heads into batch
        indices = indices[:, None, :, :, :].expand(b, h, 1, vs, 2).contiguous().view(b*h, vs, 2)
        weights = weights[:, None, :, :].expand(b, h, 1, vs).contiguous().view(b*h, vs)
        # compute keys, queries, values
        keys = self.tokeys(x) .view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x) .view(b, t, h, e)
        # - fold heads into the batch dimension
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        # scale both sides by e^(1/4) so the dot product is scaled by sqrt(e)
        queries = queries / (e ** (1/4)) # b*h, t, e
        keys = keys / (e ** (1/4))
        # get dot product of queries and keys
        # - this will be a sparse matrix with the indices we've just computed, and values
        # defined by the dot product
        # select the queries
        indflat = indices.view(b*h*vs, 2)
        ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, vs).contiguous().view(b*h*vs)
        squeries = queries[ar, indflat[:, 0], :]
        skeys = keys [ar, indflat[:, 1], :]
        dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h, vs)
        dot = sparse.logsoftmax(indices, weights * dot, s)
        # - dot now has row-wise self-attention probabilities
        # apply the self attention to the values
        out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
        # swap h, t back, unify heads
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        return self.unifyheads(out)
class ASH2DSelfAttention(nn.Module):
    """
    Masked sparse self attention. The receptive field is adaptive: each
    position's k continuous 2-D attention coordinates are produced by a
    small network (`toparams`) from the position's embedding vector and its
    normalized sequence coordinate, offset from the matrix diagonal.
    """
    def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05,
                 sigma_scale=0.1, mmult = 1.0):
        """
        :param emb: embedding dimension of the input tokens
        :param k: Number of connections to the input for each output
        :param gadditional: extra integer points sampled globally per
            connection (passed to sparse.ngenerate)
        :param radditional: extra integer points sampled within `region`
        :param region: size of the local sampling region (both axes)
        :param heads: number of attention heads
        :param mask: stored but not read in this class
        :param min_sigma: lower bound applied when transforming sigmas
        :param sigma_scale: multiplier applied to the transformed sigmas
        :param mmult: multiplier on the predicted mean offsets
        """
        super().__init__()
        self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
        self.mmult = mmult
        self.tokeys = nn.Linear(emb, emb * heads, bias=False)
        self.toqueries = nn.Linear(emb, emb * heads, bias=False)
        self.tovalues = nn.Linear(emb, emb * heads, bias=False)
        self.unifyheads = nn.Linear(heads * emb, emb)
        self.gadditional = gadditional
        self.radditional = radditional
        self.region = region
        self.k = k
        self.register_buffer('mvalues', torch.ones((k, )))
        # network that generates the coordinates and sigmas
        hidden = emb * 4
        self.toparams = nn.Sequential(
            nn.Linear(emb + 1, hidden), nn.ReLU(),
            nn.Linear(hidden, k * 3) # two means, one sigma
        )
    def hyper(self, x):
        """
        Predict, per position, the continuous attention coordinates
        (means), spreads (sigmas) and connection values from x (b, t, e).
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        # Generate coords
        coords = torch.arange(t, dtype=torch.float, device=d(x)) / t
        coords = coords[None, :, None,].expand(b, t, 1)
        input = torch.cat([x, coords], dim=2)
        params = self.toparams(input) # (b, t, k*3)
        assert not util.contains_nan(params), \
            f'params contain NaN\n intput {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
        # Generate the logits that correspond to the diagonals of the matrix
        diags = torch.arange(t, dtype=torch.float, device=d(x))
        diags = util.inv(diags, mx=t)
        diags = diags[None, :, None, None].expand(b, t, k, 2)
        means = params[:, :, :k*2].view(b, t, k, 2)
        sigmas = params[:, :, k*2:].view(b, t, k)
        values = self.mvalues[None, None, :].expand(b, t, k)
        # predicted means are offsets from the diagonal
        means = diags + self.mmult * means
        means = util.flip(means)
        # means = util.flip(means.contiguous()) # flip everything to below the diagonal of the matrix
        s = (t, t)
        means, sigmas = sparse.transform_means(means, s), \
                        sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
        return means, sigmas, values
    def forward(self, x):
        """
        Adaptive sparse masked self-attention over x of shape (b, t, emb);
        returns a tensor of the same shape.
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        s = (t, t)
        assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
        means, sigmas, mvalues = self.hyper(x)
        # sample integer indices and values
        indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t, t),
                                   relative_range=(self.region, self.region), cuda=x.is_cuda)
        indices = util.flip(indices)
        indfl = indices.float()
        # 4 = the two integer neighbours per axis of each continuous point
        vs = k * (4 + self.radditional + self.gadditional)
        assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
        # Mask for duplicate indices
        dups = util.nduplicates(indices).to(torch.bool)
        # compute (unnormalized) densities under the given MVNs (proportions)
        props = sparse.densities(indfl, means, sigmas).clone()
        props[dups, :] = 0
        props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
        # weight the values by the proportions
        weights = mvalues[:, :, None, :].expand_as(props)
        # - add a dim for the MVNs
        weights = props * weights
        weights = weights.sum(dim=3) # - sum out the MVNs
        assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
        assert weights.size() == (b, t, vs), f'{weights.size()}, {(b, t, vs)}'
        # expand for heads, fold heads into batch
        indices = indices[:, None, :, :, :].expand(b, h, t, vs, 2).contiguous().view(b*h, t*vs, 2)
        weights = weights[:, None, :, :].expand(b, h, t, vs).contiguous().view(b*h, t*vs)
        # compute keys, queries, values
        keys = self.tokeys(x) .view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x) .view(b, t, h, e)
        # - fold heads into the batch dimension
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        # scale both sides by e^(1/4) so the dot product is scaled by sqrt(e)
        queries = queries / (e ** (1/4)) # b*h, t, e
        keys = keys / (e ** (1/4))
        # get dot product of queries and keys
        # - this will be a sparse matrix with the indices we've just computed, and values
        # defined by the dot product
        # select the queries
        indflat = indices.view(b*h*t*vs, 2)
        ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*vs).contiguous().view(b*h*t*vs)
        squeries = queries[ar, indflat[:, 0], :]
        skeys = keys [ar, indflat[:, 1], :]
        dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*vs)
        #print(f'dot before {dot.min()}, {dot.mean()}, {dot.max()}')
        assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        #print(f'dot after {dot.min()}, {dot.mean()}, {dot.max()}\n')
        dot = sparse.logsoftmax(indices, weights * dot, s).exp()
        # - dot now has row-wise self-attention probabilities
        assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        # apply the self attention to the values
        out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
        # swap h, t back, unify heads
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        out = self.unifyheads(out)
        assert not util.contains_nan(out), f'output contains nan {out}'
        return out
class ASH1DSelfAttention(nn.Module):
    """
    Masked sparse self attention, one degree of freedom per connection:
    only the horizontal (key) coordinate is predicted; the vertical
    (query) coordinate is fixed to the position itself. The receptive
    field is adaptive, based on the incoming embedding vector and the
    position's normalized coordinate.
    """
    def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05, sigma_scale=0.1,
                 mmult = 1.0, norm_method='softmax', outputs=-1, clamp=True):
        """
        :param emb: embedding dimension of the input tokens
        :param k: Number of connections to the input for each output
        :param gadditional: extra integer points sampled globally
        :param radditional: extra integer points sampled within `region`
        :param region: size of the local sampling region
        :param heads: number of attention heads
        :param mask: stored but not read in this class
        :param min_sigma: lower bound applied when transforming sigmas
        :param sigma_scale: multiplier applied to the transformed sigmas
        :param mmult: multiplier on the predicted mean offsets
            (tripled when `clamp` is set)
        :param norm_method: 'softmax' or a method name accepted by
            sparse.simple_normalize
        :param outputs: The number of units (at the end of the sequence) to compute new vectors for.
        :param clamp: use 'clamp' (vs 'sigmoid') when transforming means
        """
        super().__init__()
        self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
        self.mmult, self.norm_method, self.clamp = mmult, norm_method, clamp
        if clamp:
            self.mmult *= 3.0
        self.outputs = outputs
        self.tokeys = nn.Linear(emb, emb * heads, bias=False)
        self.toqueries = nn.Linear(emb, emb * heads, bias=False)
        self.tovalues = nn.Linear(emb, emb * heads, bias=False)
        self.unifyheads = nn.Linear(heads * emb, emb)
        self.gadditional = gadditional
        self.radditional = radditional
        self.region = region
        self.k = k
        self.register_buffer('mvalues', torch.ones((k, )))
        # network that generates the coordinates and sigmas
        hidden = emb * 4
        self.toparams = nn.Sequential(
            nn.Linear(emb + 1, hidden), nn.ReLU(),
            nn.Linear(hidden, k * 2) # one mean, one sigma
        )
    def hyper(self, x):
        """
        Predict, per position, the continuous key coordinates (means),
        spreads (sigmas) and connection values from x of shape (b, t, e).
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        # NOTE(review): 'o' is never used below, and '< -1' looks like it
        # was meant to be '< 0' (outputs defaults to -1) — confirm intent
        o = t if self.outputs < -1 else self.outputs
        # Generate coords
        coords = torch.arange(t, dtype=torch.float, device=d(x)) / t
        coords = coords[None, :, None,].expand(b, t, 1)
        input = torch.cat([x, coords], dim=2)
        params = self.toparams(input) # (b, o, k*2)
        assert not util.contains_nan(params), \
            f'params contain NaN\n intput {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
        # Generate the logits that correspond to the horizontal coordinate of the current word
        diags = torch.arange(t, dtype=torch.float, device=d(x))
        if not self.clamp:
            diags = util.inv(diags, mx=t)
        diags = diags[None, :, None, None].expand(b, t, k, 1)
        means = params[:, :, :k].view(b, t, k, 1)
        sigmas = params[:, :, k:].view(b, t, k)
        values = self.mvalues[None, None, :].expand(b, t, k)
        # softplus keeps the offset positive, so means always point backwards
        means = diags - self.mmult * F.softplus(means)
        s = (t,)
        means, sigmas = sparse.transform_means(means, s, method='clamp' if self.clamp else 'sigmoid'), \
                        sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
        return means, sigmas, values
    def forward(self, x):
        """
        Adaptive sparse masked self-attention over x of shape (b, t, emb);
        returns a tensor of the same shape.
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        s = (t, t)
        assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
        means, sigmas, mvalues = self.hyper(x)
        # sample integer indices and values
        indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t,),
                                   relative_range=(self.region, ), cuda=x.is_cuda)
        indfl = indices.float()
        # 2 = the two integer neighbours of each continuous point (1-D case)
        vs = k * (2 + self.radditional + self.gadditional)
        assert indices.size() == (b, t, vs, 1), f'{indices.size()}, {(b, t, vs, 1)}'
        m = torch.arange(t, dtype=torch.long, device=d(indices))[None, :, None, None].expand(b, t, vs, k)
        props = sparse.densities(indfl, means, sigmas).clone() # (b, t, vs, k)
        # Mask for duplicate indices
        dups = util.nduplicates(indices).to(torch.bool)
        # compute (unnormalized) densities under the given MVNs (proportions)
        props[dups, :] = 0
        # zero out any sampled index pointing past the current position (mask)
        props[indices > m] = 0
        props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
        # weight the values by the proportions
        weights = mvalues[:, :, None, :].expand_as(props)
        # - add a dim for the MVNs
        weights = props * weights
        weights = weights.sum(dim=3) # - sum out the MVNs
        # prepend the (fixed) row coordinate to make full 2-D indices
        out = torch.arange(t, device=d(indices))[None, :, None, None].expand(b, t, vs, 1)
        indices = torch.cat([out, indices], dim=3)
        assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
        assert weights.size() == (b, t, vs), f'{weights.size()}, {(b, t, vs)}'
        # expand for heads, fold heads into batch
        indices = indices[:, None, :, :, :].expand(b, h, t, vs, 2).contiguous().view(b*h, t*vs, 2)
        weights = weights[:, None, :, :].expand(b, h, t, vs).contiguous().view(b*h, t*vs)
        # compute keys, queries, values
        keys = self.tokeys(x) .view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x) .view(b, t, h, e)
        # - fold heads into the batch dimension
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        # scale both sides by e^(1/4) so the dot product is scaled by sqrt(e)
        queries = queries / (e ** (1/4)) # b*h, t, e
        keys = keys / (e ** (1/4))
        # get dot product of queries and keys
        # - this will be a sparse matrix with the indices we've just computed, and values
        # defined by the dot product
        # select the queries
        indflat = indices.view(b*h*t*vs, 2)
        ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*vs).contiguous().view(b*h*t*vs)
        squeries = queries[ar, indflat[:, 0], :]
        skeys = keys [ar, indflat[:, 1], :]
        dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*vs)
        assert not util.contains_inf(dot), f'dot contains inf (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        if self.norm_method == 'softmax':
            dot = sparse.logsoftmax(indices, weights * dot, s).exp()
        else:
            dot = sparse.simple_normalize(indices, weights * dot, s, method=self.norm_method)
        # - dot now has row-wise self-attention probabilities
        assert not util.contains_inf(dot), f'dot contains inf (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        # apply the self attention to the values
        out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
        # swap h, t back, unify heads
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        out = self.unifyheads(out)
        assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
        return out
class StridedSparseSelfAttention(nn.Module):
"""
Masked sparse self attention. One degree of freedom, the receptive field is adaptive, based on the incoming
embedding vector, position embedding and coordinate.
"""
def __init__(self, emb, k, gadditional, radditional, region, heads=8, stride=32, mask=False, min_sigma=0.05, sigma_scale=0.1,
mmult = 1.0, norm_method='softmax', clamp=True, **kwargs):
"""
:param emb:
:param k: Number of connections to the input for each output
:param gadditional:
:param radditional:
:param region:
:param heads:
:param outputs: The number of units (at the end of the sequence) to compute new vectors for.
:param mask:
"""
super().__init__()
self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
self.mmult, self.norm_method, self.clamp = mmult, norm_method, clamp
self.stride = stride
if clamp:
self.mmult *= 3.0
s = emb // heads
self.tokeys = nn.Linear(s, s, bias=False)
self.toqueries = nn.Linear(s, s, bias=False)
self.tovalues = nn.Linear(s, s, bias=False)
self.unifyheads = nn.Linear(s * heads, emb)
self.gadditional = gadditional
self.radditional = radditional
self.region = region
self.k = k
self.register_buffer('mvalues', torch.ones((k, )))
# network that generates the coordinates and sigmas
hidden = emb * 4
self.toparams = nn.Sequential(
nn.Linear(2 * emb + 1, hidden), nn.ReLU(),
nn.Linear(hidden, k * 2) # one mean, one sigma
)
# -- input is the current token's embedding vector, the sum of preceding embedding vectors, and the coordinate.
def hyper(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
r = self.stride
s = (t,)
# Generate input selection
selection = torch.arange(t//r, dtype=torch.long, device=d(x))
selection = (selection + 1) * r - 1
tp = selection.size(0)
# Generate coords
coords = torch.arange(tp, dtype=torch.float, device=d(x)) / tp
coords = coords[None, :, None,].expand(b, tp, 1)
summed = (x.cumsum(dim=1) - x) / torch.arange(start=1, end=t+1, device=d(), dtype=torch.float)[None, :, None]
input = torch.cat([x[:, selection, :], coords, summed[:, selection, :]], dim=2)
params = self.toparams(input) # (b, tp, k*2)
assert not util.contains_nan(params), \
f'params contain NaN\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
assert not util.contains_inf(params), \
f'params contain inf\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
# Generate the logits/coordinates that correspond to the horizontal coordinate of the current word
diags = selection.to(torch.float)
if not self.clamp:
diags = util.inv(diags, mx=t)
diags = diags[None, :, None, None].expand(b, tp, k, 1)
means = params[:, :, :k].view(b, tp, k, 1)
sigmas = params[:, :, k:].view(b, tp, k)
values = self.mvalues[None, None, :].expand(b, tp, k) # all ones atm
means = diags - self.mmult * F.softplus(means)
means, sigmas = sparse.transform_means(means, s, method='clamp' if self.clamp else 'sigmoid'), \
sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
return means, sigmas, values
    def forward(self, x):
        """
        Apply strided sparse self-attention.

        :param x: Input batch, shape (batch, time, emb).
        :return: Tensor of shape (b, t, e). Only the tp = t // stride selected
                 rows carry attention output; the remaining rows receive no
                 attention mass from `sparse.batchmm` — presumably zero, TODO
                 confirm against sparse.batchmm.
        """
        b, t, e = x.size()
        h, k, reg = self.heads, self.k, self.region
        r = self.stride
        # Generate input selection (the fixed output indices, which are 'stride' units apart)
        selection = torch.arange(t//r, dtype=torch.long, device=d(x))
        selection = (selection + 1) * r - 1
        tp = selection.size(0)
        size = (t, t)
        # means (b, tp, k, 1), sigmas (b, tp, k), mvalues (b, tp, k)
        means, sigmas, mvalues = self.hyper(x)
        # split the embedding over the heads
        s = e // h
        x = x.view(b, t, h, s)
        # sample integer indices and values
        indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t,),
                                   relative_range=(self.region, ), cuda=x.is_cuda, epsilon=10e-5)
        indfl = indices.float()
        vs = k * (2 + self.radditional + self.gadditional) # number of sampled integer index tuples
        assert indices.size() == (b, tp, vs, 1), f'{indices.size()}, {(b, tp, vs, 1)}'
        # per-row diagonal position, used to mask out forward connections below
        m = selection[None, :, None, None].expand(b, tp, vs, k)
        props = sparse.densities(indfl, means, sigmas).clone() # (b, tp, vs, k)
        # Mask for duplicate indices
        dups = util.nduplicates(indices).to(torch.bool)
        # compute (unnormalized) densities under the given MVNs (proportions)
        props[dups, :] = 0
        props[indices > m] = 0 # mask out any forward connections
        # -- note that while all the continuous index tuples are guaranteed to point backwards, the sampled discrete
        #    index tuples might point forward, so they still need to be zeroed out here.
        props = props / props.sum(dim=2, keepdim=True) # normalize over all remaining points of a given index tuple
        # weight the values by the proportions
        weights = mvalues[:, :, None, :].expand_as(props)
        # - add a dim for the MVNs
        weights = props * weights
        weights = weights.sum(dim=3) # - sum out the MVNs
        out = selection[None, :, None, None].expand(b, tp, vs, 1) # output indices
        indices = torch.cat([out, indices], dim=3)
        assert indices.size() == (b, tp, vs, 2), f'{indices.size()}, {(b, tp, vs, 2)}'
        assert weights.size() == (b, tp, vs), f'{weights.size()}, {(b, tp, vs)}'
        assert not util.contains_inf(weights), f'weights contains inf (before norm) {weights.min()}, {weights.mean()}, {weights.max()}'
        assert not util.contains_nan(weights), f'weights contains nan (before norm) {weights.min()}, {weights.mean()}, {weights.max()}'
        # expand for heads, fold heads into batch
        indices = indices[:, None, :, :, :].expand(b, h, tp, vs, 2).contiguous().view(b*h, tp*vs, 2)
        weights = weights[:, None, :, :].expand(b, h, tp, vs).contiguous().view(b*h, tp*vs)
        # compute keys, queries, values
        keys = self.tokeys(x) # note: t not tp, we compute _all_ queries, keys and values
        queries = self.toqueries(x)
        values = self.tovalues(x)
        # - fold heads into the batch dimension
        keys = keys.transpose(1, 2).contiguous() .view(b * h, t, s)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, s)
        values = values.transpose(1, 2).contiguous() .view(b * h, t, s)
        # -- We could actually select first, and _then_ transform to kqv's. May be better for very large contexts and
        #    small batches
        queries = queries / (e ** (1/4)) # b*h, t, e
        keys = keys / (e ** (1/4))
        # get dot product of queries and keys
        # - this will be a sparse matrix with the indices we've just computed, and values
        #   defined by the dot product
        # select the queries
        indflat = indices.view(b*h*tp*vs, 2)
        # row index into the folded (b*h) batch for every sampled index tuple
        ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, tp*vs).contiguous().view(b*h*tp*vs)
        squeries = queries[ar, indflat[:, 0], :]
        skeys = keys [ar, indflat[:, 1], :]
        dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,tp*vs)
        # keep a copy of the raw logits for the NaN post-mortem dump below
        dot_logits = dot.data.clone()
        assert not util.contains_inf(dot), f'dot contains inf (before norm) {dot.min()}, {dot.mean()}, {dot.max()}'
        assert not util.contains_nan(dot), f'dot contains nan (before norm) {dot.min()}, {dot.mean()}, {dot.max()}'
        if self.norm_method == 'softmax':
            dot = sparse.logsoftmax(indices, weights * dot, size).exp()
        else:
            dot = sparse.simple_normalize(indices, weights * dot, size, method=self.norm_method)
        # - dot now has row-wise self-attention probabilities
        assert not util.contains_inf(dot), f'dot contains inf (after norm) {dot.min()}, {dot.mean()}, {dot.max()}'
        try:
            assert not util.contains_nan(dot), f'dot contains nan (after norm) {dot.min()}, {dot.mean()}, {dot.max()}'
        except AssertionError:
            # debugging dump: print the offending rows, then bail out
            print(dot.sum(dim=1))
            print('\n\n\n')
            for i in range(b*h):
                print(f'*** {i}')
                print(indices[i])
                print(dot_logits[i])
                print((weights * dot_logits)[i])
                print('\n\n\n')
            sys.exit()
        # apply the self attention to the values
        out = sparse.batchmm(indices, dot, size=size, xmatrix=values)
        # swap h, t back, unify heads
        out = out.transpose(1, 2).contiguous().view(b, t, h * s)
        out = self.unifyheads(out)
        assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
        return out
class ConvSelfAttention(nn.Module):
    """
    Self-attention with a hardwired convolutional sparsity pattern. That is, each node depends on the k
    nodes before.

    Wiring is always "causal" (ie. layer only looks into the past).

    Padding is added to the input to ensure the input and output have the same length.
    """
    def __init__(self, emb, heads=8, norm_method='softmax', k=32, **kwargs):
        """
        :param emb: Embedding dimension.
        :param heads: Number of attention heads.
        :param norm_method: How to normalize the attention matrix ('softmax', or a
            method name accepted by sparse.simple_normalize).
        :param k: Number of connections to the input for each output.
        :param kwargs: Ignored; accepted so this layer is call-compatible with the
            other attention flavours.
        """
        super().__init__()
        self.emb, self.heads = emb, heads
        self.norm_method = norm_method
        # per-head dimension: keys/queries/values are projected head-wise
        s = emb // heads
        self.tokeys = nn.Linear(s, s, bias=False)
        self.toqueries = nn.Linear(s, s, bias=False)
        self.tovalues = nn.Linear(s, s, bias=False)
        self.unifyheads = nn.Linear(s * heads, emb)
        self.k = k
    def forward(self, x):
        """
        :param x: Input batch, shape (batch, time, emb).
        :return: Tensor of shape (b, t, e); each output attends to the k
            positions ending at itself (zero-padded at the sequence start).
        """
        b, t, e = x.size()
        h, k = self.heads, self.k
        s = e // h
        x = x.view(b, t, h, s)
        # padded length: k-1 zero vectors are prepended so every output has k inputs
        tp = t + k - 1
        size = (t, tp)
        xp = F.pad(x, [0, 0, 0, 0, k-1, 0, 0, 0]) # zero pad the beginning of x
        assert xp.size() == (b, tp, h, s), f'{xp.size()} vs {(b, tp, h, s)}'
        # compute keys, queries, values (note that the self attention matrix is slightly rectangular)
        queries = self.toqueries(x)
        keys = self.tokeys(xp)
        values = self.tovalues(xp)
        # - fold heads into the batch dimension
        queries = queries.transpose(1, 2) .contiguous().view(b * h, t, s)
        keys = keys.transpose(1, 2)       .contiguous().view(b * h, tp, s)
        values = values.transpose(1, 2)   .contiguous().view(b * h, tp, s)
        # scale queries and keys instead of the dot product
        queries = queries / (e ** (1/4)) # NOTE(review): should this scale by s (per-head dim) rather than e? — confirm
        keys = keys / (e ** (1/4))
        # Get dot product of queries and keys
        # - this will be a sparse matrix with the indices we've just computed, and values
        #   defined by the dot product
        # generate the indices (t*k pairs of integers per attention head)
        indices = torch.arange(t, dtype=torch.long, device=d(x))[:, None, None].expand(t, k, 2).contiguous()
        deltas = torch.arange(k, dtype=torch.long, device=d(x))[None, :, None].expand(t, k, 1)
        # shift the column (key) index so row i attends to padded positions i..i+k-1
        indices[:, :, 1:] += deltas
        indices = indices[None, None, :, :, :].expand(b, h, t, k, 2).contiguous()
        indflat = indices.view(b*h*t*k, 2)
        # select the queries and the keys (left and right column of index matrix) and take their dot
        # product (note that they are already scaled)
        ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*k).contiguous().view(b*h*t*k)
        squeries = queries[ar, indflat[:, 0], :]
        skeys = keys [ar, indflat[:, 1], :]
        dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*k)
        indices = indices.view(b*h, t*k, 2)
        # assert not util.contains_inf(dot), f'dot contains inf (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        # assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        if self.norm_method == 'softmax':
            dot = sparse.logsoftmax(indices, dot, size).exp()
        else:
            dot = sparse.simple_normalize(indices, dot, size, method=self.norm_method)
        # - dot now has row-wise self-attention probabilities
        # assert not util.contains_inf(dot), f'dot contains inf (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        # assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
        # apply the self attention to the values
        out = sparse.batchmm(indices, dot, size=size, xmatrix=values)
        # swap h, t back, unify heads
        out = out.transpose(1, 2).contiguous().view(b, t, h * s)
        out = self.unifyheads(out)
        assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
        return out
class SelfAttention(nn.Module):
    """
    Plain, dense self attention.
    """
    def __init__(self, emb, heads=8, mask=False):
        """
        :param emb: Embedding dimension of the input tokens.
        :param heads: Number of attention heads.
        :param mask: Whether to mask out the future (False, True, or 'first').
        """
        super().__init__()

        self.emb = emb
        self.heads = heads
        self.mask = mask

        # Per-head projections, stacked along the output dimension.
        self.tokeys    = nn.Linear(emb, emb * heads, bias=False)
        self.toqueries = nn.Linear(emb, emb * heads, bias=False)
        self.tovalues  = nn.Linear(emb, emb * heads, bias=False)

        self.unifyheads = nn.Linear(heads * emb, emb)

    def forward(self, x):
        """
        :param x: Input batch, shape (batch, time, emb).
        :return: Tensor of shape (b, t, emb).
        """
        b, t, e = x.size()
        h = self.heads
        assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'

        # Project, split out the head dimension, then fold heads into the batch.
        def project(layer):
            return layer(x).view(b, t, h, e).transpose(1, 2).contiguous().view(b * h, t, e)

        keys    = project(self.tokeys)
        queries = project(self.toqueries)
        values  = project(self.tovalues)

        # - Instead of dividing the dot products by sqrt(e), we scale the keys and queries.
        #   This should be more memory efficient.
        scale = e ** (1/4)
        queries = queries / scale
        keys = keys / scale

        # scaled dot-product attention scores
        dot = torch.bmm(queries, keys.transpose(1, 2))
        assert dot.size() == (b*h, t, t), f'Matrix has size {dot.size()}, expected {(b*h, t, t)}.'

        if self.mask: # mask out the lower half of the dot matrix, including the diagonal
            mask_(dot, maskval=float('-inf'), mask_diagonal=False)

        dot = F.softmax(dot, dim=2) # row-wise self-attention probabilities

        assert not util.contains_nan(dot[:, 1:, :]) # only the first row may contain nan

        if self.mask == 'first':
            # - The first row of the first attention matrix is entirely masked out, so the softmax
            #   results in a division by zero. Zero that row by hand to get rid of the NaNs.
            dot = dot.clone()
            dot[:, :1, :] = 0.0

        # apply the attention to the values, then swap h, t back and unify heads
        out = torch.bmm(dot, values).view(b, h, t, e)
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        return self.unifyheads(out)
class TransformerBlock(nn.Module):
    """
    One transformer block: a self-attention layer of the chosen flavour followed by
    a feed-forward layer, each with a residual connection, layer norm and dropout.
    """
    def __init__(self, emb, heads, mask, ff_hidden_mult=4, dropout=0.0, type='dense', oned=True, **kwargs):
        """
        :param emb: Embedding dimension.
        :param heads: Number of attention heads.
        :param mask: Whether the attention masks out the future (required for the sparse variants).
        :param ff_hidden_mult: Multiplier for the hidden size of the feed-forward layer.
        :param dropout: Dropout rate applied after each sublayer.
        :param type: Attention flavour: 'sparse', 'strided', 'conv', 'dense' or 'mixed'.
        :param oned: For type='sparse': use the 1D variant if True, else the 2D variant.
        :param kwargs: Passed on to the attention layer(s); for type='mixed' must
            contain 'mixture', a string of 'c'/'s' characters describing the layer sequence.
        """
        super().__init__()
        if type == 'sparse':
            if mask:
                if oned:
                    self.attention = ASH1DSelfAttention(emb, heads=heads, **kwargs)
                else:
                    self.attention = ASH2DSelfAttention(emb, heads=heads, **kwargs)
            else:
                raise Exception('Not implemented yet')
        elif type == 'strided':
            self.attention = StridedSparseSelfAttention(emb, heads=heads, **kwargs)
        elif type == 'conv':
            self.attention = ConvSelfAttention(emb, heads, **kwargs)
        elif type == 'dense':
            self.attention = SelfAttention(emb, heads=heads, mask=mask)
        elif type == 'mixed':
            layers = []
            # FIX: the loop variable was previously named `type`, shadowing both the
            # builtin and this method's own `type` parameter; renamed for clarity.
            for layer_type in kwargs['mixture']:
                if layer_type == 'c':
                    layers.append(ConvSelfAttention(emb, heads, **kwargs))
                elif layer_type == 's':
                    strided = StridedSparseSelfAttention(emb, heads=heads, **kwargs)
                    layers.append(strided)
                    self.toplot = strided # remember the last strided layer for plotting
                else:
                    raise Exception(f'layer type {layer_type} not recognized/')
            self.attention = nn.Sequential(*layers)
        else:
            raise Exception('Not implemented yet')
        self.norm1 = nn.LayerNorm(emb)
        self.norm2 = nn.LayerNorm(emb)
        self.ff = nn.Sequential(
            nn.Linear(emb, ff_hidden_mult * emb),
            nn.ReLU(),
            nn.Linear(ff_hidden_mult * emb, emb)
        )
        self.do = nn.Dropout(dropout)
    def forward(self, x):
        """
        :param x: Input batch, shape (batch, time, emb).
        :return: Tensor of the same shape.
        """
        b, t, e = x.size()
        # attention sublayer with residual connection, norm and dropout
        attended = self.attention(x)
        x = self.norm1(attended + x)
        x = self.do(x)
        # feed-forward sublayer with residual connection, norm and dropout
        fedforward = self.ff(x)
        x = self.norm2(fedforward + x)
        x = self.do(x)
        return x
class GTransformer(nn.Module):
    """
    Transformer for generating text (character by character).
    """
    def __init__(self, emb, heads, depth, seq_length, num_tokens, sparse=False, **kwargs):
        """
        :param emb: Embedding dimension.
        :param heads: Number of attention heads per block.
        :param depth: Number of transformer blocks.
        :param seq_length: Maximum sequence (context) length; sizes the position embedding.
        :param num_tokens: Vocabulary size.
        :param sparse: Passed to the blocks; NOTE this parameter shadows the
            `sparse` module within this method.
        :param kwargs: Are passed to the sparse self attention
        """
        super().__init__()
        self.num_tokens = num_tokens
        self.token_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=num_tokens)
        self.pos_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=seq_length)
        tblocks = []
        for i in range(depth):
            tblocks.append(
                TransformerBlock(emb=emb, heads=heads, seq_length=seq_length, mask=True, sparse=sparse, **kwargs))
        self.tblocks = nn.Sequential(*tblocks)
        # maps the final embeddings to per-token logits
        self.toprobs = nn.Linear(emb, num_tokens)
    def forward(self, x):
        """
        :param x: A batch by sequence length integer tensor of token indices.
        :return: predicted log-probability vectors for each token based on the preceding tokens.
        """
        tokens = self.token_embedding(x)
        b, t, e = tokens.size()
        # add learned position embeddings, broadcast over the batch
        positions = self.pos_embedding(torch.arange(t, device=d(x)))[None, :, :].expand(b, t, e)
        x = tokens + positions
        x = self.tblocks(x)
        x = self.toprobs(x.view(b*t, e)).view(b, t, self.num_tokens)
        return F.log_softmax(x, dim=2)
    def forward_for_plot(self, x):
        """
        Run a forward pass while collecting the (means, sigmas, values) of every
        sparse attention layer along the way, for plotting.

        :param x: A batch by sequence length integer tensor of token indices.
        :return: Tuple of three lists (means, sigmas, values), one entry per
                 sparse attention layer encountered.
        """
        means, sigmas, values = [], [], []
        tokens = self.token_embedding(x)
        b, t, e = tokens.size()
        positions = self.pos_embedding(torch.arange(t, device=d(x)))[None, :, :].expand(b, t, e)
        x = tokens + positions
        for tblock in self.tblocks:
            if type(tblock.attention) is not nn.Sequential:
                # single attention layer: query its hyper network directly
                m, s, v = tblock.attention.hyper(x)
                means.append(m)
                sigmas.append(s)
                values.append(v)
            else:
                # mixed block: walk the attention layers on a copy of x so the
                # real forward pass below is unaffected
                xc = x.clone()
                for layer in tblock.attention: # walk through the attention layers
                    if type(layer) == StridedSparseSelfAttention:
                        m, s, v = layer.hyper(xc)
                        means.append(m)
                        sigmas.append(s)
                        values.append(v)
                    xc = layer(xc)
            x = tblock(x)
        return means, sigmas, values
def enwik8(path, n_train=int(90e6), n_valid=int(5e6), n_test=int(5e6)):
    """
    Load the enwik8 dataset and split it into train/validation/test tensors of
    raw bytes.

    From https://github.com/openai/blocksparse/blob/master/examples/transformer/enwik8.py

    :param path: Path to the (decompressed) enwik8 file.
    :param n_train: Number of bytes in the training split.
    :param n_valid: Number of bytes in the validation split.
    :param n_test: Number of bytes in the test split.
    :return: Three uint8 tensors: (train, valid, test).
    """
    # FIX: np.fromstring is deprecated (removed in modern NumPy) and the file was
    # opened in text mode, which corrupts multi-byte UTF-8 data; read raw bytes
    # and use np.frombuffer instead. The `with` block also closes the previously
    # leaked file handle. `.copy()` makes the array writable so torch.from_numpy
    # does not warn about a read-only buffer.
    with open(path, 'rb') as file:
        X = np.frombuffer(file.read(n_train + n_valid + n_test), dtype=np.uint8).copy()
    trX, vaX, teX = np.split(X, [n_train, n_train + n_valid])
    return torch.from_numpy(trX), torch.from_numpy(vaX), torch.from_numpy(teX)
def go(arg):
    """
    Train a character-level transformer language model on enwik8, periodically
    evaluating (bits per byte) and generating sample text from a random seed.

    :param arg: Parsed command-line options (see the ArgumentParser in __main__).
    """
    util.makedirs('./transformer-plots/')

    if arg.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
        # BUG FIX: the sampled seed was printed but never applied, so negative
        # --random-seed silently left the RNG unseeded.
        torch.manual_seed(seed)
    else:
        torch.manual_seed(arg.seed)

    dv = 'cuda' if arg.cuda else 'cpu'

    tbw = SummaryWriter(log_dir=arg.tb_dir)

    # load the data
    data_train, data_val, data_test = enwik8(arg.data)
    data_test = data_test if arg.final else data_val

    # create the model
    if arg.model.startswith('sparse'):
        model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
                             num_tokens=NUM_TOKENS, sparse=True, gadditional=arg.gadditional, radditional=arg.radditional,
                             region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
                             oned=(arg.model == 'sparse1d'), norm_method=arg.norm_method, clamp=arg.clamp)
    elif arg.model == 'strided':
        model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
                             num_tokens=NUM_TOKENS, gadditional=arg.gadditional, radditional=arg.radditional,
                             region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
                             norm_method=arg.norm_method, clamp=arg.clamp, stride=arg.stride, type='strided')
    elif arg.model == 'conv':
        model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context, k=arg.kconv,
                             num_tokens=NUM_TOKENS, type='conv', norm_method=arg.norm_method)
    elif arg.model == 'dense':
        model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
                             num_tokens=NUM_TOKENS)
    elif arg.model == 'mixed':
        model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
                             num_tokens=NUM_TOKENS, gadditional=arg.gadditional, radditional=arg.radditional,
                             region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
                             norm_method=arg.norm_method, clamp=arg.clamp, stride=arg.stride, type='mixed',
                             kconv=arg.kconv, mixture=arg.mixture)
    else:
        raise Exception(f'Model name unknown: {arg.model}')

    if arg.cuda:
        model.cuda()

    opt = torch.optim.Adam(lr=arg.lr, params=model.parameters())

    # training loop
    for i in tqdm.trange(arg.num_batches):

        # linear learning-rate warmup
        if arg.lr_warmup > 0 and i < arg.lr_warmup:
            lr = max( (arg.lr / arg.lr_warmup) * i, 1e-10)
            # BUG FIX: `opt.lr = lr` only set an unused attribute on the optimizer
            # object; the effective learning rate lives in the parameter groups.
            for group in opt.param_groups:
                group['lr'] = lr

        opt.zero_grad()

        # sample a batch of random subsequences
        starts = torch.randint(size=(arg.batch_size, ), low=0, high=data_train.size(0) - arg.context - 1)
        seqs_source = [data_train[start  :start+arg.context  ] for start in starts]
        seqs_target = [data_train[start+1:start+arg.context+1] for start in starts]
        source = torch.cat([s[None, :] for s in seqs_source ], dim=0).to(torch.long)
        target = torch.cat([s[None, :] for s in seqs_target ], dim=0).to(torch.long)

        if arg.cuda:
            source, target = source.cuda(), target.cuda()
        source, target = Variable(source), Variable(target)

        output = model(source)

        loss = F.nll_loss(output.transpose(2, 1), target, reduction='none')
        loss = loss.mean()

        tbw.add_scalar('transformer/train-loss', float(loss.item()) * LOG2E, i * arg.batch_size)

        # NaN check (NaN != NaN)
        assert loss.item() == loss.item(), f'Loss is nan {loss}'

        loss.backward()

        assert not util.contains_nan(model.parameters()), f'Parameters have become NaN {model.parameters()}'

        if arg.cuda and (i == 0 or random.random() < 0.0005): # occasionally print peak GPU memory usage
            print(f'\nPeak gpu memory use is {torch.cuda.max_memory_cached() / 1e9:.2} Gb')

        # clip gradients
        if arg.gradient_clipping is not None:
            nn.utils.clip_grad_norm_(model.parameters(), arg.gradient_clipping)

        opt.step()

        # periodically plot the sparse attention parameters
        if (arg.model.startswith('sparse') or arg.model == 'strided' or arg.model == 'mixed') and arg.plot_every > 0 and i % arg.plot_every == 0:
            shape = (arg.context, arg.context)

            means, sigmas, values = model.forward_for_plot(source)
            for t, (m, s, v) in enumerate(zip(means, sigmas, values)):

                b, c, k, r = m.size()
                m = m.view(b, c*k, r)
                s = s.view(b, c*k, r)
                v = v.reshape(b, c*k)

                plt.figure(figsize=(7, 7))
                plt.cla()

                if arg.model == 'sparse1d':
                    # prepend the (implicit) row coordinate of each mean
                    ind = torch.arange(c, dtype=torch.float, device=d(m))[None, :, None].expand(b, c, k).reshape(b, c*k, 1)
                    m = torch.cat([ind, m], dim=2)
                    util.plot1d(m[0].data, s[0].data, v[0].data, shape=shape)
                elif arg.model == 'strided' or arg.model == 'mixed':
                    # row coordinates are every stride-th position
                    r = arg.stride
                    ind = torch.arange(c, dtype=torch.float, device=d(m))
                    ind = (ind + 1) * r - 1
                    ind = ind[None, :, None].expand(b, c, k).reshape(b, c*k, 1)
                    m = torch.cat([ind, m], dim=2)
                    util.plot1d(m[0].data, s[0].data, v[0].data, shape=shape)
                else:
                    util.plot(m, s, v, shape=shape)

                plt.xlim((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))
                plt.ylim((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))
                plt.savefig(f'./transformer-plots/means{i:06}.{t}.pdf')

        # periodic evaluation: compression (bits per byte) on the held-out set
        if i != 0 and (i % arg.test_every == 0 or i == arg.num_batches - 1):

            # only use the full test data on the last batch
            upto = data_test.size(0) if i == arg.num_batches - 1 else arg.test_subset
            data_sub = data_test[:upto]

            with torch.no_grad():
                bits, tot = 0.0, 0
                batch = []

                for current in range(data_sub.size(0)):

                    fr = max(0, current - arg.context)
                    to = current + 1

                    context = data_sub[fr:to].to(torch.long)
                    if context.size(0) < arg.context + 1:
                        # left-pad with zeros so every context has the same length
                        pad = torch.zeros(size=(arg.context + 1 - context.size(0),), dtype=torch.long)
                        context = torch.cat([pad, context], dim=0)

                        assert context.size(0) == arg.context + 1

                    if arg.cuda:
                        context = context.cuda()

                    batch.append(context[None, :])

                    if len(batch) == arg.test_batchsize or current == data_sub.size(0) - 1:
                        b = len(batch)
                        tot += b

                        # renamed from `all`, which shadowed the builtin
                        all_ctx = torch.cat(batch, dim=0)
                        source = all_ctx[:, :-1]
                        target = all_ctx[:, -1]

                        output = model(source)

                        # log-probability of the true next byte, converted to bits
                        lnprobs = output[torch.arange(b, device=dv), -1, target]
                        log2probs = lnprobs * LOG2E

                        bits += - log2probs.sum()

                        batch = []

                assert tot == data_sub.size(0)

                bits_per_byte = bits / data_sub.size(0)

                print(f'epoch{i}: {bits_per_byte:.4} bits per byte')
                tbw.add_scalar(f'transformer/eval-loss', bits_per_byte, i * arg.batch_size)

                # Generate from seed
                GENSIZE = 600
                TEMP = 0.5
                seedfr = random.randint(0, data_test.size(0) - arg.context)
                input = data_test[seedfr:seedfr + arg.context].to(torch.long)

                if arg.cuda:
                    input = input.cuda()

                input = Variable(input)

                # print the seed in brackets, then sample GENSIZE characters
                print('[', end='', flush=True)
                for c in input:
                    print(str(chr(c)), end='', flush=True)
                print(']', end='', flush=True)

                for _ in range(GENSIZE):
                    output = model(input[None, :])
                    c = sample(output[0, -1, :], TEMP)
                    # clamp control characters to printable space
                    print(str(chr(max(32, c))), end='', flush=True)

                    input = torch.cat([input[1:], c[None]], dim=0)
                print()
if __name__ == "__main__":
    ## Parse the command line options
    parser = ArgumentParser()

    parser.add_argument("-N", "--num-batches",
                        dest="num_batches",
                        help="Number of batches to train on. Each batch contains randomly sampled subsequences of the data.",
                        default=1_000_000, type=int)

    parser.add_argument("-m", "--model",
                        dest="model",
                        help="Which model to train (dense, sparse1d, sparse2d, conv, mixed).",
                        default='dense', type=str)

    # FIX: corrected typo "convotlutions" in the user-facing help text
    parser.add_argument("--mixture",
                        dest="mixture",
                        help="Character string describing the sequence of convolutions (c) and strided attentions (s).",
                        default='cccs', type=str)

    parser.add_argument("--norm",
                        dest="norm_method",
                        help="How to normalize the attention matrix (softmax, softplus, abs).",
                        default='softmax', type=str)

    parser.add_argument("-b", "--batch-size",
                        dest="batch_size",
                        help="The batch size.",
                        default=64, type=int)

    parser.add_argument("-k", "--num-points",
                        dest="k",
                        help="Number of index tuples per output in the sparse transformer.",
                        default=32, type=int)

    parser.add_argument("--k-conv",
                        dest="kconv",
                        help="Convolution kernel size.",
                        default=3, type=int)

    parser.add_argument("-a", "--gadditional",
                        dest="gadditional",
                        help="Number of additional points sampled globally",
                        default=8, type=int)

    parser.add_argument("-A", "--radditional",
                        dest="radditional",
                        help="Number of additional points sampled locally",
                        default=8, type=int)

    parser.add_argument("-R", "--region",
                        dest="region",
                        help="Size of the (square) region to use for local sampling.",
                        default=8, type=int)

    parser.add_argument("-c", "--cuda", dest="cuda",
                        help="Whether to use cuda.",
                        action="store_true")

    parser.add_argument("-D", "--data", dest="data",
                        help="Data file",
                        default=None)

    parser.add_argument("-l", "--learn-rate",
                        dest="lr",
                        help="Learning rate",
                        default=0.0001, type=float)

    parser.add_argument("-S", "--sigma-mult",
                        dest="sigma_mult",
                        help="Sigma multiplier.",
                        default=0.1, type=float)

    parser.add_argument("-M", "--min-sigma",
                        dest="min_sigma",
                        help="Minimum value of sigma.",
                        default=0.01, type=float)

    parser.add_argument("-T", "--tb_dir", dest="tb_dir",
                        help="Data directory",
                        default=None)

    parser.add_argument("-f", "--final", dest="final",
                        help="Whether to run on the real test set (if not included, the validation set is used).",
                        action="store_true")

    parser.add_argument("-E", "--embedding", dest="embedding_size",
                        help="Size of the character embeddings.",
                        default=70, type=int)

    parser.add_argument("-H", "--heads", dest="num_heads",
                        help="Number of attention heads.",
                        default=8, type=int)

    parser.add_argument("-C", "--context", dest="context",
                        help="Length of the sequences extracted from the corpus (and the context used during inference).",
                        default=300, type=int)

    parser.add_argument("-d", "--depth", dest="depth",
                        help="Depth of the network (nr of self-attention layers)",
                        default=4, type=int)

    parser.add_argument("-r", "--random-seed",
                        dest="seed",
                        help="RNG seed. Negative for random",
                        default=1, type=int)

    parser.add_argument("--stride",
                        dest="stride",
                        help="Stride length for the strided self attention",
                        default=32, type=int)

    parser.add_argument("--test-every",
                        dest="test_every",
                        help="How many batches between tests.",
                        default=1000, type=int)

    parser.add_argument("--plot-every",
                        dest="plot_every",
                        help="How many batches between plotting the sparse indices.",
                        default=1000, type=int)

    parser.add_argument("--test-subset",
                        dest="test_subset",
                        help="A subset for the validation tests.",
                        default=100000, type=int)

    parser.add_argument("--test-batchsize",
                        dest="test_batchsize",
                        help="Batch size for computing the validation loss.",
                        default=1024, type=int)

    parser.add_argument("--gradient-clipping",
                        dest="gradient_clipping",
                        help="Gradient clipping.",
                        default=1.0, type=float)

    parser.add_argument("--lr-warmup",
                        dest="lr_warmup",
                        help="Learning rate warmup.",
                        default=5000, type=int)

    parser.add_argument("--clamp", dest="clamp",
                        help="Use the clamp operation to fit the parameters to the space of index tuples.",
                        action="store_true")

    options = parser.parse_args()

    print('OPTIONS ', options)

    go(options)
|
[
"torch.nn.Dropout",
"torch.distributions.Categorical",
"argparse.ArgumentParser",
"sparse.util.inv",
"torch.bmm",
"torch.nn.Embedding",
"torch.cat",
"torch.randn",
"matplotlib.pyplot.figure",
"sparse.util.plot1d",
"torch.arange",
"torch.no_grad",
"torch.nn.functional.pad",
"sparse.util.plot",
"torch.ones",
"_context.sparse.batchmm",
"random.randint",
"torch.triu_indices",
"torch.nn.LayerNorm",
"torch.cuda.max_memory_cached",
"matplotlib.pyplot.cla",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"torch.nn.functional.log_softmax",
"sparse.util.contains_nan",
"_context.sparse.logsoftmax",
"util.d",
"_context.sparse.transform_sigmas",
"tqdm.trange",
"matplotlib.pyplot.ylim",
"torch.manual_seed",
"sparse.util.makedirs",
"torch.autograd.Variable",
"random.random",
"matplotlib.use",
"math.log2",
"sys.exit",
"torch.from_numpy",
"matplotlib.pyplot.xlim",
"_context.sparse.transform_means",
"torch.nn.ReLU",
"_context.sparse.ngenerate",
"sparse.util.flip",
"torch.nn.Sequential",
"_context.sparse.simple_normalize",
"torch.nn.functional.softmax",
"numpy.split",
"sparse.util.nduplicates",
"sparse.util.contains_inf",
"torch.nn.functional.softplus",
"matplotlib.pyplot.savefig",
"_context.sparse.densities"
] |
[((355, 369), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (362, 369), True, 'import matplotlib as mpl\n'), ((635, 652), 'math.log2', 'math.log2', (['math.e'], {}), '(math.e)\n', (644, 652), False, 'import random, tqdm, sys, math\n'), ((774, 813), 'torch.nn.functional.softmax', 'F.softmax', (['(lnprobs / temperature)'], {'dim': '(0)'}), '(lnprobs / temperature, dim=0)\n', (783, 813), True, 'import torch.nn.functional as F\n'), ((823, 842), 'torch.distributions.Categorical', 'dist.Categorical', (['p'], {}), '(p)\n', (839, 842), True, 'import torch.distributions as dist\n'), ((1150, 1208), 'torch.triu_indices', 'torch.triu_indices', (['h', 'w'], {'offset': '(0 if mask_diagonal else 1)'}), '(h, w, offset=0 if mask_diagonal else 1)\n', (1168, 1208), False, 'import torch\n'), ((41273, 41314), 'numpy.split', 'np.split', (['X', '[n_train, n_train + n_valid]'], {}), '(X, [n_train, n_train + n_valid])\n', (41281, 41314), True, 'import numpy as np\n'), ((41413, 41450), 'sparse.util.makedirs', 'util.makedirs', (['"""./transformer-plots/"""'], {}), "('./transformer-plots/')\n", (41426, 41450), False, 'from sparse import util\n'), ((41649, 41682), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'arg.tb_dir'}), '(log_dir=arg.tb_dir)\n', (41662, 41682), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((44009, 44037), 'tqdm.trange', 'tqdm.trange', (['arg.num_batches'], {}), '(arg.num_batches)\n', (44020, 44037), False, 'import random, tqdm, sys, math\n'), ((50009, 50025), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (50023, 50025), False, 'from argparse import ArgumentParser\n'), ((1891, 1930), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (1900, 1930), False, 'from torch import nn\n'), ((1956, 1995), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (1965, 1995), 
False, 'from torch import nn\n'), ((2020, 2059), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (2029, 2059), False, 'from torch import nn\n'), ((2087, 2114), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (2096, 2114), False, 'from torch import nn\n'), ((3416, 3550), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t, t)', 'relative_range': '(self.region, self.region)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t, t),\n relative_range=(self.region, self.region), cuda=x.is_cuda)\n', (3432, 3550), False, 'from _context import sparse\n'), ((3600, 3618), 'sparse.util.flip', 'util.flip', (['indices'], {}), '(indices)\n', (3609, 3618), False, 'from sparse import util\n'), ((5910, 5954), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (5927, 5954), False, 'from _context import sparse\n'), ((6081, 6138), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (6095, 6138), False, 'from _context import sparse\n'), ((7084, 7123), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7093, 7123), False, 'from torch import nn\n'), ((7149, 7188), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7158, 7188), False, 'from torch import nn\n'), ((7213, 7252), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7222, 7252), False, 'from torch import nn\n'), ((7280, 7307), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (7289, 7307), False, 'from torch import nn\n'), 
((8015, 8044), 'torch.cat', 'torch.cat', (['[x, coords]'], {'dim': '(2)'}), '([x, coords], dim=2)\n', (8024, 8044), False, 'import torch\n'), ((8414, 8435), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (8422, 8435), False, 'from sparse import util\n'), ((8724, 8740), 'sparse.util.flip', 'util.flip', (['means'], {}), '(means)\n', (8733, 8740), False, 'from sparse import util\n'), ((9410, 9544), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t, t)', 'relative_range': '(self.region, self.region)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t, t),\n relative_range=(self.region, self.region), cuda=x.is_cuda)\n', (9426, 9544), False, 'from _context import sparse\n'), ((9595, 9613), 'sparse.util.flip', 'util.flip', (['indices'], {}), '(indices)\n', (9604, 9613), False, 'from sparse import util\n'), ((12473, 12530), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (12487, 12530), False, 'from _context import sparse\n'), ((13848, 13887), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13857, 13887), False, 'from torch import nn\n'), ((13913, 13952), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13922, 13952), False, 'from torch import nn\n'), ((13977, 14016), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13986, 14016), False, 'from torch import nn\n'), ((14044, 14071), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (14053, 14071), False, 'from torch import nn\n'), ((14832, 14861), 'torch.cat', 'torch.cat', (['[x, coords]'], {'dim': '(2)'}), '([x, coords], dim=2)\n', (14841, 14861), False, 
'import torch\n'), ((16190, 16310), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t,)', 'relative_range': '(self.region,)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t,),\n relative_range=(self.region,), cuda=x.is_cuda)\n', (16206, 16310), False, 'from _context import sparse\n'), ((17389, 17421), 'torch.cat', 'torch.cat', (['[out, indices]'], {'dim': '(3)'}), '([out, indices], dim=3)\n', (17398, 17421), False, 'import torch\n'), ((19750, 19807), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (19764, 19807), False, 'from _context import sparse\n'), ((21206, 21233), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21215, 21233), False, 'from torch import nn\n'), ((21259, 21286), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21268, 21286), False, 'from torch import nn\n'), ((21312, 21339), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21321, 21339), False, 'from torch import nn\n'), ((21367, 21392), 'torch.nn.Linear', 'nn.Linear', (['(s * heads)', 'emb'], {}), '(s * heads, emb)\n', (21376, 21392), False, 'from torch import nn\n'), ((22567, 22638), 'torch.cat', 'torch.cat', (['[x[:, selection, :], coords, summed[:, selection, :]]'], {'dim': '(2)'}), '([x[:, selection, :], coords, summed[:, selection, :]], dim=2)\n', (22576, 22638), False, 'import torch\n'), ((24329, 24465), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t,)', 'relative_range': '(self.region,)', 'cuda': 'x.is_cuda', 'epsilon': '(0.0001)'}), '(means, self.gadditional, self.radditional, rng=(t,),\n relative_range=(self.region,), cuda=x.is_cuda, epsilon=0.0001)\n', (24345, 24465), False, 'from _context import 
sparse\n'), ((25789, 25821), 'torch.cat', 'torch.cat', (['[out, indices]'], {'dim': '(3)'}), '([out, indices], dim=3)\n', (25798, 25821), False, 'import torch\n'), ((28985, 29040), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': 'size', 'xmatrix': 'values'}), '(indices, dot, size=size, xmatrix=values)\n', (28999, 29040), False, 'from _context import sparse\n'), ((30220, 30247), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30229, 30247), False, 'from torch import nn\n'), ((30273, 30300), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30282, 30300), False, 'from torch import nn\n'), ((30326, 30353), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30335, 30353), False, 'from torch import nn\n'), ((30381, 30406), 'torch.nn.Linear', 'nn.Linear', (['(s * heads)', 'emb'], {}), '(s * heads, emb)\n', (30390, 30406), False, 'from torch import nn\n'), ((30629, 30667), 'torch.nn.functional.pad', 'F.pad', (['x', '[0, 0, 0, 0, k - 1, 0, 0, 0]'], {}), '(x, [0, 0, 0, 0, k - 1, 0, 0, 0])\n', (30634, 30667), True, 'import torch.nn.functional as F\n'), ((33286, 33341), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': 'size', 'xmatrix': 'values'}), '(indices, dot, size=size, xmatrix=values)\n', (33300, 33341), False, 'from _context import sparse\n'), ((33958, 33997), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (33967, 33997), False, 'from torch import nn\n'), ((34023, 34062), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (34032, 34062), False, 'from torch import nn\n'), ((34087, 34126), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (34096, 34126), False, 'from torch import nn\n'), 
((34154, 34181), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (34163, 34181), False, 'from torch import nn\n'), ((35448, 35469), 'torch.nn.functional.softmax', 'F.softmax', (['dot'], {'dim': '(2)'}), '(dot, dim=2)\n', (35457, 35469), True, 'import torch.nn.functional as F\n'), ((37637, 37654), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['emb'], {}), '(emb)\n', (37649, 37654), False, 'from torch import nn\n'), ((37676, 37693), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['emb'], {}), '(emb)\n', (37688, 37693), False, 'from torch import nn\n'), ((37879, 37898), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (37889, 37898), False, 'from torch import nn\n'), ((38695, 38753), 'torch.nn.Embedding', 'nn.Embedding', ([], {'embedding_dim': 'emb', 'num_embeddings': 'num_tokens'}), '(embedding_dim=emb, num_embeddings=num_tokens)\n', (38707, 38753), False, 'from torch import nn\n'), ((38783, 38841), 'torch.nn.Embedding', 'nn.Embedding', ([], {'embedding_dim': 'emb', 'num_embeddings': 'seq_length'}), '(embedding_dim=emb, num_embeddings=seq_length)\n', (38795, 38841), False, 'from torch import nn\n'), ((39062, 39085), 'torch.nn.Sequential', 'nn.Sequential', (['*tblocks'], {}), '(*tblocks)\n', (39075, 39085), False, 'from torch import nn\n'), ((39110, 39136), 'torch.nn.Linear', 'nn.Linear', (['emb', 'num_tokens'], {}), '(emb, num_tokens)\n', (39119, 39136), False, 'from torch import nn\n'), ((39680, 39703), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (39693, 39703), True, 'import torch.nn.functional as F\n'), ((41326, 41347), 'torch.from_numpy', 'torch.from_numpy', (['trX'], {}), '(trX)\n', (41342, 41347), False, 'import torch\n'), ((41349, 41370), 'torch.from_numpy', 'torch.from_numpy', (['vaX'], {}), '(vaX)\n', (41365, 41370), False, 'import torch\n'), ((41372, 41393), 'torch.from_numpy', 'torch.from_numpy', (['teX'], {}), '(teX)\n', (41388, 41393), False, 'import torch\n'), 
((41488, 41514), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (41502, 41514), False, 'import random, tqdm, sys, math\n'), ((41570, 41597), 'torch.manual_seed', 'torch.manual_seed', (['arg.seed'], {}), '(arg.seed)\n', (41587, 41597), False, 'import torch\n'), ((2278, 2297), 'torch.randn', 'torch.randn', (['(k, 2)'], {}), '((k, 2))\n', (2289, 2297), False, 'import torch\n'), ((2334, 2351), 'torch.randn', 'torch.randn', (['(k,)'], {}), '((k,))\n', (2345, 2351), False, 'import torch\n'), ((2394, 2410), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (2404, 2410), False, 'import torch\n'), ((2895, 2927), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {}), '(means, s)\n', (2917, 2927), False, 'from _context import sparse\n'), ((7476, 7492), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (7486, 7492), False, 'import torch\n'), ((7632, 7658), 'torch.nn.Linear', 'nn.Linear', (['(emb + 1)', 'hidden'], {}), '(emb + 1, hidden)\n', (7641, 7658), False, 'from torch import nn\n'), ((7660, 7669), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7667, 7669), False, 'from torch import nn\n'), ((7683, 7707), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 3)'], {}), '(hidden, k * 3)\n', (7692, 7707), False, 'from torch import nn\n'), ((8117, 8142), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (8134, 8142), False, 'from sparse import util\n'), ((8889, 8921), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {}), '(means, s)\n', (8911, 8921), False, 'from _context import sparse\n'), ((11991, 12013), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (12008, 12013), False, 'from sparse import util\n'), ((12310, 12332), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (12327, 12332), False, 'from sparse import util\n'), ((12690, 12712), 'sparse.util.contains_nan', 'util.contains_nan', 
(['out'], {}), '(out)\n', (12707, 12712), False, 'from sparse import util\n'), ((14240, 14256), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (14250, 14256), False, 'import torch\n'), ((14396, 14422), 'torch.nn.Linear', 'nn.Linear', (['(emb + 1)', 'hidden'], {}), '(emb + 1, hidden)\n', (14405, 14422), False, 'from torch import nn\n'), ((14424, 14433), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14431, 14433), False, 'from torch import nn\n'), ((14447, 14471), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 2)'], {}), '(hidden, k * 2)\n', (14456, 14471), False, 'from torch import nn\n'), ((14934, 14959), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (14951, 14959), False, 'from sparse import util\n'), ((15280, 15301), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (15288, 15301), False, 'from sparse import util\n'), ((15624, 15701), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {'method': "('clamp' if self.clamp else 'sigmoid')"}), "(means, s, method='clamp' if self.clamp else 'sigmoid')\n", (15646, 15701), False, 'from _context import sparse\n'), ((18948, 18970), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (18965, 18970), False, 'from sparse import util\n'), ((19067, 19089), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (19084, 19089), False, 'from sparse import util\n'), ((19311, 19386), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', '(weights * dot)', 's'], {'method': 'self.norm_method'}), '(indices, weights * dot, s, method=self.norm_method)\n', (19334, 19386), False, 'from _context import sparse\n'), ((19469, 19491), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (19486, 19491), False, 'from sparse import util\n'), ((19587, 19609), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (19604, 19609), False, 'from 
sparse import util\n'), ((19967, 19989), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (19984, 19989), False, 'from sparse import util\n'), ((21561, 21577), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (21571, 21577), False, 'import torch\n'), ((21717, 21747), 'torch.nn.Linear', 'nn.Linear', (['(2 * emb + 1)', 'hidden'], {}), '(2 * emb + 1, hidden)\n', (21726, 21747), False, 'from torch import nn\n'), ((21749, 21758), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (21756, 21758), False, 'from torch import nn\n'), ((21772, 21796), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 2)'], {}), '(hidden, k * 2)\n', (21781, 21796), False, 'from torch import nn\n'), ((22712, 22737), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (22729, 22737), False, 'from sparse import util\n'), ((22869, 22894), 'sparse.util.contains_inf', 'util.contains_inf', (['params'], {}), '(params)\n', (22886, 22894), False, 'from sparse import util\n'), ((23204, 23225), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (23212, 23225), False, 'from sparse import util\n'), ((23550, 23627), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {'method': "('clamp' if self.clamp else 'sigmoid')"}), "(means, s, method='clamp' if self.clamp else 'sigmoid')\n", (23572, 23627), False, 'from _context import sparse\n'), ((26012, 26038), 'sparse.util.contains_inf', 'util.contains_inf', (['weights'], {}), '(weights)\n', (26029, 26038), False, 'from sparse import util\n'), ((26148, 26174), 'sparse.util.contains_nan', 'util.contains_nan', (['weights'], {}), '(weights)\n', (26165, 26174), False, 'from sparse import util\n'), ((27832, 27854), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (27849, 27854), False, 'from sparse import util\n'), ((27948, 27970), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (27965, 27970), False, 'from 
sparse import util\n'), ((28192, 28270), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', '(weights * dot)', 'size'], {'method': 'self.norm_method'}), '(indices, weights * dot, size, method=self.norm_method)\n', (28215, 28270), False, 'from _context import sparse\n'), ((28353, 28375), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (28370, 28375), False, 'from sparse import util\n'), ((29200, 29222), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (29217, 29222), False, 'from sparse import util\n'), ((32850, 32918), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', 'dot', 'size'], {'method': 'self.norm_method'}), '(indices, dot, size, method=self.norm_method)\n', (32873, 32918), False, 'from _context import sparse\n'), ((33501, 33523), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (33518, 33523), False, 'from sparse import util\n'), ((35542, 35574), 'sparse.util.contains_nan', 'util.contains_nan', (['dot[:, 1:, :]'], {}), '(dot[:, 1:, :])\n', (35559, 35574), False, 'from sparse import util\n'), ((37740, 37776), 'torch.nn.Linear', 'nn.Linear', (['emb', '(ff_hidden_mult * emb)'], {}), '(emb, ff_hidden_mult * emb)\n', (37749, 37776), False, 'from torch import nn\n'), ((37790, 37799), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (37797, 37799), False, 'from torch import nn\n'), ((37813, 37849), 'torch.nn.Linear', 'nn.Linear', (['(ff_hidden_mult * emb)', 'emb'], {}), '(ff_hidden_mult * emb, emb)\n', (37822, 37849), False, 'from torch import nn\n'), ((44777, 44793), 'torch.autograd.Variable', 'Variable', (['source'], {}), '(source)\n', (44785, 44793), False, 'from torch.autograd import Variable\n'), ((44795, 44811), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (44803, 44811), False, 'from torch.autograd import Variable\n'), ((2955, 3015), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', 
(['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (2978, 3015), False, 'from _context import sparse\n'), ((3850, 3875), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (3866, 3875), False, 'from sparse import util\n'), ((3986, 4024), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (4002, 4024), False, 'from _context import sparse\n'), ((5831, 5881), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (5840, 5881), False, 'import torch\n'), ((8392, 8396), 'util.d', 'd', (['x'], {}), '(x)\n', (8393, 8396), False, 'from util import d\n'), ((8949, 9009), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (8972, 9009), False, 'from _context import sparse\n'), ((9845, 9870), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (9861, 9870), False, 'from sparse import util\n'), ((9981, 10019), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (9997, 10019), False, 'from _context import sparse\n'), ((11836, 11886), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (11845, 11886), False, 'import torch\n'), ((12177, 12221), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (12194, 12221), False, 'from _context import sparse\n'), ((15227, 15231), 'util.d', 'd', (['x'], {}), '(x)\n', (15228, 15231), False, 'from util import d\n'), ((15564, 15581), 'torch.nn.functional.softplus', 'F.softplus', (['means'], {}), '(means)\n', (15574, 15581), True, 'import torch.nn.functional as F\n'), ((15729, 15789), 
'_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (15752, 15789), False, 'from _context import sparse\n'), ((16644, 16682), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (16660, 16682), False, 'from _context import sparse\n'), ((16760, 16785), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (16776, 16785), False, 'from sparse import util\n'), ((18862, 18912), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (18871, 18912), False, 'import torch\n'), ((22195, 22199), 'util.d', 'd', (['x'], {}), '(x)\n', (22196, 22199), False, 'from util import d\n'), ((23507, 23524), 'torch.nn.functional.softplus', 'F.softplus', (['means'], {}), '(means)\n', (23517, 23524), True, 'import torch.nn.functional as F\n'), ((23655, 23715), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (23678, 23715), False, 'from _context import sparse\n'), ((24063, 24067), 'util.d', 'd', (['x'], {}), '(x)\n', (24064, 24067), False, 'from util import d\n'), ((24799, 24837), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (24815, 24837), False, 'from _context import sparse\n'), ((24916, 24941), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (24932, 24941), False, 'from sparse import util\n'), ((27707, 27757), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (27716, 27757), False, 'import torch\n'), ((28486, 28508), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (28503, 28508), False, 'from sparse import util\n'), 
((28910, 28920), 'sys.exit', 'sys.exit', ([], {}), '()\n', (28918, 28920), False, 'import random, tqdm, sys, math\n'), ((32361, 32411), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (32370, 32411), False, 'import torch\n'), ((35983, 36005), 'torch.bmm', 'torch.bmm', (['dot', 'values'], {}), '(dot, values)\n', (35992, 36005), False, 'import torch\n'), ((44518, 44569), 'torch.cat', 'torch.cat', (['[s[None, :] for s in seqs_source]'], {'dim': '(0)'}), '([s[None, :] for s in seqs_source], dim=0)\n', (44527, 44569), False, 'import torch\n'), ((44603, 44654), 'torch.cat', 'torch.cat', (['[s[None, :] for s in seqs_target]'], {'dim': '(0)'}), '([s[None, :] for s in seqs_target], dim=0)\n', (44612, 44654), False, 'import torch\n'), ((46119, 46145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (46129, 46145), True, 'import matplotlib.pyplot as plt\n'), ((46162, 46171), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (46169, 46171), True, 'import matplotlib.pyplot as plt\n'), ((46993, 47062), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))\n', (47001, 47062), True, 'import matplotlib.pyplot as plt\n'), ((47079, 47148), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))\n', (47087, 47148), True, 'import matplotlib.pyplot as plt\n'), ((47166, 47221), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./transformer-plots/means{i:06}.{t}.pdf"""'], {}), "(f'./transformer-plots/means{i:06}.{t}.pdf')\n", (47177, 47221), True, 'import matplotlib.pyplot as plt\n'), ((47445, 47460), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (47458, 47460), False, 'import torch\n'), ((49418, 49433), 
'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (49426, 49433), False, 'from torch.autograd import Variable\n'), ((7932, 7936), 'util.d', 'd', (['x'], {}), '(x)\n', (7933, 7936), False, 'from util import d\n'), ((14749, 14753), 'util.d', 'd', (['x'], {}), '(x)\n', (14750, 14753), False, 'from util import d\n'), ((19228, 19272), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (19245, 19272), False, 'from _context import sparse\n'), ((22363, 22367), 'util.d', 'd', (['x'], {}), '(x)\n', (22364, 22367), False, 'from util import d\n'), ((28106, 28153), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 'size'], {}), '(indices, weights * dot, size)\n', (28123, 28153), False, 'from _context import sparse\n'), ((32774, 32811), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', 'dot', 'size'], {}), '(indices, dot, size)\n', (32791, 32811), False, 'from _context import sparse\n'), ((45282, 45297), 'random.random', 'random.random', ([], {}), '()\n', (45295, 45297), False, 'import random, tqdm, sys, math\n'), ((46365, 46391), 'torch.cat', 'torch.cat', (['[ind, m]'], {'dim': '(2)'}), '([ind, m], dim=2)\n', (46374, 46391), False, 'import torch\n'), ((46412, 46469), 'sparse.util.plot1d', 'util.plot1d', (['m[0].data', 's[0].data', 'v[0].data'], {'shape': 'shape'}), '(m[0].data, s[0].data, v[0].data, shape=shape)\n', (46423, 46469), False, 'from sparse import util\n'), ((49865, 49903), 'torch.cat', 'torch.cat', (['[input[1:], c[None]]'], {'dim': '(0)'}), '([input[1:], c[None]], dim=0)\n', (49874, 49903), False, 'import torch\n'), ((22511, 22514), 'util.d', 'd', ([], {}), '()\n', (22512, 22514), False, 'from util import d\n'), ((46796, 46822), 'torch.cat', 'torch.cat', (['[ind, m]'], {'dim': '(2)'}), '([ind, m], dim=2)\n', (46805, 46822), False, 'import torch\n'), ((46843, 46900), 'sparse.util.plot1d', 'util.plot1d', (['m[0].data', 
's[0].data', 'v[0].data'], {'shape': 'shape'}), '(m[0].data, s[0].data, v[0].data, shape=shape)\n', (46854, 46900), False, 'from sparse import util\n'), ((46944, 46975), 'sparse.util.plot', 'util.plot', (['m', 's', 'v'], {'shape': 'shape'}), '(m, s, v, shape=shape)\n', (46953, 46975), False, 'from sparse import util\n'), ((47931, 47963), 'torch.cat', 'torch.cat', (['[pad, context]'], {'dim': '(0)'}), '([pad, context], dim=0)\n', (47940, 47963), False, 'import torch\n'), ((48364, 48387), 'torch.cat', 'torch.cat', (['batch'], {'dim': '(0)'}), '(batch, dim=0)\n', (48373, 48387), False, 'import torch\n'), ((16574, 16584), 'util.d', 'd', (['indices'], {}), '(indices)\n', (16575, 16584), False, 'from util import d\n'), ((17318, 17328), 'util.d', 'd', (['indices'], {}), '(indices)\n', (17319, 17328), False, 'from util import d\n'), ((31778, 31782), 'util.d', 'd', (['x'], {}), '(x)\n', (31779, 31782), False, 'from util import d\n'), ((37526, 37548), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (37539, 37548), False, 'from torch import nn\n'), ((45398, 45428), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (45426, 45428), False, 'import torch\n'), ((39499, 39503), 'util.d', 'd', (['x'], {}), '(x)\n', (39500, 39503), False, 'from util import d\n'), ((40119, 40123), 'util.d', 'd', (['x'], {}), '(x)\n', (40120, 40123), False, 'from util import d\n'), ((46642, 46646), 'util.d', 'd', (['m'], {}), '(m)\n', (46643, 46646), False, 'from util import d\n'), ((48567, 48593), 'torch.arange', 'torch.arange', (['b'], {'device': 'dv'}), '(b, device=dv)\n', (48579, 48593), False, 'import torch\n'), ((31669, 31673), 'util.d', 'd', (['x'], {}), '(x)\n', (31670, 31673), False, 'from util import d\n'), ((5661, 5665), 'util.d', 'd', (['x'], {}), '(x)\n', (5662, 5665), False, 'from util import d\n'), ((11662, 11666), 'util.d', 'd', (['x'], {}), '(x)\n', (11663, 11666), False, 'from util import d\n'), ((18688, 18692), 'util.d', 'd', 
(['x'], {}), '(x)\n', (18689, 18692), False, 'from util import d\n'), ((27531, 27535), 'util.d', 'd', (['x'], {}), '(x)\n', (27532, 27535), False, 'from util import d\n'), ((32189, 32193), 'util.d', 'd', (['x'], {}), '(x)\n', (32190, 32193), False, 'from util import d\n'), ((46285, 46289), 'util.d', 'd', (['m'], {}), '(m)\n', (46286, 46289), False, 'from util import d\n')]
|
# Daftar package yang kita pakai
from flask import Flask, request, jsonify, make_response
from flaskext.mysql import MySQL
from flask_restful import Resource, Api
# Create an instance of Flask
app = Flask(__name__)
# Create an instance of MySQL
mysql = MySQL()
# Create an instance of Flask RESTful API
api = Api(app)
# Set database credentials.
app.config["MYSQL_DATABASE_USER"] = "remote"
app.config["MYSQL_DATABASE_PASSWORD"] = "<PASSWORD>"
app.config["MYSQL_DATABASE_DB"] = "contohdatabase"
app.config["MYSQL_DATABASE_HOST"] = "localhost"
# Initialize the MySQL extension
mysql.init_app(app)
# Mendapatkan dan menampilkan data semua user
class UserList(Resource):
# Method for get all users
def get():
try:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("""select * from user""")
row_headers = [x[0] for x in cursor.description]
result = cursor.fetchall()
json_data = []
for r in result:
json_data.append(dict(zip(row_headers, r)))
return make_response(jsonify({"data": json_data}), 200)
except Exception as e:
print(e)
finally:
cursor.close()
conn.close()
# Mendapatkan dan menampilkan data user berdasarkan id
class User(Resource):
# Method to get user by id
def get(user_id):
try:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("select * from user where id = %s", user_id)
row_headers = [x[0] for x in cursor.description]
result = cursor.fetchall()
json_data = []
for r in result:
json_data.append(dict(zip(row_headers, r)))
return make_response(jsonify({"data": json_data}), 200)
except Exception as e:
print(e)
finally:
cursor.close()
conn.close()
# Menambahkan data User
class AddUser(Resource):
def post():
# Method for create new user
try:
conn = mysql.connect()
cursor = conn.cursor()
_email = request.form["email"]
_password = request.form["password"]
_name = request.form["name"]
insert_user_cmd = (
"""INSERT INTO user(email, password, name) VALUES(%s, %s, %s)"""
)
cursor.execute(insert_user_cmd, (_email, _password, _name))
conn.commit()
response = jsonify(message="User added successfully.", id=cursor.lastrowid)
response.status_code = 200
except Exception as e:
print(e)
response = jsonify("Failed to add user.")
response.status_code = 400
finally:
cursor.close()
conn.close()
return response
# Mengupdate data user berdasarkan id
class Update(Resource):
# Method to edit / update
def put(user_id):
try:
conn = mysql.connect()
cursor = conn.cursor()
# Fungsi agar kita mudah dalam mengedit
def edit(tabel, value, user_id):
update_user_cmd = (
"""UPDATE user SET """
+ tabel
+ """ = '"""
+ value
+ """' WHERE user.id = %s"""
)
cursor.execute(update_user_cmd, (user_id))
conn.commit()
email = request.form.get("email")
password = request.form.get("password")
name = request.form.get("name")
if email:
edit("email", email, user_id)
if password:
edit("password", password, user_id)
if name:
edit("name", name, user_id)
response = jsonify("User updated successfully.")
response.status_code = 200
except Exception as e:
print(e)
response = jsonify("Failed to update user.")
response.status_code = 400
finally:
cursor.close()
conn.close()
return response
# Menghapus data user berdasarkan id
class Delete(Resource):
# Method to delete
def delete(user_id):
try:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("delete from user where id = %s", user_id)
conn.commit()
response = jsonify("User deleted successfully.")
response.status_code = 200
except Exception as e:
print(e)
response = jsonify("Failed to delete user.")
response.status_code = 400
finally:
cursor.close()
conn.close()
return response
# API resource routes
api.add_resource(UserList, "/users", endpoint="users")
api.add_resource(AddUser, "/adduser", endpoint="adduser")
api.add_resource(User, "/user/<int:user_id>", endpoint="user")
api.add_resource(Update, "/update/<int:user_id>", endpoint="update")
api.add_resource(Delete, "/delete/<int:user_id>", endpoint="delete")
# Api running di localhost dengan port 2020
if __name__ == "__main__":
app.run(host="127.0.0.1", port=2020)
|
[
"flask_restful.Api",
"flask.request.form.get",
"flask.Flask",
"flaskext.mysql.MySQL",
"flask.jsonify"
] |
[((200, 215), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'from flask import Flask, request, jsonify, make_response\n'), ((255, 262), 'flaskext.mysql.MySQL', 'MySQL', ([], {}), '()\n', (260, 262), False, 'from flaskext.mysql import MySQL\n'), ((312, 320), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (315, 320), False, 'from flask_restful import Resource, Api\n'), ((2522, 2586), 'flask.jsonify', 'jsonify', ([], {'message': '"""User added successfully."""', 'id': 'cursor.lastrowid'}), "(message='User added successfully.', id=cursor.lastrowid)\n", (2529, 2586), False, 'from flask import Flask, request, jsonify, make_response\n'), ((3509, 3534), 'flask.request.form.get', 'request.form.get', (['"""email"""'], {}), "('email')\n", (3525, 3534), False, 'from flask import Flask, request, jsonify, make_response\n'), ((3558, 3586), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (3574, 3586), False, 'from flask import Flask, request, jsonify, make_response\n'), ((3606, 3630), 'flask.request.form.get', 'request.form.get', (['"""name"""'], {}), "('name')\n", (3622, 3630), False, 'from flask import Flask, request, jsonify, make_response\n'), ((3868, 3905), 'flask.jsonify', 'jsonify', (['"""User updated successfully."""'], {}), "('User updated successfully.')\n", (3875, 3905), False, 'from flask import Flask, request, jsonify, make_response\n'), ((4503, 4540), 'flask.jsonify', 'jsonify', (['"""User deleted successfully."""'], {}), "('User deleted successfully.')\n", (4510, 4540), False, 'from flask import Flask, request, jsonify, make_response\n'), ((1106, 1134), 'flask.jsonify', 'jsonify', (["{'data': json_data}"], {}), "({'data': json_data})\n", (1113, 1134), False, 'from flask import Flask, request, jsonify, make_response\n'), ((1798, 1826), 'flask.jsonify', 'jsonify', (["{'data': json_data}"], {}), "({'data': json_data})\n", (1805, 1826), False, 'from flask import Flask, request, jsonify, 
make_response\n'), ((2701, 2731), 'flask.jsonify', 'jsonify', (['"""Failed to add user."""'], {}), "('Failed to add user.')\n", (2708, 2731), False, 'from flask import Flask, request, jsonify, make_response\n'), ((4020, 4053), 'flask.jsonify', 'jsonify', (['"""Failed to update user."""'], {}), "('Failed to update user.')\n", (4027, 4053), False, 'from flask import Flask, request, jsonify, make_response\n'), ((4655, 4688), 'flask.jsonify', 'jsonify', (['"""Failed to delete user."""'], {}), "('Failed to delete user.')\n", (4662, 4688), False, 'from flask import Flask, request, jsonify, make_response\n')]
|
import argparse
import yaml
from pathlib import Path
from . import name as package_name
from . import archive_org_repos
def _handle_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config-yaml',
help='Set the YAML file for the Github organization configuration',
dest='config',
default='config.yaml')
group = parser.add_mutually_exclusive_group()
group.add_argument('--skip',
help='List of repositories to skip',
dest='skip',
nargs='*',
type=str)
group.add_argument('--only',
help='List explictly the repo names in the org to archive',
dest='only',
nargs='*',
type=str)
parser.prog = package_name
return parser.parse_args()
def read_config(config_file : Path):
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
return config
def main():
args = _handle_args()
config = read_config(Path(args.config))
#Run the archiver
#TODO validate config options???
archive_org_repos(config['org'], config['token'], Path(config['destination']), config['repo_type'], args.skip, args.only)
if __name__ == '__main__':
main()
|
[
"yaml.load",
"argparse.ArgumentParser",
"pathlib.Path"
] |
[((154, 233), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (177, 233), False, 'import argparse\n'), ((1081, 1120), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (1090, 1120), False, 'import yaml\n'), ((1208, 1225), 'pathlib.Path', 'Path', (['args.config'], {}), '(args.config)\n', (1212, 1225), False, 'from pathlib import Path\n'), ((1341, 1368), 'pathlib.Path', 'Path', (["config['destination']"], {}), "(config['destination'])\n", (1345, 1368), False, 'from pathlib import Path\n')]
|
from mechanicalsoup.stateful_browser import _BrowserState
import bs4
import cssselect
import logging
import lxml.html
import mechanicalsoup
import re
import requests
log = logging.getLogger(__name__)
# requests offers no easy way to customize the response class (response_hook
# and copy everything over to a new instance, anyone?), but since we only want
# two simple helper methods, monkey patching them should be quite alright.
def css(self, selector):
xpath = cssselect.HTMLTranslator().css_to_xpath(selector)
return self.xpath(xpath)
def xpath(self, selector):
if not hasattr(self, 'parsed'):
self.parsed = lxml.html.document_fromstring(self.text)
return self.parsed.xpath(selector)
requests.models.Response.css = css
requests.models.Response.xpath = xpath
class Browser(mechanicalsoup.StatefulBrowser):
    """Wraps a requests.Session to add some helpful features.
    - instantiate with a base url, and then only use paths:
      `http = Browser('https://example.com'); http.get('/foo')`
      will request https://example.com/foo
    - can use call instead of get, because it's just that little bit shorter
      (`http('/foo')` instead of `http.get('/foo')`)
    - fill and submit forms, powered by mechanicalsoup
      (note that we override the "state" mechanics so beautifulsoup parsing
      is only performed when it's actually needed)
    """
    def __init__(self, baseurl=None, sso_url=None, *args, **kw):
        # Default to a session that logs request/response headers.
        self.baseurl = baseurl
        self.sso_url = sso_url
        kw.setdefault('session', HeaderPrintingSession())
        super().__init__(*args, **kw)
    @property
    def headers(self):
        # Convenience accessor for the session's default headers.
        return self.session.headers
    def get(self, *args, **kw):
        return self.request('get', *args, **kw)
    def __call__(self, *args, **kw):
        # `http('/foo')` is shorthand for `http.get('/foo')`.
        return self.get(*args, **kw)
    def open(self, url, *args, **kw):
        # Route StatefulBrowser.open() through our request() so baseurl
        # resolution and lazy state handling apply.
        return self.request('get', url, *args, **kw)
    def head(self, *args, **kw):
        # HEAD defaults to not following redirects.
        kw.setdefault('allow_redirects', False)
        return self.request('head', *args, **kw)
    def patch(self, *args, **kw):
        return self.request('patch', *args, **kw)
    def put(self, *args, **kw):
        return self.request('put', *args, **kw)
    def post(self, *args, **kw):
        return self.request('post', *args, **kw)
    def delete(self, *args, **kw):
        return self.request('delete', *args, **kw)
    def request(self, method, url, *args, **kw):
        """Perform the request, resolving absolute paths against `baseurl`
        and recording the response as the (lazily parsed) browser state."""
        if url.startswith('/') and self.baseurl:
            url = self.baseurl + url
        r = self.session.request(method, url, *args, **kw)
        # Taken from StatefulBrowser.open()
        self._StatefulBrowser__state = LazySoupBrowserState(
            r, self.soup_config, url=r.url, request=r.request)
        return r
    def submit(self, form=None, url=None, submit=None, **kw):
        # This combines StatefulBrowser.submit_selected() and Browser.submit()
        # and bases it all on self.request()
        if form is None:
            # No explicit form: use the currently selected one and the URL
            # of the page it came from.
            form = self.form
            url = self._StatefulBrowser__state.url
            self.form.choose_submit(submit)
        if isinstance(form, mechanicalsoup.Form):
            form = form.form
        return self.request(**self.get_request_kwargs(form, url, **kw))
    submit_selected = NotImplemented # Use our customized submit() instead
    def links(self, url_regex=None, link_text=None, exact_text=False,
              *args, **kw):
        """Enhanced to support contains instead of equals for link_text."""
        # NOTE(review): returns None when neither url_regex nor link_text is
        # given -- confirm callers always pass exactly one filter.
        links = self.page.find_all('a', href=True, *args, **kw)
        if url_regex is not None:
            return [a for a in links if re.search(url_regex, a['href'])]
        if link_text is not None:
            if exact_text:
                return [a for a in links if a.text == link_text]
            else:
                return [a for a in links if link_text in a.text]
    def sso_login(self, username, password, url=None):
        """Performs login on meine.zeit.de. Opens either the configured sso_url,
        or the given one (useful if e.g. it contains a return `?url` parameter)
        and fills in and submits the form.
        """
        if url is None:
            url = self.sso_url
        if url is None:
            raise ValueError('No url given and no sso_url configured')
        self.get(url)
        self.select_form()
        self.form['email'] = username
        self.form['pass'] = password
        return self.submit()
class LazySoupBrowserState(_BrowserState):
    """Only parse with beautifulsoup if a client wants to use features that
    need it (form filling, link selection)."""
    def __init__(self, response, soup_config, **kw):
        self.soup_config = soup_config
        self.response = response
        self._page = None  # parsed lazily on first `page` access
        super().__init__(**kw)
    @property
    def page(self):
        if self._page is None:
            # Taken from mechanicalsoup.Browser.add_soup()
            self._page = bs4.BeautifulSoup(
                self.response.content, **self.soup_config)
        return self._page
    @page.setter
    def page(self, value):
        # Swallow assignments from the base class so parsing stays lazy.
        pass
class HeaderPrintingSession(requests.Session):
    """Prints request+response headers, to help understanding test failures."""
    def request(self, method, url, *args, **kw):
        # Log the request line up front, then the full exchange afterwards.
        log.info('> %s %s', method.upper(), url)
        response = super().request(method, url, *args, **kw)
        # response.request is the final PreparedRequest (after redirects).
        request = response.request
        lines = ['< %s %s' % (request.method, request.url)]
        lines.extend(['> %s: %s' % x for x in request.headers.items()])
        lines.append('---')
        resp = {'Status': response.status_code}
        resp.update(response.headers)
        lines.extend(['< %s: %s' % x for x in resp.items()])
        log.info('\n'.join(lines))
        return response
|
[
"bs4.BeautifulSoup",
"cssselect.HTMLTranslator",
"re.search",
"logging.getLogger"
] |
[((174, 201), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (191, 201), False, 'import logging\n'), ((472, 498), 'cssselect.HTMLTranslator', 'cssselect.HTMLTranslator', ([], {}), '()\n', (496, 498), False, 'import cssselect\n'), ((4982, 5042), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['self.response.content'], {}), '(self.response.content, **self.soup_config)\n', (4999, 5042), False, 'import bs4\n'), ((3664, 3695), 're.search', 're.search', (['url_regex', "a['href']"], {}), "(url_regex, a['href'])\n", (3673, 3695), False, 'import re\n')]
|
from unittest import TestCase
from femtoweb import server
from femtoweb.server import (
CouldNotParse,
as_choice,
as_nonempty,
as_type,
get_file_path_content_type,
maybe_as,
with_default_as,
)
class Tester(TestCase):
    """Unit tests for the femtoweb.server request-parameter parsers."""

    def test_as_type_int(self):
        """as_type(int) parses integer strings, rejects everything else."""
        as_int = as_type(int)
        for a, b in (
            (None, CouldNotParse),
            ('', CouldNotParse),
            ('0', 0),
            ('1', 1),
            ('-1', -1),
            ('1.1', CouldNotParse),
            ('a', CouldNotParse),
        ):
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, as_int, a)
            else:
                self.assertEqual(as_int(a), b)

    def test_as_type_float(self):
        """as_type(float) parses float strings, rejects everything else."""
        # Renamed from the misleading `as_int`: this parser yields floats.
        as_float = as_type(float)
        for a, b in (
            (None, CouldNotParse),
            ('', CouldNotParse),
            ('0', 0.0),
            ('1', 1.0),
            ('-1', -1.0),
            ('1.1', 1.1),
            ('a', CouldNotParse),
        ):
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, as_float, a)
            else:
                self.assertEqual(as_float(a), b)

    def test_as_choice(self):
        """as_choice only accepts values from the declared set."""
        parser = as_choice('yes', 'no')
        for a, b in (
            (None, CouldNotParse),
            ('', CouldNotParse),
            ('0', CouldNotParse),
            ('1', CouldNotParse),
            ('-1', CouldNotParse),
            ('1.1', CouldNotParse),
            ('a', CouldNotParse),
            ('yes', 'yes'),
            ('no', 'no'),
        ):
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, parser, a)
            else:
                self.assertEqual(parser(a), b)

    def test_maybe_as(self):
        """maybe_as passes None through instead of raising."""
        parser = maybe_as(as_type(int))
        for a, b in (
            (None, None),
            ('', CouldNotParse),
            ('0', 0),
            ('1', 1),
            ('-1', -1),
            ('1.1', CouldNotParse),
            ('a', CouldNotParse),
        ):
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, parser, a)
            else:
                self.assertEqual(parser(a), b)

    def test_as_nonempty(self):
        """as_nonempty rejects None and the empty string."""
        parser = as_nonempty(as_type(str))
        for a, b in (
            (None, CouldNotParse),
            ('', CouldNotParse),
            ('0', '0'),
            ('1', '1'),
            ('-1', '-1'),
            ('1.1', '1.1'),
            ('a', 'a'),
        ):
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, parser, a)
            else:
                self.assertEqual(parser(a), b)

    def test_with_default_as(self):
        """with_default_as substitutes the default on any parse failure."""
        parser = with_default_as(as_type(int), 0)
        for a, b in (
            (None, 0),
            ('', 0),
            ('0', 0),
            ('1', 1),
            ('-1', -1),
            ('1.1', 0),
            ('a', 0),
        ):
            # The CouldNotParse branch is kept for symmetry with the other
            # tests; with_default_as never raises, so it is never taken.
            if b is CouldNotParse:
                self.assertRaises(CouldNotParse, parser, a)
            else:
                self.assertEqual(parser(a), b)

    def test_get_file_path_content_type(self):
        """File extensions map to the expected content types."""
        for a, b in (
            ('', server.APPLICATION_OCTET_STREAM),
            ('test', server.APPLICATION_OCTET_STREAM),
            ('test.unsupported', server.APPLICATION_OCTET_STREAM),
            ('test.js', server.APPLICATION_JAVASCRIPT),
            ('test.schema.json', server.APPLICATION_SCHEMA_JSON),
            ('test.json', server.APPLICATION_JSON),
            ('test.gif', server.IMAGE_GIF),
            ('test.jpeg', server.IMAGE_JPEG),
            ('test.jpg', server.IMAGE_JPEG),
            ('test.png', server.IMAGE_PNG),
            ('test.html', server.TEXT_HTML),
            ('test.py', server.APPLICATION_PYTHON),
            ('test.txt', server.TEXT_PLAIN),
        ):
            self.assertEqual(get_file_path_content_type(a), b)
|
[
"femtoweb.server.as_choice",
"femtoweb.server.get_file_path_content_type",
"femtoweb.server.as_type"
] |
[((298, 310), 'femtoweb.server.as_type', 'as_type', (['int'], {}), '(int)\n', (305, 310), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((795, 809), 'femtoweb.server.as_type', 'as_type', (['float'], {}), '(float)\n', (802, 809), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((1286, 1308), 'femtoweb.server.as_choice', 'as_choice', (['"""yes"""', '"""no"""'], {}), "('yes', 'no')\n", (1295, 1308), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((1894, 1906), 'femtoweb.server.as_type', 'as_type', (['int'], {}), '(int)\n', (1901, 1906), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((2393, 2405), 'femtoweb.server.as_type', 'as_type', (['str'], {}), '(str)\n', (2400, 2405), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((2897, 2909), 'femtoweb.server.as_type', 'as_type', (['int'], {}), '(int)\n', (2904, 2909), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n'), ((4132, 4161), 'femtoweb.server.get_file_path_content_type', 'get_file_path_content_type', (['a'], {}), '(a)\n', (4158, 4161), False, 'from femtoweb.server import CouldNotParse, as_choice, as_nonempty, as_type, get_file_path_content_type, maybe_as, with_default_as\n')]
|
from django import template
register = template.Library()  # filter registry for this template tag module
@register.filter()
def redact(text, case):
    """Template filter: delegate redaction of *text* to the case object."""
    return case.redact_obj(text)
@register.filter()
def elide(text, case):
    """Template filter: delegate elision of *text* to the case object."""
    return case.elide_obj(text)
|
[
"django.template.Library"
] |
[((41, 59), 'django.template.Library', 'template.Library', ([], {}), '()\n', (57, 59), False, 'from django import template\n')]
|
from unittest import TestCase
from messageDecode import MessageDecode
import time
import unittest
__author__ = '<NAME>'
# Sample chat message exercising a mention, an emoticon and a link.
inputstring = "Hi @Ramki how are you (smiles)http://www.cubrid.org/blog/dev-platform/understanding-jvm-internals/"
# Captured at import time; note the fixture in test_decode hard-codes a
# different, fixed message_time.
messageTime = time.ctime()
class TestMessageDecode(TestCase):
    """Unit tests for MessageDecode."""

    def setUp(self):
        self.inputstring = inputstring
        self.messageTime = messageTime

    def test_removeduplicates(self):
        # Placeholder: intentionally failing until implemented.
        self.fail()

    def test_decode(self):
        # NOTE(review): this only constructs MessageDecode and never feeds it
        # self.inputstring -- presumably a decode call is missing; confirm
        # the intended MessageDecode API.
        returned_json = MessageDecode()
        expected_json = {
            "emoticons": [
                "smiles"
            ],
            "links": [
                {
                    "id": 0,
                    "title": "Understanding JVM Internals | CUBRID Blog",
                    "url": "http://www.cubrid.org/blog/dev-platform/understanding-jvm-internals/"
                }
            ],
            "mentions": [
                "Ramki"
            ],
            "message_time": "Mon Mar 09 04:16:51 2015"
        }
        # Bug fix: assertEqual takes (first, second, msg); the original passed
        # the message string as the first operand, so the test compared a
        # string against a dict and could never pass. Also switched from the
        # deprecated assertEquals alias.
        self.assertEqual(expected_json, returned_json,
                         "Both expected and actual results are same")
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
[
"unittest.main",
"time.ctime",
"messageDecode.MessageDecode"
] |
[((251, 263), 'time.ctime', 'time.ctime', ([], {}), '()\n', (261, 263), False, 'import time\n'), ((1397, 1412), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1410, 1412), False, 'import unittest\n'), ((511, 526), 'messageDecode.MessageDecode', 'MessageDecode', ([], {}), '()\n', (524, 526), False, 'from messageDecode import MessageDecode\n')]
|
from discord import errors
from discord.ext import commands
import requests
import json
from modules.Search import queryAnime, queryChar
from utils.helpers import quick_embed
class ani(commands.Cog):
    """Anime and character lookup commands backed by the AniList GraphQL API."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        print("Python Bot is now online")

    @commands.command()
    async def ping(self, ctx):
        """Report the bot's websocket latency in milliseconds."""
        # Bug fix: the cog stores the bot as `self.bot`; `self.client` does
        # not exist and raised AttributeError.
        await ctx.send(f'Pong! **{round(self.bot.latency *1000)}ms**')

    @commands.command()
    @commands.cooldown(2, 5, commands.BucketType.user)
    async def aniSearch(self,ctx, *, animeName):
        """Look up an anime by name on AniList and post an embed for it."""
        query = await queryAnime()
        variables = {
            'name': animeName
        }
        url = 'https://graphql.anilist.co'
        # Make the HTTP Api request
        response = requests.post(url, json={'query': query, 'variables': variables})
        response = json.loads(response.text)
        try:
            # AniList's cover color is a '#rrggbb' string; convert to an int.
            color = int(hex(int(str(response['data']['Media']['coverImage']['color']).replace("#", ""), 16)), 0)
        except ValueError:
            print('color error has been caught')
            color = 16777215  # fall back to white
        try:
            banner = response['data']['Media']['bannerImage']
            if(banner is None):
                raise Exception
        except Exception:
            print("banner is unavailable")
            banner = 'https://i.pinimg.com/originals/05/0f/0a/050f0a3bd19ce811522f0c63256637e9.jpg'
        embedDescription = "**Average Score: "+ str(response['data']['Media']['averageScore'])+ "\nSeason: "+ str(response['data']['Media']['season']).capitalize()+" "+str(response['data']['Media']['seasonYear']) + "\nPopularity: "+ str(response['data']['Media']['popularity']) +"\nStatus: "+ str(response['data']['Media']['status']).capitalize() + "\nGenres: " + str(response['data']['Media']['genres'])[1:-1].replace("'","") + "**\n"+ "\n"+(response['data']['Media']['description']).replace('<br>','').replace('<b>','').replace('</b>','')
        await quick_embed(
            ctx,
            title = str(response['data']['Media']['title']['english']) +" ("+ str(response['data']['Media']['title']['native']) +")",
            description= embedDescription,
            color= color,
            thumbnail = str(response['data']['Media']['coverImage']['extraLarge']),
            image_url = banner,
        )

    @commands.command()
    @commands.cooldown(2, 5, commands.BucketType.user)
    async def charSearch(self, ctx,*, charName):
        """Look up a character by name on AniList and post an embed for it."""
        query = await queryChar()
        variables = {
            'search': charName,
        }
        url = 'https://graphql.anilist.co'
        response = requests.post(url, json={'query': query, 'variables': variables})
        response = json.loads(response.text)
        embedDescription = "Age: "+ str(response['data']['Character']['age']) +"\n"+ str(response['data']['Character']['description'])
        bannerImage = response['data']['Character']['media']['edges'][0]['node']['bannerImage']
        await quick_embed(
            ctx,
            description= embedDescription,
            title = str(response['data']['Character']['name']['full']),
            thumbnail = str(response['data']['Character']['image']['large']),
            image_url = bannerImage,
        )
def setup(bot):
    """discord.py extension entry point: register the ani cog."""
    bot.add_cog(ani(bot))
|
[
"discord.ext.commands.command",
"json.loads",
"discord.ext.commands.Cog.listener",
"discord.ext.commands.cooldown",
"modules.Search.queryChar",
"requests.post",
"modules.Search.queryAnime"
] |
[((262, 285), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (283, 285), False, 'from discord.ext import commands\n'), ((365, 383), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (381, 383), False, 'from discord.ext import commands\n'), ((496, 514), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (512, 514), False, 'from discord.ext import commands\n'), ((520, 569), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(2)', '(5)', 'commands.BucketType.user'], {}), '(2, 5, commands.BucketType.user)\n', (537, 569), False, 'from discord.ext import commands\n'), ((2430, 2448), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2446, 2448), False, 'from discord.ext import commands\n'), ((2454, 2503), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(2)', '(5)', 'commands.BucketType.user'], {}), '(2, 5, commands.BucketType.user)\n', (2471, 2503), False, 'from discord.ext import commands\n'), ((837, 902), 'requests.post', 'requests.post', (['url'], {'json': "{'query': query, 'variables': variables}"}), "(url, json={'query': query, 'variables': variables})\n", (850, 902), False, 'import requests\n'), ((922, 947), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (932, 947), False, 'import json\n'), ((2710, 2775), 'requests.post', 'requests.post', (['url'], {'json': "{'query': query, 'variables': variables}"}), "(url, json={'query': query, 'variables': variables})\n", (2723, 2775), False, 'import requests\n'), ((2795, 2820), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2805, 2820), False, 'import json\n'), ((641, 653), 'modules.Search.queryAnime', 'queryAnime', ([], {}), '()\n', (651, 653), False, 'from modules.Search import queryAnime, queryChar\n'), ((2576, 2587), 'modules.Search.queryChar', 'queryChar', ([], {}), '()\n', (2585, 2587), False, 'from modules.Search import queryAnime, queryChar\n')]
|
#@<> Setup
# NOTE: testutil, EXPECT_EQ, dba, mysql and the __mysql_sandbox_* /
# __sandbox_uri* variables are globals injected by the MySQL Shell test
# harness; the `#@<>` comments are its test-chunk markers.
testutil.deploy_sandbox(__mysql_sandbox_port1, "root")
#@<> Setup cluster
import mysqlsh
mydba = mysqlsh.connect_dba(__sandbox_uri1)
cluster = mydba.create_cluster("mycluster")
cluster.disconnect()
#@<> Catch error through mysqlsh.Error
try:
    mydba.get_cluster("badcluster")
    testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.Error as e:
    # 51101 is the expected shell error code here -- presumably "cluster
    # does not exist"; confirm against the shell error catalogue.
    EXPECT_EQ(51101, e.code)
except:
    testutil.fail("<red>Function threw wrong exception</red>")
#@<> dba.session
mydba.session.run_sql("select 1")
#@<> DbError should be a subclass of Error
try:
    mydba.session.run_sql("badquery")
    testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.DBError as e:
    EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR, e.code)
except:
    testutil.fail("<red>Function threw wrong exception</red>")
# The same failure must also be catchable via the base class mysqlsh.Error.
try:
    mydba.session.run_sql("badquery")
    testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.Error as e:
    EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR, e.code)
except:
    testutil.fail("<red>Function threw wrong exception</red>")
#@<> Check for __qualname__ and __name__ in wrapped methods
EXPECT_EQ("Testutils.deploy_sandbox", testutil.deploy_sandbox.__qualname__)
EXPECT_EQ("Dba.create_cluster", dba.create_cluster.__qualname__)
EXPECT_EQ("deploy_sandbox", testutil.deploy_sandbox.__name__)
EXPECT_EQ("create_cluster", dba.create_cluster.__name__)
#@<> check that isatty exists (checking the return value depends on how the tests are ran)
sys.stdout.isatty()
sys.stdin.isatty()
sys.stderr.isatty()
#@<> Cleanup
mydba.session.close()
testutil.destroy_sandbox(__mysql_sandbox_port1)
|
[
"mysqlsh.connect_dba"
] |
[((112, 147), 'mysqlsh.connect_dba', 'mysqlsh.connect_dba', (['__sandbox_uri1'], {}), '(__sandbox_uri1)\n', (131, 147), False, 'import mysqlsh\n')]
|
import docspec
import pytest
@pytest.fixture
def module() -> docspec.Module:
    # Minimal untyped module fixture: module `a` containing class `foo`
    # with one data member and an `__init__` taking only `self`.
    module = docspec.Module('a', None, None, [
      docspec.Class('foo', None, docspec.Docstring('This is class foo.', None), None, None, None, [
        docspec.Data('val', None, None, 'int', '42'),
        docspec.Function('__init__', None, None, None, [
          docspec.Argument('self', docspec.Argument.Type.POSITIONAL, None, None, None)
        ], None, None),
      ]),
    ])
    module.sync_hierarchy()  # wire up parent links between the nodes
    return module
@pytest.fixture
def typed_module() -> docspec.Module:
    # Same shape as the `module` fixture, but every node carries a source
    # Location and the data member uses a typing-style annotation resolved
    # through an Indirection entry for typing.Union.
    module = docspec.Module('a', docspec.Location('test.py', 0), None, [
      docspec.Indirection('Union', docspec.Location('test.py', 1), None, 'typing.Union'),
      docspec.Class('foo', docspec.Location('test.py', 2), docspec.Docstring('This is class foo.', docspec.Location('test.py', 3)), None, None, None, [
        docspec.Data('val', docspec.Location('test.py', 4), None, 'Union[int, float]', '42'),
        docspec.Function('__init__', docspec.Location('test.py', 5), None, None, [
          docspec.Argument('self', docspec.Argument.Type.POSITIONAL, None, None, None)
        ], None, None),
      ]),
    ])
    module.sync_hierarchy()  # wire up parent links between the nodes
    return module
|
[
"docspec.Docstring",
"docspec.Argument",
"docspec.Location",
"docspec.Data"
] |
[((579, 609), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(0)'], {}), "('test.py', 0)\n", (595, 609), False, 'import docspec\n'), ((156, 201), 'docspec.Docstring', 'docspec.Docstring', (['"""This is class foo."""', 'None'], {}), "('This is class foo.', None)\n", (173, 201), False, 'import docspec\n'), ((652, 682), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(1)'], {}), "('test.py', 1)\n", (668, 682), False, 'import docspec\n'), ((732, 762), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(2)'], {}), "('test.py', 2)\n", (748, 762), False, 'import docspec\n'), ((229, 273), 'docspec.Data', 'docspec.Data', (['"""val"""', 'None', 'None', '"""int"""', '"""42"""'], {}), "('val', None, None, 'int', '42')\n", (241, 273), False, 'import docspec\n'), ((804, 834), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(3)'], {}), "('test.py', 3)\n", (820, 834), False, 'import docspec\n'), ((883, 913), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(4)'], {}), "('test.py', 4)\n", (899, 913), False, 'import docspec\n'), ((984, 1014), 'docspec.Location', 'docspec.Location', (['"""test.py"""', '(5)'], {}), "('test.py', 5)\n", (1000, 1014), False, 'import docspec\n'), ((338, 414), 'docspec.Argument', 'docspec.Argument', (['"""self"""', 'docspec.Argument.Type.POSITIONAL', 'None', 'None', 'None'], {}), "('self', docspec.Argument.Type.POSITIONAL, None, None, None)\n", (354, 414), False, 'import docspec\n'), ((1038, 1114), 'docspec.Argument', 'docspec.Argument', (['"""self"""', 'docspec.Argument.Type.POSITIONAL', 'None', 'None', 'None'], {}), "('self', docspec.Argument.Type.POSITIONAL, None, None, None)\n", (1054, 1114), False, 'import docspec\n')]
|
from pathlib import Path
from sphinx_rtd_theme import setup as base_setup
from ._version_git import __version__
# See https://www.sphinx-doc.org/en/master/development/theming.html
# #distribute-your-theme-as-a-python-package
def setup(app):
    """Sphinx extension entry point: register this package as an HTML theme.

    Delegates to sphinx_rtd_theme's setup so the parent theme is also
    registered, and returns its metadata dict.
    """
    # Register the theme that can be referenced without adding a theme path
    app.add_html_theme(
        # NOTE(review): add_html_theme documents a str path; a pathlib.Path
        # appears to be accepted -- confirm with the minimum Sphinx version.
        "sphinx_rtd_theme_github_versions", Path(__file__).absolute().parent
    )
    return base_setup(app)
__all__ = ["setup", "__version__"]
|
[
"pathlib.Path",
"sphinx_rtd_theme.setup"
] |
[((447, 462), 'sphinx_rtd_theme.setup', 'base_setup', (['app'], {}), '(app)\n', (457, 462), True, 'from sphinx_rtd_theme import setup as base_setup\n'), ((397, 411), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (401, 411), False, 'from pathlib import Path\n')]
|
import socket
import time
import logging
import uuid
import random
from db.models import WeatherStation as WeatherStationModel, MetricType as MetricTypeModel, Metric as MetricModel
from db.create_db import session
from datetime import datetime
# Verbose log line: level, timestamp, logger name, function and line number.
LOG_FORMAT = ('%(levelname) -5s %(asctime)s %(name) -5s %(funcName) -5s %(lineno) -5d: %(message)s')
LOG_LEVEL = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=LOG_LEVEL)
class WeatherStation(object):
    """Simulated weather station that periodically writes random metric
    readings to the database through the shared SQLAlchemy session."""
    def __init__(self):
        self.interval = 5  # seconds between metric batches
        # Per-station baselines; generated readings fluctuate around these.
        self.base_temperature = random.uniform(15.0, 20.0)
        self.base_humidity = random.uniform(45.0, 75.0)
        self.model = WeatherStationModel()
        self.model.name = 'ws_%s' % socket.gethostname()
        # Deterministic id derived from the hostname, so re-running on the
        # same host maps to the same station row.
        self.model.id = str(uuid.uuid5(uuid.NAMESPACE_DNS, self.model.name))
        if not self._exists():
            self.model.deleted = 0
            self.model.is_sent = 0
            self.model.latitude = float("{0:.4f}".format(random.uniform(51.0, 54.0)))
            self.model.longitude = float("{0:.4f}".format(random.uniform(15.0, 23.0)))
            self.model.metric_types = self._available_metric_types()
            logging.debug('WS does not exists. Creating new: %s @ (%s N, %s E)' % (self.model.name,
                                                                                   self.model.latitude,
                                                                                   self.model.longitude))
            session.merge(self.model)
            session.commit()
        else:
            logging.debug('WS already exists. Doing nothing.')
            # Re-attach to the persisted row instead of the fresh instance.
            self.model = session.query(WeatherStationModel).filter_by(id=self.model.id).one()
    def _exists(self):
        # True when a station row with this id is already persisted.
        weather_stations = session.query(WeatherStationModel).filter_by(id=self.model.id).all()
        return len(weather_stations) == 1
    def run(self):
        """Generate metric batches forever until interrupted (Ctrl-C)."""
        try:
            while True:
                self._generate_metrics_data()
                time.sleep(self.interval)
        except KeyboardInterrupt as keyboard_interrupt:
            logging.info('Stopped.')
    def __str__(self):
        return str(self.model.__dict__)
    def _available_metric_types(self):
        # All metric types currently defined in the database.
        return session.query(MetricTypeModel).all()
    def _generate_metrics_data(self):
        """Create one new Metric row per known metric type and commit."""
        logging.info('Added new metrics [%s]:' % len(self.model.metric_types))
        for metric_type in self.model.metric_types:
            new_metric = MetricModel()
            new_metric.metric_type = metric_type
            new_metric.metric_type_id = metric_type.id
            new_metric.id = str(uuid.uuid4())
            new_metric.is_sent = 0
            # Temperature/Humidity fluctuate around the station baselines;
            # everything else is uniform over the type's declared range.
            if metric_type.name == 'Temperature':
                new_metric.value = float("{0:.2f}".format(random.uniform(self.base_temperature-5,
                                                                          self.base_temperature+5)))
            elif metric_type.name == 'Humidity':
                new_metric.value = float("{0:.2f}".format(random.uniform(self.base_humidity-10,
                                                                          self.base_humidity+10)))
            else:
                new_metric.value = random.uniform(metric_type.min_value, metric_type.max_value)
            new_metric.weather_station = self.model
            new_metric.weather_station_id = self.model.id
            new_metric.timestamp = datetime.utcnow().replace(microsecond=0)
            session.add(new_metric)
            logging.debug('\t%s :\t%s %s' % (str(metric_type.name), new_metric.value, metric_type.unit))
        session.commit()
if __name__ == '__main__':
    # Create (or re-attach to) this host's station and emit metrics forever.
    ws = WeatherStation()
    ws.run()
|
[
"uuid.uuid4",
"logging.debug",
"logging.basicConfig",
"random.uniform",
"db.models.WeatherStation",
"time.sleep",
"db.create_db.session.add",
"socket.gethostname",
"logging.info",
"db.create_db.session.commit",
"db.models.Metric",
"uuid.uuid5",
"db.create_db.session.merge",
"datetime.datetime.utcnow",
"db.create_db.session.query"
] |
[((373, 428), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'LOG_FORMAT', 'level': 'LOG_LEVEL'}), '(format=LOG_FORMAT, level=LOG_LEVEL)\n', (392, 428), False, 'import logging\n'), ((544, 570), 'random.uniform', 'random.uniform', (['(15.0)', '(20.0)'], {}), '(15.0, 20.0)\n', (558, 570), False, 'import random\n'), ((600, 626), 'random.uniform', 'random.uniform', (['(45.0)', '(75.0)'], {}), '(45.0, 75.0)\n', (614, 626), False, 'import random\n'), ((648, 669), 'db.models.WeatherStation', 'WeatherStationModel', ([], {}), '()\n', (667, 669), True, 'from db.models import WeatherStation as WeatherStationModel, MetricType as MetricTypeModel, Metric as MetricModel\n'), ((3585, 3601), 'db.create_db.session.commit', 'session.commit', ([], {}), '()\n', (3599, 3601), False, 'from db.create_db import session\n'), ((706, 726), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (724, 726), False, 'import socket\n'), ((755, 802), 'uuid.uuid5', 'uuid.uuid5', (['uuid.NAMESPACE_DNS', 'self.model.name'], {}), '(uuid.NAMESPACE_DNS, self.model.name)\n', (765, 802), False, 'import uuid\n'), ((1159, 1295), 'logging.debug', 'logging.debug', (["('WS does not exists. Creating new: %s @ (%s N, %s E)' % (self.model.name,\n self.model.latitude, self.model.longitude))"], {}), "('WS does not exists. Creating new: %s @ (%s N, %s E)' % (self\n .model.name, self.model.latitude, self.model.longitude))\n", (1172, 1295), False, 'import logging\n'), ((1469, 1494), 'db.create_db.session.merge', 'session.merge', (['self.model'], {}), '(self.model)\n', (1482, 1494), False, 'from db.create_db import session\n'), ((1507, 1523), 'db.create_db.session.commit', 'session.commit', ([], {}), '()\n', (1521, 1523), False, 'from db.create_db import session\n'), ((1550, 1600), 'logging.debug', 'logging.debug', (['"""WS already exists. Doing nothing."""'], {}), "('WS already exists. 
Doing nothing.')\n", (1563, 1600), False, 'import logging\n'), ((2446, 2459), 'db.models.Metric', 'MetricModel', ([], {}), '()\n', (2457, 2459), True, 'from db.models import WeatherStation as WeatherStationModel, MetricType as MetricTypeModel, Metric as MetricModel\n'), ((3448, 3471), 'db.create_db.session.add', 'session.add', (['new_metric'], {}), '(new_metric)\n', (3459, 3471), False, 'from db.create_db import session\n'), ((1976, 2001), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (1986, 2001), False, 'import time\n'), ((2070, 2094), 'logging.info', 'logging.info', (['"""Stopped."""'], {}), "('Stopped.')\n", (2082, 2094), False, 'import logging\n'), ((2214, 2244), 'db.create_db.session.query', 'session.query', (['MetricTypeModel'], {}), '(MetricTypeModel)\n', (2227, 2244), False, 'from db.create_db import session\n'), ((2596, 2608), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2606, 2608), False, 'import uuid\n'), ((962, 988), 'random.uniform', 'random.uniform', (['(51.0)', '(54.0)'], {}), '(51.0, 54.0)\n', (976, 988), False, 'import random\n'), ((1049, 1075), 'random.uniform', 'random.uniform', (['(15.0)', '(23.0)'], {}), '(15.0, 23.0)\n', (1063, 1075), False, 'import random\n'), ((3189, 3249), 'random.uniform', 'random.uniform', (['metric_type.min_value', 'metric_type.max_value'], {}), '(metric_type.min_value, metric_type.max_value)\n', (3203, 3249), False, 'import random\n'), ((3395, 3412), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3410, 3412), False, 'from datetime import datetime\n'), ((1746, 1780), 'db.create_db.session.query', 'session.query', (['WeatherStationModel'], {}), '(WeatherStationModel)\n', (1759, 1780), False, 'from db.create_db import session\n'), ((2753, 2821), 'random.uniform', 'random.uniform', (['(self.base_temperature - 5)', '(self.base_temperature + 5)'], {}), '(self.base_temperature - 5, self.base_temperature + 5)\n', (2767, 2821), False, 'import random\n'), ((1626, 1660), 
'db.create_db.session.query', 'session.query', (['WeatherStationModel'], {}), '(WeatherStationModel)\n', (1639, 1660), False, 'from db.create_db import session\n'), ((3000, 3064), 'random.uniform', 'random.uniform', (['(self.base_humidity - 10)', '(self.base_humidity + 10)'], {}), '(self.base_humidity - 10, self.base_humidity + 10)\n', (3014, 3064), False, 'import random\n')]
|
from flask import Flask
import json
import os
import datetime
import sys
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask import render_template
from flask import request
sys.path.append('.')
from config import Config
app = Flask(__name__)
app.config.from_object(Config)  # load settings from the local Config class
# Known edge-net hosts mapped to their coordinates (as strings, longitude
# first). Many entries share -97.822/37.751 -- presumably a geolocation
# fallback near the US centroid; verify before relying on those positions.
hosts = {"bbn.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"berkeley-gdp.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"case.edge-net.io":{"lon":"-81.525800", "lat":"41.602500"},
"cenic-2.edge-net.io":{"lon":"-122.636000", "lat":"38.957600"},
"cenic.edge-net.io":{"lon":"-122.636000", "lat":"38.957600"},
"clemson.edge-net.io":{"lon":"-82.837400", "lat":"34.683400"},
"cornell-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"cornell.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"edgenet.planet-lab.eu":{"lon":"2.338700", "lat":"48.858200"},
"edgenet1.planet-lab.eu":{"lon":"2.328100", "lat":"48.860700"},
"edgenet2.planet-lab.eu":{"lon":"2.328100", "lat":"48.860700"},
"gatech-2.edge-net.io":{"lon":"-84.397300", "lat":"33.774600"},
"gatech.edge-net.io":{"lon":"-84.397300", "lat":"33.774600"},
"gpeni-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"gpeni.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"hawaii-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"hawaii.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"ilabt.edge-net.io":{"lon":"4.000000", "lat":"50.833300"},
"illinois-2.edge-net.io":{"lon":"-88.206200", "lat":"40.104700"},
"illinois.edge-net.io":{"lon":"-88.206200", "lat":"40.104700"},
"iu-2.edge-net.io":{"lon":"-86.469200", "lat":"39.230300"},
"iu.edge-net.io":{"lon":"-86.469200", "lat":"39.230300"},
"kettering.edge-net.io":{"lon":"-83.749800", "lat":"43.057300"},
"louisiana.edge-net.io":{"lon":"-91.188600", "lat":"30.403000"},
"metrodatacenter-2.edge-net.io":{"lon":"-83.113100", "lat":"40.110400"},
"metrodatacenter.edge-net.io":{"lon":"-83.113100", "lat":"40.110400"},
"missouri-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"missouri.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"naist.edge-net.io":{"lon":"135.833300", "lat":"34.683300"},
"northwestern.edge-net.io":{"lon":"-87.684200", "lat":"42.059800"},
"nps.edge-net.io":{"lon":"-121.793500", "lat":"36.621700"},
"nysernet-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"nysernet.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"nyu.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"osu.edge-net.io":{"lon":"-82.755300", "lat":"39.907200"},
"princeton-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"princeton.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"sox-2.edge-net.io":{"lon":"-84.397300", "lat":"33.774600"},
"sox.edge-net.io":{"lon":"-84.397300", "lat":"33.774600"},
"stanford-2.edge-net.io":{"lon":"-122.163900", "lat":"37.423000"},
"stanford.edge-net.io":{"lon":"-122.163900", "lat":"37.423000"},
"uchicago-2.edge-net.io":{"lon":"-87.604600", "lat":"41.782100"},
"uchicago.edge-net.io":{"lon":"-87.604600", "lat":"41.782100"},
"ucla-2.edge-net.io":{"lon":"-118.441400", "lat":"34.064800"},
"ucla.edge-net.io":{"lon":"-118.441400", "lat":"34.064800"},
"ucsd-2.edge-net.io":{"lon":"-117.276700", "lat":"32.848700"},
"ucsd.edge-net.io":{"lon":"-117.276700", "lat":"32.848700"},
"uky-2.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"uky-3.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"uky.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"umich.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
"umkc-2.edge-net.io":{"lon":"-94.573700", "lat":"39.038300"},
"umkc.edge-net.io":{"lon":"-94.573700", "lat":"39.038300"},
"utdallas.edge-net.io":{"lon":"-96.777600", "lat":"32.767300"},
"uvm.edge-net.io":{"lon":"-73.082500", "lat":"44.442100"},
"vcu.edge-net.io":{"lon":"-97.822000", "lat":"37.751000"},
}
class DownloadForm(FlaskForm):
nickname = StringField('Nickname', validators=[DataRequired()])
submit = SubmitField('Get Yaml!')
hellos = []
def make_output_strings(hello_array):
hello_strings = ['from host %s, user %s at %s' % (hello["hostname"], hello["username"], hello["timestamp"]) for hello in hello_array]
return '\n'.join(hello_strings)
@app.route('/get_yaml')
def get_yaml():
# form = DownloadForm()
return render_template('download.html', title='Get YAML!')
anonymous_user = 0
@app.route('/download')
def download():
form = DownloadForm()
nickname = request.args['nickname']
if (nickname == None):
nickname = 'anonymous%d' % anonymous_user
anonymous_user = anonymous_user + 1
return render_template('deploy.yaml', nickname=nickname)
@app.route('/')
def hello_word():
return 'Hello, World'
@app.route('/hello/<hostname>/<username>')
def hello_hostname(hostname, username):
hellos.append({"hostname": hostname, "username": username, "timestamp": datetime.datetime.now().isoformat()})
return 'hello %s at %s' % (username, hostname)
@app.route('/clear')
def clear():
hellos = []
return 'Hellos cleared'
@app.route('/show_hellos')
def show_hellos():
return make_output_strings(hellos)
@app.route('/user_hellos/<username>')
def user_hellos(username):
return make_output_strings([hello for hello in hellos if hello["username"] == username])
@app.route('/site_hellos/<sitename>')
def site_hellos(sitename):
return make_output_strings([hello for hello in hellos if hello["hostname"] == sitename])
@app.route('/get_hellos')
def get_hellos():
result = []
for hello in hellos:
host = hello['hostname']
if host in hosts:
record = hosts[host]
result.append({'hostname': host, 'username': hello['username'], 'lat': record['lat'], 'lng': record['lon'], 'timestamp': hello['timestamp']})
return json.dumps(result)
def valid_line(line):
transfer = line.find('->') >= 0
address_valid = line.find('*') < 0
return transfer and address_valid
@app.route('/get_traceroute/<container>/<address>')
def get_traceroute(container, address):
traceroute_result = os.popen("./paris_traceroute.sh %s %s" % (container, address)).read()
lines = traceroute_result.split('\n')
valid_lines = [line for line in lines if valid_line(line)]
bodies = [line.split(':')[1] for line in valid_lines]
addresses = [body.split('->')[0].strip() for body in bodies]
return json.dumps(addresses)
|
[
"sys.path.append",
"flask.Flask",
"os.popen",
"json.dumps",
"datetime.datetime.now",
"wtforms.SubmitField",
"flask.render_template",
"wtforms.validators.DataRequired"
] |
[((254, 274), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (269, 274), False, 'import sys\n'), ((309, 324), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'from flask import Flask\n'), ((4010, 4034), 'wtforms.SubmitField', 'SubmitField', (['"""Get Yaml!"""'], {}), "('Get Yaml!')\n", (4021, 4034), False, 'from wtforms import StringField, SubmitField\n'), ((4341, 4392), 'flask.render_template', 'render_template', (['"""download.html"""'], {'title': '"""Get YAML!"""'}), "('download.html', title='Get YAML!')\n", (4356, 4392), False, 'from flask import render_template\n'), ((4652, 4701), 'flask.render_template', 'render_template', (['"""deploy.yaml"""'], {'nickname': 'nickname'}), "('deploy.yaml', nickname=nickname)\n", (4667, 4701), False, 'from flask import render_template\n'), ((5853, 5871), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (5863, 5871), False, 'import json\n'), ((6436, 6457), 'json.dumps', 'json.dumps', (['addresses'], {}), '(addresses)\n', (6446, 6457), False, 'import json\n'), ((6127, 6189), 'os.popen', 'os.popen', (["('./paris_traceroute.sh %s %s' % (container, address))"], {}), "('./paris_traceroute.sh %s %s' % (container, address))\n", (6135, 6189), False, 'import os\n'), ((3980, 3994), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3992, 3994), False, 'from wtforms.validators import DataRequired\n'), ((4933, 4956), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4954, 4956), False, 'import datetime\n')]
|
import copy
import torch
import torch.nn as nn
from Result import Result
class Dlg:
def __init__(self, setting):
self.setting = setting
self.defenses = setting.defenses
self.criterion = nn.CrossEntropyLoss().to(setting.device)
self.gradient = None
self.dummy_data = None
self.dummy_label = None
self.seperated_gradients = []
def victim_side(self):
para = self.setting.parameter
para["orig_data"] = [None]*para["local_iterations"]
para["orig_label"] = [None]*para["local_iterations"]
self.seperated_gradients = []
# calculate orig gradients
for i in range(para["local_iterations"]):
para["orig_data"][i], para["orig_label"][i] = \
self.setting.dataloader.get_batch(para["dataset"], para["targets"], para["batch_size"])
para["orig_data"][i] = para["orig_data"][i].to(self.setting.device)
para["orig_label"][i] = para["orig_label"][i].to(self.setting.device)
orig_out = self.setting.model(para["orig_data"][i])
y = self.criterion(orig_out, para["orig_label"][i])
grad = torch.autograd.grad(y, self.setting.model.parameters())
# local train iteration
if self.setting.parameter["local_training"]:
self.setting.train(1, [para["orig_data"][i], para["orig_label"][i]], victim=True)
self.seperated_gradients.append(list((_.detach().clone() for _ in grad)))
# Copy the structure of a grad, but make it zeroes
aggregated = list(x.zero_() for x in grad)
# iterate over the gradients for each local iteration
for grad in self.seperated_gradients:
# there iterate through the gradients and add to the aggregator
for i_g,g in enumerate(grad):
aggregated[i_g] = torch.add(aggregated[i_g], g)
self.defenses.apply(aggregated, para["num_users"]-1)
self.gradient = list(torch.div(x, 1) for x in aggregated)
if para["differential_privacy"] or para["compression"]:
self.defenses.inject(self.seperated_gradients, aggregated, self.setting.model)
def reconstruct(self):
# abbreviations
parameter = self.setting.parameter
device = self.setting.device
model = self.setting.model
setting = self.setting
self.dummy_data = torch.randn(
(parameter["batch_size"]*parameter["local_iterations"], setting.parameter["channel"], setting.parameter["shape_img"][0],
setting.parameter["shape_img"][1])).to(
setting.device).requires_grad_(True)
self.dummy_label = torch.randn((parameter["batch_size"]*parameter["local_iterations"], setting.parameter["num_classes"])).to(
setting.device).requires_grad_(True)
self.dummy_pred = None
# optimizer setup
if parameter["version"].lower() == "dlg":
optimizer = torch.optim.LBFGS([self.dummy_data, self.dummy_label], lr=parameter["dlg_lr"])
else:
optimizer = torch.optim.LBFGS([self.dummy_data, ], lr=parameter["dlg_lr"])
# predict label of dummy gradient
pred = torch.Tensor(self.setting.predictor.prediction).long().to(device).reshape(
(parameter["batch_size"]*parameter["local_iterations"],)).requires_grad_(False)
# Prepare Result Object
res = Result(self.setting)
for iteration in range(parameter["dlg_iterations"]):
# clears gradients, computes loss, returns loss
def closure():
optimizer.zero_grad()
self.dummy_pred = model(self.dummy_data)
if parameter["version"].lower() == "dlg":
dummy_loss = - torch.mean(
torch.sum(torch.softmax(self.dummy_label, -1) * torch.log(torch.softmax(self.dummy_pred, -1)),
dim=-1))
else:
dummy_loss = self.criterion(self.dummy_pred, pred)
dummy_gradient = torch.autograd.grad(dummy_loss, model.parameters(), create_graph=True)
grad_diff = torch.Tensor([0]).to(device)
for gx, gy in zip(dummy_gradient, self.gradient):
grad_diff += ((gx - gy) ** 2).sum()
grad_diff.backward()
res.add_loss(grad_diff.item())
return grad_diff
optimizer.step(closure)
if iteration % parameter["log_interval"] == 0:
res.add_snapshot(self.dummy_data.cpu().detach().numpy())
if self.setting.parameter["version"].lower() == "dlg":
self.setting.predictor.prediction = [self.dummy_label[x].argmax().item() for x in range(parameter["batch_size"]*parameter["local_iterations"])]
self.setting.predictor.update_accuracy()
#res.update_figures(),
self.setting.result = res
|
[
"torch.add",
"torch.nn.CrossEntropyLoss",
"torch.randn",
"torch.softmax",
"Result.Result",
"torch.Tensor",
"torch.div",
"torch.optim.LBFGS"
] |
[((3452, 3472), 'Result.Result', 'Result', (['self.setting'], {}), '(self.setting)\n', (3458, 3472), False, 'from Result import Result\n'), ((2985, 3063), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[self.dummy_data, self.dummy_label]'], {'lr': "parameter['dlg_lr']"}), "([self.dummy_data, self.dummy_label], lr=parameter['dlg_lr'])\n", (3002, 3063), False, 'import torch\n'), ((3102, 3162), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[self.dummy_data]'], {'lr': "parameter['dlg_lr']"}), "([self.dummy_data], lr=parameter['dlg_lr'])\n", (3119, 3162), False, 'import torch\n'), ((216, 237), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (235, 237), True, 'import torch.nn as nn\n'), ((1881, 1910), 'torch.add', 'torch.add', (['aggregated[i_g]', 'g'], {}), '(aggregated[i_g], g)\n', (1890, 1910), False, 'import torch\n'), ((2003, 2018), 'torch.div', 'torch.div', (['x', '(1)'], {}), '(x, 1)\n', (2012, 2018), False, 'import torch\n'), ((2422, 2600), 'torch.randn', 'torch.randn', (["(parameter['batch_size'] * parameter['local_iterations'], setting.parameter\n ['channel'], setting.parameter['shape_img'][0], setting.parameter[\n 'shape_img'][1])"], {}), "((parameter['batch_size'] * parameter['local_iterations'],\n setting.parameter['channel'], setting.parameter['shape_img'][0],\n setting.parameter['shape_img'][1]))\n", (2433, 2600), False, 'import torch\n'), ((2697, 2805), 'torch.randn', 'torch.randn', (["(parameter['batch_size'] * parameter['local_iterations'], setting.parameter\n ['num_classes'])"], {}), "((parameter['batch_size'] * parameter['local_iterations'],\n setting.parameter['num_classes']))\n", (2708, 2805), False, 'import torch\n'), ((4212, 4229), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (4224, 4229), False, 'import torch\n'), ((3857, 3892), 'torch.softmax', 'torch.softmax', (['self.dummy_label', '(-1)'], {}), '(self.dummy_label, -1)\n', (3870, 3892), False, 'import torch\n'), ((3905, 3939), 'torch.softmax', 'torch.softmax', 
(['self.dummy_pred', '(-1)'], {}), '(self.dummy_pred, -1)\n', (3918, 3939), False, 'import torch\n'), ((3230, 3277), 'torch.Tensor', 'torch.Tensor', (['self.setting.predictor.prediction'], {}), '(self.setting.predictor.prediction)\n', (3242, 3277), False, 'import torch\n')]
|
# Generated by Django 3.1.12 on 2021-06-25 12:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0017_merge_20210624_1913'),
]
operations = [
migrations.DeleteModel(
name='SectionRecommendation',
),
]
|
[
"django.db.migrations.DeleteModel"
] |
[((226, 278), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SectionRecommendation"""'}), "(name='SectionRecommendation')\n", (248, 278), False, 'from django.db import migrations\n')]
|
"""Implementation of delayed impact lending simulator based on Liu et al.
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018, July). Delayed
Impact of Fair Machine Learning. In International Conference on Machine
Learning. (https://arxiv.org/abs/1803.04383)
"""
import copy
import dataclasses
import os
from typing import Callable
import whynot as wn
import whynot.traceable_numpy as np
from whynot.dynamics import BaseConfig, BaseState, BaseIntervention
from whynot.simulators.delayed_impact.fico import get_data_args as get_FICO_data
#################################
# Globally accessible FICO params
#################################
DATAPATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
INV_CDFS, LOAN_REPAY_PROBS, _, GROUP_SIZE_RATIO, _, _ = get_FICO_data(DATAPATH)
def default_credit_scorer(score):
"""Report the underlying score without modification."""
return score
@dataclasses.dataclass
class Config(BaseConfig):
# pylint: disable-msg=too-few-public-methods
"""Parameters for the simulation dynamics."""
#: Maps the true credit score to the reported score
credit_scorer: Callable = default_credit_scorer
#: Lending threshold for group 0
threshold_g0: float = 650
#: Lending threshold for group 1
threshold_g1: float = 650
#: Bank repayment utility
repayment_utility: float = 1.0
#: Bank default utility
default_utility: float = -4.0
#: Applicant's score change after repayment
repayment_score_change: float = 75
#: Applican't score change after default
default_score_change: float = -150
#: Minimum credit score
min_score: int = 350
max_score: int = 800
#: Simulation start step (in rounds)
start_time: float = 0
#: Simulation end step (in rounds)
end_time: float = 1
#: Simulator step size (Unused)
delta_t: float = 1
@dataclasses.dataclass
class State(BaseState):
# pylint: disable-msg=too-few-public-methods
"""State of the lending simulator."""
#: Group membership (sensitive attribute) 0 or 1
group: int = 0
#: Agent credit score
credit_score: int = 700
#: Running total of the banks profit/loss for the agent
profits: float = 0
class Intervention(BaseIntervention):
# pylint: disable-msg=too-few-public-methods
"""Parameterization of an intervention in the lending model.
Examples
--------
>>> # Change the group 0 threshold to 700
>>> Intervention(time=0, threshold_g0=700)
"""
def __init__(self, time=100, **kwargs):
"""Specify an intervention in the dynamical system.
Parameters
----------
time: int
Time of the intervention (days)
kwargs: dict
Only valid keyword arguments are parameters of Config.
"""
super(Intervention, self).__init__(Config, time, **kwargs)
def lending_policy(config, group, score):
"""Determine whether or not a bank gives a loan."""
# P(T = 1 | X, A=j) = 1 if X >= tau_j
# 0 otherwise.
return (score >= config.threshold_g0) ** (1 - group) * (
score >= config.threshold_g1
) ** group
def determine_repayment(rng, group, score):
"""Determine whether or not the agent repays the loan."""
repayment_rate = (
LOAN_REPAY_PROBS[0](score) ** (1 - group) * LOAN_REPAY_PROBS[1](score) ** group
)
# Sample a Bernoulli with the Gumbel-max trick to permit causal graph
# tracing
uniform = rng.uniform()
return (
np.log(repayment_rate / (1.0 - repayment_rate))
+ np.log(uniform / (1.0 - uniform))
) > 0.0
def update_score(config, score, loan_approved, repaid):
"""Update the agent's credit score after a lending interaction."""
score_change = (
config.repayment_score_change ** repaid ** loan_approved
* config.default_score_change ** (1 - repaid) ** loan_approved
* 0.0 ** (1 - loan_approved)
)
new_score = score + score_change
return np.minimum(np.maximum(new_score, config.min_score), config.max_score)
def update_profits(config, profits, loan_approved, repaid):
"""Update the running total bank profit for the individual."""
profit_change = (
config.repayment_utility ** repaid ** loan_approved
* config.default_utility ** (1 - repaid) ** loan_approved
* 0.0 ** (1 - loan_approved)
)
return profits + profit_change
def dynamics(state, time, config, intervention=None, rng=None):
"""Update equations for the lending simulaton.
Performs one round of interaction between the agent (represented
by the state) and the bank.
Parameters
----------
state: whynot.simulators.delayed_impact.State
Agent state at the beginning of the interaction.
time: int
Current round of interaction
config: whynot.simulators.delayed_impact.Config
Configuration object controlling the interaction, e.g. lending
threshold and credit scoring
intervention: whynot.simulators.delayed_impact.Intervention
Intervention object specifying when and how to update the dynamics.
rng: np.RandomState
Seed random number generator for all randomness (optional)
Returns
-------
state: whynot.simulators.delayed_impact.State
Agent state after one lending interaction.
"""
if intervention and time >= intervention.time:
config = config.update(intervention)
if rng is None:
rng = np.random.RandomState(None)
group, score, individual_profits = state
# Credit bureau measures the agent's score
measured_score = config.credit_scorer(score)
# Bank decides whether or not to extend the user a loan
loan_approved = lending_policy(config, group, measured_score)
# The user (potentially) repays the loan
repaid = determine_repayment(rng, group, score)
# The credit score updates in response
new_score = update_score(config, score, loan_approved, repaid)
new_profits = update_profits(config, individual_profits, loan_approved, repaid)
return group, new_score, new_profits
def simulate(initial_state, config, intervention=None, seed=None):
"""Simulate a run of the lending simulator.
The simulation starts at initial_state, representing an agent before
interacting with the lending institution. The simulator evolves the agent
state through (repeated) interaction between the agent and the lender. The
dynamics encapsulate how the lending decisions and policies effect both the
agent and the lender's profit. The parameters of the dynamics, e.g. the
lending thresholds or the repayment model, are specified in the Config.
Parameters
----------
initial_state: `whynot.simulators.delayed_impact.State`
Initial State object, which is used as x_{t_0} for the simulator.
config: `whynot.simulators.delayed_impact.Config`
Config object that encapsulates the parameters that define the dynamics.
intervention: `whynot.simulators.delayed_impact.Intervention`
Intervention object that specifies what, if any, intervention to perform.
seed: int
Seed to set internal randomness.
Returns
-------
run: `whynot.dynamics.Run`
Rollout of the model.
"""
rng = np.random.RandomState(seed)
# Iterate the discrete dynamics
times = [config.start_time]
states = [initial_state]
state = copy.deepcopy(initial_state)
for step in range(config.start_time, config.end_time):
next_state = dynamics(state.values(), step, config, intervention, rng)
state = State(*next_state)
states.append(state)
times.append(step + 1)
return wn.dynamics.Run(states=states, times=times)
if __name__ == "__main__":
print(simulate(State(), Config(end_time=20)))
|
[
"copy.deepcopy",
"whynot.traceable_numpy.maximum",
"whynot.dynamics.Run",
"os.path.realpath",
"whynot.traceable_numpy.random.RandomState",
"whynot.simulators.delayed_impact.fico.get_data_args",
"whynot.traceable_numpy.log"
] |
[((776, 799), 'whynot.simulators.delayed_impact.fico.get_data_args', 'get_FICO_data', (['DATAPATH'], {}), '(DATAPATH)\n', (789, 799), True, 'from whynot.simulators.delayed_impact.fico import get_data_args as get_FICO_data\n'), ((7453, 7480), 'whynot.traceable_numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7474, 7480), True, 'import whynot.traceable_numpy as np\n'), ((7591, 7619), 'copy.deepcopy', 'copy.deepcopy', (['initial_state'], {}), '(initial_state)\n', (7604, 7619), False, 'import copy\n'), ((7865, 7908), 'whynot.dynamics.Run', 'wn.dynamics.Run', ([], {'states': 'states', 'times': 'times'}), '(states=states, times=times)\n', (7880, 7908), True, 'import whynot as wn\n'), ((683, 709), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (699, 709), False, 'import os\n'), ((4057, 4096), 'whynot.traceable_numpy.maximum', 'np.maximum', (['new_score', 'config.min_score'], {}), '(new_score, config.min_score)\n', (4067, 4096), True, 'import whynot.traceable_numpy as np\n'), ((5588, 5615), 'whynot.traceable_numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (5609, 5615), True, 'import whynot.traceable_numpy as np\n'), ((3565, 3612), 'whynot.traceable_numpy.log', 'np.log', (['(repayment_rate / (1.0 - repayment_rate))'], {}), '(repayment_rate / (1.0 - repayment_rate))\n', (3571, 3612), True, 'import whynot.traceable_numpy as np\n'), ((3623, 3656), 'whynot.traceable_numpy.log', 'np.log', (['(uniform / (1.0 - uniform))'], {}), '(uniform / (1.0 - uniform))\n', (3629, 3656), True, 'import whynot.traceable_numpy as np\n')]
|
from maya import cmds, mel
import wave, struct
import os.path, math, array, time
from cmath import exp,pi
class WavReader:
"""
This class is the responsible of managing the way the wav files open and their information
"""
def __init__(self, filePath):
# Save the path to the file
self.fileName = filePath
# Open the wav file in read mode
self.waveFile = wave.open(filePath, "r")
# Get the file's information
self.frameRate = self.waveFile.getframerate() # The samples per second
self.nFrames = self.waveFile.getnframes() # The total amount of samples
self.volume = 2**(8*self.waveFile.getsampwidth()-1) - 1 # The max volume is equal to the max value that the sample can have
self.volume /= 1.0 # Conver to float
self.sizes = {1: 'B', 2: 'h', 3: 'i', 4: 'i'} # Different formatting depending on the amount of bytes
self.channels = self.waveFile.getnchannels() # The number of channels (mono / stereo)
self.fmt_size = self.sizes[self.waveFile.getsampwidth()] # The actual format size of the file using the bytes
self.fmt = "<" + self.fmt_size * self.channels # Build the format to use with struct unpack
def sampleFrequency(self, rate, actualTime, bands=7):
"""
This function samples the frequency of the file at a specific time
"""
# Calculate the positions where we want to sample
samples = 1.0 * self.frameRate / rate # The amount of samples per frame
startSample = samples * actualTime
endSample = startSample + samples
# Exit the function the endsample is greater than the last sample of the file
if endSample > self.nFrames:
return []
# Get the range of samples
soundWave = self.sampleRange(int(startSample), int(endSample))
# Get only the first channel
soundWave = [v[0] for v in soundWave]
# The amount of samples that we got
values_count = len(soundWave)
# Calculate the power of two that fits the amount of samples
log = math.log(values_count, 2)
# Get that power of two
finalSamples = 2 ** int(math.floor(log))
# Calculate the fourier transform of only the first -power of two- samples
spectrum = self.fft(soundWave[:finalSamples])
# Get only the real part of complex numbers
realSpectrum = [abs(v.real) for v in spectrum]
# Calculate linear spacing between the frequencies gotten
bandSample = 1.0 * finalSamples / (bands + 1)
# Get the values separated by the band spacing
sampledSpectrum = [realSpectrum[frame] for frame in xrange(0, len(realSpectrum), int(bandSample))]
# If the final result has less values than the bands desired, we return the last item on the spectrum
if len(sampledSpectrum) - 2 < bands:
sampledSpectrum.append(realSpectrum[-1])
# Return the middle values as first and last are (by experimenting) really high
return sampledSpectrum[1:-1]
def sampleRange(self, startFrame, endFrame):
"""
Returns the samples in the file between a certain range
"""
# Set the position of the "marker" in the file
self.waveFile.setpos(startFrame)
# Calculate the amount of samples that we want
samples = endFrame - startFrame
soundArray = []
# If the file's ampWidth is 3, means it is 24 bits
if self.waveFile.getsampwidth() == 3:
s = ''
for k in xrange(samples):
# Read a frame
fr = self.waveFile.readframes(1)
for c in xrange(0,3*self.channels,3):
s += '\0'+fr[c:(c+3)] # put TRAILING 0 to make 32-bit (file is little-endian)
# Unpack the resulting string
unpstr = '<{0}{1}'.format(samples*self.channels, 'i')
x = struct.unpack(unpstr, s)
# Move the value to get positives and negatives
x = [k >> 8 for k in x]
# Convert the result to a tuple to fit the formating of the other lists
result = tuple([value/self.volume for value in x])
# Make an array to finish the formatting
soundArray = [result[i:i + self.channels] for i in xrange(0, len(result), self.channels)]
return soundArray
# If the file is not 24 bits
for f in xrange(samples):
# Read a frame and unpack
frameValue = struct.unpack(self.fmt, self.waveFile.readframes(1))
# Convert to tuple
result = tuple([value/self.volume for value in frameValue])
# Append to the result array
soundArray.append(result)
return soundArray
def sampleStepped(self, rate):
"""
This function looks for samples in the sound that fit the spacing between frames
"""
# Move the file's "marker" to the beginning
self.waveFile.rewind()
soundArray = []
stepPerFrame = self.frameRate/rate
totalFrames = int(self.nFrames/stepPerFrame) + 1
if self.waveFile.getsampwidth() == 3:
s = ''
for sample in xrange(0, self.nFrames, stepPerFrame):
self.waveFile.setpos(sample)
fr = self.waveFile.readframes(1)
for c in xrange(0,3*self.channels,3):
s += '\0'+fr[c:(c+3)] # put TRAILING 0 to make 32-bit (file is little-endian)
unpstr = '<{0}{1}'.format(totalFrames*self.channels, 'i')
x = struct.unpack(unpstr, s)
x = [sample >> 8 for sample in x]
result = tuple([value/self.volume for value in x])
newArray = [result[i:i + self.channels] for i in xrange(0, len(result), self.channels)]
soundArray = newArray
return soundArray
for sample in xrange(0, self.nFrames, stepPerFrame):
self.waveFile.setpos(sample)
frameValue = struct.unpack(self.fmt, self.waveFile.readframes(1))
result = tuple([value/self.volume for value in frameValue])
soundArray.append(result)
return soundArray
def fft(self, values):
"""
Fast Fourier Transform algorithm
"""
values_count = len(values) # The amount of data that we will process
# We can only use a power of two amount so we can divide the list
if math.log(values_count, 2) % 1 > 0:
raise ValueError('values count must be a power of 2, "{}" given.'.format(values_count))
# E^x (this is the fourier's formula)
t = exp(-2 * pi * 1j / values_count)
# If there is more than one value on the list given
if values_count > 1:
# Recursively calculate the fourier transform
# First calculate FFT of even numbers, then go to odd numbers
# Append them together
values = self.fft(values[::2]) + self.fft(values[1::2])
# For every value in half of the values amount
for k in range(values_count // 2):
# Get the value
k_value = values[k]
# Apply the formula for Discrete Fourier Transform
# Calculate the value for k
values[k] = k_value + t ** k * values[k + values_count // 2]
# Calculate the value for the index in the other half
values[k + values_count // 2] = k_value - t ** k * values[k + values_count // 2]
return values
class MainUI():
"""
This class manages the UI creation and its functionality
"""
def __init__(self):
self.readersList = [] # List of the wav reader objects created
self.reader = None # The current wav reader being used
self.audioNode = "" # The audio node in the Maya scene
self.playBackSlider = mel.eval('$tmpVar=$gPlayBackSlider') # Maya's playback slider (to add sound on it)
self.valueMultiplier = 1 # The multiplier being applied to the wave values
self.analyzerMethod = "WaveForm" # The method to analyze the wav file
self.bandAmount = 4 # The amount of bands to divide the frequencies on spectrum mode
self.selectedBand = 1 # The selected band to animate the object
self.graph = "" # The UI component representing the graphs
self.mainLayout = "" # The layout that will keep the graphs
self.spectrumLayout = "" # UI elements to modify how to analyze with spectrum methos
# Create the window
self.MakeWin()
def MakeWin(self):
"""
Builds and shows the window
"""
windowName = "mainUI"
if cmds.window(windowName, query=True, exists=True):
cmds.deleteUI(windowName)
# Creating window
self.window = cmds.window(windowName, title="Music Animator", width=510)
# Creatin main layout that will contain everything
cmds.columnLayout(columnOffset=("both", 5))
allowedAreas = ['right', 'left']
if cmds.dockControl("MusicAnimator", query=True, exists=True):
cmds.deleteUI("MusicAnimator")
cmds.dockControl("MusicAnimator", area='right', content=windowName, allowedArea=allowedAreas )
# Space for the user to select the wav file
cmds.separator(height=10, style="none")
fileNameField = cmds.textFieldButtonGrp(label="Select wav file", buttonLabel="...", buttonCommand=lambda: self.OpenFile(fileNameField))
# Button for Applying the audio (this creates an audio node)
# and adds it to the timeline
cmds.separator(height=5, style="none")
cmds.rowLayout(numberOfColumns=2, adjustableColumn=1)
cmds.separator(width=410, style="none")
cmds.button(label="Apply audio", width = 80, command=lambda x: self.ApplyAudio(fileNameField, tracksMenu))
cmds.setParent("..")
# Track chooser (in case more than one audio is created)
cmds.separator(height=10, style="none")
tracksMenu = cmds.optionMenu(label="Select audio track: ", width=500, changeCommand=lambda x: self.ChangeTrack(tracksMenu, x))
# Creating spaces for adding objects and attributes
cmds.separator(height=10, style="none")
cmds.rowColumnLayout(numberOfColumns=5, columnWidth=[(1,100),(2,150),(3,10),(4,100),(5,150)])
cmds.button(label="Add Object(s)", command=lambda x: self.AddObj(OBJSelect))
OBJSelect=cmds.textScrollList(allowMultiSelection=True, selectCommand= lambda: self.selectObjectsOnScene(OBJSelect))
cmds.separator(width=10, style="none")
cmds.button(label="Add Attributes", command=lambda x: self.AddAttr(OBJSelect, AttrSelect))
AttrSelect=cmds.textScrollList(allowMultiSelection=True)
cmds.setParent("..")
# Button for resetting the scrollList
cmds.separator(height=5, style="none")
cmds.button(label="Reset Lists", width=100, command= lambda x: self.ResetScrollLists(OBJSelect, AttrSelect))
cmds.separator(height=10, style="none")
# Create option for audio methods
cmds.separator(height=5, style="none")
cmds.optionMenu(label="Select analyzing method: ", width=500, changeCommand=lambda x: self.ChangeAnalizer(x))
cmds.menuItem(label="WaveForm", annotation="Use the entire shape of the wave for animation")
cmds.menuItem(label="Spectrum", annotation="Use the state of the frequencies on each frame")
# Create Spectrum options
self.spectrumLayout = cmds.columnLayout()
cmds.separator(height=5, style="none")
cmds.intSliderGrp(label="Band Amount", value=3, minValue=4, maxValue=20, field=True,
annotation = "The amount of divisions in the frequencies",
statusBarMessage = "The amount of divisions in the frequencies",
changeCommand= lambda x: self.ChangeBandAmount(x, BandSelector))
cmds.separator(height=5, style="none")
BandSelector = cmds.intSliderGrp(label="Selected Band", value=3, minValue=1, maxValue=4, field=True,
annotation="The specific division to use",
statusBarMessage="The specific division to use",
changeCommand= lambda x: self.ChangeSelectedBand(x))
cmds.layout(self.spectrumLayout, edit=True, enable=False)
cmds.setParent("..")
# Creating multiplier for values
cmds.separator(height=5, style="none")
cmds.intSliderGrp(label="Value Multiplier", min=1, max=100, value=1,field=True, changeCommand= self.setMultiplier)
# Creating buttons for preview or animation
cmds.separator(height=5, style="none")
cmds.rowLayout(numberOfColumns=3, columnWidth=[(1,250),(2,10),(3,250)])
cmds.button(label="Preview", width=250, command=lambda x: self.PreviewAnim(OBJSelect, AttrSelect))
cmds.separator(width=10, style="none")
cmds.button(label="Animate", width=250, command=lambda x: self.SetKeys(OBJSelect, AttrSelect))
cmds.setParent("..")
# Separation for experiments
cmds.separator(height=5, style="none")
self.mainLayout = cmds.frameLayout(label="Analizing functions", labelIndent=1, width=510, collapsable=True, collapse=True,marginHeight=5)
cmds.text(label="Draw the shape of the wave:")
cmds.button(label="Draw Waveform", command= self.drawGraph)
cmds.separator(height=5, style="none")
cmds.text(label="Draw the spectrum of the frequencies on the current frame.")
cmds.text(label="Change the analyzing method above to modify the number of divisions.")
cmds.button(label="Draw Spectrum", command= self.drawSpectrum)
#cmds.showWindow(windowName)
def selectObjectsOnScene(self, ObjScroll, *args):
"""
This function selects the objects in the scene that the user pick on the scrollList
"""
currentItems = cmds.textScrollList(ObjScroll, query=True, selectUniqueTagItem=True)
cmds.select(currentItems, replace=True)
def ResetScrollLists(self, ObjScroll, AttrScroll, *args):
"""
Resets the lists of objects and attributes
"""
cmds.textScrollList(ObjScroll, edit=True, removeAll=True)
cmds.textScrollList(AttrScroll, edit=True, removeAll=True)
def ChangeAnalizer(self, method):
"""
Change how to analyze the wav file
"""
self.analyzerMethod = method
if method == "Spectrum":
# Enable the spectrum layout
cmds.layout(self.spectrumLayout, edit=True, enable=True)
else:
# Disable the spectrum layout
cmds.layout(self.spectrumLayout, edit=True, enable=False)
def ChangeBandAmount(self, amount, BandSelector, *args):
"""
Updates the amount of bands to analyze with spectrum mode
"""
self.bandAmount = amount
# Get the current value of selected band
currentValue = cmds.intSliderGrp(BandSelector, query=True, value=True)
# If the selected band is greater than the total amount of bands,
# change that value to the new maximum
if currentValue > amount:
cmds.intSliderGrp(BandSelector, edit=True, value=amount)
# Modify band selector to reflect the new MaxValue
cmds.intSliderGrp(BandSelector, edit=True, maxValue=amount)
def ChangeSelectedBand(self, band, *args):
"""
Updates the selected band used to animate objects
"""
self.selectedBand = band
def OpenFile(self, theTextField, *args):
"""
Opens a dialog to let the user select a wav file on their computer
"""
waveFile = cmds.fileDialog2(caption="Select wav file", fileFilter="*.wav", fileMode=1)
cmds.textFieldButtonGrp(theTextField, edit=True, text=waveFile[0])
def ApplyAudio(self, fileNameField, tracksMenu, *args):
"""
Applies the selected audio to the timeline and creates a wav reader
"""
# Get path from text field
audioPath = cmds.textFieldButtonGrp(fileNameField, query=True, text=True)
# Create new reader
newReader = WavReader(audioPath)
# Get base name from the path and remove the
songName = os.path.basename(audioPath).split('.')[0]
# Create audio node with the song name
audioNode = cmds.createNode("audio", name=songName)
# Add the song to the optionMenu
cmds.setParent(tracksMenu, menu=True)
cmds.menuItem(label=audioNode)
numberOfItems = cmds.optionMenu(tracksMenu, query=True, numberOfItems=True)
cmds.optionMenu(tracksMenu, edit=True, select=numberOfItems)
# Append reader to the reader list and make it the selected reader
self.readersList.append(newReader)
self.reader = self.readersList[-1]
# Put file name on audio node
cmds.setAttr("{}.filename".format(audioNode), audioPath, type="string")
# Put music on playBackSlider
cmds.timeControl(self.playBackSlider, edit=True, sound=audioNode, displaySound=True)
def ChangeTrack(self, tracksMenu, selectedTrack):
"""
Changes from one audio node to another
"""
# Set selected track as the audio in the playBackSlider
cmds.timeControl(self.playBackSlider, edit=True, sound=selectedTrack, displaySound=True)
# Get the index of this track
numberOfItems = cmds.optionMenu(tracksMenu, query=True, select=True)
# Set the wav reader
self.reader = self.readersList[numberOfItems-1]
def AddObj(self, scrollList, *args):
"""
Adds an object to the textScrollList
"""
# Get list of selected onbjects
selectedObjects = cmds.ls(selection=True, objectsOnly=True)
if not selectedObjects:
cmds.warning("Please select an object in the scene")
return
# Get current items on the scroll list
currentItems = cmds.textScrollList(scrollList, query=True, allItems=True)
itemsToAdd = selectedObjects
# If there are items selected on the scroll, only add those that were not previously added
if currentItems:
itemsToAdd = [item for item in selectedObjects if not item in currentItems]
cmds.textScrollList(scrollList, edit=True, append=itemsToAdd, uniqueTag=itemsToAdd)
def AddAttr(self, ObjScroll, scrollList, *args):
"""
Add attributes from the objects
"""
selectedObjects = cmds.textScrollList(ObjScroll, query=True, selectUniqueTagItem=True)
if not selectedObjects:
cmds.warning("Please add select an object from the Object Scroll List")
return
# Restart the attr scrollList
cmds.textScrollList(scrollList, edit=True, removeAll=True)
# Get the attrs in the first object
attrList = cmds.listAttr(selectedObjects[0], keyable=True, scalar=True)
resultList = []
# Avoid inserting the visibility attribute
if "visibility" in attrList:
attrList.remove("visibility")
# Loop trrough the rest of objects
for obj in selectedObjects:
objectAttrs = cmds.listAttr(obj, keyable=True, scalar=True)
# Add only those attributes that are in both list, this makes that only shared attributes are shown
attrList = [attr for attr in objectAttrs if attr in attrList]
# Append to scroll list
cmds.textScrollList(scrollList, edit=True, append=attrList, uniqueTag=attrList)
    def PreviewAnim(self, ObjScroll, AttrScroll, *args):
        """
        Play the animation live in the viewport WITHOUT setting keyframes,
        so the user can preview how the selected attributes react to the
        audio. All attributes are restored to their original values at the
        end.

        NOTE(review): relies on Python 2 semantics (xrange, integer
        division) — targets Maya's legacy Python 2 interpreter.
        """
        if not self.reader:
            cmds.warning("Please apply an audio first")
            return
        # Get lists of objects and attrs
        objList = cmds.textScrollList(ObjScroll, query=True, selectUniqueTagItem=True)
        attrList = cmds.textScrollList(AttrScroll, query=True, selectUniqueTagItem=True)
        if not objList:
            cmds.warning("Please select at least one object in the Object scroll list")
            return
        if not attrList:
            cmds.warning("Please select at least one attribute in the Attribute scroll list")
            return
        endFrame = int(cmds.playbackOptions(query=True, maxTime=True)) # The final frame on the timeslider
        frameRate = mel.eval('currentTimeUnitToFPS()') # The fps of the scene
        soundValues = []
        # Dictionary containing the original attribute names.
        # Maps "obj.attr" -> original value (filled in below); the
        # x/len(objList) index is Python 2 integer division, so every
        # object/attribute pairing is produced exactly once.
        originalAttributes = {(objList*len(attrList))[x] + "." + attrList[x/len(objList)]:0 for x in xrange(len(objList)*len(attrList))}
        # Get their values
        for key in originalAttributes.keys():
            originalAttributes[key] = cmds.getAttr(key)
        # If user selects the waveform
        if self.analyzerMethod == "WaveForm":
            # Analyzes only by frames
            soundValues = self.reader.sampleStepped(int(frameRate))
            for frame in xrange(endFrame):
                # Break the loop if the music is finished
                if frame >= len(soundValues):
                    break
                for obj in objList:
                    for attr in attrList:
                        # Move the time one frame
                        cmds.currentTime(frame+1)
                        # Get orinal attr's value
                        originalValue = originalAttributes[obj+"."+attr]
                        # Set the new value
                        cmds.setAttr(obj+"."+attr,soundValues[frame][0] * self.valueMultiplier + originalValue)
                # Waits a little so the user can visualize the animation
                time.sleep(.5/frameRate)
        # Using spectrum mode
        else:
            for frame in xrange(endFrame):
                # Sample the frequencies on this frame
                soundValues = self.reader.sampleFrequency(frameRate, frame, self.bandAmount)
                # Break the loop if the music is finished
                if not soundValues:
                    break
                for obj in objList:
                    for attr in attrList:
                        # Move the time one frame
                        cmds.currentTime(frame+1)
                        # Get orinal attr's value
                        originalValue = originalAttributes[obj+"."+attr]
                        # Set the new value
                        cmds.setAttr(obj+"."+attr, soundValues[self.selectedBand-1] * self.valueMultiplier + originalValue)
                # Waits a little so the user can visualize the animation
                time.sleep(.5/frameRate)
        # Return to start of the time
        cmds.currentTime(1)
        # Return values to their original
        for key in originalAttributes.keys():
            cmds.setAttr(key, originalAttributes[key])
    def SetKeys(self, ObjScroll, AttrScroll, *args):
        """
        Bake the audio-driven animation: for every selected object/attribute
        pair, set the attribute value from the audio samples and record a
        keyframe on every frame of the timeline.

        NOTE(review): relies on Python 2 semantics (xrange, integer
        division) — targets Maya's legacy Python 2 interpreter.
        """
        if not self.reader:
            cmds.warning("Please apply an audio first")
            return
        objList = cmds.textScrollList(ObjScroll, query=True, selectUniqueTagItem=True)
        attrList = cmds.textScrollList(AttrScroll, query=True, selectUniqueTagItem=True)
        if not objList:
            cmds.warning("Please select at least one object in the Object scroll list")
            return
        if not attrList:
            cmds.warning("Please select at least one attribute in the Attribute scroll list")
            return
        endFrame = int(cmds.playbackOptions(query=True, maxTime=True))
        frameRate = mel.eval('currentTimeUnitToFPS()')
        # NOTE(review): secondsAmount is computed but never used below.
        secondsAmount = int(math.ceil(1.0*endFrame/frameRate))
        soundValues = []
        # Dictionary containing the original attribute names.
        # Maps "obj.attr" -> original value; x/len(objList) is Python 2
        # integer division, producing every object/attribute pairing once.
        originalAttributes = {(objList*len(attrList))[x] + "." + attrList[x/len(objList)]:0 for x in xrange(len(objList)*len(attrList))}
        # Get their values
        for key in originalAttributes.keys():
            originalAttributes[key] = cmds.getAttr(key)
        if self.analyzerMethod == "WaveForm":
            # Waveform mode: one sample per frame of the whole clip.
            soundValues = self.reader.sampleStepped(int(frameRate))
            for frame in xrange(endFrame):
                # Stop when the music runs out before the timeline does.
                if frame >= len(soundValues):
                    break
                for obj in objList:
                    for attr in attrList:
                        cmds.currentTime(frame+1)
                        originalValue = originalAttributes[obj+"."+attr]
                        cmds.setAttr(obj+"."+attr, soundValues[frame][0] * self.valueMultiplier + originalValue)
                        cmds.setKeyframe(obj+"."+attr)
        else:
            # Spectrum mode: sample the frequency bands frame by frame.
            for frame in xrange(endFrame):
                soundValues = self.reader.sampleFrequency(frameRate, frame, self.bandAmount)
                # Stop when the music runs out before the timeline does.
                if not soundValues:
                    break
                for obj in objList:
                    for attr in attrList:
                        cmds.currentTime(frame+1)
                        originalValue = originalAttributes[obj+"."+attr]
                        cmds.setAttr(obj+"."+attr, soundValues[self.selectedBand-1] * self.valueMultiplier + originalValue)
                        cmds.setKeyframe(obj+"."+attr)
def setMultiplier(self, multiplier):
"""
Sets the valueMultiplier
"""
self.valueMultiplier = multiplier
    def drawSpectrum(self, *args):
        """
        Draw the frequency spectrum of the current frame as a falloff curve
        inside the "Analizing functions" frame of the UI.
        """
        if not self.reader:
            cmds.warning("Please apply an audio first")
            return
        frameRate = mel.eval('currentTimeUnitToFPS()')
        values = self.reader.sampleFrequency(frameRate, cmds.currentTime(query=True), self.bandAmount)
        if not values:
            cmds.warning("End of file reached")
            return
        # Normalize value from 0 to 1 using the max value.
        # NOTE(review): raises ZeroDivisionError when max(values) == 0
        # (pure silence) — confirm whether sampleFrequency can return that.
        norm = [float(i*self.valueMultiplier)/(max(values)*self.valueMultiplier) for i in values]
        #norm = [float(i)/sum(values) for i in values]
        # Build "x,y," pairs for the falloff curve; 1.0*x forces float
        # division under Python 2.
        # NOTE(review): at x == 0 this reads norm[-1] (wraps to the last
        # band) — possibly intended to close the curve; verify.
        curvePoints = ["{},{},".format(1.0*x/self.bandAmount, norm[x-1]) for x in xrange(self.bandAmount+1)]
        curveString = ""
        curveString = curveString.join(curvePoints)
        # Drop the trailing comma left by the last "x,y," pair.
        curveString = curveString[:-1]
        # Replace the previous graph widget with a fresh falloff curve.
        cmds.setParent(self.mainLayout)
        cmds.deleteUI(self.graph)
        self.graph = cmds.frameLayout(height=200, width=500, labelVisible=False)
        cmds.falloffCurve(asString=curveString)
def drawGraph(self, *args):
"""
This function draws the soundWave on the UI
"""
if not self.reader:
cmds.warning("Please apply an audio first")
return
if cmds.objExists("AudioVisHelper"):
cmds.delete("AudioVisHelper")
visualizer = cmds.polySphere(name="AudioVisHelper")[0]
cmds.setAttr(visualizer + ".visibility", 0)
frameRate = mel.eval('currentTimeUnitToFPS()')
values = self.reader.sampleStepped(int(frameRate))
for f in xrange(len(values)):
cmds.currentTime(f)
cmds.setAttr("{}.translateY".format(visualizer), values[int(f)][0])
cmds.setKeyframe("{}.translateY".format(visualizer))
cmds.setParent(self.mainLayout)
cmds.deleteUI(self.graph)
self.graph = cmds.frameLayout(height=200, width=500, labelVisible=False)
cmds.animCurveEditor(autoFit=True, displayKeys=False, displayNormalized=True)
cmds.setParent("..")
# Build and show the Music Animator UI as soon as this script is sourced.
theUI = MainUI()
|
[
"maya.cmds.timeControl",
"maya.cmds.deleteUI",
"maya.cmds.textFieldButtonGrp",
"maya.cmds.layout",
"maya.cmds.button",
"maya.cmds.createNode",
"maya.cmds.text",
"maya.cmds.intSliderGrp",
"maya.cmds.optionMenu",
"maya.cmds.menuItem",
"maya.cmds.columnLayout",
"maya.cmds.playbackOptions",
"maya.cmds.fileDialog2",
"maya.cmds.polySphere",
"maya.cmds.warning",
"maya.cmds.window",
"maya.cmds.setAttr",
"math.log",
"maya.cmds.falloffCurve",
"maya.cmds.setParent",
"maya.cmds.dockControl",
"math.ceil",
"maya.cmds.select",
"maya.cmds.rowLayout",
"struct.unpack",
"maya.cmds.getAttr",
"maya.cmds.delete",
"maya.cmds.separator",
"maya.cmds.ls",
"time.sleep",
"maya.cmds.listAttr",
"maya.cmds.objExists",
"maya.cmds.setKeyframe",
"cmath.exp",
"wave.open",
"maya.mel.eval",
"maya.cmds.frameLayout",
"maya.cmds.rowColumnLayout",
"maya.cmds.textScrollList",
"math.floor",
"maya.cmds.currentTime",
"maya.cmds.animCurveEditor"
] |
[((436, 460), 'wave.open', 'wave.open', (['filePath', '"""r"""'], {}), "(filePath, 'r')\n", (445, 460), False, 'import wave, struct\n'), ((2400, 2425), 'math.log', 'math.log', (['values_count', '(2)'], {}), '(values_count, 2)\n', (2408, 2425), False, 'import os.path, math, array, time\n'), ((7403, 7437), 'cmath.exp', 'exp', (['(-2 * pi * 1.0j / values_count)'], {}), '(-2 * pi * 1.0j / values_count)\n', (7406, 7437), False, 'from cmath import exp, pi\n'), ((8850, 8886), 'maya.mel.eval', 'mel.eval', (['"""$tmpVar=$gPlayBackSlider"""'], {}), "('$tmpVar=$gPlayBackSlider')\n", (8858, 8886), False, 'from maya import cmds, mel\n'), ((9992, 10040), 'maya.cmds.window', 'cmds.window', (['windowName'], {'query': '(True)', 'exists': '(True)'}), '(windowName, query=True, exists=True)\n', (10003, 10040), False, 'from maya import cmds, mel\n'), ((10141, 10199), 'maya.cmds.window', 'cmds.window', (['windowName'], {'title': '"""Music Animator"""', 'width': '(510)'}), "(windowName, title='Music Animator', width=510)\n", (10152, 10199), False, 'from maya import cmds, mel\n'), ((10289, 10332), 'maya.cmds.columnLayout', 'cmds.columnLayout', ([], {'columnOffset': "('both', 5)"}), "(columnOffset=('both', 5))\n", (10306, 10332), False, 'from maya import cmds, mel\n'), ((10397, 10455), 'maya.cmds.dockControl', 'cmds.dockControl', (['"""MusicAnimator"""'], {'query': '(True)', 'exists': '(True)'}), "('MusicAnimator', query=True, exists=True)\n", (10413, 10455), False, 'from maya import cmds, mel\n'), ((10524, 10621), 'maya.cmds.dockControl', 'cmds.dockControl', (['"""MusicAnimator"""'], {'area': '"""right"""', 'content': 'windowName', 'allowedArea': 'allowedAreas'}), "('MusicAnimator', area='right', content=windowName,\n allowedArea=allowedAreas)\n", (10540, 10621), False, 'from maya import cmds, mel\n'), ((10683, 10722), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(10)', 'style': '"""none"""'}), "(height=10, style='none')\n", (10697, 10722), False, 'from maya import cmds, 
mel\n'), ((10996, 11034), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (11010, 11034), False, 'from maya import cmds, mel\n'), ((11044, 11097), 'maya.cmds.rowLayout', 'cmds.rowLayout', ([], {'numberOfColumns': '(2)', 'adjustableColumn': '(1)'}), '(numberOfColumns=2, adjustableColumn=1)\n', (11058, 11097), False, 'from maya import cmds, mel\n'), ((11107, 11146), 'maya.cmds.separator', 'cmds.separator', ([], {'width': '(410)', 'style': '"""none"""'}), "(width=410, style='none')\n", (11121, 11146), False, 'from maya import cmds, mel\n'), ((11272, 11292), 'maya.cmds.setParent', 'cmds.setParent', (['""".."""'], {}), "('..')\n", (11286, 11292), False, 'from maya import cmds, mel\n'), ((11378, 11417), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(10)', 'style': '"""none"""'}), "(height=10, style='none')\n", (11392, 11417), False, 'from maya import cmds, mel\n'), ((11634, 11673), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(10)', 'style': '"""none"""'}), "(height=10, style='none')\n", (11648, 11673), False, 'from maya import cmds, mel\n'), ((11683, 11789), 'maya.cmds.rowColumnLayout', 'cmds.rowColumnLayout', ([], {'numberOfColumns': '(5)', 'columnWidth': '[(1, 100), (2, 150), (3, 10), (4, 100), (5, 150)]'}), '(numberOfColumns=5, columnWidth=[(1, 100), (2, 150), (3,\n 10), (4, 100), (5, 150)])\n', (11703, 11789), False, 'from maya import cmds, mel\n'), ((11998, 12036), 'maya.cmds.separator', 'cmds.separator', ([], {'width': '(10)', 'style': '"""none"""'}), "(width=10, style='none')\n", (12012, 12036), False, 'from maya import cmds, mel\n'), ((12157, 12202), 'maya.cmds.textScrollList', 'cmds.textScrollList', ([], {'allowMultiSelection': '(True)'}), '(allowMultiSelection=True)\n', (12176, 12202), False, 'from maya import cmds, mel\n'), ((12212, 12232), 'maya.cmds.setParent', 'cmds.setParent', (['""".."""'], {}), "('..')\n", (12226, 12232), False, 'from maya import cmds, 
mel\n'), ((12291, 12329), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (12305, 12329), False, 'from maya import cmds, mel\n'), ((12457, 12496), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(10)', 'style': '"""none"""'}), "(height=10, style='none')\n", (12471, 12496), False, 'from maya import cmds, mel\n'), ((12551, 12589), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (12565, 12589), False, 'from maya import cmds, mel\n'), ((12718, 12815), 'maya.cmds.menuItem', 'cmds.menuItem', ([], {'label': '"""WaveForm"""', 'annotation': '"""Use the entire shape of the wave for animation"""'}), "(label='WaveForm', annotation=\n 'Use the entire shape of the wave for animation')\n", (12731, 12815), False, 'from maya import cmds, mel\n'), ((12820, 12917), 'maya.cmds.menuItem', 'cmds.menuItem', ([], {'label': '"""Spectrum"""', 'annotation': '"""Use the state of the frequencies on each frame"""'}), "(label='Spectrum', annotation=\n 'Use the state of the frequencies on each frame')\n", (12833, 12917), False, 'from maya import cmds, mel\n'), ((12981, 13000), 'maya.cmds.columnLayout', 'cmds.columnLayout', ([], {}), '()\n', (12998, 13000), False, 'from maya import cmds, mel\n'), ((13010, 13048), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (13024, 13048), False, 'from maya import cmds, mel\n'), ((13418, 13456), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (13432, 13456), False, 'from maya import cmds, mel\n'), ((13798, 13855), 'maya.cmds.layout', 'cmds.layout', (['self.spectrumLayout'], {'edit': '(True)', 'enable': '(False)'}), '(self.spectrumLayout, edit=True, enable=False)\n', (13809, 13855), False, 'from maya import cmds, mel\n'), ((13865, 13885), 'maya.cmds.setParent', 
'cmds.setParent', (['""".."""'], {}), "('..')\n", (13879, 13885), False, 'from maya import cmds, mel\n'), ((13947, 13985), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (13961, 13985), False, 'from maya import cmds, mel\n'), ((13995, 14114), 'maya.cmds.intSliderGrp', 'cmds.intSliderGrp', ([], {'label': '"""Value Multiplier"""', 'min': '(1)', 'max': '(100)', 'value': '(1)', 'field': '(True)', 'changeCommand': 'self.setMultiplier'}), "(label='Value Multiplier', min=1, max=100, value=1, field=\n True, changeCommand=self.setMultiplier)\n", (14012, 14114), False, 'from maya import cmds, mel\n'), ((14174, 14212), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (14188, 14212), False, 'from maya import cmds, mel\n'), ((14222, 14298), 'maya.cmds.rowLayout', 'cmds.rowLayout', ([], {'numberOfColumns': '(3)', 'columnWidth': '[(1, 250), (2, 10), (3, 250)]'}), '(numberOfColumns=3, columnWidth=[(1, 250), (2, 10), (3, 250)])\n', (14236, 14298), False, 'from maya import cmds, mel\n'), ((14411, 14449), 'maya.cmds.separator', 'cmds.separator', ([], {'width': '(10)', 'style': '"""none"""'}), "(width=10, style='none')\n", (14425, 14449), False, 'from maya import cmds, mel\n'), ((14563, 14583), 'maya.cmds.setParent', 'cmds.setParent', (['""".."""'], {}), "('..')\n", (14577, 14583), False, 'from maya import cmds, mel\n'), ((14641, 14679), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (14655, 14679), False, 'from maya import cmds, mel\n'), ((14707, 14831), 'maya.cmds.frameLayout', 'cmds.frameLayout', ([], {'label': '"""Analizing functions"""', 'labelIndent': '(1)', 'width': '(510)', 'collapsable': '(True)', 'collapse': '(True)', 'marginHeight': '(5)'}), "(label='Analizing functions', labelIndent=1, width=510,\n collapsable=True, collapse=True, marginHeight=5)\n", (14723, 
14831), False, 'from maya import cmds, mel\n'), ((14836, 14882), 'maya.cmds.text', 'cmds.text', ([], {'label': '"""Draw the shape of the wave:"""'}), "(label='Draw the shape of the wave:')\n", (14845, 14882), False, 'from maya import cmds, mel\n'), ((14892, 14950), 'maya.cmds.button', 'cmds.button', ([], {'label': '"""Draw Waveform"""', 'command': 'self.drawGraph'}), "(label='Draw Waveform', command=self.drawGraph)\n", (14903, 14950), False, 'from maya import cmds, mel\n'), ((14963, 15001), 'maya.cmds.separator', 'cmds.separator', ([], {'height': '(5)', 'style': '"""none"""'}), "(height=5, style='none')\n", (14977, 15001), False, 'from maya import cmds, mel\n'), ((15011, 15088), 'maya.cmds.text', 'cmds.text', ([], {'label': '"""Draw the spectrum of the frequencies on the current frame."""'}), "(label='Draw the spectrum of the frequencies on the current frame.')\n", (15020, 15088), False, 'from maya import cmds, mel\n'), ((15098, 15190), 'maya.cmds.text', 'cmds.text', ([], {'label': '"""Change the analyzing method above to modify the number of divisions."""'}), "(label=\n 'Change the analyzing method above to modify the number of divisions.')\n", (15107, 15190), False, 'from maya import cmds, mel\n'), ((15195, 15256), 'maya.cmds.button', 'cmds.button', ([], {'label': '"""Draw Spectrum"""', 'command': 'self.drawSpectrum'}), "(label='Draw Spectrum', command=self.drawSpectrum)\n", (15206, 15256), False, 'from maya import cmds, mel\n'), ((15516, 15584), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['ObjScroll'], {'query': '(True)', 'selectUniqueTagItem': '(True)'}), '(ObjScroll, query=True, selectUniqueTagItem=True)\n', (15535, 15584), False, 'from maya import cmds, mel\n'), ((15596, 15635), 'maya.cmds.select', 'cmds.select', (['currentItems'], {'replace': '(True)'}), '(currentItems, replace=True)\n', (15607, 15635), False, 'from maya import cmds, mel\n'), ((15788, 15845), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['ObjScroll'], {'edit': '(True)', 
'removeAll': '(True)'}), '(ObjScroll, edit=True, removeAll=True)\n', (15807, 15845), False, 'from maya import cmds, mel\n'), ((15855, 15913), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['AttrScroll'], {'edit': '(True)', 'removeAll': '(True)'}), '(AttrScroll, edit=True, removeAll=True)\n', (15874, 15913), False, 'from maya import cmds, mel\n'), ((16613, 16668), 'maya.cmds.intSliderGrp', 'cmds.intSliderGrp', (['BandSelector'], {'query': '(True)', 'value': '(True)'}), '(BandSelector, query=True, value=True)\n', (16630, 16668), False, 'from maya import cmds, mel\n'), ((16971, 17030), 'maya.cmds.intSliderGrp', 'cmds.intSliderGrp', (['BandSelector'], {'edit': '(True)', 'maxValue': 'amount'}), '(BandSelector, edit=True, maxValue=amount)\n', (16988, 17030), False, 'from maya import cmds, mel\n'), ((17388, 17463), 'maya.cmds.fileDialog2', 'cmds.fileDialog2', ([], {'caption': '"""Select wav file"""', 'fileFilter': '"""*.wav"""', 'fileMode': '(1)'}), "(caption='Select wav file', fileFilter='*.wav', fileMode=1)\n", (17404, 17463), False, 'from maya import cmds, mel\n'), ((17483, 17549), 'maya.cmds.textFieldButtonGrp', 'cmds.textFieldButtonGrp', (['theTextField'], {'edit': '(True)', 'text': 'waveFile[0]'}), '(theTextField, edit=True, text=waveFile[0])\n', (17506, 17549), False, 'from maya import cmds, mel\n'), ((17781, 17842), 'maya.cmds.textFieldButtonGrp', 'cmds.textFieldButtonGrp', (['fileNameField'], {'query': '(True)', 'text': '(True)'}), '(fileNameField, query=True, text=True)\n', (17804, 17842), False, 'from maya import cmds, mel\n'), ((18130, 18169), 'maya.cmds.createNode', 'cmds.createNode', (['"""audio"""'], {'name': 'songName'}), "('audio', name=songName)\n", (18145, 18169), False, 'from maya import cmds, mel\n'), ((18223, 18260), 'maya.cmds.setParent', 'cmds.setParent', (['tracksMenu'], {'menu': '(True)'}), '(tracksMenu, menu=True)\n', (18237, 18260), False, 'from maya import cmds, mel\n'), ((18270, 18300), 'maya.cmds.menuItem', 'cmds.menuItem', ([], 
{'label': 'audioNode'}), '(label=audioNode)\n', (18283, 18300), False, 'from maya import cmds, mel\n'), ((18326, 18385), 'maya.cmds.optionMenu', 'cmds.optionMenu', (['tracksMenu'], {'query': '(True)', 'numberOfItems': '(True)'}), '(tracksMenu, query=True, numberOfItems=True)\n', (18341, 18385), False, 'from maya import cmds, mel\n'), ((18395, 18455), 'maya.cmds.optionMenu', 'cmds.optionMenu', (['tracksMenu'], {'edit': '(True)', 'select': 'numberOfItems'}), '(tracksMenu, edit=True, select=numberOfItems)\n', (18410, 18455), False, 'from maya import cmds, mel\n'), ((18818, 18906), 'maya.cmds.timeControl', 'cmds.timeControl', (['self.playBackSlider'], {'edit': '(True)', 'sound': 'audioNode', 'displaySound': '(True)'}), '(self.playBackSlider, edit=True, sound=audioNode,\n displaySound=True)\n', (18834, 18906), False, 'from maya import cmds, mel\n'), ((19116, 19208), 'maya.cmds.timeControl', 'cmds.timeControl', (['self.playBackSlider'], {'edit': '(True)', 'sound': 'selectedTrack', 'displaySound': '(True)'}), '(self.playBackSlider, edit=True, sound=selectedTrack,\n displaySound=True)\n', (19132, 19208), False, 'from maya import cmds, mel\n'), ((19279, 19331), 'maya.cmds.optionMenu', 'cmds.optionMenu', (['tracksMenu'], {'query': '(True)', 'select': '(True)'}), '(tracksMenu, query=True, select=True)\n', (19294, 19331), False, 'from maya import cmds, mel\n'), ((19613, 19654), 'maya.cmds.ls', 'cmds.ls', ([], {'selection': '(True)', 'objectsOnly': '(True)'}), '(selection=True, objectsOnly=True)\n', (19620, 19654), False, 'from maya import cmds, mel\n'), ((19859, 19917), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['scrollList'], {'query': '(True)', 'allItems': '(True)'}), '(scrollList, query=True, allItems=True)\n', (19878, 19917), False, 'from maya import cmds, mel\n'), ((20210, 20298), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['scrollList'], {'edit': '(True)', 'append': 'itemsToAdd', 'uniqueTag': 'itemsToAdd'}), '(scrollList, edit=True, append=itemsToAdd, 
uniqueTag=\n itemsToAdd)\n', (20229, 20298), False, 'from maya import cmds, mel\n'), ((20452, 20520), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['ObjScroll'], {'query': '(True)', 'selectUniqueTagItem': '(True)'}), '(ObjScroll, query=True, selectUniqueTagItem=True)\n', (20471, 20520), False, 'from maya import cmds, mel\n'), ((20719, 20777), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['scrollList'], {'edit': '(True)', 'removeAll': '(True)'}), '(scrollList, edit=True, removeAll=True)\n', (20738, 20777), False, 'from maya import cmds, mel\n'), ((20853, 20913), 'maya.cmds.listAttr', 'cmds.listAttr', (['selectedObjects[0]'], {'keyable': '(True)', 'scalar': '(True)'}), '(selectedObjects[0], keyable=True, scalar=True)\n', (20866, 20913), False, 'from maya import cmds, mel\n'), ((21494, 21573), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['scrollList'], {'edit': '(True)', 'append': 'attrList', 'uniqueTag': 'attrList'}), '(scrollList, edit=True, append=attrList, uniqueTag=attrList)\n', (21513, 21573), False, 'from maya import cmds, mel\n'), ((21903, 21971), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['ObjScroll'], {'query': '(True)', 'selectUniqueTagItem': '(True)'}), '(ObjScroll, query=True, selectUniqueTagItem=True)\n', (21922, 21971), False, 'from maya import cmds, mel\n'), ((21992, 22061), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['AttrScroll'], {'query': '(True)', 'selectUniqueTagItem': '(True)'}), '(AttrScroll, query=True, selectUniqueTagItem=True)\n', (22011, 22061), False, 'from maya import cmds, mel\n'), ((22477, 22511), 'maya.mel.eval', 'mel.eval', (['"""currentTimeUnitToFPS()"""'], {}), "('currentTimeUnitToFPS()')\n", (22485, 22511), False, 'from maya import cmds, mel\n'), ((24963, 24982), 'maya.cmds.currentTime', 'cmds.currentTime', (['(1)'], {}), '(1)\n', (24979, 24982), False, 'from maya import cmds, mel\n'), ((25420, 25488), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['ObjScroll'], {'query': '(True)', 
'selectUniqueTagItem': '(True)'}), '(ObjScroll, query=True, selectUniqueTagItem=True)\n', (25439, 25488), False, 'from maya import cmds, mel\n'), ((25509, 25578), 'maya.cmds.textScrollList', 'cmds.textScrollList', (['AttrScroll'], {'query': '(True)', 'selectUniqueTagItem': '(True)'}), '(AttrScroll, query=True, selectUniqueTagItem=True)\n', (25528, 25578), False, 'from maya import cmds, mel\n'), ((25953, 25987), 'maya.mel.eval', 'mel.eval', (['"""currentTimeUnitToFPS()"""'], {}), "('currentTimeUnitToFPS()')\n", (25961, 25987), False, 'from maya import cmds, mel\n'), ((28112, 28146), 'maya.mel.eval', 'mel.eval', (['"""currentTimeUnitToFPS()"""'], {}), "('currentTimeUnitToFPS()')\n", (28120, 28146), False, 'from maya import cmds, mel\n'), ((28826, 28857), 'maya.cmds.setParent', 'cmds.setParent', (['self.mainLayout'], {}), '(self.mainLayout)\n', (28840, 28857), False, 'from maya import cmds, mel\n'), ((28867, 28892), 'maya.cmds.deleteUI', 'cmds.deleteUI', (['self.graph'], {}), '(self.graph)\n', (28880, 28892), False, 'from maya import cmds, mel\n'), ((28915, 28974), 'maya.cmds.frameLayout', 'cmds.frameLayout', ([], {'height': '(200)', 'width': '(500)', 'labelVisible': '(False)'}), '(height=200, width=500, labelVisible=False)\n', (28931, 28974), False, 'from maya import cmds, mel\n'), ((28986, 29025), 'maya.cmds.falloffCurve', 'cmds.falloffCurve', ([], {'asString': 'curveString'}), '(asString=curveString)\n', (29003, 29025), False, 'from maya import cmds, mel\n'), ((29270, 29302), 'maya.cmds.objExists', 'cmds.objExists', (['"""AudioVisHelper"""'], {}), "('AudioVisHelper')\n", (29284, 29302), False, 'from maya import cmds, mel\n'), ((29424, 29467), 'maya.cmds.setAttr', 'cmds.setAttr', (["(visualizer + '.visibility')", '(0)'], {}), "(visualizer + '.visibility', 0)\n", (29436, 29467), False, 'from maya import cmds, mel\n'), ((29491, 29525), 'maya.mel.eval', 'mel.eval', (['"""currentTimeUnitToFPS()"""'], {}), "('currentTimeUnitToFPS()')\n", (29499, 29525), False, 'from maya 
import cmds, mel\n'), ((29834, 29865), 'maya.cmds.setParent', 'cmds.setParent', (['self.mainLayout'], {}), '(self.mainLayout)\n', (29848, 29865), False, 'from maya import cmds, mel\n'), ((29875, 29900), 'maya.cmds.deleteUI', 'cmds.deleteUI', (['self.graph'], {}), '(self.graph)\n', (29888, 29900), False, 'from maya import cmds, mel\n'), ((29923, 29982), 'maya.cmds.frameLayout', 'cmds.frameLayout', ([], {'height': '(200)', 'width': '(500)', 'labelVisible': '(False)'}), '(height=200, width=500, labelVisible=False)\n', (29939, 29982), False, 'from maya import cmds, mel\n'), ((29992, 30069), 'maya.cmds.animCurveEditor', 'cmds.animCurveEditor', ([], {'autoFit': '(True)', 'displayKeys': '(False)', 'displayNormalized': '(True)'}), '(autoFit=True, displayKeys=False, displayNormalized=True)\n', (30012, 30069), False, 'from maya import cmds, mel\n'), ((30079, 30099), 'maya.cmds.setParent', 'cmds.setParent', (['""".."""'], {}), "('..')\n", (30093, 30099), False, 'from maya import cmds, mel\n'), ((4334, 4358), 'struct.unpack', 'struct.unpack', (['unpstr', 's'], {}), '(unpstr, s)\n', (4347, 4358), False, 'import wave, struct\n'), ((6194, 6218), 'struct.unpack', 'struct.unpack', (['unpstr', 's'], {}), '(unpstr, s)\n', (6207, 6218), False, 'import wave, struct\n'), ((10055, 10080), 'maya.cmds.deleteUI', 'cmds.deleteUI', (['windowName'], {}), '(windowName)\n', (10068, 10080), False, 'from maya import cmds, mel\n'), ((10470, 10500), 'maya.cmds.deleteUI', 'cmds.deleteUI', (['"""MusicAnimator"""'], {}), "('MusicAnimator')\n", (10483, 10500), False, 'from maya import cmds, mel\n'), ((16156, 16212), 'maya.cmds.layout', 'cmds.layout', (['self.spectrumLayout'], {'edit': '(True)', 'enable': '(True)'}), '(self.spectrumLayout, edit=True, enable=True)\n', (16167, 16212), False, 'from maya import cmds, mel\n'), ((16286, 16343), 'maya.cmds.layout', 'cmds.layout', (['self.spectrumLayout'], {'edit': '(True)', 'enable': '(False)'}), '(self.spectrumLayout, edit=True, enable=False)\n', (16297, 
16343), False, 'from maya import cmds, mel\n'), ((16843, 16899), 'maya.cmds.intSliderGrp', 'cmds.intSliderGrp', (['BandSelector'], {'edit': '(True)', 'value': 'amount'}), '(BandSelector, edit=True, value=amount)\n', (16860, 16899), False, 'from maya import cmds, mel\n'), ((19703, 19755), 'maya.cmds.warning', 'cmds.warning', (['"""Please select an object in the scene"""'], {}), "('Please select an object in the scene')\n", (19715, 19755), False, 'from maya import cmds, mel\n'), ((20569, 20640), 'maya.cmds.warning', 'cmds.warning', (['"""Please add select an object from the Object Scroll List"""'], {}), "('Please add select an object from the Object Scroll List')\n", (20581, 20640), False, 'from maya import cmds, mel\n'), ((21206, 21251), 'maya.cmds.listAttr', 'cmds.listAttr', (['obj'], {'keyable': '(True)', 'scalar': '(True)'}), '(obj, keyable=True, scalar=True)\n', (21219, 21251), False, 'from maya import cmds, mel\n'), ((21776, 21819), 'maya.cmds.warning', 'cmds.warning', (['"""Please apply an audio first"""'], {}), "('Please apply an audio first')\n", (21788, 21819), False, 'from maya import cmds, mel\n'), ((22102, 22177), 'maya.cmds.warning', 'cmds.warning', (['"""Please select at least one object in the Object scroll list"""'], {}), "('Please select at least one object in the Object scroll list')\n", (22114, 22177), False, 'from maya import cmds, mel\n'), ((22239, 22325), 'maya.cmds.warning', 'cmds.warning', (['"""Please select at least one attribute in the Attribute scroll list"""'], {}), "(\n 'Please select at least one attribute in the Attribute scroll list')\n", (22251, 22325), False, 'from maya import cmds, mel\n'), ((22367, 22413), 'maya.cmds.playbackOptions', 'cmds.playbackOptions', ([], {'query': '(True)', 'maxTime': '(True)'}), '(query=True, maxTime=True)\n', (22387, 22413), False, 'from maya import cmds, mel\n'), ((22900, 22917), 'maya.cmds.getAttr', 'cmds.getAttr', (['key'], {}), '(key)\n', (22912, 22917), False, 'from maya import cmds, mel\n'), 
((25088, 25130), 'maya.cmds.setAttr', 'cmds.setAttr', (['key', 'originalAttributes[key]'], {}), '(key, originalAttributes[key])\n', (25100, 25130), False, 'from maya import cmds, mel\n'), ((25335, 25378), 'maya.cmds.warning', 'cmds.warning', (['"""Please apply an audio first"""'], {}), "('Please apply an audio first')\n", (25347, 25378), False, 'from maya import cmds, mel\n'), ((25619, 25694), 'maya.cmds.warning', 'cmds.warning', (['"""Please select at least one object in the Object scroll list"""'], {}), "('Please select at least one object in the Object scroll list')\n", (25631, 25694), False, 'from maya import cmds, mel\n'), ((25756, 25842), 'maya.cmds.warning', 'cmds.warning', (['"""Please select at least one attribute in the Attribute scroll list"""'], {}), "(\n 'Please select at least one attribute in the Attribute scroll list')\n", (25768, 25842), False, 'from maya import cmds, mel\n'), ((25884, 25930), 'maya.cmds.playbackOptions', 'cmds.playbackOptions', ([], {'query': '(True)', 'maxTime': '(True)'}), '(query=True, maxTime=True)\n', (25904, 25930), False, 'from maya import cmds, mel\n'), ((26027, 26064), 'math.ceil', 'math.ceil', (['(1.0 * endFrame / frameRate)'], {}), '(1.0 * endFrame / frameRate)\n', (26036, 26064), False, 'import os.path, math, array, time\n'), ((26416, 26433), 'maya.cmds.getAttr', 'cmds.getAttr', (['key'], {}), '(key)\n', (26428, 26433), False, 'from maya import cmds, mel\n'), ((28025, 28068), 'maya.cmds.warning', 'cmds.warning', (['"""Please apply an audio first"""'], {}), "('Please apply an audio first')\n", (28037, 28068), False, 'from maya import cmds, mel\n'), ((28204, 28232), 'maya.cmds.currentTime', 'cmds.currentTime', ([], {'query': '(True)'}), '(query=True)\n', (28220, 28232), False, 'from maya import cmds, mel\n'), ((28290, 28325), 'maya.cmds.warning', 'cmds.warning', (['"""End of file reached"""'], {}), "('End of file reached')\n", (28302, 28325), False, 'from maya import cmds, mel\n'), ((29192, 29235), 'maya.cmds.warning', 
'cmds.warning', (['"""Please apply an audio first"""'], {}), "('Please apply an audio first')\n", (29204, 29235), False, 'from maya import cmds, mel\n'), ((29317, 29346), 'maya.cmds.delete', 'cmds.delete', (['"""AudioVisHelper"""'], {}), "('AudioVisHelper')\n", (29328, 29346), False, 'from maya import cmds, mel\n'), ((29371, 29409), 'maya.cmds.polySphere', 'cmds.polySphere', ([], {'name': '"""AudioVisHelper"""'}), "(name='AudioVisHelper')\n", (29386, 29409), False, 'from maya import cmds, mel\n'), ((29648, 29667), 'maya.cmds.currentTime', 'cmds.currentTime', (['f'], {}), '(f)\n', (29664, 29667), False, 'from maya import cmds, mel\n'), ((2492, 2507), 'math.floor', 'math.floor', (['log'], {}), '(log)\n', (2502, 2507), False, 'import os.path, math, array, time\n'), ((7201, 7226), 'math.log', 'math.log', (['values_count', '(2)'], {}), '(values_count, 2)\n', (7209, 7226), False, 'import os.path, math, array, time\n'), ((23882, 23909), 'time.sleep', 'time.sleep', (['(0.5 / frameRate)'], {}), '(0.5 / frameRate)\n', (23892, 23909), False, 'import os.path, math, array, time\n'), ((24888, 24915), 'time.sleep', 'time.sleep', (['(0.5 / frameRate)'], {}), '(0.5 / frameRate)\n', (24898, 24915), False, 'import os.path, math, array, time\n'), ((23480, 23507), 'maya.cmds.currentTime', 'cmds.currentTime', (['(frame + 1)'], {}), '(frame + 1)\n', (23496, 23507), False, 'from maya import cmds, mel\n'), ((23701, 23797), 'maya.cmds.setAttr', 'cmds.setAttr', (["(obj + '.' + attr)", '(soundValues[frame][0] * self.valueMultiplier + originalValue)'], {}), "(obj + '.' + attr, soundValues[frame][0] * self.valueMultiplier +\n originalValue)\n", (23713, 23797), False, 'from maya import cmds, mel\n'), ((24458, 24485), 'maya.cmds.currentTime', 'cmds.currentTime', (['(frame + 1)'], {}), '(frame + 1)\n', (24474, 24485), False, 'from maya import cmds, mel\n'), ((24679, 24789), 'maya.cmds.setAttr', 'cmds.setAttr', (["(obj + '.' 
+ attr)", '(soundValues[self.selectedBand - 1] * self.valueMultiplier + originalValue)'], {}), "(obj + '.' + attr, soundValues[self.selectedBand - 1] * self.\n valueMultiplier + originalValue)\n", (24691, 24789), False, 'from maya import cmds, mel\n'), ((26793, 26820), 'maya.cmds.currentTime', 'cmds.currentTime', (['(frame + 1)'], {}), '(frame + 1)\n', (26809, 26820), False, 'from maya import cmds, mel\n'), ((26918, 27014), 'maya.cmds.setAttr', 'cmds.setAttr', (["(obj + '.' + attr)", '(soundValues[frame][0] * self.valueMultiplier + originalValue)'], {}), "(obj + '.' + attr, soundValues[frame][0] * self.valueMultiplier +\n originalValue)\n", (26930, 27014), False, 'from maya import cmds, mel\n'), ((27032, 27066), 'maya.cmds.setKeyframe', 'cmds.setKeyframe', (["(obj + '.' + attr)"], {}), "(obj + '.' + attr)\n", (27048, 27066), False, 'from maya import cmds, mel\n'), ((27391, 27418), 'maya.cmds.currentTime', 'cmds.currentTime', (['(frame + 1)'], {}), '(frame + 1)\n', (27407, 27418), False, 'from maya import cmds, mel\n'), ((27516, 27626), 'maya.cmds.setAttr', 'cmds.setAttr', (["(obj + '.' + attr)", '(soundValues[self.selectedBand - 1] * self.valueMultiplier + originalValue)'], {}), "(obj + '.' + attr, soundValues[self.selectedBand - 1] * self.\n valueMultiplier + originalValue)\n", (27528, 27626), False, 'from maya import cmds, mel\n'), ((27641, 27675), 'maya.cmds.setKeyframe', 'cmds.setKeyframe', (["(obj + '.' + attr)"], {}), "(obj + '.' + attr)\n", (27657, 27675), False, 'from maya import cmds, mel\n')]
|
from unittest import mock
import tensorflow as tf
class RealTfModel:
    """Thin wrapper around a small Keras model used as the 'real' counterpart to the mocks.

    Holds a fixed all-ones input of shape [1, 2] and a fixed regression target
    so that forward passes and gradients are deterministic across runs.
    """

    def __init__(self, model):
        self.model = model
        self.input = tf.ones([1, 2])   # fixed dummy input batch (the original's `* 1` was a no-op)
        self.y_true = [[9.]]            # fixed regression target
        self.loss = tf.keras.losses.MeanSquaredError()

    @classmethod
    def create(cls):
        """Alternate constructor: build a 2 -> 3 -> 2 linear stack with all-ones weights."""
        ones_init = tf.keras.initializers.ones
        model = tf.keras.models.Sequential([
            tf.keras.layers.Dense(3, input_dim=2, activation='linear', kernel_initializer=ones_init,
                                  bias_initializer=ones_init),
            tf.keras.layers.Dense(2, activation='linear', kernel_initializer=ones_init, bias_initializer=ones_init),
        ])
        return cls(model)

    def get(self):
        """Return the wrapped Keras model."""
        return self.model

    def backward(self):
        """Run one forward pass and return gradients of the loss w.r.t. the trainable weights."""
        with tf.GradientTape() as tape:
            logits = self.model(self.input)
            # Keras losses are called as loss(y_true, y_pred); the original call had
            # the arguments swapped. MSE is symmetric, so the numeric value (and the
            # gradients) are unchanged — but this is the documented contract.
            loss_val = self.loss(self.y_true, logits)
            grads = tape.gradient(loss_val, self.model.trainable_weights)
        return grads
class ModelMocker:
    """Factory for autospec'd mocks mirroring the Keras objects used in tests."""

    @staticmethod
    def _mock_variable(name, shape):
        """Build an autospec'd tf.Variable mock carrying the given name and shape."""
        var = mock.create_autospec(tf.Variable)
        var.name = name
        var.shape = shape
        return var

    @staticmethod
    def mock_layer(name, shape):
        """Mock a Dense layer exposing kernel/bias weight variables.

        ``shape`` is the kernel shape (fan_in, fan_out); the bias shape is
        [fan_out]. Variable names follow the TF convention
        '<layer>/kernel:0' and '<layer>/bias:0'.
        """
        layer = mock.create_autospec(tf.keras.layers.Dense)
        layer.name = name
        # Kernel and bias were built with duplicated inline code before;
        # the shared helper keeps the two variables consistent.
        kernel_weights = ModelMocker._mock_variable(name + '/kernel:0', shape)
        bias_weights = ModelMocker._mock_variable(name + '/bias:0', [shape[1]])
        layer.weights = [kernel_weights, bias_weights]
        return layer

    @staticmethod
    def mock_model():
        """Mock a Sequential model."""
        return mock.create_autospec(tf.keras.models.Sequential)
|
[
"tensorflow.ones",
"unittest.mock.create_autospec",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.Dense",
"tensorflow.GradientTape"
] |
[((220, 254), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (252, 254), True, 'import tensorflow as tf\n'), ((1101, 1144), 'unittest.mock.create_autospec', 'mock.create_autospec', (['tf.keras.layers.Dense'], {}), '(tf.keras.layers.Dense)\n', (1121, 1144), False, 'from unittest import mock\n'), ((1196, 1229), 'unittest.mock.create_autospec', 'mock.create_autospec', (['tf.Variable'], {}), '(tf.Variable)\n', (1216, 1229), False, 'from unittest import mock\n'), ((1339, 1372), 'unittest.mock.create_autospec', 'mock.create_autospec', (['tf.Variable'], {}), '(tf.Variable)\n', (1359, 1372), False, 'from unittest import mock\n'), ((1591, 1639), 'unittest.mock.create_autospec', 'mock.create_autospec', (['tf.keras.models.Sequential'], {}), '(tf.keras.models.Sequential)\n', (1611, 1639), False, 'from unittest import mock\n'), ((151, 166), 'tensorflow.ones', 'tf.ones', (['[1, 2]'], {}), '([1, 2])\n', (158, 166), True, 'import tensorflow as tf\n'), ((788, 805), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (803, 805), True, 'import tensorflow as tf\n'), ((398, 518), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'input_dim': '(2)', 'activation': '"""linear"""', 'kernel_initializer': 'ones_init', 'bias_initializer': 'ones_init'}), "(3, input_dim=2, activation='linear',\n kernel_initializer=ones_init, bias_initializer=ones_init)\n", (419, 518), True, 'import tensorflow as tf\n'), ((562, 669), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""linear"""', 'kernel_initializer': 'ones_init', 'bias_initializer': 'ones_init'}), "(2, activation='linear', kernel_initializer=ones_init,\n bias_initializer=ones_init)\n", (583, 669), True, 'import tensorflow as tf\n')]
|
import json
from get_country_code import get_cc, get_continent
import networkx as nx
# Input: one JSON object per line (a user plus their followers) under processed/.
input_file = "24h_5k_users_followers_countries"
# Basename of the GEXF graph written under graphs/.
output_file = "graph_full"
# NOTE(review): these two counters are never updated anywhere below —
# presumably leftovers from an earlier version; confirm before removing.
total_entries = 0
with_country = 0
# Directed graph: follower-country edges with 'weight' counters on nodes and edges.
g = nx.DiGraph()
def increment_edge(n1, n2):
    """Record one follower relation from country n1 to country n2.

    Self-loops are skipped entirely. The source node's 'weight' is bumped,
    the target node is only ensured to exist, and the edge's 'weight'
    attribute counts how many times this country pair was seen.
    """
    # Same country on both ends: nothing to record.
    if n1 == n2:
        return
    increment_node(n1)
    make_node(n2)
    if not g.has_edge(n1, n2):
        g.add_edge(n1, n2)
    attrs = g.edge[n1][n2]
    attrs['weight'] = attrs.get('weight', 0) + 1
def increment_node(name):
    """Ensure the node exists, then bump its 'weight' attribute by one."""
    make_node(name)
    attrs = g.node[name]
    attrs['weight'] = attrs.get('weight', 0) + 1
def make_node(name):
    """Ensure a node for *name* exists and carries a cached 'continent' attribute.

    The continent lookup is skipped for a None country code; note that the
    node itself is still added even when name is None (original behaviour kept).
    """
    if not g.has_node(name):
        g.add_node(name)
    # Idiomatic forms ('not in', identity comparison with None) per PEP 8;
    # behaviour is unchanged. Continent is resolved once and cached on the node.
    if 'continent' not in g.node[name] and name is not None:
        g.node[name]['continent'] = get_continent(name)
# Stream the line-delimited JSON dump; a context manager guarantees the file
# handle is closed deterministically (the original `open()` was never closed).
with open("processed/" + input_file + ".json") as fp:
    for line in fp:
        user = json.loads(line)
        user_c = user['country']
        for follower in user['followers']:
            follower_c = follower['country']
            increment_edge(user_c, follower_c)
# Persist the weighted country graph in GEXF format (e.g. for Gephi).
nx.write_gexf(g, "graphs/" + output_file + ".gexf")
|
[
"networkx.DiGraph",
"json.loads",
"networkx.write_gexf",
"get_country_code.get_continent"
] |
[((202, 214), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (212, 214), True, 'import networkx as nx\n'), ((1083, 1134), 'networkx.write_gexf', 'nx.write_gexf', (['g', "('graphs/' + output_file + '.gexf')"], {}), "(g, 'graphs/' + output_file + '.gexf')\n", (1096, 1134), True, 'import networkx as nx\n'), ((911, 927), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (921, 927), False, 'import json\n'), ((827, 846), 'get_country_code.get_continent', 'get_continent', (['name'], {}), '(name)\n', (840, 846), False, 'from get_country_code import get_cc, get_continent\n')]
|
"""Unit tests for instrupy.radiometer_model.
References: [1] Chapter 6,7 in "Microwave Radar and Radiometric Remote Sensing," <NAME> , <NAME> 2014
@TODO Include rectangular antenna tests
"""
import unittest
import json
import numpy as np
import sys, os
from instrupy.radiometer_model import PredetectionSectionParams, SystemParams
from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, \
BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, \
ScanTech, FixedScan, CrossTrackScan, ConicalScan
from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver
class TestTotalPowerRadiometerSystem(unittest.TestCase):
    """Unit tests for ``TotalPowerRadiometerSystem``.

    Numeric truth values come from [1] Chapter 7 (see module docstring) or
    from hand evaluation of the radiometer equations.
    """
    @classmethod
    def setUpClass(cls):
        # See [1] Section 7-3.1 for the source of some of the receiver specs specified below. Section 7.5 lists a normalized gain variation spec of 10^-2.
        # System 1: predetection section specified stage-by-stage
        # (transmission line, RF amplifier, mixer, IF amplifier).
        cls.tpr_sys1_json = '{"tlLoss": 0.5,' \
                            '"tlPhyTemp": 290,' \
                            '"rfAmpGain": 30,' \
                            '"rfAmpInpNoiseTemp": 200,' \
                            '"rfAmpGainVariation": 10,' \
                            '"mixerGain": 23,' \
                            '"mixerInpNoiseTemp": 1200,' \
                            '"mixerGainVariation": 2,' \
                            '"ifAmpGain": 30,' \
                            '"ifAmpInputNoiseTemp": 100,' \
                            '"ifAmpGainVariation": 10,' \
                            '"integratorVoltageGain": 1,' \
                            '"integrationTime": 100e-3,' \
                            '"bandwidth": 10e6,' \
                            '"@id": 121}'
        # System 2: predetection section specified with aggregate figures.
        cls.tpr_sys2_json = '{"predetectionGain": 83,' \
                            '"predetectionInpNoiseTemp": 200,' \
                            '"predetectionGainVariation": 2000000,' \
                            '"integrationTime": 100e-3,' \
                            '"bandwidth": 10e6,' \
                            '"integratorVoltageGain": 1 }'
    def test_from_json(self):
        """ Test typical initialization of the total power radiometer system.
        """
        # Stage-by-stage spec: aggregate predetection fields must stay None.
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json)
        self.assertIsInstance(o, TotalPowerRadiometerSystem)
        self.assertEqual(o._id, 121)
        self.assertEqual(o._type, "TotalPowerRadiometerSystem")
        self.assertEqual(o.tlLoss, 0.5)
        self.assertEqual(o.tlPhyTemp, 290)
        self.assertEqual(o.rfAmpGain, 30)
        self.assertEqual(o.rfAmpInpNoiseTemp, 200)
        self.assertEqual(o.rfAmpGainVariation, 10)
        self.assertEqual(o.mixerGain, 23)
        self.assertEqual(o.mixerInpNoiseTemp, 1200)
        self.assertEqual(o.mixerGainVariation, 2)
        self.assertEqual(o.ifAmpGain, 30)
        self.assertEqual(o.ifAmpInputNoiseTemp, 100)
        self.assertEqual(o.ifAmpGainVariation, 10)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertIsNone(o.predetectionGain)
        self.assertIsNone(o.predetectionInpNoiseTemp)
        self.assertIsNone(o.predetectionGainVariation)
        self.assertEqual(o.integrationTime, 100e-3)
        self.assertEqual(o.bandwidth, 10e6)
        # Aggregate spec: per-stage fields must stay None.
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json)
        self.assertIsInstance(o, TotalPowerRadiometerSystem)
        self.assertIsNone(o._id)
        self.assertEqual(o._type, "TotalPowerRadiometerSystem")
        self.assertIsNone(o.tlLoss)
        self.assertIsNone(o.tlPhyTemp)
        self.assertIsNone(o.rfAmpGain)
        self.assertIsNone(o.rfAmpInpNoiseTemp)
        self.assertIsNone(o.rfAmpGainVariation)
        self.assertIsNone(o.mixerGain)
        self.assertIsNone(o.mixerInpNoiseTemp)
        self.assertIsNone(o.mixerGainVariation)
        self.assertIsNone(o.ifAmpGain)
        self.assertIsNone(o.ifAmpInputNoiseTemp)
        self.assertIsNone(o.ifAmpGainVariation)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertEqual(o.predetectionGain, 83)
        self.assertEqual(o.predetectionInpNoiseTemp, 200)
        self.assertEqual(o.predetectionGainVariation, 2000000)
        self.assertEqual(o.integrationTime, 100e-3)
        self.assertEqual(o.bandwidth, 10e6)
    def test_to_dict(self):
        """Dictionary serialization must round-trip every field.

        NOTE(review): the serialized key 'mixerGain,' carries a trailing comma;
        presumably this mirrors a typo in the package's ``to_dict`` — confirm
        against instrupy before relying on the key name.
        """
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json)
        self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                       'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
                                       'ifAmpGainVariation': 10.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
                                       'predetectionGainVariation': None, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': 121, '@type': 'TOTAL_POWER'}
                        )
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json)
        self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
                                       'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
                                       'ifAmpGainVariation': None, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 200.0,
                                       'predetectionGainVariation': 2000000.0, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': None, '@type': 'TOTAL_POWER'}
                        )
    def test_compute_integration_time(self):
        # The integration time used is the spec value capped by the available
        # dwell time td; with no spec the full dwell time is used.
        self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=0.5), 0.5)
        self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=2), 1.5)
        self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=None), 1.5)
    def test_compute_predetection_sec_params(self):
        """Aggregate predetection gain/noise figures from per-stage specs (or pass through aggregates)."""
        # Stage-by-stage specification.
        x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
                rfAmpGain=30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10, mixerGainVariation=2, ifAmpGainVariation=10,
                rfAmpInpNoiseTemp=200, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
        self.assertIsInstance(x, PredetectionSectionParams)
        self.assertAlmostEqual(x.G, 177827941.00389218)
        self.assertAlmostEqual(x.G_p, 180510851.84124476)
        self.assertAlmostEqual(x.G_m, 175171746.5823525)
        self.assertAlmostEqual(x.T_REC_q, 261.1355769549698)
        self.assertAlmostEqual(x.B, 10000000.0)
        # Aggregate specification: values pass straight through (gain in dB -> linear).
        x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=15e6, predetectionGain=90, predetectionGainVariation=10000000, predetectionInpNoiseTemp=300)
        self.assertIsInstance(x, PredetectionSectionParams)
        self.assertAlmostEqual(x.G, 1000000000)
        self.assertAlmostEqual(x.G_p, 1005000000)
        self.assertAlmostEqual(x.G_m, 995000000)
        self.assertAlmostEqual(x.T_REC_q, 300)
        self.assertAlmostEqual(x.B, 15000000.0)
        # no RF amplifier
        x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
                rfAmpGain=1, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=0, mixerGainVariation=2, ifAmpGainVariation=10,
                rfAmpInpNoiseTemp=0, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
        self.assertIsInstance(x, PredetectionSectionParams)
        self.assertAlmostEqual(x.G, 223872.1138568339)
        self.assertAlmostEqual(x.G_p, 226119.10297269153)
        self.assertAlmostEqual(x.G_m, 221636.34492551928)
        self.assertAlmostEqual(x.T_REC_q, 1104.9756026018772)
        self.assertAlmostEqual(x.B, 10000000.0)
    def test_compute_system_params(self):
        """System-level gain-variation ratio, antenna temperature and system temperature."""
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
        # NOTE(review): the computed pd_sec_params below is immediately
        # overwritten by the hand-built instance — the first call's result is
        # discarded; confirm whether the computed variant was meant to be tested.
        pd_sec_params = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
                rfAmpGain=30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10, mixerGainVariation=2, ifAmpGainVariation=10,
                rfAmpInpNoiseTemp=200, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
        G = 180000000
        pd_sec_params = PredetectionSectionParams(G=G, G_p=G+0.01*G, G_m=G-0.01*G, T_REC_q=260, B=10e6)
        x = TotalPowerRadiometerSystem.compute_system_params(antenna, pd_sec_params, integratorVoltageGain=1000, T_A_q=290)
        self.assertIsInstance(x, SystemParams)
        self.assertAlmostEqual(x.G_s_delta/x.G_s_bar, 0.02)
        self.assertAlmostEqual(x.T_A, 286)
        self.assertAlmostEqual(x.T_SYS, 546)
    def test_compute_radiometric_resolution(self):
        """Radiometric resolution (sensitivity) for both system specs over several operating points."""
        # system 1
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json) # note that there is a 100ms integration time specification
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 16.676630237262927)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=600), 23.886384796495147)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=500e-3, antenna=antenna, T_A_q=300), 16.676630237262927)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=50e-3, antenna=antenna, T_A_q=300), 16.685867420640534)
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 350})
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 17.15728054121174)
        antenna = Antenna.from_dict({"radiationEfficiency": 0.5, "phyTemp": 270})
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 16.406264441291718) # reduced radiantion-efficiency appears to make the radiometer more sensitive
        # system 2
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json) # note that there is a 100ms integration time specification
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 4.976310348842347)
class TestUnbalancedDikeRadiometerSystem(unittest.TestCase):
    """Unit tests for ``UnbalancedDikeRadiometerSystem``.

    Truth values from [1] Section 7-6; includes comparisons against an
    equivalent total-power radiometer built from the same spec.
    """
    @classmethod
    def setUpClass(cls):
        # System 1: predetection section specified stage-by-stage, plus the
        # Dicke switch noise temperature and the reference load temperature.
        cls.udr_sys1_json = '{"tlLoss": 0.5,' \
                            '"tlPhyTemp": 290,' \
                            '"rfAmpGain": 30,' \
                            '"rfAmpInpNoiseTemp": 200,' \
                            '"rfAmpGainVariation": 10,' \
                            '"mixerGain": 23,' \
                            '"mixerInpNoiseTemp": 1200,' \
                            '"mixerGainVariation": 2,' \
                            '"ifAmpGain": 30,' \
                            '"ifAmpInputNoiseTemp": 100,' \
                            '"ifAmpGainVariation": 10,' \
                            '"dickeSwitchOutputNoiseTemperature": 90,' \
                            '"referenceTemperature": 300,' \
                            '"integratorVoltageGain": 1,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"@id": "abc"}'
        # See Section 7-6, end of Pg. 282.
        cls.udr_sys2_json = '{"predetectionGain": 83,' \
                            '"predetectionInpNoiseTemp": 700,' \
                            '"predetectionGainVariation": 1995262.314968883,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"referenceTemperature": 300,' \
                            '"integratorVoltageGain": 1 }'
    def test_from_json(self):
        """ Test typical initialization of the unbalanced Dicke radiometer system.
        """
        # Stage-by-stage spec: aggregate predetection fields must stay None.
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
        self.assertIsInstance(o, UnbalancedDikeRadiometerSystem)
        self.assertEqual(o._id, "abc")
        self.assertEqual(o._type, "UnbalancedDikeRadiometerSystem")
        self.assertEqual(o.tlLoss, 0.5)
        self.assertEqual(o.tlPhyTemp, 290)
        self.assertEqual(o.rfAmpGain, 30)
        self.assertEqual(o.rfAmpInpNoiseTemp, 200)
        self.assertEqual(o.rfAmpGainVariation, 10)
        self.assertEqual(o.mixerGain, 23)
        self.assertEqual(o.mixerInpNoiseTemp, 1200)
        self.assertEqual(o.mixerGainVariation, 2)
        self.assertEqual(o.ifAmpGain, 30)
        self.assertEqual(o.ifAmpInputNoiseTemp, 100)
        self.assertEqual(o.ifAmpGainVariation, 10)
        self.assertEqual(o.dickeSwitchOutputNoiseTemperature, 90)
        self.assertEqual(o.referenceTemperature, 300)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertIsNone(o.predetectionGain)
        self.assertIsNone(o.predetectionInpNoiseTemp)
        self.assertIsNone(o.predetectionGainVariation)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
        # Aggregate spec: per-stage fields must stay None.
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
        self.assertIsInstance(o, UnbalancedDikeRadiometerSystem)
        self.assertIsNone(o._id)
        self.assertEqual(o._type, "UnbalancedDikeRadiometerSystem")
        self.assertIsNone(o.tlLoss)
        self.assertIsNone(o.tlPhyTemp)
        self.assertIsNone(o.rfAmpGain)
        self.assertIsNone(o.rfAmpInpNoiseTemp)
        self.assertIsNone(o.rfAmpGainVariation)
        self.assertIsNone(o.mixerGain)
        self.assertIsNone(o.mixerInpNoiseTemp)
        self.assertIsNone(o.mixerGainVariation)
        self.assertIsNone(o.ifAmpGain)
        self.assertIsNone(o.ifAmpInputNoiseTemp)
        self.assertIsNone(o.ifAmpGainVariation)
        self.assertIsNone(o.dickeSwitchOutputNoiseTemperature)
        self.assertEqual(o.referenceTemperature, 300)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertEqual(o.predetectionGain, 83)
        self.assertEqual(o.predetectionInpNoiseTemp, 700)
        self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
    def test_to_dict(self):
        """Dictionary serialization must round-trip every field.

        NOTE(review): the serialized key 'mixerGain,' carries a trailing comma;
        presumably this mirrors a typo in the package's ``to_dict`` — confirm
        against instrupy before relying on the key name.
        """
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
        self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                       'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
                                       'ifAmpGainVariation': 10.0, 'dickeSwitchOutputNoiseTemperature':90.0, 'referenceTemperature':300.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
                                       'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'UNBALANCED_DICKE'}
                        )
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
        self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
                                       'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
                                       'ifAmpGainVariation': None, 'dickeSwitchOutputNoiseTemperature':None, 'referenceTemperature':300.0, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
                                       'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'UNBALANCED_DICKE'}
                        )
    def test_compute_radiometric_resolution(self):
        """Resolution at/away from the reference temperature, compared against total-power."""
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 300})
        #################################################### System 1 ####################################################
        ############# Test with T_A equal to the reference temperature #############
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.13022711539099396) # note that there is a 1s integration time specification
        #################################################### System 2 ####################################################
        ############# See Section 7-6, end of Pg. 282. for truth values for the below calculation. #############
        ############# Test with T_A equal to the reference temperature
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.2)
        # Compare with total-power radiometer
        # Initialize a total-power radiometer with the same specifications. Note that however the predetection noise temperature shall be lower
        # for a total-power radiometer since it does not include the Dicke switch.
        o = TotalPowerRadiometerSystem.from_json(self.udr_sys2_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 10.000499987500632)
        ############# Test with T_A not equal to the reference temperature
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json) # note that there is a 1s integration time specification
        antenna = Antenna.from_dict({"radiationEfficiency": 1, "phyTemp": 300}) # setting efficiency to 100% to remove effect of antenna physical temperature
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 3.0049625621627984)
        # Compare with total-power radiometer
        # Initialize a total-power radiometer with the same specifications. Note that however the predetection noise temperature shall be lower
        # for a total-power radiometer since it does not include the Dicke switch.
        o = TotalPowerRadiometerSystem.from_json(self.udr_sys2_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 7.000349991250442)
class TestBalancedDikeRadiometerSystem(unittest.TestCase):
    """Unit tests for ``BalancedDikeRadiometerSystem``.

    Same specs as the unbalanced Dicke tests, but without a reference
    temperature (the balanced system nulls it out by design).
    """
    @classmethod
    def setUpClass(cls):
        # System 1: predetection section specified stage-by-stage, plus the
        # Dicke switch noise temperature.
        cls.bdr_sys1_json = '{"tlLoss": 0.5,' \
                            '"tlPhyTemp": 290,' \
                            '"rfAmpGain": 30,' \
                            '"rfAmpInpNoiseTemp": 200,' \
                            '"rfAmpGainVariation": 10,' \
                            '"mixerGain": 23,' \
                            '"mixerInpNoiseTemp": 1200,' \
                            '"mixerGainVariation": 2,' \
                            '"ifAmpGain": 30,' \
                            '"ifAmpInputNoiseTemp": 100,' \
                            '"ifAmpGainVariation": 10,' \
                            '"dickeSwitchOutputNoiseTemperature": 90,' \
                            '"integratorVoltageGain": 1,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"@id": "abc"}'
        # See Section 7-6, end of Pg. 282.
        cls.bdr_sys2_json = '{"predetectionGain": 83,' \
                            '"predetectionInpNoiseTemp": 700,' \
                            '"predetectionGainVariation": 1995262.314968883,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"integratorVoltageGain": 1 }'
    def test_from_json(self):
        """ Test typical initialization of the balanced Dicke radiometer system.
        """
        # Stage-by-stage spec: aggregate predetection fields must stay None.
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
        self.assertIsInstance(o, BalancedDikeRadiometerSystem)
        self.assertEqual(o._id, "abc")
        self.assertEqual(o._type, "BalancedDikeRadiometerSystem")
        self.assertEqual(o.tlLoss, 0.5)
        self.assertEqual(o.tlPhyTemp, 290)
        self.assertEqual(o.rfAmpGain, 30)
        self.assertEqual(o.rfAmpInpNoiseTemp, 200)
        self.assertEqual(o.rfAmpGainVariation, 10)
        self.assertEqual(o.mixerGain, 23)
        self.assertEqual(o.mixerInpNoiseTemp, 1200)
        self.assertEqual(o.mixerGainVariation, 2)
        self.assertEqual(o.ifAmpGain, 30)
        self.assertEqual(o.ifAmpInputNoiseTemp, 100)
        self.assertEqual(o.ifAmpGainVariation, 10)
        self.assertEqual(o.dickeSwitchOutputNoiseTemperature, 90)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertIsNone(o.predetectionGain)
        self.assertIsNone(o.predetectionInpNoiseTemp)
        self.assertIsNone(o.predetectionGainVariation)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
        # Aggregate spec: per-stage fields must stay None.
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json)
        self.assertIsInstance(o, BalancedDikeRadiometerSystem)
        self.assertIsNone(o._id)
        self.assertEqual(o._type, "BalancedDikeRadiometerSystem")
        self.assertIsNone(o.tlLoss)
        self.assertIsNone(o.tlPhyTemp)
        self.assertIsNone(o.rfAmpGain)
        self.assertIsNone(o.rfAmpInpNoiseTemp)
        self.assertIsNone(o.rfAmpGainVariation)
        self.assertIsNone(o.mixerGain)
        self.assertIsNone(o.mixerInpNoiseTemp)
        self.assertIsNone(o.mixerGainVariation)
        self.assertIsNone(o.ifAmpGain)
        self.assertIsNone(o.ifAmpInputNoiseTemp)
        self.assertIsNone(o.ifAmpGainVariation)
        self.assertIsNone(o.dickeSwitchOutputNoiseTemperature)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertEqual(o.predetectionGain, 83)
        self.assertEqual(o.predetectionInpNoiseTemp, 700)
        self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
    def test_to_dict(self):
        """Dictionary serialization must round-trip every field.

        NOTE(review): the serialized key 'mixerGain,' carries a trailing comma;
        presumably this mirrors a typo in the package's ``to_dict`` — confirm
        against instrupy before relying on the key name.
        """
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
        self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                       'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
                                       'ifAmpGainVariation': 10.0, 'dickeSwitchOutputNoiseTemperature':90.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
                                       'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'BALANCED_DICKE'}
                        )
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json)
        self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
                                       'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
                                       'ifAmpGainVariation': None, 'dickeSwitchOutputNoiseTemperature':None, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
                                       'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'BALANCED_DICKE'}
                        )
    def test_compute_radiometric_resolution(self):
        """Resolution for both specs with a lossless antenna (efficiency 1)."""
        antenna = Antenna.from_dict({"radiationEfficiency": 1, "phyTemp": 300}) # setting efficiency to 100% to remove effect of antenna physical temperature
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 0.07022711539099395) # note that there is a 1s integration time specification
        ############# Test with T_A not equal to the reference temperature
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json) # note that there is a 1s integration time specification
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 0.14)
class TestNoiseAddingRadiometerSystem(unittest.TestCase):
    """Tests for the noise-adding radiometer system model.

    Two fixtures are exercised: ``nar_sys1_json`` specifies the system at the
    component level (transmission line, RF amplifier, mixer, IF amplifier),
    while ``nar_sys2_json`` specifies it at the predetection-stage level.
    """
    @classmethod
    def setUpClass(cls):
        # Component-level specification of the receiver chain.
        cls.nar_sys1_json = '{"tlLoss": 0.5,' \
                            '"tlPhyTemp": 290,' \
                            '"rfAmpGain": 30,' \
                            '"rfAmpInpNoiseTemp": 200,' \
                            '"rfAmpGainVariation": 10,' \
                            '"mixerGain": 23,' \
                            '"mixerInpNoiseTemp": 1200,' \
                            '"mixerGainVariation": 2,' \
                            '"ifAmpGain": 30,' \
                            '"ifAmpInputNoiseTemp": 100,' \
                            '"ifAmpGainVariation": 10,' \
                            '"excessNoiseTemperature": 1000,' \
                            '"integratorVoltageGain": 1,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"@id": "abc"}'
        # See Section 7-6, end of Pg. 282.
        # Predetection-stage-level specification (no component parameters).
        cls.nar_sys2_json = '{"predetectionGain": 83,' \
                            '"predetectionInpNoiseTemp": 700,' \
                            '"predetectionGainVariation": 1995262.314968883,' \
                            '"excessNoiseTemperature": 10000,' \
                            '"integrationTime": 1,' \
                            '"bandwidth": 100e6,' \
                            '"integratorVoltageGain": 1 }'
    def test_from_json(self):
        """ Test typical initialization of the noise-adding radiometer system.
        """
        # Component-level fixture: all component attributes set, predetection ones None.
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
        self.assertIsInstance(o, NoiseAddingRadiometerSystem)
        self.assertEqual(o._id, "abc")
        self.assertEqual(o._type, "NoiseAddingRadiometerSystem")
        self.assertEqual(o.tlLoss, 0.5)
        self.assertEqual(o.tlPhyTemp, 290)
        self.assertEqual(o.rfAmpGain, 30)
        self.assertEqual(o.rfAmpInpNoiseTemp, 200)
        self.assertEqual(o.rfAmpGainVariation, 10)
        self.assertEqual(o.mixerGain, 23)
        self.assertEqual(o.mixerInpNoiseTemp, 1200)
        self.assertEqual(o.mixerGainVariation, 2)
        self.assertEqual(o.ifAmpGain, 30)
        self.assertEqual(o.ifAmpInputNoiseTemp, 100)
        self.assertEqual(o.ifAmpGainVariation, 10)
        self.assertEqual(o.excessNoiseTemperature, 1000)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertIsNone(o.predetectionGain)
        self.assertIsNone(o.predetectionInpNoiseTemp)
        self.assertIsNone(o.predetectionGainVariation)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
        # Predetection-level fixture: component attributes stay None.
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
        self.assertIsInstance(o, NoiseAddingRadiometerSystem)
        self.assertIsNone(o._id)
        self.assertEqual(o._type, "NoiseAddingRadiometerSystem")
        self.assertIsNone(o.tlLoss)
        self.assertIsNone(o.tlPhyTemp)
        self.assertIsNone(o.rfAmpGain)
        self.assertIsNone(o.rfAmpInpNoiseTemp)
        self.assertIsNone(o.rfAmpGainVariation)
        self.assertIsNone(o.mixerGain)
        self.assertIsNone(o.mixerInpNoiseTemp)
        self.assertIsNone(o.mixerGainVariation)
        self.assertIsNone(o.ifAmpGain)
        self.assertIsNone(o.ifAmpInputNoiseTemp)
        self.assertIsNone(o.ifAmpGainVariation)
        self.assertEqual(o.excessNoiseTemperature, 10000)
        self.assertEqual(o.integratorVoltageGain, 1)
        self.assertEqual(o.predetectionGain, 83)
        self.assertEqual(o.predetectionInpNoiseTemp, 700)
        self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
        self.assertEqual(o.integrationTime, 1)
        self.assertEqual(o.bandwidth, 100e6)
    def test_to_dict(self):
        """Check the dictionary representation of both system specifications."""
        # NOTE(review): the key 'mixerGain,' (trailing comma) mirrors what the
        # library's to_dict() currently emits — confirm upstream whether the
        # comma is intentional.
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
        self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                       'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
                                       'ifAmpGainVariation': 10.0, 'excessNoiseTemperature':1000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
                                       'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'NOISE_ADDING'}
                        )
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
        self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
                                       'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
                                       'ifAmpGainVariation': None, 'excessNoiseTemperature':10000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
                                       'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'NOISE_ADDING'}
                        )
    def test_compute_radiometric_resolution(self):
        """Validate radiometric resolution for both system specifications."""
        antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 300})
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.23817636968082867) # note that these is 1s integration time specification
        o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.24) # note that these is 1s integration time specification
class TestFixedScan(unittest.TestCase):
    """Unit tests for the FixedScan scan-technique model."""
    def test_from_json(self):
        """ Test typical initialization of the FixedScan object
        """
        for id_json, expected_id in [('{"@id": 123}', 123), ('{"@id": "abc"}', "abc")]:
            scan = FixedScan.from_json(id_json)
            self.assertIsInstance(scan, FixedScan)
            self.assertEqual(scan._id, expected_id)
            self.assertEqual(scan._type, "FixedScan")
        # No id supplied -> _id is None.
        scan = FixedScan.from_json('{}')
        self.assertIsInstance(scan, FixedScan)
        self.assertIsNone(scan._id)
        self.assertEqual(scan._type, "FixedScan")
    def test_to_dict(self):
        """The dictionary representation carries only the id and the type tag."""
        for scan_json, expected_id in [('{"@id": 123}', 123), ('{"@id": "abc"}', "abc"), ('{}', None)]:
            scan = FixedScan.from_json(scan_json)
            self.assertEqual(scan.to_dict(), {'@id': expected_id, '@type': 'FIXED'})
    def test_compute_instru_field_of_view(self):
        """A fixed scan leaves the antenna FOV geometry and orientation unchanged."""
        scan = FixedScan.from_json('{"@id": "abc"}')
        orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 10})
        antenna_specs = ({"shape": "CIRCULAR", "diameter": 30},
                         {"shape": "RECTANGULAR", "angleHeight": 10, "angleWidth": 20})
        for spec in antenna_specs:
            antenna_geom = SphericalGeometry.from_dict(spec)
            computed = scan.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_geom, instru_orientation=orientation)
            # Instrument FOV equals the antenna FOV for a fixed (non-scanning) instrument.
            self.assertEqual(computed, ViewGeometry(orien=orientation, sph_geom=antenna_geom))
    def test_compute_dwell_time_per_ground_pixel(self):
        """Dwell time is the along-track pixel-crossing time of the sub-satellite point."""
        scan = FixedScan.from_json('{"@id": 123}')
        dwell = scan.compute_dwell_time_per_ground_pixel(res_AT_m=1000, sat_speed_kmps=7.8)
        self.assertAlmostEqual(dwell, 0.1282051282051282)
    def test_compute_swath_width(self):
        """Swath width checked against the small-angle approximation (FOV arc x range)."""
        scan = FixedScan.from_json('{"@id": 123}')
        antenna_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
        fov_rad = np.deg2rad(30)
        # using approximate swath formula as the truth data
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_geom), fov_rad*500, delta=25)
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=700, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_geom), fov_rad*700, delta=25)
        # Off-nadir look: the range to the target grows as altitude / cos(look angle).
        slant_range_km = 500/np.cos(np.deg2rad(15))
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=500, instru_look_angle_deg=15, antenna_fov_sph_geom=antenna_geom), fov_rad*slant_range_km, delta=25)
class TestCrossTrackScan(unittest.TestCase):
    """Unit tests for the CrossTrackScan scan-technique model."""
    def test_from_json(self):
        """ Test typical initialization of the CrossTrackScan object
        """
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
        self.assertIsInstance(scan, CrossTrackScan)
        self.assertEqual(scan._id, 123)
        self.assertEqual(scan._type, "CrossTrackScan")
        self.assertEqual(scan.scanWidth, 120)
        self.assertEqual(scan.interScanOverheadTime, 1e-3)
    def test_to_dict(self):
        """The dictionary representation carries the scan parameters plus id/type tags."""
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
        expected = {'@id': 123, '@type': 'CROSS_TRACK', "scanWidth": 120.0, "interScanOverheadTime": 0.001}
        self.assertEqual(scan.to_dict(), expected)
    def test_compute_instru_field_of_view(self):
        """Cross-track scanning widens the antenna FOV by the scan width."""
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
        orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 10})
        cases = [
            # (antenna FOV spec, expected instrument FOV spec)
            ({"shape": "CIRCULAR", "diameter": 30},
             {"shape": "RECTANGULAR", "angleHeight": 30, "angleWidth": 150}),
            ({"shape": "RECTANGULAR", "angleHeight": 15, "angleWidth": 60},
             {"shape": "RECTANGULAR", "angleHeight": 15, "angleWidth": 180}),
        ]
        for antenna_spec, instru_spec in cases:
            antenna_geom = SphericalGeometry.from_dict(antenna_spec)
            expected = ViewGeometry(orien=orientation, sph_geom=SphericalGeometry.from_dict(instru_spec))
            computed = scan.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_geom, instru_orientation=orientation)
            self.assertEqual(computed, expected)
    def test_compute_dwell_time_per_ground_pixel(self):
        """Dwell time scales with along-track resolution and cross-track iFOV; overhead reduces it."""
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
        base_dwell = 0.021334188034188035
        self.assertAlmostEqual(scan.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), base_dwell)
        # dwell time should be around doubled in case of double along-track pixel resolution
        self.assertAlmostEqual(scan.compute_dwell_time_per_ground_pixel(res_AT_m=10000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 2*base_dwell, places=4)
        # dwell time should be around doubled in case of cross-track iFOV
        self.assertAlmostEqual(scan.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=8), 2*base_dwell, places=4)
        # A larger inter-scan overhead eats into the available dwell time.
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}')
        self.assertAlmostEqual(scan.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021034188034188037)
    def test_compute_swath_width(self):
        """Swath width checked against the small-angle approximation (scan arc x altitude)."""
        antenna_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 1})
        # using approximate swath formula as the truth data
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}')
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_geom), np.deg2rad(20)*500, delta=25)
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=700, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_geom), np.deg2rad(20)*700, delta=25)
        scan = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}')
        self.assertAlmostEqual(scan.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_geom), np.deg2rad(60)*500, delta=75)
class TestConicalScan(unittest.TestCase):
    """Unit tests for the ConicalScan scan-technique model."""
    def test_from_json(self):
        """ Test typical initialization of the ConicalScan object
        """
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
        self.assertIsInstance(o, ConicalScan)
        self.assertEqual(o._id, "abc")
        self.assertEqual(o._type, "ConicalScan")
        self.assertEqual(o.offNadirAngle, 30)
        self.assertEqual(o.clockAngleRange, 60)
        self.assertEqual(o.interScanOverheadTime, 1e-3)
    def test_to_dict(self):
        """The dictionary representation carries the scan parameters plus id/type tags."""
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
        self.assertEqual(o.to_dict(), {'@id': "abc", '@type': 'CONICAL', "offNadirAngle": 30.0, "clockAngleRange": 60.0, "interScanOverheadTime": 0.001})
    def test_compute_instru_field_of_view(self):
        """Instrument FOV computation is not implemented for conical scanning."""
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
        instru_orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK","sideLookAngle":10})
        antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
        with self.assertRaises(NotImplementedError):
            o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation)
    def test_compute_dwell_time_per_ground_pixel(self):
        """Dwell times for a clock-angle range match a cross-track scan of equal scan width."""
        # results are the same as that of the CrossTrackScan
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}')
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021334188034188035)
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=10000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 2*0.021334188034188035, places=4) # dwell time should be around doubled in case of double along-track pixel resolution
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=8), 2*0.021334188034188035, places=4) # dwell time should be around doubled in case of cross-track iFOV
        # BUGFIX: this case previously instantiated a CrossTrackScan, so the ConicalScan
        # overhead path was never exercised. Use the equivalent ConicalScan fixture
        # (clockAngleRange behaves like scanWidth, per the matching results above).
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 10e-3}')
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021034188034188037)
    def test_compute_swath_width(self):
        """Swath width for the conical geometry; only a nadir-aligned look angle is supported."""
        o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}')
        # using approximate swath formula as the truth data
        self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=0), 612.7169711748869)
        self.assertAlmostEqual(o.compute_swath_width(alt_km=700, instru_look_angle_deg=0), 862.5336432436297)
        with self.assertRaises(Exception):
            o.compute_swath_width(alt_km=500, instru_look_angle_deg=30) # instrument look angle is not 0 degrees
class TestRadiometerModel(unittest.TestCase):
    """End-to-end tests of the RadiometerModel: construction from JSON,
    dictionary round-trip, and data-metrics calculation.

    Four fixtures cover the four radiometer system types (TOTAL_POWER,
    UNBALANCED_DICKE, BALANCED_DICKE, NOISE_ADDING) combined with different
    scan techniques (FIXED, CROSS_TRACK, CONICAL).
    """
    @classmethod
    def setUpClass(cls):
        # Total-power system, fixed scan, circular antenna.
        cls.radio1_json = '{"@type": "Radiometer", "name": "ray1", "mass": 50, "volume": 3, "power": 10,' \
                          ' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},' \
                          ' "bitsPerPixel": 16,' \
                          ' "operatingFrequency": 1.25e9,' \
                          ' "antenna": {"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM",' \
                          ' "radiationEfficiency": 0.8, "phyTemp": 300},' \
                          ' "system": {"tlLoss": 0.5, "tlPhyTemp": 290, ' \
                          ' "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200, ' \
                          ' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200,' \
                          ' "mixerGainVariation": 2, "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100,' \
                          ' "ifAmpGainVariation": 10, "integratorVoltageGain": 1, "integrationTime": 100e-3,' \
                          ' "bandwidth": 10e6, "@type": "TOTAL_POWER"},' \
                          ' "scan": {"@type": "FIXED"},' \
                          ' "targetBrightnessTemp": 345' \
                          '}'
        # Unbalanced Dicke system, cross-track scan, rectangular antenna.
        cls.radio2_json = '{"@type": "Radiometer", "name": "ray2", "mass": 50, ' \
                          ' "operatingFrequency": 1.25e9,' \
                          ' "antenna": {"shape": "RECTANGULAR", "height": 1, "width": 1, "apertureExcitationProfile": "UNIFORM",' \
                          ' "radiationEfficiency": 0.75, "phyTemp": 300},' \
                          ' "system": { "predetectionGain": 83, "predetectionInpNoiseTemp": 700, ' \
                          ' "predetectionGainVariation": 1995262.314968883, "integrationTime": 1, ' \
                          ' "bandwidth": 100e6, "referenceTemperature": 300, "integratorVoltageGain": 1,' \
                          ' "@type": "UNBALANCED_DICKE"},' \
                          ' "scan": {"@type": "CROSS_TRACK", "scanWidth": 120, "interScanOverheadTime": 1e-3},' \
                          ' "targetBrightnessTemp": 301' \
                          '}'
        # Balanced Dicke system, conical scan, large circular antenna.
        cls.radio3_json = '{"@type": "Radiometer", "@id": "ray3",' \
                          ' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},' \
                          ' "bitsPerPixel": 16,' \
                          ' "operatingFrequency": 1.25e9,' \
                          ' "antenna": {"shape": "CIRCULAR", "diameter": 3.5, "apertureExcitationProfile": "UNIFORM",' \
                          ' "radiationEfficiency": 1, "phyTemp": 300},' \
                          ' "system": { "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,' \
                          ' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,' \
                          ' "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "dickeSwitchOutputNoiseTemperature": 90,' \
                          ' "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "BALANCED_DICKE"},' \
                          ' "scan": {"@type": "CONICAL", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3},' \
                          ' "targetBrightnessTemp": 295' \
                          '}'
        # Noise-adding system, fixed scan, side-looking orientation.
        cls.radio4_json = '{"@type": "Radiometer", "@id": "ray4",' \
                          ' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-30},' \
                          ' "operatingFrequency": 1.25e9,' \
                          ' "antenna": {"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM",' \
                          ' "radiationEfficiency": 1, "phyTemp": 300},' \
                          ' "system": { "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,' \
                          ' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,' \
                          ' "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "excessNoiseTemperature": 1000,' \
                          ' "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "NOISE_ADDING"},' \
                          ' "scan": {"@type": "FIXED"},' \
                          ' "targetBrightnessTemp": 295' \
                          '}'
    def test_from_json(self):
        """ Test typical initializations of the RadiometerModel object
        """
        # radio1: total-power system with fixed scan; id auto-generated.
        o = RadiometerModel.from_json(self.radio1_json)
        self.assertIsInstance(o, RadiometerModel)
        self.assertIsNotNone(o._id)
        self.assertEqual(o.name, "ray1")
        self.assertEqual(o.mass, 50)
        self.assertEqual(o.volume, 3)
        self.assertEqual(o.power, 10)
        self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
        self.assertEqual(o.bitsPerPixel, 16)
        self.assertEqual(o.operatingFrequency, 1.25e9)
        self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 0.8, "phyTemp": 300}))
        self.assertEqual(o.system, TotalPowerRadiometerSystem.from_dict({"tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200, "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
                                                                         "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10,
                                                                         "integratorVoltageGain": 1, "integrationTime": 100e-3, "bandwidth": 10e6}))
        self.assertEqual(o.scan, FixedScan.from_dict({}))
        self.assertEqual(o.targetBrightnessTemp, 345)
        # radio2: unbalanced Dicke system with cross-track scan; optional fields absent.
        o = RadiometerModel.from_json(self.radio2_json)
        self.assertIsInstance(o, RadiometerModel)
        self.assertIsNotNone(o._id)
        self.assertEqual(o.name, "ray2")
        self.assertEqual(o.mass, 50)
        self.assertIsNone(o.volume)
        self.assertIsNone(o.power)
        self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
        self.assertIsNone(o.bitsPerPixel)
        self.assertEqual(o.operatingFrequency, 1.25e9)
        self.assertEqual(o.antenna, Antenna.from_dict({"shape": "RECTANGULAR", "height": 1, "width":1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 0.75, "phyTemp": 300}))
        self.assertEqual(o.system, UnbalancedDikeRadiometerSystem.from_dict({ "predetectionGain": 83, "predetectionInpNoiseTemp": 700,
                                                                              "predetectionGainVariation": 1995262.314968883, "integrationTime": 1,
                                                                              "bandwidth": 100e6, "referenceTemperature": 300, "integratorVoltageGain": 1,
                                                                              "@type": "UNBALANCED_DICKE"}))
        self.assertEqual(o.scan, CrossTrackScan.from_dict({"scanWidth": 120, "interScanOverheadTime": 1e-3}))
        self.assertEqual(o.targetBrightnessTemp, 301)
        # radio3: balanced Dicke system with conical scan; explicit id "ray3".
        o = RadiometerModel.from_json(self.radio3_json)
        self.assertIsInstance(o, RadiometerModel)
        self.assertEqual(o._id, "ray3")
        self.assertIsNone(o.name)
        self.assertIsNone(o.mass)
        self.assertIsNone(o.volume)
        self.assertIsNone(o.power)
        self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
        self.assertEqual(o.bitsPerPixel, 16)
        self.assertEqual(o.operatingFrequency, 1.25e9)
        self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 3.5, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 1, "phyTemp": 300}))
        self.assertEqual(o.system, BalancedDikeRadiometerSystem.from_dict({ "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,
                                                                            "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
                                                                            "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "dickeSwitchOutputNoiseTemperature": 90,
                                                                            "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "BALANCED_DICKE"}))
        self.assertEqual(o.scan, ConicalScan.from_dict({"offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}))
        self.assertEqual(o.targetBrightnessTemp, 295)
        # radio4: noise-adding system with fixed scan and side-looking orientation.
        o = RadiometerModel.from_json(self.radio4_json)
        self.assertIsInstance(o, RadiometerModel)
        self.assertEqual(o._id, "ray4")
        self.assertIsNone(o.name)
        self.assertIsNone(o.mass)
        self.assertIsNone(o.volume)
        self.assertIsNone(o.power)
        self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-30}))
        self.assertIsNone(o.bitsPerPixel)
        self.assertEqual(o.operatingFrequency, 1.25e9)
        self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 1, "phyTemp": 300}))
        self.assertEqual(o.system, NoiseAddingRadiometerSystem.from_dict({ "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,
                                                                           "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
                                                                           "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "excessNoiseTemperature": 1000,
                                                                           "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "NOISE_ADDING"}))
        self.assertEqual(o.scan, FixedScan.from_dict({}))
        self.assertEqual(o.targetBrightnessTemp, 295)
    def test_to_dict(self):
        """Check the dictionary representation of all four radiometer fixtures.

        NOTE(review): the key 'mixerGain,' (trailing comma) in the expected
        'system' sub-dicts mirrors what the library's to_dict() currently
        emits — confirm upstream whether the comma is intentional.
        """
        o = RadiometerModel.from_json(self.radio1_json)
        _id = o._id # id is generated randomly
        self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': 'ray1', 'mass': 50.0, 'volume': 3.0, 'power': 10.0,
                                       'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None}, 'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
                                       'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
                                       'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': 16,
                                       'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 1.0, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 0.8, 'phyTemp': 300.0, '@id': None},
                                       'operatingFrequency': 1250000000.0,
                                       'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                                  'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
                                                  'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
                                                  'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
                                                  'predetectionGainVariation': None, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': None, '@type': 'TOTAL_POWER'},
                                       'scan': {'@id': None, '@type': 'FIXED'}, 'targetBrightnessTemp': 345.0, '@id': _id})
        o = RadiometerModel.from_json(self.radio2_json)
        _id = o._id # id is generated randomly
        self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': 'ray2', 'mass': 50.0, 'volume': None, 'power': None,
                                       'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
                                       'fieldOfViewGeometry': {'shape': 'RECTANGULAR', 'angleHeight': 12.092497171570107, 'angleWidth': 12.092497171570086, '@id': None},
                                       'sceneFieldOfViewGeometry': {'shape': 'RECTANGULAR', 'angleHeight': 12.092497171570107, 'angleWidth': 12.092497171570086, '@id': None},
                                       'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': None,
                                       'antenna': {'shape': 'RECTANGULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': None, 'height': 1.0, 'width': 1.0, 'apertureEfficiency': None, 'radiationEfficiency': 0.75, 'phyTemp': 300.0, '@id': None},
                                       'operatingFrequency': 1250000000.0,
                                       'system': {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
                                                  'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None,
                                                  'ifAmpGain': None, 'ifAmpInputNoiseTemp': None, 'ifAmpGainVariation': None,
                                                  'dickeSwitchOutputNoiseTemperature': None, 'referenceTemperature': 300.0,
                                                  'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0, 'predetectionGainVariation': 1995262.314968883,
                                                  'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'UNBALANCED_DICKE'},
                                       'scan': {'scanWidth': 120.0, 'interScanOverheadTime': 0.001, '@id': None, '@type': 'CROSS_TRACK'},
                                       'targetBrightnessTemp': 301.0, '@id': _id})
        o = RadiometerModel.from_json(self.radio3_json)
        self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': None, 'mass': None, 'volume': None, 'power': None,
                                       'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
                                       'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 3.9261354453149697, '@id': None},
                                       'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 3.9261354453149697, '@id': None},
                                       'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': 16,
                                       'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 3.5, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 1.0, 'phyTemp': 300.0, '@id': None},
                                       'operatingFrequency': 1250000000.0,
                                       'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                                  'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
                                                  'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
                                                  'dickeSwitchOutputNoiseTemperature': 90.0, 'integratorVoltageGain': 1.0,
                                                  'predetectionGain': None, 'predetectionInpNoiseTemp': None, 'predetectionGainVariation': None,
                                                  'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'BALANCED_DICKE'},
                                       'scan': {'offNadirAngle': 30.0, 'clockAngleRange': 60.0, 'interScanOverheadTime': 0.001, '@id': None, '@type': 'CONICAL'},
                                       'targetBrightnessTemp': 295.0, '@id': 'ray3'})
        o = RadiometerModel.from_json(self.radio4_json)
        self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': None, 'mass': None, 'volume': None, 'power': None,
                                       'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 330.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
                                       'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
                                       'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
                                       'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': None,
                                       'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 1.0, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 1.0, 'phyTemp': 300.0, '@id': None},
                                       'operatingFrequency': 1250000000.0,
                                       'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
                                                  'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
                                                  'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
                                                  'excessNoiseTemperature': 1000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None, 'predetectionGainVariation': None,
                                                  'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'NOISE_ADDING'},
                                       'scan': {'@id': None, '@type': 'FIXED'}, 'targetBrightnessTemp': 295.0, '@id': 'ray4'})
    def test_calc_data_metrics_1(self):
        """ ``instru_look_angle_from_target_inc_angle`` flag is set to False."""
        # NOTE(review): expected dicts contain 'beam efficiency': np.nan — dict
        # equality can only hold if the library returns the same np.nan object
        # (dict comparison short-circuits on identity; nan != nan otherwise).
        # Confirm the library propagates np.nan itself.
        epoch_JDUT1 = 2458543.06088 # 2019 Feb 28 13:27:40 is time at which the ECEF and ECI frames approximately align, hence ECEF to ECI rotation is identity. See <https://www.celnav.de/longterm.htm> online calculator of GMST.
        SpacecraftOrbitState = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 6878.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.6126, 'vz [km/s]': 0} # altitude 500 km
        TargetCoords = {'lat [deg]': 0, 'lon [deg]': 0}
        o = RadiometerModel.from_json(self.radio1_json)
        data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
        self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 119917.0, 'ground pixel cross-track resolution [m]': 119917.02,
                                        'swath-width [m]': 120565.56, 'sensitivity [K]': 17.94, 'incidence angle [deg]': 0.03, 'beam efficiency': np.nan})
        o = RadiometerModel.from_json(self.radio2_json)
        TargetCoords = {'lat [deg]': 5, 'lon [deg]': 0} # target pixel somewhere on the cross-track direction.
        data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
        self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 161269.89, 'ground pixel cross-track resolution [m]': 260072.11, 'swath-width [m]': 3159185.93,
                                        'sensitivity [K]': 0.2, 'incidence angle [deg]': 51.68, 'beam efficiency': 0.24})
        o = RadiometerModel.from_json(self.radio3_json)
        TargetCoords = {'lat [deg]': 0, 'lon [deg]': 2.62}
        data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
        # calculated pixels resolutions are not accurate since the imaged pixel is not at side-looking geometry
        self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 40047.33, 'ground pixel cross-track resolution [m]': 47491.27,
                                        'swath-width [m]': 306358.49, 'sensitivity [K]': 0.21, 'incidence angle [deg]': 32.51, 'beam efficiency': np.nan})
        o = RadiometerModel.from_json(self.radio4_json)
        TargetCoords = {'lat [deg]': -2.62, 'lon [deg]': 0}
        data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
        self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 140198.56, 'ground pixel cross-track resolution [m]': 166301.75,
                                        'swath-width [m]': 168745.48, 'sensitivity [K]': 0.23, 'incidence angle [deg]': 32.54, 'beam efficiency': np.nan})
    def test_calc_data_metrics_2(self):
        """ ``instru_look_angle_from_target_inc_angle`` flag is set to True."""
        epoch_JDUT1 = 2458543.06088 # 2019 Feb 28 13:27:40 is time at which the ECEF and ECI frames approximately align, hence ECEF to ECI rotation is identity. See <https://www.celnav.de/longterm.htm> online calculator of GMST.
        SpacecraftOrbitState = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 6878.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.6126, 'vz [km/s]': 0} # altitude 500 km
        TargetCoords = {'lat [deg]': 0, 'lon [deg]': 0.5}
        o = RadiometerModel.from_json(self.radio1_json)
        data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=True)
        self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 120708.29, 'ground pixel cross-track resolution [m]': 121567.92,
                                        'swath-width [m]': 122255.0, 'sensitivity [K]': 17.94, 'incidence angle [deg]': 6.82, 'beam efficiency': np.nan})
|
[
"instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_dict",
"instrupy.util.Antenna.from_dict",
"instrupy.radiometer_model.FixedScan.from_dict",
"instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time",
"instrupy.radiometer_model.PredetectionSectionParams",
"instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_system_params",
"instrupy.radiometer_model.ConicalScan.from_dict",
"instrupy.util.Orientation.from_dict",
"instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json",
"instrupy.radiometer_model.CrossTrackScan.from_dict",
"instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json",
"instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_dict",
"instrupy.util.ViewGeometry",
"instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_predetection_sec_params",
"instrupy.radiometer_model.TotalPowerRadiometerSystem.from_dict",
"instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json",
"instrupy.radiometer_model.ConicalScan.from_json",
"instrupy.radiometer_model.CrossTrackScan.from_json",
"instrupy.radiometer_model.FixedScan.from_json",
"instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json",
"numpy.deg2rad",
"instrupy.util.SphericalGeometry.from_dict",
"instrupy.radiometer_model.RadiometerModel.from_json",
"instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_dict"
] |
[((2357, 2413), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys1_json'], {}), '(self.tpr_sys1_json)\n', (2393, 2413), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((3412, 3468), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys2_json'], {}), '(self.tpr_sys2_json)\n', (3448, 3468), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((4468, 4524), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys1_json'], {}), '(self.tpr_sys1_json)\n', (4504, 4524), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((5200, 5256), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys2_json'], {}), '(self.tpr_sys2_json)\n', (5236, 5256), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6406, 6725), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_predetection_sec_params', 'TotalPowerRadiometerSystem.compute_predetection_sec_params', ([], {'predetectionBandwidth': '(10000000.0)', 'tlLoss': '(0.5)', 
'tlPhyTemp': '(290)', 'rfAmpGain': '(30)', 'mixerGain': '(23)', 'ifAmpGain': '(30)', 'rfAmpGainVariation': '(10)', 'mixerGainVariation': '(2)', 'ifAmpGainVariation': '(10)', 'rfAmpInpNoiseTemp': '(200)', 'mixerInpNoiseTemp': '(1200)', 'ifAmpInputNoiseTemp': '(100)'}), '(\n predetectionBandwidth=10000000.0, tlLoss=0.5, tlPhyTemp=290, rfAmpGain=\n 30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10,\n mixerGainVariation=2, ifAmpGainVariation=10, rfAmpInpNoiseTemp=200,\n mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)\n', (6464, 6725), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((7144, 7332), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_predetection_sec_params', 'TotalPowerRadiometerSystem.compute_predetection_sec_params', ([], {'predetectionBandwidth': '(15000000.0)', 'predetectionGain': '(90)', 'predetectionGainVariation': '(10000000)', 'predetectionInpNoiseTemp': '(300)'}), '(\n predetectionBandwidth=15000000.0, predetectionGain=90,\n predetectionGainVariation=10000000, predetectionInpNoiseTemp=300)\n', (7202, 7332), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((7659, 7975), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_predetection_sec_params', 'TotalPowerRadiometerSystem.compute_predetection_sec_params', ([], {'predetectionBandwidth': '(10000000.0)', 'tlLoss': '(0.5)', 'tlPhyTemp': '(290)', 'rfAmpGain': '(1)', 'mixerGain': '(23)', 'ifAmpGain': '(30)', 'rfAmpGainVariation': '(0)', 'mixerGainVariation': '(2)', 'ifAmpGainVariation': '(10)', 'rfAmpInpNoiseTemp': '(0)', 'mixerInpNoiseTemp': '(1200)', 'ifAmpInputNoiseTemp': 
'(100)'}), '(\n predetectionBandwidth=10000000.0, tlLoss=0.5, tlPhyTemp=290, rfAmpGain=\n 1, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=0, mixerGainVariation\n =2, ifAmpGainVariation=10, rfAmpInpNoiseTemp=0, mixerInpNoiseTemp=1200,\n ifAmpInputNoiseTemp=100)\n', (7717, 7975), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((8474, 8537), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 270}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 270})\n", (8491, 8537), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((8562, 8881), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_predetection_sec_params', 'TotalPowerRadiometerSystem.compute_predetection_sec_params', ([], {'predetectionBandwidth': '(10000000.0)', 'tlLoss': '(0.5)', 'tlPhyTemp': '(290)', 'rfAmpGain': '(30)', 'mixerGain': '(23)', 'ifAmpGain': '(30)', 'rfAmpGainVariation': '(10)', 'mixerGainVariation': '(2)', 'ifAmpGainVariation': '(10)', 'rfAmpInpNoiseTemp': '(200)', 'mixerInpNoiseTemp': '(1200)', 'ifAmpInputNoiseTemp': '(100)'}), '(\n predetectionBandwidth=10000000.0, tlLoss=0.5, tlPhyTemp=290, rfAmpGain=\n 30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10,\n mixerGainVariation=2, ifAmpGainVariation=10, rfAmpInpNoiseTemp=200,\n mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)\n', (8620, 8881), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((8993, 9091), 'instrupy.radiometer_model.PredetectionSectionParams', 'PredetectionSectionParams', ([], {'G': 'G', 'G_p': '(G + 
0.01 * G)', 'G_m': '(G - 0.01 * G)', 'T_REC_q': '(260)', 'B': '(10000000.0)'}), '(G=G, G_p=G + 0.01 * G, G_m=G - 0.01 * G, T_REC_q=\n 260, B=10000000.0)\n', (9018, 9091), False, 'from instrupy.radiometer_model import PredetectionSectionParams, SystemParams\n'), ((9085, 9200), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_system_params', 'TotalPowerRadiometerSystem.compute_system_params', (['antenna', 'pd_sec_params'], {'integratorVoltageGain': '(1000)', 'T_A_q': '(290)'}), '(antenna, pd_sec_params,\n integratorVoltageGain=1000, T_A_q=290)\n', (9133, 9200), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((9486, 9549), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 270}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 270})\n", (9503, 9549), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((9563, 9619), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys1_json'], {}), '(self.tpr_sys1_json)\n', (9599, 9619), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((10202, 10265), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 350}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 350})\n", (10219, 10265), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((10417, 10480), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', 
(["{'radiationEfficiency': 0.5, 'phyTemp': 270}"], {}), "({'radiationEfficiency': 0.5, 'phyTemp': 270})\n", (10434, 10480), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((10723, 10779), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.tpr_sys2_json'], {}), '(self.tpr_sys2_json)\n', (10759, 10779), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((10856, 10919), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 270}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 270})\n", (10873, 10919), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((12737, 12797), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys1_json'], {}), '(self.udr_sys1_json)\n', (12777, 12797), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((13922, 13982), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (13962, 13982), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((15113, 15173), 
'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys1_json'], {}), '(self.udr_sys1_json)\n', (15153, 15173), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((15928, 15988), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (15968, 15988), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((16815, 16878), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 300})\n", (16832, 16878), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((17101, 17161), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys1_json'], {}), '(self.udr_sys1_json)\n', (17141, 17161), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((17670, 17730), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (17710, 17730), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, 
BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((18124, 18180), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (18160, 18180), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((18397, 18457), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_json', 'UnbalancedDikeRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (18437, 18457), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((18531, 18592), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 1, 'phyTemp': 300})\n", (18548, 18592), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((19076, 19132), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_json', 'TotalPowerRadiometerSystem.from_json', (['self.udr_sys2_json'], {}), '(self.udr_sys2_json)\n', (19112, 19132), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((20814, 20872), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys1_json'], {}), '(self.bdr_sys1_json)\n', (20852, 20872), False, 'from 
instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((21939, 21997), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (21977, 21997), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((23070, 23128), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys1_json'], {}), '(self.bdr_sys1_json)\n', (23108, 23128), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((23851, 23909), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (23889, 23909), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((24704, 24765), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 1, 'phyTemp': 300})\n", (24721, 24765), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((24857, 24915), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 
'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys1_json'], {}), '(self.bdr_sys1_json)\n', (24895, 24915), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((25189, 25247), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (25227, 25247), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((27032, 27089), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (27069, 27089), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((28145, 28202), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (28182, 28202), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((29268, 29325), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (29305, 29325), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, 
UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((30037, 30094), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (30074, 30094), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((30875, 30938), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 300})\n", (30892, 30938), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((30952, 31009), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (30989, 31009), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31201, 31258), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (31238, 31258), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31588, 31623), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (31607, 31623), False, 'from instrupy.radiometer_model 
import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31767, 31804), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (31786, 31804), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31950, 31975), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{}"""'], {}), "('{}')\n", (31969, 31975), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32147, 32182), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (32166, 32182), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32266, 32303), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (32285, 32303), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32389, 32414), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{}"""'], {}), "('{}')\n", (32408, 32414), False, 'from instrupy.radiometer_model 
import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32552, 32589), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (32571, 32589), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32619, 32729), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (32640, 32729), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((32756, 32822), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (32783, 32822), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33115, 33209), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 10, 'angleWidth': 20}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 10,\n 'angleWidth': 20})\n", (33142, 33209), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33535, 33570), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (33554, 33570), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, 
UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33754, 33789), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (33773, 33789), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33821, 33887), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (33848, 33887), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((34674, 34768), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (34698, 34768), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35050, 35144), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (35074, 35144), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35335, 35429), 'instrupy.radiometer_model.CrossTrackScan.from_json', 
'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (35359, 35429), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35455, 35565), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (35476, 35565), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35592, 35658), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (35619, 35658), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35689, 35784), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 30, 'angleWidth': 150}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 30,\n 'angleWidth': 150})\n", (35716, 35784), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36022, 36116), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 15, 'angleWidth': 60}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 15,\n 'angleWidth': 60})\n", (36049, 36116), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36143, 36238), 
'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 15, 'angleWidth': 180}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 15,\n 'angleWidth': 180})\n", (36170, 36238), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36513, 36607), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (36537, 36607), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37227, 37322), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}\')\n', (37251, 37322), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37515, 37608), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}\')\n', (37539, 37608), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37636, 37701), 'instrupy.util.SphericalGeometry.from_dict', 
'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1})\n", (37663, 37701), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((38140, 38233), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}\')\n', (38164, 38233), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((38562, 38686), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (38583, 38686), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39009, 39133), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (39030, 39133), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39345, 39469), 'instrupy.radiometer_model.ConicalScan.from_json', 
'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (39366, 39469), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39497, 39607), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (39518, 39607), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((39634, 39700), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (39661, 39700), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((40010, 40135), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}\'\n )\n', (40031, 40135), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((40742, 40820), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"scanWidth": 120, "interScanOverheadTime": 10e-3}"""'], {}), 
'(\'{"scanWidth": 120, "interScanOverheadTime": 10e-3}\')\n', (40766, 40820), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((41018, 41143), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}\'\n )\n', (41039, 41143), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((46556, 46599), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (46581, 46599), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((47916, 47959), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (47941, 47959), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49338, 49381), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio3_json'], {}), '(self.radio3_json)\n', (49363, 49381), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, 
TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((50915, 50958), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', (50940, 50958), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((52445, 52488), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (52470, 52488), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((54435, 54478), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (54460, 54478), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((56790, 56833), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio3_json'], {}), '(self.radio3_json)\n', (56815, 56833), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((58848, 58891), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', 
(58873, 58891), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((61463, 61506), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (61488, 61506), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((61987, 62030), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (62012, 62030), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((62621, 62664), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio3_json'], {}), '(self.radio3_json)\n', (62646, 62664), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((63329, 63372), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', (63354, 63372), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((64525, 64568), 
'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (64550, 64568), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6013, 6103), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': '(0.5)'}), '(td=1.5,\n integration_time_spec=0.5)\n', (6064, 6103), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6131, 6219), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': '(2)'}), '(td=1.5,\n integration_time_spec=2)\n', (6182, 6219), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6247, 6338), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': 'None'}), '(td=1.5,\n integration_time_spec=None)\n', (6298, 6338), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33013, 33081), 'instrupy.util.ViewGeometry', 'ViewGeometry', 
([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (33025, 33081), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33396, 33464), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (33408, 33464), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35920, 35988), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (35932, 35988), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36374, 36442), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (36386, 36442), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((46880, 46977), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (46901, 46977), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((47111, 47258), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1, 'apertureExcitationProfile': 'UNIFORM',\n 'radiationEfficiency': 0.8, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.8,\n 
'phyTemp': 300})\n", (47128, 47258), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((47287, 47665), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_dict', 'TotalPowerRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'integratorVoltageGain': 1, 'integrationTime':\n 0.1, 'bandwidth': 10000000.0}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'integratorVoltageGain': 1, 'integrationTime': 0.1, 'bandwidth': \n 10000000.0})\n", (47323, 47665), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((47824, 47847), 'instrupy.radiometer_model.FixedScan.from_dict', 'FixedScan.from_dict', (['{}'], {}), '({})\n', (47843, 47847), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((48235, 48332), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (48256, 48332), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((48463, 
48624), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'RECTANGULAR', 'height': 1, 'width': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.75,\n 'phyTemp': 300}"], {}), "({'shape': 'RECTANGULAR', 'height': 1, 'width': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.75,\n 'phyTemp': 300})\n", (48480, 48624), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((48652, 48950), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_dict', 'UnbalancedDikeRadiometerSystem.from_dict', (["{'predetectionGain': 83, 'predetectionInpNoiseTemp': 700,\n 'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1,\n 'bandwidth': 100000000.0, 'referenceTemperature': 300,\n 'integratorVoltageGain': 1, '@type': 'UNBALANCED_DICKE'}"], {}), "({'predetectionGain': 83,\n 'predetectionInpNoiseTemp': 700, 'predetectionGainVariation': \n 1995262.314968883, 'integrationTime': 1, 'bandwidth': 100000000.0,\n 'referenceTemperature': 300, 'integratorVoltageGain': 1, '@type':\n 'UNBALANCED_DICKE'})\n", (48692, 48950), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49185, 49261), 'instrupy.radiometer_model.CrossTrackScan.from_dict', 'CrossTrackScan.from_dict', (["{'scanWidth': 120, 'interScanOverheadTime': 0.001}"], {}), "({'scanWidth': 120, 'interScanOverheadTime': 0.001})\n", (49209, 49261), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49651, 49748), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 
'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (49672, 49748), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((49882, 50029), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 3.5, 'apertureExcitationProfile':\n 'UNIFORM', 'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 3.5,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 1,\n 'phyTemp': 300})\n", (49899, 50029), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((50058, 50504), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_dict', 'BalancedDikeRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'dickeSwitchOutputNoiseTemperature': 90,\n 'integratorVoltageGain': 1, 'integrationTime': 1, 'bandwidth': \n 100000000.0, '@type': 'BALANCED_DICKE'}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'dickeSwitchOutputNoiseTemperature': 90, 'integratorVoltageGain': 1,\n 'integrationTime': 1, 'bandwidth': 100000000.0, '@type': 'BALANCED_DICKE'})\n", (50096, 50504), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((50748, 50851), 
'instrupy.radiometer_model.ConicalScan.from_dict', 'ConicalScan.from_dict', (["{'offNadirAngle': 30, 'clockAngleRange': 60, 'interScanOverheadTime': 0.001}"], {}), "({'offNadirAngle': 30, 'clockAngleRange': 60,\n 'interScanOverheadTime': 0.001})\n", (50769, 50851), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((51228, 51339), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': -30}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': -30})\n", (51249, 51339), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((51469, 51614), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1, 'apertureExcitationProfile': 'UNIFORM',\n 'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 1,\n 'phyTemp': 300})\n", (51486, 51614), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((51643, 52077), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_dict', 'NoiseAddingRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'excessNoiseTemperature': 1000,\n 'integratorVoltageGain': 1, 'integrationTime': 1, 'bandwidth': \n 100000000.0, '@type': 'NOISE_ADDING'}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 
'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'excessNoiseTemperature': 1000, 'integratorVoltageGain': 1,\n 'integrationTime': 1, 'bandwidth': 100000000.0, '@type': 'NOISE_ADDING'})\n", (51680, 52077), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((52321, 52344), 'instrupy.radiometer_model.FixedScan.from_dict', 'FixedScan.from_dict', (['{}'], {}), '({})\n', (52340, 52344), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((34471, 34485), 'numpy.deg2rad', 'np.deg2rad', (['(15)'], {}), '(15)\n', (34481, 34485), True, 'import numpy as np\n')]
|
from copy import deepcopy
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.conf import settings
from django.test import TestCase, override_settings
from rest_framework.test import APITestCase
from formidable.models import Formidable
from formidable.views import check_callback_configuration
from . import form_data, form_data_items
from unittest.mock import patch
# Dotted import paths used as patch targets and as the values of the
# FORMIDABLE_POST_CREATE/UPDATE_CALLBACK_* settings in the tests below.
CALLBACK = 'demo.callback_save'
# Callback that raises when invoked; used to check the view survives
# (and logs) a failing post-save hook.
CALLBACK_EXCEPTION = 'demo.callback_exception'
class CreateFormTestCase(APITestCase):
    """Exercise the post-create callback hooks of the form-creation view.

    The ``FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS`` / ``_FAIL`` settings
    hold dotted paths to callables; the view invokes the matching one after
    a creation attempt. These tests patch the target callable and count
    its invocations.
    """

    @override_settings(
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK,
        FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK
    )
    def test_do_no_call_on_get(self):
        """Neither callback fires for a (method-not-allowed) GET request."""
        with patch(CALLBACK) as patched_callback:
            res = self.client.get(
                reverse('formidable:form_create')
            )
            self.assertEqual(res.status_code, 405)
            # No call on GET
            self.assertEqual(patched_callback.call_count, 0)

    @override_settings(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK)
    def test_create_no_error_post(self):
        """A successful creation triggers the success callback exactly once."""
        with patch(CALLBACK) as patched_callback:
            res = self.client.post(
                reverse('formidable:form_create'), form_data, format='json'
            )
            self.assertEqual(res.status_code, 201)
            self.assertEqual(patched_callback.call_count, 1)

    @override_settings(FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK)
    def test_create_error_post(self):
        """An invalid payload (422) triggers the failure callback once."""
        with patch(CALLBACK) as patched_callback:
            # deepcopy so the pop() below cannot mutate the shared
            # module-level fixture used by other tests.
            form_data_without_items = deepcopy(form_data_items)
            form_data_without_items['fields'][0].pop('items')
            res = self.client.post(
                reverse('formidable:form_create'), form_data_without_items,
                format='json'
            )
            # assertEqual (not the deprecated assertEquals alias, removed
            # in Python 3.12), consistent with the rest of this class.
            self.assertEqual(res.status_code, 422)
            self.assertEqual(patched_callback.call_count, 1)

    @override_settings(
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
    )
    def test_create_exception(self):
        # The called function raises an error, but the treatment proceeds
        # as if nothing has happened
        res = self.client.post(
            reverse('formidable:form_create'), form_data, format='json'
        )
        self.assertEqual(res.status_code, 201)

    @override_settings(
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
    )
    def test_create_exception_logger(self):
        # The called function raises an error, but the treatment proceeds
        # as if nothing has happened; the error is logged once.
        with patch('formidable.views.logger.error') as logger_error:
            res = self.client.post(
                reverse('formidable:form_create'), form_data, format='json'
            )
            self.assertEqual(res.status_code, 201)
            self.assertEqual(logger_error.call_count, 1)

    @override_settings(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existent')
    def test_create_callback_is_non_existent(self):
        # A non-existing module is treated separately: creation still
        # succeeds and the import failure is logged once.
        with patch('formidable.views.logger.error') as logger_error:
            res = self.client.post(
                reverse('formidable:form_create'), form_data, format='json'
            )
            self.assertEqual(res.status_code, 201)
            self.assertEqual(logger_error.call_count, 1)
class UpdateFormTestCase(APITestCase):
    """Callback behaviour of the form-update (detail) endpoint.

    ``FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS`` and ``..._FAIL`` name
    dotted paths to callables invoked after an update attempt.
    """

    def setUp(self):
        super().setUp()
        # A form to update in every test.
        self.form = Formidable.objects.create(
            label='test', description='test'
        )

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK,
        FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK
    )
    def test_do_no_call_on_get(self):
        # Reading the form must not trigger any callback.
        with patch(CALLBACK) as patched_callback:
            res = self.client.get(
                reverse('formidable:form_detail', args=[self.form.id])
            )
            self.assertEqual(res.status_code, 200)
            # No call on GET
            self.assertEqual(patched_callback.call_count, 0)

    @override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK)
    def test_update_no_error_post(self):
        # A successful update fires the success callback exactly once.
        with patch(CALLBACK) as patched_callback:
            res = self.client.put(
                reverse('formidable:form_detail', args=[self.form.id]),
                form_data, format='json'
            )
            self.assertEqual(res.status_code, 200)
            self.assertEqual(patched_callback.call_count, 1)

    @override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK)
    def test_update_error_post(self):
        # An invalid payload (missing ``items``) fires the fail callback.
        with patch(CALLBACK) as patched_callback:
            form_data_without_items = deepcopy(form_data_items)
            form_data_without_items['fields'][0].pop('items')
            res = self.client.put(
                reverse('formidable:form_detail', args=[self.form.id]),
                form_data_without_items, format='json'
            )
            # Fixed: ``assertEquals`` is a deprecated alias of
            # ``assertEqual`` (removed in Python 3.12).
            self.assertEqual(res.status_code, 422)
            self.assertEqual(patched_callback.call_count, 1)

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
    )
    def test_update_exception(self):
        # The called function raises an error, but the treatment proceeds
        # as if nothing has happened.
        res = self.client.put(
            reverse('formidable:form_detail', args=[self.form.id]),
            form_data, format='json'
        )
        self.assertEqual(res.status_code, 200)

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION
    )
    def test_update_exception_logger(self):
        # A raising callback is logged, but the request still succeeds.
        with patch('formidable.views.logger.error') as logger_error:
            res = self.client.put(
                reverse('formidable:form_detail', args=[self.form.id]),
                form_data, format='json'
            )
            self.assertEqual(res.status_code, 200)
            self.assertEqual(logger_error.call_count, 1)

    @override_settings(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existent')
    def test_update_callback_is_non_existent(self):
        # A non-existing module is treated separately: logged, request OK.
        with patch('formidable.views.logger.error') as logger_error:
            res = self.client.put(
                reverse('formidable:form_detail', args=[self.form.id]),
                form_data, format='json'
            )
            self.assertEqual(res.status_code, 200)
            self.assertEqual(logger_error.call_count, 1)
class ConfigurationLoadingTestCases(TestCase):
    """``check_callback_configuration`` under the possible settings states.

    Missing, ``None`` and empty-string callback settings are all valid;
    an unresolvable dotted path must raise ``ImproperlyConfigured``.
    """

    # The four callback settings the configuration check inspects.
    _CALLBACK_SETTINGS = (
        'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS',
        'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL',
        'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS',
        'FORMIDABLE_POST_CREATE_CALLBACK_FAIL',
    )

    @override_settings()
    def test_all_deleted(self):
        # Entirely absent settings are acceptable.
        for setting_name in self._CALLBACK_SETTINGS:
            delattr(settings, setting_name)
        self.assertTrue(check_callback_configuration())

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=None,
        FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=None,
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=None,
        FORMIDABLE_POST_CREATE_CALLBACK_FAIL=None
    )
    def test_all_none(self):
        # ``None`` values are acceptable as well.
        self.assertTrue(check_callback_configuration())

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='',
        FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='',
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='',
        FORMIDABLE_POST_CREATE_CALLBACK_FAIL=''
    )
    def test_all_empty(self):
        # Empty strings are acceptable as well.
        self.assertTrue(check_callback_configuration())

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existing',
    )
    def test_update_success_unknown(self):
        # An unresolvable dotted path is a configuration error.
        self.assertRaises(
            ImproperlyConfigured, check_callback_configuration)

    @override_settings(
        FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='non.existing',
    )
    def test_update_fail_unknown(self):
        self.assertRaises(
            ImproperlyConfigured, check_callback_configuration)

    @override_settings(
        FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existing',
    )
    def test_create_success_unknown(self):
        self.assertRaises(
            ImproperlyConfigured, check_callback_configuration)

    @override_settings(
        FORMIDABLE_POST_CREATE_CALLBACK_FAIL='non.existing',
    )
    def test_create_fail_unknown(self):
        self.assertRaises(
            ImproperlyConfigured, check_callback_configuration)
|
[
"copy.deepcopy",
"formidable.models.Formidable.objects.create",
"formidable.views.check_callback_configuration",
"unittest.mock.patch",
"django.urls.reverse",
"django.test.override_settings"
] |
[((546, 664), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': 'CALLBACK', 'FORMIDABLE_POST_CREATE_CALLBACK_FAIL': 'CALLBACK'}), '(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK,\n FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK)\n', (563, 664), False, 'from django.test import TestCase, override_settings\n'), ((1017, 1084), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': 'CALLBACK'}), '(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK)\n', (1034, 1084), False, 'from django.test import TestCase, override_settings\n'), ((1420, 1484), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_FAIL': 'CALLBACK'}), '(FORMIDABLE_POST_CREATE_CALLBACK_FAIL=CALLBACK)\n', (1437, 1484), False, 'from django.test import TestCase, override_settings\n'), ((1975, 2052), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': 'CALLBACK_EXCEPTION'}), '(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION)\n', (1992, 2052), False, 'from django.test import TestCase, override_settings\n'), ((2382, 2459), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': 'CALLBACK_EXCEPTION'}), '(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION)\n', (2399, 2459), False, 'from django.test import TestCase, override_settings\n'), ((2938, 3011), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': '"""non.existent"""'}), "(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existent')\n", (2955, 3011), False, 'from django.test import TestCase, override_settings\n'), ((3617, 3735), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': 'CALLBACK', 'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL': 'CALLBACK'}), '(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK,\n 
FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK)\n', (3634, 3735), False, 'from django.test import TestCase, override_settings\n'), ((4109, 4176), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': 'CALLBACK'}), '(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK)\n', (4126, 4176), False, 'from django.test import TestCase, override_settings\n'), ((4548, 4612), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL': 'CALLBACK'}), '(FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=CALLBACK)\n', (4565, 4612), False, 'from django.test import TestCase, override_settings\n'), ((5123, 5200), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': 'CALLBACK_EXCEPTION'}), '(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION)\n', (5140, 5200), False, 'from django.test import TestCase, override_settings\n'), ((5562, 5639), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': 'CALLBACK_EXCEPTION'}), '(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=CALLBACK_EXCEPTION)\n', (5579, 5639), False, 'from django.test import TestCase, override_settings\n'), ((6154, 6227), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': '"""non.existent"""'}), "(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existent')\n", (6171, 6227), False, 'from django.test import TestCase, override_settings\n'), ((6729, 6748), 'django.test.override_settings', 'override_settings', ([], {}), '()\n', (6746, 6748), False, 'from django.test import TestCase, override_settings\n'), ((7081, 7288), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': 'None', 'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL': 'None', 'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': 'None', 'FORMIDABLE_POST_CREATE_CALLBACK_FAIL': 'None'}), 
'(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS=None,\n FORMIDABLE_POST_UPDATE_CALLBACK_FAIL=None,\n FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS=None,\n FORMIDABLE_POST_CREATE_CALLBACK_FAIL=None)\n', (7098, 7288), False, 'from django.test import TestCase, override_settings\n'), ((7406, 7605), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': '""""""', 'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL': '""""""', 'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': '""""""', 'FORMIDABLE_POST_CREATE_CALLBACK_FAIL': '""""""'}), "(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='',\n FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='',\n FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='',\n FORMIDABLE_POST_CREATE_CALLBACK_FAIL='')\n", (7423, 7605), False, 'from django.test import TestCase, override_settings\n'), ((7724, 7797), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS': '"""non.existing"""'}), "(FORMIDABLE_POST_UPDATE_CALLBACK_SUCCESS='non.existing')\n", (7741, 7797), False, 'from django.test import TestCase, override_settings\n'), ((7959, 8029), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_UPDATE_CALLBACK_FAIL': '"""non.existing"""'}), "(FORMIDABLE_POST_UPDATE_CALLBACK_FAIL='non.existing')\n", (7976, 8029), False, 'from django.test import TestCase, override_settings\n'), ((8188, 8261), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS': '"""non.existing"""'}), "(FORMIDABLE_POST_CREATE_CALLBACK_SUCCESS='non.existing')\n", (8205, 8261), False, 'from django.test import TestCase, override_settings\n'), ((8423, 8493), 'django.test.override_settings', 'override_settings', ([], {'FORMIDABLE_POST_CREATE_CALLBACK_FAIL': '"""non.existing"""'}), "(FORMIDABLE_POST_CREATE_CALLBACK_FAIL='non.existing')\n", (8440, 8493), False, 'from django.test import TestCase, override_settings\n'), ((3529, 3588), 'formidable.models.Formidable.objects.create', 
'Formidable.objects.create', ([], {'label': '"""test"""', 'description': '"""test"""'}), "(label='test', description='test')\n", (3554, 3588), False, 'from formidable.models import Formidable\n'), ((734, 749), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (739, 749), False, 'from unittest.mock import patch\n'), ((1139, 1154), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (1144, 1154), False, 'from unittest.mock import patch\n'), ((1536, 1551), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (1541, 1551), False, 'from unittest.mock import patch\n'), ((1611, 1636), 'copy.deepcopy', 'deepcopy', (['form_data_items'], {}), '(form_data_items)\n', (1619, 1636), False, 'from copy import deepcopy\n'), ((2259, 2292), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), "('formidable:form_create')\n", (2266, 2292), False, 'from django.urls import reverse\n'), ((2642, 2680), 'unittest.mock.patch', 'patch', (['"""formidable.views.logger.error"""'], {}), "('formidable.views.logger.error')\n", (2647, 2680), False, 'from unittest.mock import patch\n'), ((3132, 3170), 'unittest.mock.patch', 'patch', (['"""formidable.views.logger.error"""'], {}), "('formidable.views.logger.error')\n", (3137, 3170), False, 'from unittest.mock import patch\n'), ((3805, 3820), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (3810, 3820), False, 'from unittest.mock import patch\n'), ((4231, 4246), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (4236, 4246), False, 'from unittest.mock import patch\n'), ((4664, 4679), 'unittest.mock.patch', 'patch', (['CALLBACK'], {}), '(CALLBACK)\n', (4669, 4679), False, 'from unittest.mock import patch\n'), ((4739, 4764), 'copy.deepcopy', 'deepcopy', (['form_data_items'], {}), '(form_data_items)\n', (4747, 4764), False, 'from copy import deepcopy\n'), ((5406, 5460), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': 
'[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (5413, 5460), False, 'from django.urls import reverse\n'), ((5822, 5860), 'unittest.mock.patch', 'patch', (['"""formidable.views.logger.error"""'], {}), "('formidable.views.logger.error')\n", (5827, 5860), False, 'from unittest.mock import patch\n'), ((6348, 6386), 'unittest.mock.patch', 'patch', (['"""formidable.views.logger.error"""'], {}), "('formidable.views.logger.error')\n", (6353, 6386), False, 'from unittest.mock import patch\n'), ((7043, 7073), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (7071, 7073), False, 'from formidable.views import check_callback_configuration\n'), ((7368, 7398), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (7396, 7398), False, 'from formidable.views import check_callback_configuration\n'), ((7686, 7716), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (7714, 7716), False, 'from formidable.views import check_callback_configuration\n'), ((7922, 7952), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (7950, 7952), False, 'from formidable.views import check_callback_configuration\n'), ((8151, 8181), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (8179, 8181), False, 'from formidable.views import check_callback_configuration\n'), ((8386, 8416), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (8414, 8416), False, 'from formidable.views import check_callback_configuration\n'), ((8615, 8645), 'formidable.views.check_callback_configuration', 'check_callback_configuration', ([], {}), '()\n', (8643, 8645), False, 'from formidable.views import check_callback_configuration\n'), ((822, 855), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), 
"('formidable:form_create')\n", (829, 855), False, 'from django.urls import reverse\n'), ((1228, 1261), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), "('formidable:form_create')\n", (1235, 1261), False, 'from django.urls import reverse\n'), ((1752, 1785), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), "('formidable:form_create')\n", (1759, 1785), False, 'from django.urls import reverse\n'), ((2750, 2783), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), "('formidable:form_create')\n", (2757, 2783), False, 'from django.urls import reverse\n'), ((3240, 3273), 'django.urls.reverse', 'reverse', (['"""formidable:form_create"""'], {}), "('formidable:form_create')\n", (3247, 3273), False, 'from django.urls import reverse\n'), ((3893, 3947), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': '[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (3900, 3947), False, 'from django.urls import reverse\n'), ((4319, 4373), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': '[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (4326, 4373), False, 'from django.urls import reverse\n'), ((4879, 4933), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': '[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (4886, 4933), False, 'from django.urls import reverse\n'), ((5929, 5983), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': '[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (5936, 5983), False, 'from django.urls import reverse\n'), ((6455, 6509), 'django.urls.reverse', 'reverse', (['"""formidable:form_detail"""'], {'args': '[self.form.id]'}), "('formidable:form_detail', args=[self.form.id])\n", (6462, 6509), False, 'from django.urls import reverse\n')]
|
"""Main"""
import sys
sys.stderr = open("error.log", "w")
import time
# import msvcrt
import os
import subprocess
import cv2
import sqlite3
import numpy as np
from pyzbar import pyzbar
from easytello.tello import Tello
from httprequest import HTTPRequest
from easytello.tello_control import ControlCommand as CoCo
from easytello.tello_control import TelloControl
def main():
    """
    Fly the Tello along either a DB-derived route (when a QR code is
    entered at the prompt) or the fixed route read from ``commands.txt``,
    while decoding QR codes from the video stream and posting each one,
    with the drone's current position, to the Django server over HTTP.
    """
    # --- parameters ---------------------------------------------------
    fixed_mode = True  # replaced by the DB route when a code is entered
    request_enable = True
    request_url = "http://127.0.0.1/qrcodes/jsontest"

    arg = input("コードを入力してください:")

    drone = Tello()
    drone.streamon()
    controller = TelloControl()
    controller.append(CoCo(lambda: drone.set_speed(10)))

    def axis_offset(delta, axis):
        """Return a 3-vector with *delta* on *axis* (0=x, 1=y, 2=z)."""
        offset = [0, 0, 0]
        offset[axis] = delta
        return np.array(offset)

    def append_axis_move(delta, axis, pos_cmd, neg_cmd):
        """Queue a move of *delta* cm along *axis* on the controller.

        Moves shorter than the Tello's 20 cm minimum are skipped.
        *pos_cmd*/*neg_cmd* are the drone commands for the positive and
        negative directions; the signed displacement vector is attached
        so the controller can track the drone's position.
        """
        if abs(delta) < 20:
            return
        # Bind command/distance as lambda defaults: CoCo runs them later.
        if delta > 0:
            controller.append(
                CoCo(lambda c=pos_cmd, d=delta: c(d),
                     axis_offset(delta, axis)))
        else:
            controller.append(
                CoCo(lambda c=neg_cmd, d=-delta: c(d),
                     axis_offset(delta, axis)))

    # --- DB route (retrieval scenario): look up the target position ---
    if len(arg) > 0:
        fixed_mode = False
        dbname = './TelloRecords/records/db.sqlite3'
        conn = sqlite3.connect(dbname)
        cur = conn.cursor()
        target_pos = []
        try:
            # Parameterized query: *arg* is user input, so never
            # interpolate it into the SQL string (injection-safe).
            cur.execute(
                "select pos_x, pos_y, pos_z from qrcodes_qr"
                " where qr_code = ?", (arg,))
            target_pos = cur.fetchall()
        except Exception as ex:
            print('SQL ERROR: {}'.format(ex))
        print(target_pos)
        # Guard against a code that is not present in the DB
        # (the original indexed target_pos[0] unconditionally).
        if target_pos and len(target_pos[0]) == 3:
            target_x, target_y, target_z = target_pos[0]
            controller.append(CoCo(drone.takeoff))
            # Outbound leg: x (right/left), y (up/down), z (forward/back).
            append_axis_move(target_x, 0, drone.right, drone.left)
            append_axis_move(target_y, 1, drone.up, drone.down)
            append_axis_move(target_z, 2, drone.forward, drone.back)
            # Return leg: reverse order with negated displacements.
            append_axis_move(-target_z, 2, drone.forward, drone.back)
            append_axis_move(-target_y, 1, drone.up, drone.down)
            append_axis_move(-target_x, 0, drone.right, drone.left)
            controller.append(CoCo(drone.land))
        cur.close()
        conn.close()

    # --- fixed route: replay commands.txt ------------------------------
    if fixed_mode:
        # (command, axis, sign) per translating command; the sign turns
        # the command distance into the tracked signed displacement.
        moves = {
            "up": (drone.up, 1, 1),
            "down": (drone.down, 1, -1),
            "left": (drone.left, 0, -1),
            "right": (drone.right, 0, 1),
            "forward": (drone.forward, 2, 1),
            "back": (drone.back, 2, -1),
        }
        with open("commands.txt") as f:
            for line in f:
                name, _, rest = line.partition(" ")
                name = name.strip()
                if name == "takeoff":
                    controller.append(CoCo(drone.takeoff))
                elif name == "land":
                    controller.append(CoCo(drone.land))
                elif name in moves:
                    cmd, axis, sign = moves[name]
                    distance = int(rest)
                    # Bind cmd/distance as defaults: a plain closure
                    # would late-bind and every queued lambda would
                    # replay only the LAST line's values (original bug).
                    controller.append(CoCo(
                        lambda c=cmd, d=distance: c(d),
                        axis_offset(sign * distance, axis)))

    drone.set_controller(controller)
    # Give the video stream a few seconds to come up before starting.
    time.sleep(5)
    controller.start()

    req = HTTPRequest(request_url) if request_enable else None
    try:
        while True:
            frame = drone.read()
            # Decode QR codes from the current frame, if any.
            if frame is not None:
                decoded_objs = pyzbar.decode(frame)
                if decoded_objs:
                    # Report only the first decoded code.
                    str_dec_obj = decoded_objs[0][0].decode('utf-8', 'ignore')
                    print(f'QR cord: {str_dec_obj}')
                    # Position at decode time is sent along with the code.
                    pos = drone.get_position()
                    if req is not None:
                        req.send_qr(str_dec_obj, pos)
            # Fixed polling interval.
            time.sleep(0.05)
    except KeyboardInterrupt:
        pass
    drone.streamoff()


if __name__ == "__main__":
    main()
|
[
"numpy.abs",
"easytello.tello_control.ControlCommand",
"httprequest.HTTPRequest",
"easytello.tello.Tello",
"pyzbar.pyzbar.decode",
"time.sleep",
"sqlite3.connect",
"numpy.array",
"easytello.tello_control.TelloControl"
] |
[((1007, 1014), 'easytello.tello.Tello', 'Tello', ([], {}), '()\n', (1012, 1014), False, 'from easytello.tello import Tello\n'), ((1058, 1072), 'easytello.tello_control.TelloControl', 'TelloControl', ([], {}), '()\n', (1070, 1072), False, 'from easytello.tello_control import TelloControl\n'), ((6642, 6655), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6652, 6655), False, 'import time\n'), ((1268, 1291), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (1283, 1291), False, 'import sqlite3\n'), ((6733, 6757), 'httprequest.HTTPRequest', 'HTTPRequest', (['request_url'], {}), '(request_url)\n', (6744, 6757), False, 'from httprequest import HTTPRequest\n'), ((8528, 8544), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (8538, 8544), False, 'import time\n'), ((6950, 6970), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['frame'], {}), '(frame)\n', (6963, 6970), False, 'from pyzbar import pyzbar\n'), ((1720, 1739), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.takeoff'], {}), '(drone.takeoff)\n', (1724, 1739), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((1939, 1955), 'numpy.abs', 'np.abs', (['target_x'], {}), '(target_x)\n', (1945, 1955), True, 'import numpy as np\n'), ((2329, 2345), 'numpy.abs', 'np.abs', (['target_y'], {}), '(target_y)\n', (2335, 2345), True, 'import numpy as np\n'), ((2710, 2726), 'numpy.abs', 'np.abs', (['target_z'], {}), '(target_z)\n', (2716, 2726), True, 'import numpy as np\n'), ((3113, 3129), 'numpy.abs', 'np.abs', (['target_z'], {}), '(target_z)\n', (3119, 3129), True, 'import numpy as np\n'), ((3501, 3517), 'numpy.abs', 'np.abs', (['target_y'], {}), '(target_y)\n', (3507, 3517), True, 'import numpy as np\n'), ((3890, 3906), 'numpy.abs', 'np.abs', (['target_x'], {}), '(target_x)\n', (3896, 3906), True, 'import numpy as np\n'), ((4290, 4306), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.land'], {}), '(drone.land)\n', (4294, 4306), True, 'from 
easytello.tello_control import ControlCommand as CoCo\n'), ((5193, 5212), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.takeoff'], {}), '(drone.takeoff)\n', (5197, 5212), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((5296, 5312), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.land'], {}), '(drone.land)\n', (5300, 5312), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((2121, 2147), 'numpy.array', 'np.array', (['[target_x, 0, 0]'], {}), '([target_x, 0, 0])\n', (2129, 2147), True, 'import numpy as np\n'), ((2273, 2299), 'numpy.array', 'np.array', (['[target_x, 0, 0]'], {}), '([target_x, 0, 0])\n', (2281, 2299), True, 'import numpy as np\n'), ((2502, 2528), 'numpy.array', 'np.array', (['[0, target_y, 0]'], {}), '([0, target_y, 0])\n', (2510, 2528), True, 'import numpy as np\n'), ((2654, 2680), 'numpy.array', 'np.array', (['[0, target_y, 0]'], {}), '([0, target_y, 0])\n', (2662, 2680), True, 'import numpy as np\n'), ((2888, 2914), 'numpy.array', 'np.array', (['[0, 0, target_z]'], {}), '([0, 0, target_z])\n', (2896, 2914), True, 'import numpy as np\n'), ((3040, 3066), 'numpy.array', 'np.array', (['[0, 0, target_z]'], {}), '([0, 0, target_z])\n', (3048, 3066), True, 'import numpy as np\n'), ((3288, 3315), 'numpy.array', 'np.array', (['[0, 0, -target_z]'], {}), '([0, 0, -target_z])\n', (3296, 3315), True, 'import numpy as np\n'), ((3444, 3471), 'numpy.array', 'np.array', (['[0, 0, -target_z]'], {}), '([0, 0, -target_z])\n', (3452, 3471), True, 'import numpy as np\n'), ((3676, 3703), 'numpy.array', 'np.array', (['[0, -target_y, 0]'], {}), '([0, -target_y, 0])\n', (3684, 3703), True, 'import numpy as np\n'), ((3827, 3854), 'numpy.array', 'np.array', (['[0, -target_y, 0]'], {}), '([0, -target_y, 0])\n', (3835, 3854), True, 'import numpy as np\n'), ((4071, 4098), 'numpy.array', 'np.array', (['[-target_x, 0, 0]'], {}), '([-target_x, 0, 0])\n', (4079, 4098), True, 'import numpy as np\n'), ((4225, 4252), 
'numpy.array', 'np.array', (['[-target_x, 0, 0]'], {}), '([-target_x, 0, 0])\n', (4233, 4252), True, 'import numpy as np\n'), ((5487, 5513), 'numpy.array', 'np.array', (['[0, distance, 0]'], {}), '([0, distance, 0])\n', (5495, 5513), True, 'import numpy as np\n'), ((5695, 5723), 'numpy.array', 'np.array', (['[0, -distance1, 0]'], {}), '([0, -distance1, 0])\n', (5703, 5723), True, 'import numpy as np\n'), ((5905, 5933), 'numpy.array', 'np.array', (['[-distance2, 0, 0]'], {}), '([-distance2, 0, 0])\n', (5913, 5933), True, 'import numpy as np\n'), ((6118, 6145), 'numpy.array', 'np.array', (['[distance3, 0, 0]'], {}), '([distance3, 0, 0])\n', (6126, 6145), True, 'import numpy as np\n'), ((6336, 6363), 'numpy.array', 'np.array', (['[0, 0, distance4]'], {}), '([0, 0, distance4])\n', (6344, 6363), True, 'import numpy as np\n'), ((6545, 6573), 'numpy.array', 'np.array', (['[0, 0, -distance5]'], {}), '([0, 0, -distance5])\n', (6553, 6573), True, 'import numpy as np\n')]
|
#@ OpService ops
#@ Integer (value=128) xSize
#@ Integer (value=128) ySize
#@ Integer (value=128) zSize
#@OUTPUT ImgPlus phantom
#@OUTPUT ImgPlus convolved
from net.imglib2 import Point
from net.imglib2.algorithm.region.hypersphere import HyperSphere

# Build an empty 3D image and wrap it as an ImgPlus.
phantom = ops.create().img([xSize, ySize, zSize])
phantom = ops.create().imgPlus(phantom)

# Light two single-voxel point sources via the random-access interface.
ra = phantom.randomAccess()
for pos in [[xSize/2, ySize/2, zSize/2], [xSize/4, ySize/4, zSize/4]]:
    ra.setPosition(pos)
    ra.get().setReal(255.0)

# Fill a radius-5 sphere centred in the far octant with intensity 16.
centre = Point(phantom.numDimensions())
centre.setPosition([3*xSize/4, 3*ySize/4, 3*zSize/4])
for voxel in HyperSphere(phantom, centre, 5):
    voxel.setReal(16)
phantom.setName("phantom")

# Gaussian PSF via the kernel op (alternatively, the PSF could be a
# script input), then convolve it with the phantom and wrap the result.
psf = ops.create().kernelGauss([5, 5, 5])
convolved = ops.filter().convolve(phantom, psf)
convolved = ops.create().imgPlus(convolved)
convolved.setName("convolved")
|
[
"net.imglib2.algorithm.region.hypersphere.HyperSphere"
] |
[((783, 816), 'net.imglib2.algorithm.region.hypersphere.HyperSphere', 'HyperSphere', (['phantom', 'location', '(5)'], {}), '(phantom, location, 5)\n', (794, 816), False, 'from net.imglib2.algorithm.region.hypersphere import HyperSphere\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: carbon.proto
import sys
# On Python 2 byte strings pass through unchanged; on Python 3 the latin1
# encode reproduces the exact serialized descriptor bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Process-wide symbol database where generated message classes are registered.
_sym_db = _symbol_database.Default()
# File-level descriptor for carbon.proto; serialized_pb is the wire-encoded
# FileDescriptorProto (byte offsets below refer into this blob — do not edit).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='carbon.proto',
  package='',
  syntax='proto3',
  serialized_pb=_b('\n\x0c\x63\x61rbon.proto\")\n\x05Point\x12\x11\n\ttimestamp\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x01\"0\n\x06Metric\x12\x0e\n\x06metric\x18\x01 \x01(\t\x12\x16\n\x06points\x18\x02 \x03(\x0b\x32\x06.Point\"#\n\x07Payload\x12\x18\n\x07metrics\x18\x01 \x03(\x0b\x32\x07.Metricb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# message Point { uint32 timestamp = 1; double value = 2; }
_POINT = _descriptor.Descriptor(
  name='Point',
  full_name='Point',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='Point.timestamp', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='Point.value', index=1,
      number=2, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=16,
  serialized_end=57,
)
# message Metric { string metric = 1; repeated Point points = 2; }
_METRIC = _descriptor.Descriptor(
  name='Metric',
  full_name='Metric',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='metric', full_name='Metric.metric', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='points', full_name='Metric.points', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=59,
  serialized_end=107,
)
# message Payload { repeated Metric metrics = 1; }
_PAYLOAD = _descriptor.Descriptor(
  name='Payload',
  full_name='Payload',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='metrics', full_name='Payload.metrics', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=109,
  serialized_end=144,
)
# Wire up the message-typed fields and register the top-level messages.
_METRIC.fields_by_name['points'].message_type = _POINT
_PAYLOAD.fields_by_name['metrics'].message_type = _METRIC
DESCRIPTOR.message_types_by_name['Point'] = _POINT
DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD
# Concrete message classes generated from the descriptors above.
Point = _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), dict(
  DESCRIPTOR = _POINT,
  __module__ = 'carbon_pb2'
  # @@protoc_insertion_point(class_scope:Point)
  ))
_sym_db.RegisterMessage(Point)
Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
  DESCRIPTOR = _METRIC,
  __module__ = 'carbon_pb2'
  # @@protoc_insertion_point(class_scope:Metric)
  ))
_sym_db.RegisterMessage(Metric)
Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict(
  DESCRIPTOR = _PAYLOAD,
  __module__ = 'carbon_pb2'
  # @@protoc_insertion_point(class_scope:Payload)
  ))
_sym_db.RegisterMessage(Payload)
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor"
] |
[((479, 505), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (503, 505), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1118, 1414), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""timestamp"""', 'full_name': '"""Point.timestamp"""', 'index': '(0)', 'number': '(1)', 'type': '(13)', 'cpp_type': '(3)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='timestamp', full_name='Point.timestamp',\n index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=\n False, default_value=0, message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None, options\n =None)\n", (1145, 1414), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2447, 2736), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""points"""', 'full_name': '"""Metric.points"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='points', full_name='Metric.points', index\n =1, number=2, type=11, cpp_type=10, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None)\n", (2474, 2736), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3122, 3418), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""metrics"""', 'full_name': '"""Payload.metrics"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': 
'(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='metrics', full_name='Payload.metrics',\n index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=\n False, default_value=[], message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None, options\n =None)\n", (3149, 3418), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import json
import json
import csv
import sys
import os
import random
import math
import string
from collections import namedtuple, Counter
# T2 Deliverable
# Assuming dataset is downloaded, open each json and extract text data
# Assuming metadata is downloaded, open and extract publish time per json article
def _export_texts(indices, set_list, article_set, out_dir, label):
    """Write one <paper_id>.txt file per selected article into out_dir.

    indices     -- positions into set_list to export
    set_list    -- list of (paper_id, Article) pairs
    article_set -- dict paper_id -> Article (same content as set_list)
    out_dir     -- destination directory (must already exist)
    label       -- human-readable tag used only in progress messages
    """
    print("Exporting {} text...".format(label))
    for idx in indices:
        paper_id = set_list[idx][0]
        text = article_set[paper_id].text
        with open(os.path.join(out_dir, paper_id + ".txt"), "a+",
                  encoding="UTF-8") as fout:
            fout.writelines(text)
    print("Finished exporting {} text...".format(label))


def createDataset(datasetDir, metadataPath, basePath):
    """Build a before/after-median-date train/test split of CORD-19-style articles.

    Reads every *.json article under datasetDir, joins it with publish_time
    from the metadata CSV (matched on the 'sha' column), sorts by publish
    time, splits at the median date, and exports a random 90/10 train/test
    split of each half as plain-text files under basePath.

    datasetDir   -- directory containing the per-article json files
    metadataPath -- path to the metadata CSV (columns 'sha', 'publish_time')
    basePath     -- prefix for output dirs; 'train/before' etc. are appended
                    directly, so it should end with a path separator
    """
    ### Collect all article json paths
    dataset_list = [
        os.path.join(datasetDir, filename)
        for filename in os.listdir(datasetDir)
        if filename.endswith(".json")
    ]
    # failsafe for if no json files were found
    if not dataset_list:
        print("ERROR: No json files found in {}.".format(datasetDir))
        sys.exit(1)
    # Create training and test folder directories; refuse to overwrite.
    trainBeforePath = basePath + "train/before"
    trainAfterPath = basePath + "train/after"
    testBeforePath = basePath + "test/before"
    testAfterPath = basePath + "test/after"
    for path in (trainBeforePath, trainAfterPath, testBeforePath,
                 testAfterPath):
        if os.path.exists(path):
            print("Directory {} already exists. Please delete the directory "
                  "before running this script.".format(path))
            sys.exit(1)
        os.makedirs(path)
    # holds concatenated abstract + body text per paper id
    paperId_text = {}
    for dataset_path in dataset_list:
        # `with` guarantees the file handle is closed (the original leaked it)
        with open(dataset_path, "r") as dataset_file:
            dataset = json.load(dataset_file)
        paper_id = dataset["paper_id"]
        chunks = []
        for section in ("abstract", "body_text"):
            for ref in dataset[section]:
                chunks.append(ref["text"] + " ")
        paperId_text[paper_id] = "".join(chunks)
    # named tuple for an article: its publish time and full text
    Article = namedtuple('Article', ['publish_time', 'text'])
    data_by_time = {}
    print("Beginning to read metadata...")
    with open(metadataPath, "r", encoding="utf8") as csvfile:
        for row in csv.DictReader(csvfile):
            paper_id = row["sha"]
            # keep only articles that are present in our dataset
            if paper_id in paperId_text:
                data_by_time[paper_id] = Article(
                    publish_time=row["publish_time"],
                    text=paperId_text[paper_id]
                )
    print("Completed reading of metadata...")
    print("Length of dataset: {}".format(len(data_by_time)))
    if not data_by_time:
        print("ERROR: No articles matched the metadata.")
        sys.exit(1)
    # Sort by publish time (earliest -> latest)
    totalSet = {k: v for k, v in sorted(data_by_time.items(),
                                        key=lambda item: item[1].publish_time)}
    ### medianDate: half the articles published before it (beforeSet),
    ### half on or after it (afterSet)
    medianDateIdx = len(totalSet) // 2
    beforeSet = dict(list(totalSet.items())[:medianDateIdx])
    afterSet = dict(list(totalSet.items())[medianDateIdx:])
    medianDate = list(afterSet.items())[0][1].publish_time
    print("Median date: {}".format(medianDate))
    ### Shuffle each half; first 90% of the permutation trains, the rest tests
    train_order_before = random.sample(range(len(beforeSet)), len(beforeSet))
    train_order_after = random.sample(range(len(afterSet)), len(afterSet))
    trainBefore_split = math.ceil(0.9 * len(train_order_before))
    trainAfter_split = math.ceil(0.9 * len(train_order_after))
    beforeSet_list = list(beforeSet.items())
    afterSet_list = list(afterSet.items())
    # BUGFIX vs original: the shuffled values ARE the indices — the original
    # re-indexed with them ("train_test_before[i]") and, worse, indexed the
    # *before* permutation inside both "after" loops.
    _export_texts(train_order_before[:trainBefore_split], beforeSet_list,
                  beforeSet, trainBeforePath, "'before' training")
    _export_texts(train_order_after[:trainAfter_split], afterSet_list,
                  afterSet, trainAfterPath, "'after' training")
    ### Remaining 10% of before and after are testBefore, testAfter
    _export_texts(train_order_before[trainBefore_split:], beforeSet_list,
                  beforeSet, testBeforePath, "'before' test")
    _export_texts(train_order_after[trainAfter_split:], afterSet_list,
                  afterSet, testAfterPath, "'after' test")
print("Finished exporting 'after' test text...")
if __name__ == '__main__':
    # Usage: script.py <datasetDir> <metadataPath> <basePath>
    createDataset(sys.argv[1], sys.argv[2], sys.argv[3])
|
[
"json.load",
"os.makedirs",
"csv.DictReader",
"os.path.exists",
"collections.namedtuple",
"os.path.join",
"os.listdir"
] |
[((485, 507), 'os.listdir', 'os.listdir', (['datasetDir'], {}), '(datasetDir)\n', (495, 507), False, 'import os\n'), ((1135, 1166), 'os.path.exists', 'os.path.exists', (['trainBeforePath'], {}), '(trainBeforePath)\n', (1149, 1166), False, 'import os\n'), ((1212, 1242), 'os.path.exists', 'os.path.exists', (['trainAfterPath'], {}), '(trainAfterPath)\n', (1226, 1242), False, 'import os\n'), ((1287, 1317), 'os.path.exists', 'os.path.exists', (['testBeforePath'], {}), '(testBeforePath)\n', (1301, 1317), False, 'import os\n'), ((1362, 1391), 'os.path.exists', 'os.path.exists', (['testAfterPath'], {}), '(testAfterPath)\n', (1376, 1391), False, 'import os\n'), ((1667, 1695), 'os.makedirs', 'os.makedirs', (['trainBeforePath'], {}), '(trainBeforePath)\n', (1678, 1695), False, 'import os\n'), ((1700, 1727), 'os.makedirs', 'os.makedirs', (['trainAfterPath'], {}), '(trainAfterPath)\n', (1711, 1727), False, 'import os\n'), ((1732, 1759), 'os.makedirs', 'os.makedirs', (['testBeforePath'], {}), '(testBeforePath)\n', (1743, 1759), False, 'import os\n'), ((1764, 1790), 'os.makedirs', 'os.makedirs', (['testAfterPath'], {}), '(testAfterPath)\n', (1775, 1790), False, 'import os\n'), ((2523, 2570), 'collections.namedtuple', 'namedtuple', (['"""Article"""', "['publish_time', 'text']"], {}), "('Article', ['publish_time', 'text'])\n", (2533, 2570), False, 'from collections import namedtuple, Counter\n'), ((1985, 2008), 'json.load', 'json.load', (['dataset_file'], {}), '(dataset_file)\n', (1994, 2008), False, 'import json\n'), ((2692, 2715), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (2706, 2715), False, 'import csv\n'), ((567, 601), 'os.path.join', 'os.path.join', (['datasetDir', 'filename'], {}), '(datasetDir, filename)\n', (579, 601), False, 'import os\n')]
|
import typing
import json
from pathlib import Path
from web3 import Web3
from web3.providers import BaseProvider
from web3.contract import Contract
class BridgePool:
    """
    A class for interacting with the bridge pool contract.
    """

    @staticmethod
    def connect(address: str, provider: BaseProvider) -> Contract:
        """Return a web3 Contract bound to the bridge pool at *address*."""
        abi_path = Path(__file__).parent / "abis/bridge_pool_abi.json"
        with open(abi_path, "r") as abi_file:
            pool_abi = json.load(abi_file)
        return Web3(provider).eth.contract(address=address, abi=pool_abi)
class RateModelStore:
    """
    A class for interacting with the rate model store contract.
    """

    @staticmethod
    def connect(address: str, provider: BaseProvider) -> Contract:
        """Return a web3 Contract bound to the rate model store at *address*."""
        abi_path = Path(__file__).parent / "abis/rate_model_store_abi.json"
        with open(abi_path, "r") as abi_file:
            store_abi = json.load(abi_file)
        return Web3(provider).eth.contract(address=address, abi=store_abi)

    def get_address(self, network_id: int) -> str:
        """Return the deployed store address for a network ('' if unknown)."""
        # FIXME: hardcoded — only Ethereum mainnet (network 1) is known.
        return (
            "0xd18fFeb5fdd1F2e122251eA7Bf357D8Af0B60B50"
            if network_id == 1
            else ""
        )
|
[
"pathlib.Path",
"json.load",
"web3.Web3"
] |
[((343, 357), 'web3.Web3', 'Web3', (['provider'], {}), '(provider)\n', (347, 357), False, 'from web3 import Web3\n'), ((813, 827), 'web3.Web3', 'Web3', (['provider'], {}), '(provider)\n', (817, 827), False, 'from web3 import Web3\n'), ((487, 506), 'json.load', 'json.load', (['abi_file'], {}), '(abi_file)\n', (496, 506), False, 'import json\n'), ((962, 981), 'json.load', 'json.load', (['abi_file'], {}), '(abi_file)\n', (971, 981), False, 'import json\n'), ((389, 403), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (393, 403), False, 'from pathlib import Path\n'), ((859, 873), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (863, 873), False, 'from pathlib import Path\n')]
|
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route('/', methods=["GET", "POST"])
def home():
    """Render the home page with a fixed demo list of workers."""
    workers = [
        {"id": 1, "name": "Worker1", "salary": 1237.99},
        {"id": 2, "name": "Worker2", "salary": 5237.99},
        {"id": 3, "name": "Worker5", "salary": 5237.39},
    ]
    return render_template("html/home.html", workers=workers)


@app.route('/about', methods=["GET", "POST"])
def about():
    """Render the static about page."""
    return render_template('html/about.html')
|
[
"flask.Flask",
"flask.render_template"
] |
[((57, 72), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (62, 72), False, 'from flask import Flask, render_template, request\n'), ((311, 361), 'flask.render_template', 'render_template', (['"""html/home.html"""'], {'workers': 'workers'}), "('html/home.html', workers=workers)\n", (326, 361), False, 'from flask import Flask, render_template, request\n'), ((433, 467), 'flask.render_template', 'render_template', (['"""html/about.html"""'], {}), "('html/about.html')\n", (448, 467), False, 'from flask import Flask, render_template, request\n')]
|
from django.db import IntegrityError
from Poem.api import serializers
from Poem.api.views import NotFound
from Poem.poem import models as poem_models
from Poem.users.models import CustUser
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
def get_groups_for_user(user):
groupsofaggregations = list(
user.userprofile.groupsofaggregations.all().values_list(
'name', flat=True
)
)
results = {'aggregations': groupsofaggregations}
groupsofmetrics = list(
user.userprofile.groupsofmetrics.all().values_list('name', flat=True)
)
results.update({'metrics': groupsofmetrics})
groupsofmetricprofiles = list(
user.userprofile.groupsofmetricprofiles.all().values_list(
'name', flat=True
)
)
results.update({'metricprofiles': groupsofmetricprofiles})
groupsofthresholdsprofiles = list(
user.userprofile.groupsofthresholdsprofiles.all().values_list(
'name', flat=True
)
)
results.update({'thresholdsprofiles': groupsofthresholdsprofiles})
return results
def get_all_groups():
    """Return the names of every existing group, keyed by category."""
    return {
        'aggregations': list(
            poem_models.GroupOfAggregations.objects.all().values_list(
                'name', flat=True
            )
        ),
        'metrics': list(
            poem_models.GroupOfMetrics.objects.all().values_list(
                'name', flat=True
            )
        ),
        'metricprofiles': list(
            poem_models.GroupOfMetricProfiles.objects.all().values_list(
                'name', flat=True
            )
        ),
        'thresholdsprofiles': list(
            poem_models.GroupOfThresholdsProfiles.objects.all().values_list(
                'name', flat=True
            )
        ),
    }
class ListUsers(APIView):
    """List, create, update and delete CustUser accounts."""
    authentication_classes = (SessionAuthentication,)

    def get(self, request, username=None):
        # Single-user lookup when a username is supplied.
        if username:
            try:
                target = CustUser.objects.get(username=username)
            except CustUser.DoesNotExist:
                raise NotFound(status=404,
                               detail='User not found')
            return Response(serializers.UsersSerializer(target).data)
        # Listing: superusers see everyone, others only their own account.
        if request.user.is_superuser:
            queryset = CustUser.objects.all()
        else:
            queryset = CustUser.objects.filter(
                username=request.user.username
            )
        serializer = serializers.UsersSerializer(queryset, many=True)
        ordered = sorted(serializer.data,
                         key=lambda entry: entry['username'].lower())
        return Response(ordered)

    def put(self, request):
        try:
            account = CustUser.objects.get(pk=request.data['pk'])
            # Copy every editable field straight from the payload.
            for field in ('username', 'first_name', 'last_name', 'email',
                          'is_superuser', 'is_active'):
                setattr(account, field, request.data[field])
            account.save()
            return Response(status=status.HTTP_201_CREATED)
        except IntegrityError:
            # Username collides with an existing account.
            return Response(
                {'detail': 'User with this username already exists.'},
                status=status.HTTP_400_BAD_REQUEST
            )

    def post(self, request):
        try:
            CustUser.objects.create_user(
                username=request.data['username'],
                password=request.data['password'],
                email=request.data['email'],
                first_name=request.data['first_name'],
                last_name=request.data['last_name'],
                is_superuser=request.data['is_superuser'],
                is_active=request.data['is_active']
            )
            return Response(status=status.HTTP_201_CREATED)
        except IntegrityError:
            return Response(
                {'detail': 'User with this username already exists.'},
                status=status.HTTP_400_BAD_REQUEST
            )

    def delete(self, request, username=None):
        # Deleting requires an explicit username.
        if not username:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        try:
            doomed = CustUser.objects.get(username=username)
        except CustUser.DoesNotExist:
            raise NotFound(status=404, detail='User not found')
        doomed.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class GetUserprofileForUsername(APIView):
    """Read, update and create the UserProfile attached to a CustUser."""
    authentication_classes = (SessionAuthentication,)

    @staticmethod
    def _group_relations(userprofile):
        """Map each payload key to its model class and M2M relation."""
        return (
            ('groupsofaggregations', poem_models.GroupOfAggregations,
             userprofile.groupsofaggregations),
            ('groupsofmetrics', poem_models.GroupOfMetrics,
             userprofile.groupsofmetrics),
            ('groupsofmetricprofiles', poem_models.GroupOfMetricProfiles,
             userprofile.groupsofmetricprofiles),
            ('groupsofthresholdsprofiles',
             poem_models.GroupOfThresholdsProfiles,
             userprofile.groupsofthresholdsprofiles),
        )

    def get(self, request, username):
        try:
            user = CustUser.objects.get(username=username)
        except CustUser.DoesNotExist:
            raise NotFound(status=404, detail='User not found')
        try:
            user_profile = poem_models.UserProfile.objects.get(user=user)
        except poem_models.UserProfile.DoesNotExist:
            raise NotFound(status=404, detail='User profile not found')
        return Response(serializers.UserProfileSerializer(user_profile).data)

    def put(self, request):
        user = CustUser.objects.get(username=request.data['username'])
        userprofile = poem_models.UserProfile.objects.get(user=user)
        userprofile.displayname = request.data['displayname']
        userprofile.subject = request.data['subject']
        userprofile.egiid = request.data['egiid']
        userprofile.save()
        payload = dict(request.data)
        relations = self._group_relations(userprofile)
        # First add every group named in the payload ...
        for key, model, relation in relations:
            if key in payload:
                for name in payload[key]:
                    relation.add(model.objects.get(name=name))
        # ... then drop memberships that existed before but are gone now.
        for key, model, relation in relations:
            if key in payload:
                for group in relation.all():
                    if group.name not in payload[key]:
                        relation.remove(group)
        return Response(status=status.HTTP_201_CREATED)

    def post(self, request):
        user = CustUser.objects.get(username=request.data['username'])
        userprofile = poem_models.UserProfile.objects.create(
            user=user,
            displayname=request.data['displayname'],
            subject=request.data['subject'],
            egiid=request.data['egiid']
        )
        payload = dict(request.data)
        # Attach every group listed in the payload to the new profile.
        for key, model, relation in self._group_relations(userprofile):
            if key in payload:
                for name in payload[key]:
                    relation.add(model.objects.get(name=name))
        return Response(status=status.HTTP_201_CREATED)
class ListGroupsForGivenUser(APIView):
    """Return group membership for one user, or all groups when no user given."""
    authentication_classes = (SessionAuthentication,)

    def get(self, request, username=None):
        if not username:
            return Response({'result': get_all_groups()})
        try:
            user = CustUser.objects.get(username=username)
        except CustUser.DoesNotExist:
            raise NotFound(status=404, detail='User not found')
        return Response({'result': get_groups_for_user(user)})
|
[
"Poem.poem.models.GroupOfAggregations.objects.get",
"Poem.poem.models.GroupOfThresholdsProfiles.objects.get",
"Poem.poem.models.UserProfile.objects.get",
"Poem.poem.models.UserProfile.objects.create",
"Poem.users.models.CustUser.objects.get",
"Poem.users.models.CustUser.objects.create_user",
"rest_framework.response.Response",
"Poem.api.serializers.UsersSerializer",
"Poem.poem.models.GroupOfMetrics.objects.all",
"Poem.users.models.CustUser.objects.all",
"Poem.poem.models.GroupOfThresholdsProfiles.objects.all",
"Poem.api.serializers.UserProfileSerializer",
"Poem.users.models.CustUser.objects.filter",
"Poem.api.views.NotFound",
"Poem.poem.models.GroupOfMetrics.objects.get",
"Poem.poem.models.GroupOfMetricProfiles.objects.get",
"Poem.poem.models.GroupOfAggregations.objects.all",
"Poem.poem.models.GroupOfMetricProfiles.objects.all"
] |
[((5546, 5601), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': "request.data['username']"}), "(username=request.data['username'])\n", (5566, 5601), False, 'from Poem.users.models import CustUser\n'), ((5624, 5670), 'Poem.poem.models.UserProfile.objects.get', 'poem_models.UserProfile.objects.get', ([], {'user': 'user'}), '(user=user)\n', (5659, 5670), True, 'from Poem.poem import models as poem_models\n'), ((8281, 8321), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_201_CREATED'}), '(status=status.HTTP_201_CREATED)\n', (8289, 8321), False, 'from rest_framework.response import Response\n'), ((8367, 8422), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': "request.data['username']"}), "(username=request.data['username'])\n", (8387, 8422), False, 'from Poem.users.models import CustUser\n'), ((8446, 8608), 'Poem.poem.models.UserProfile.objects.create', 'poem_models.UserProfile.objects.create', ([], {'user': 'user', 'displayname': "request.data['displayname']", 'subject': "request.data['subject']", 'egiid': "request.data['egiid']"}), "(user=user, displayname=request.data[\n 'displayname'], subject=request.data['subject'], egiid=request.data[\n 'egiid'])\n", (8484, 8608), True, 'from Poem.poem import models as poem_models\n'), ((9831, 9871), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_201_CREATED'}), '(status=status.HTTP_201_CREATED)\n', (9839, 9871), False, 'from rest_framework.response import Response\n'), ((10364, 10393), 'rest_framework.response.Response', 'Response', (["{'result': results}"], {}), "({'result': results})\n", (10372, 10393), False, 'from rest_framework.response import Response\n'), ((2786, 2831), 'Poem.api.serializers.UsersSerializer', 'serializers.UsersSerializer', (['users'], {'many': '(True)'}), '(users, many=True)\n', (2813, 2831), False, 'from Poem.api import serializers\n'), ((2933, 2947), 
'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (2941, 2947), False, 'from rest_framework.response import Response\n'), ((3009, 3052), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'pk': "request.data['pk']"}), "(pk=request.data['pk'])\n", (3029, 3052), False, 'from Poem.users.models import CustUser\n'), ((3425, 3465), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_201_CREATED'}), '(status=status.HTTP_201_CREATED)\n', (3433, 3465), False, 'from rest_framework.response import Response\n'), ((3718, 4020), 'Poem.users.models.CustUser.objects.create_user', 'CustUser.objects.create_user', ([], {'username': "request.data['username']", 'password': "request.data['password']", 'email': "request.data['email']", 'first_name': "request.data['first_name']", 'last_name': "request.data['last_name']", 'is_superuser': "request.data['is_superuser']", 'is_active': "request.data['is_active']"}), "(username=request.data['username'], password=\n request.data['password'], email=request.data['email'], first_name=\n request.data['first_name'], last_name=request.data['last_name'],\n is_superuser=request.data['is_superuser'], is_active=request.data[\n 'is_active'])\n", (3746, 4020), False, 'from Poem.users.models import CustUser\n'), ((4148, 4188), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_201_CREATED'}), '(status=status.HTTP_201_CREATED)\n', (4156, 4188), False, 'from rest_framework.response import Response\n'), ((4777, 4821), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (4785, 4821), False, 'from rest_framework.response import Response\n'), ((4991, 5030), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': 'username'}), '(username=username)\n', (5011, 5030), False, 'from Poem.users.models import CustUser\n'), ((1292, 1337), 
'Poem.poem.models.GroupOfAggregations.objects.all', 'poem_models.GroupOfAggregations.objects.all', ([], {}), '()\n', (1335, 1337), True, 'from Poem.poem import models as poem_models\n'), ((1487, 1527), 'Poem.poem.models.GroupOfMetrics.objects.all', 'poem_models.GroupOfMetrics.objects.all', ([], {}), '()\n', (1525, 1527), True, 'from Poem.poem import models as poem_models\n'), ((1658, 1705), 'Poem.poem.models.GroupOfMetricProfiles.objects.all', 'poem_models.GroupOfMetricProfiles.objects.all', ([], {}), '()\n', (1703, 1705), True, 'from Poem.poem import models as poem_models\n'), ((1876, 1927), 'Poem.poem.models.GroupOfThresholdsProfiles.objects.all', 'poem_models.GroupOfThresholdsProfiles.objects.all', ([], {}), '()\n', (1925, 1927), True, 'from Poem.poem import models as poem_models\n'), ((2265, 2304), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': 'username'}), '(username=username)\n', (2285, 2304), False, 'from Poem.users.models import CustUser\n'), ((2334, 2367), 'Poem.api.serializers.UsersSerializer', 'serializers.UsersSerializer', (['user'], {}), '(user)\n', (2361, 2367), False, 'from Poem.api import serializers\n'), ((2391, 2416), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (2399, 2416), False, 'from rest_framework.response import Response\n'), ((2640, 2662), 'Poem.users.models.CustUser.objects.all', 'CustUser.objects.all', ([], {}), '()\n', (2660, 2662), False, 'from Poem.users.models import CustUser\n'), ((2705, 2760), 'Poem.users.models.CustUser.objects.filter', 'CustUser.objects.filter', ([], {'username': 'request.user.username'}), '(username=request.user.username)\n', (2728, 2760), False, 'from Poem.users.models import CustUser\n'), ((3517, 3621), 'rest_framework.response.Response', 'Response', (["{'detail': 'User with this username already exists.'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'detail': 'User with this username already exists.'}, status=\n 
status.HTTP_400_BAD_REQUEST)\n", (3525, 3621), False, 'from rest_framework.response import Response\n'), ((4240, 4344), 'rest_framework.response.Response', 'Response', (["{'detail': 'User with this username already exists.'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'detail': 'User with this username already exists.'}, status=\n status.HTTP_400_BAD_REQUEST)\n", (4248, 4344), False, 'from rest_framework.response import Response\n'), ((4494, 4533), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': 'username'}), '(username=username)\n', (4514, 4533), False, 'from Poem.users.models import CustUser\n'), ((4587, 4630), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (4595, 4630), False, 'from rest_framework.response import Response\n'), ((5087, 5132), 'Poem.api.views.NotFound', 'NotFound', ([], {'status': '(404)', 'detail': '"""User not found"""'}), "(status=404, detail='User not found')\n", (5095, 5132), False, 'from Poem.api.views import NotFound\n'), ((5195, 5241), 'Poem.poem.models.UserProfile.objects.get', 'poem_models.UserProfile.objects.get', ([], {'user': 'user'}), '(user=user)\n', (5230, 5241), True, 'from Poem.poem import models as poem_models\n'), ((5271, 5318), 'Poem.api.serializers.UserProfileSerializer', 'serializers.UserProfileSerializer', (['user_profile'], {}), '(user_profile)\n', (5304, 5318), False, 'from Poem.api import serializers\n'), ((5342, 5367), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (5350, 5367), False, 'from rest_framework.response import Response\n'), ((10072, 10111), 'Poem.users.models.CustUser.objects.get', 'CustUser.objects.get', ([], {'username': 'username'}), '(username=username)\n', (10092, 10111), False, 'from Poem.users.models import CustUser\n'), ((2482, 2527), 'Poem.api.views.NotFound', 'NotFound', ([], {'status': '(404)', 'detail': '"""User not 
found"""'}), "(status=404, detail='User not found')\n", (2490, 2527), False, 'from Poem.api.views import NotFound\n'), ((4696, 4741), 'Poem.api.views.NotFound', 'NotFound', ([], {'status': '(404)', 'detail': '"""User not found"""'}), "(status=404, detail='User not found')\n", (4704, 4741), False, 'from Poem.api.views import NotFound\n'), ((5448, 5501), 'Poem.api.views.NotFound', 'NotFound', ([], {'status': '(404)', 'detail': '"""User profile not found"""'}), "(status=404, detail='User profile not found')\n", (5456, 5501), False, 'from Poem.api.views import NotFound\n'), ((6065, 6120), 'Poem.poem.models.GroupOfAggregations.objects.get', 'poem_models.GroupOfAggregations.objects.get', ([], {'name': 'group'}), '(name=group)\n', (6108, 6120), True, 'from Poem.poem import models as poem_models\n'), ((6325, 6375), 'Poem.poem.models.GroupOfMetrics.objects.get', 'poem_models.GroupOfMetrics.objects.get', ([], {'name': 'group'}), '(name=group)\n', (6363, 6375), True, 'from Poem.poem import models as poem_models\n'), ((6601, 6658), 'Poem.poem.models.GroupOfMetricProfiles.objects.get', 'poem_models.GroupOfMetricProfiles.objects.get', ([], {'name': 'group'}), '(name=group)\n', (6646, 6658), True, 'from Poem.poem import models as poem_models\n'), ((6896, 6957), 'Poem.poem.models.GroupOfThresholdsProfiles.objects.get', 'poem_models.GroupOfThresholdsProfiles.objects.get', ([], {'name': 'group'}), '(name=group)\n', (6945, 6957), True, 'from Poem.poem import models as poem_models\n'), ((8858, 8913), 'Poem.poem.models.GroupOfAggregations.objects.get', 'poem_models.GroupOfAggregations.objects.get', ([], {'name': 'group'}), '(name=group)\n', (8901, 8913), True, 'from Poem.poem import models as poem_models\n'), ((9118, 9168), 'Poem.poem.models.GroupOfMetrics.objects.get', 'poem_models.GroupOfMetrics.objects.get', ([], {'name': 'group'}), '(name=group)\n', (9156, 9168), True, 'from Poem.poem import models as poem_models\n'), ((9394, 9451), 
'Poem.poem.models.GroupOfMetricProfiles.objects.get', 'poem_models.GroupOfMetricProfiles.objects.get', ([], {'name': 'group'}), '(name=group)\n', (9439, 9451), True, 'from Poem.poem import models as poem_models\n'), ((9689, 9750), 'Poem.poem.models.GroupOfThresholdsProfiles.objects.get', 'poem_models.GroupOfThresholdsProfiles.objects.get', ([], {'name': 'group'}), '(name=group)\n', (9738, 9750), True, 'from Poem.poem import models as poem_models\n'), ((10177, 10222), 'Poem.api.views.NotFound', 'NotFound', ([], {'status': '(404)', 'detail': '"""User not found"""'}), "(status=404, detail='User not found')\n", (10185, 10222), False, 'from Poem.api.views import NotFound\n')]
|
import speech_recognition as sr
# Module-wide recognizer instance shared by listen() and l2().
r = sr.Recognizer()
def listen():
    """Capture one utterance from microphone #2 and print the French transcript."""
    mic = sr.Microphone(device_index=2)
    with mic as source:
        # Calibrate against background noise, then require a long (2s) pause
        # before the recognizer considers the phrase finished.
        r.adjust_for_ambient_noise(source)
        r.pause_threshold = 2
        print("Say Something")
        audio = r.listen(source)
        print("got it")
        # Google Web Speech API, French language model.
        text = r.recognize_google(audio, language="fr-FR")
        print("You said : ", text)
def l2():
    """Transcribe the local audio file 'out.wav' (French) and print the result.

    Fix: the original called ``r.adjust_for_ambiant_noise`` (typo), which would
    raise AttributeError at runtime; the Recognizer method is
    ``adjust_for_ambient_noise``.
    """
    with sr.AudioFile('out.wav') as source :
        r.adjust_for_ambient_noise(source)
        audio = r.record(source)
    text = r.recognize_google(audio, language='fr-FR')
    print(text)
|
[
"speech_recognition.AudioFile",
"speech_recognition.Recognizer",
"speech_recognition.Microphone"
] |
[((36, 51), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (49, 51), True, 'import speech_recognition as sr\n'), ((72, 101), 'speech_recognition.Microphone', 'sr.Microphone', ([], {'device_index': '(2)'}), '(device_index=2)\n', (85, 101), True, 'import speech_recognition as sr\n'), ((347, 370), 'speech_recognition.AudioFile', 'sr.AudioFile', (['"""out.wav"""'], {}), "('out.wav')\n", (359, 370), True, 'import speech_recognition as sr\n')]
|
# -*- coding: utf-8 -*-
from Crypto.Cipher import AES
from Crypto import Random
import logging
logger = logging.getLogger("root")
# SECURITY NOTE(review): hard-coded symmetric key (32 bytes -> AES-256).
# Should be loaded from configuration or a secret store, not source code.
AES_KEY = '73f40f2c57eae727a4be171009cecf89'
def aes_encrypt(data):
    """Encrypt `data` with AES-CBC under the module key and return it
    base64-encoded.

    A fresh random IV is generated per call and prepended to the ciphertext.
    Returns '' for falsy input.

    NOTE(review): ``str.encode('base64')`` is Python 2 only -- this module is
    not Python 3 compatible as written.
    """
    if data:
        bs = AES.block_size
        # PKCS#7-style padding: append n bytes, each of value n.
        pad = lambda s: s + (bs - len(s) % bs) * chr(bs - len(s) % bs)
        iv = Random.new().read(bs)
        cipher = AES.new(AES_KEY, AES.MODE_CBC, iv)
        data = cipher.encrypt(pad(data))
        # Prepend the IV so aes_decrypt can recover it.
        data = iv + data
        return data.encode('base64')
    else:
        return ''
def aes_decrypt(data):
    """Decrypt a base64-encoded AES-CBC payload produced by aes_encrypt.

    Returns:
        (True, plaintext) on success; (True, data) unchanged if the decoded
        payload is no longer than one block (i.e. there is no ciphertext after
        the IV); (False, "") on any error, which is logged.
    """
    try:
        data = data.decode('base64')
        bs = AES.block_size
        if len(data) <= bs:
            # Too short to contain IV + ciphertext; pass through as-is.
            return True, data
        # Strip PKCS#7-style padding: last byte gives the pad length.
        unpad = lambda s: s[0:-ord(s[-1])]
        iv = data[:bs]
        cipher = AES.new(AES_KEY, AES.MODE_CBC, iv)
        data = unpad(cipher.decrypt(data[bs:]))
        return True, data
    except Exception as e:
        logger.exception("aes_decrypt")
        return False, ""
if __name__ == '__main__':
    # Smoke test: round-trip a placeholder secret through encrypt/decrypt.
    password = aes_encrypt('<PASSWORD>')
    print(password)
    res,password = aes_decrypt(password)
    print(password)
|
[
"Crypto.Random.new",
"Crypto.Cipher.AES.new",
"logging.getLogger"
] |
[((105, 130), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (122, 130), False, 'import logging\n'), ((366, 400), 'Crypto.Cipher.AES.new', 'AES.new', (['AES_KEY', 'AES.MODE_CBC', 'iv'], {}), '(AES_KEY, AES.MODE_CBC, iv)\n', (373, 400), False, 'from Crypto.Cipher import AES\n'), ((772, 806), 'Crypto.Cipher.AES.new', 'AES.new', (['AES_KEY', 'AES.MODE_CBC', 'iv'], {}), '(AES_KEY, AES.MODE_CBC, iv)\n', (779, 806), False, 'from Crypto.Cipher import AES\n'), ((327, 339), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (337, 339), False, 'from Crypto import Random\n')]
|
import os
import random
import time
import math
from datetime import timedelta
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as utils
import torch_geometric.transforms as T
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch.nn.utils import weight_norm
from torch_geometric.data import DataLoader
from torch_geometric.nn import GATConv, GCNConv, GINConv, MessagePassing, SAGEConv
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.utils import add_remaining_self_loops
from torch_scatter import scatter_add
from cargonet.dataset.baselinev1 import BaselineV1
from cargonet.models.baselines.model import BaselineModel
from cargonet.models.eval.losses import MAELoss
from cargonet.models.normalization import MinMaxScaler, Scaler, ZScoreScaler
from cargonet.models.sociallstm import ActiveRoutesModelLSTM, ActiveRoutesModelLSTMGAT
from cargonet.models.tempconv import TemporalConvNet
from cargonet.visualization.delays import DelayProgressPlot
class BaseTCN(nn.Module):
    """Temporal-convolution regressor over per-timestep feature sequences.

    Each timestep (concatenated node + edge features) is linearly embedded,
    the sequence is run through a TemporalConvNet, and the final hidden
    state is mapped to ``pred_seq_len`` outputs.
    """

    def __init__(
        self,
        device,
        node_input_dim,
        edge_input_dim,
        embedding_dim,
        conv_dim,
        pred_seq_len=1,
        seq_len=10,
        levels=3,
        dropout=0,
        kernel_size=4,
        verbose=False,
    ):
        super().__init__()
        self.device = device
        self.seq_len = seq_len
        self.pred_seq_len = pred_seq_len
        self.embedding_dim = embedding_dim
        self.conv_dim = conv_dim
        self.node_input_dim = node_input_dim
        self.edge_input_dim = edge_input_dim
        self.kernel_size = kernel_size
        # Cap the number of TCN levels relative to the available sequence
        # length (log base kernel_size of the total window).
        level_cap = math.log(self.seq_len + self.pred_seq_len, self.kernel_size)
        self.levels = int(min(levels, level_cap))
        self.dropout = nn.Dropout(dropout)
        # Per-timestep encoder from raw features to the embedding space.
        self.encoder = nn.Linear(self.node_input_dim + self.edge_input_dim + 0, self.embedding_dim)
        channel_sizes = [self.conv_dim] * self.levels
        self.temp_conv = TemporalConvNet(
            num_inputs=self.embedding_dim,
            num_channels=channel_sizes,
            dropout=dropout,
            kernel_size=self.kernel_size,
        )
        self.lrelu = nn.LeakyReLU()
        self.bn = nn.BatchNorm1d(self.embedding_dim)
        self.bn2 = nn.BatchNorm1d(self.conv_dim)
        self.lin = nn.Linear(self.conv_dim, self.pred_seq_len)
        self.init_weights()

    def init_weights(self):
        # No custom initialization; submodules keep their PyTorch defaults.
        pass

    def forward(self, data, x, edges, seq, net):
        embedded = self.encoder(seq)
        # TemporalConvNet expects (N, C_in, L_in): move channels in front.
        embedded = embedded.permute(0, 2, 1)
        hidden = self.temp_conv(embedded).permute(0, 2, 1)
        hidden = self.dropout(hidden)
        # Regress the prediction window from the final timestep only.
        return self.lin(hidden[:, -1, :])
class BaselineTCNModelV1(BaselineModel):
    """Training wrapper around BaseTCN: builds the network, an MSE loss and
    an AdamW optimizer, and offers a randomized hyperparameter grid search."""

    def __init__(
        self, dataset,
        node_input_dim, edge_input_dim, dropout=0.1, lr=0.001, l1_reg=0., conv_dim=64,
        embedding_dim=64, kernel_size=8,
        weight_decay=0.001, **kwargs,
    ):
        # NOTE(review): `l1_reg` is accepted but never referenced in this
        # constructor -- confirm whether the base class/training loop uses it.
        super().__init__(dataset, node_input_dim=node_input_dim, edge_input_dim=edge_input_dim, **kwargs)
        self.dropout = dropout
        net = self.dataset.net.to(self.device)
        self.model = BaseTCN(
            device=self.device,
            node_input_dim=self.node_input_dim,
            edge_input_dim=self.edge_input_dim,
            conv_dim=conv_dim,
            kernel_size=kernel_size,
            embedding_dim=embedding_dim,
            seq_len=self.seq_len,
            pred_seq_len=self.pred_seq_len,
            dropout=self.dropout,
        ).to(self.device)
        self.loss = torch.nn.MSELoss()
        # self.loss = MAELoss()
        self.optimizer = torch.optim.AdamW(
            self.model.parameters(), lr=lr, weight_decay=weight_decay
        )
        decayRate = 0.99
        self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            optimizer=self.optimizer, gamma=decayRate
        )
        # NOTE(review): the scheduler built above is immediately discarded by
        # the next line; remove it to actually enable exponential LR decay.
        self.lr_scheduler = None

    @classmethod
    def hyperparameter_search(cls, epochs=20, samples=15, **model_params):
        """Train `samples` randomly chosen configs from the grid below for
        `epochs` epochs each, then dump the MSE-sorted results as JSON under
        trained/hps/."""
        from sklearn.model_selection import ParameterGrid
        import datetime
        import json
        import random
        from pprint import pprint
        param_grid = dict(
            lr=[0.001],
            weight_decay=[0.0, 0.001, 0.00001],
            dropout=[0.0, 0.1, 0.2],
            embedding_dim=[32, 64, 128],
            conv_dim=[32, 64, 128],
            l1_reg=[0, 0.001],
            # layers=[1, 2, 3],
            kernel_size=[2, 4, 8],
        )
        results = []
        # Enumerate the full cartesian grid, then sample uniformly from it.
        configs = list(ParameterGrid(param_grid))
        random.shuffle(configs)
        for params in configs[:samples]:
            pprint(params)
            model = cls(**{**model_params, **params})
            model.train(epochs=epochs, val=False)
            _, val_losses = model.test()
            results.append((val_losses["mse"], params))
        # Best (lowest validation MSE) first.
        results = sorted(results, key=lambda r: r[0])
        print("BEST")
        print(results[0])
        # Save as JSON
        base_path = os.path.dirname(os.path.realpath(__file__))
        models_base_path = os.path.join(base_path, "../../../trained")
        assert os.path.exists(models_base_path)
        out_file = os.path.join(models_base_path, "hps")
        out_file = os.path.join(out_file, cls.__name__ + "_" + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ".json")
        with open(out_file, "w+") as f:
            json.dump(results, f)
|
[
"torch.nn.Dropout",
"json.dump",
"torch.nn.MSELoss",
"cargonet.models.tempconv.TemporalConvNet",
"random.shuffle",
"os.path.realpath",
"torch.nn.BatchNorm1d",
"os.path.exists",
"datetime.datetime.now",
"math.log",
"torch.optim.lr_scheduler.ExponentialLR",
"pprint.pprint",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"os.path.join",
"sklearn.model_selection.ParameterGrid"
] |
[((1874, 1893), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1884, 1893), True, 'import torch.nn as nn\n'), ((1917, 1993), 'torch.nn.Linear', 'nn.Linear', (['(self.node_input_dim + self.edge_input_dim + 0)', 'self.embedding_dim'], {}), '(self.node_input_dim + self.edge_input_dim + 0, self.embedding_dim)\n', (1926, 1993), True, 'import torch.nn as nn\n'), ((2066, 2184), 'cargonet.models.tempconv.TemporalConvNet', 'TemporalConvNet', ([], {'num_inputs': 'self.embedding_dim', 'num_channels': 'chans', 'dropout': 'dropout', 'kernel_size': 'self.kernel_size'}), '(num_inputs=self.embedding_dim, num_channels=chans, dropout=\n dropout, kernel_size=self.kernel_size)\n', (2081, 2184), False, 'from cargonet.models.tempconv import TemporalConvNet\n'), ((2261, 2275), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (2273, 2275), True, 'import torch.nn as nn\n'), ((2295, 2329), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.embedding_dim'], {}), '(self.embedding_dim)\n', (2309, 2329), True, 'import torch.nn as nn\n'), ((2349, 2378), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.conv_dim'], {}), '(self.conv_dim)\n', (2363, 2378), True, 'import torch.nn as nn\n'), ((2399, 2442), 'torch.nn.Linear', 'nn.Linear', (['self.conv_dim', 'self.pred_seq_len'], {}), '(self.conv_dim, self.pred_seq_len)\n', (2408, 2442), True, 'import torch.nn as nn\n'), ((3752, 3770), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3768, 3770), False, 'import torch\n'), ((3990, 4076), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', ([], {'optimizer': 'self.optimizer', 'gamma': 'decayRate'}), '(optimizer=self.optimizer, gamma=\n decayRate)\n', (4028, 4076), False, 'import torch\n'), ((4779, 4802), 'random.shuffle', 'random.shuffle', (['configs'], {}), '(configs)\n', (4793, 4802), False, 'import random\n'), ((5297, 5340), 'os.path.join', 'os.path.join', (['base_path', '"""../../../trained"""'], {}), "(base_path, 
'../../../trained')\n", (5309, 5340), False, 'import os\n'), ((5356, 5388), 'os.path.exists', 'os.path.exists', (['models_base_path'], {}), '(models_base_path)\n', (5370, 5388), False, 'import os\n'), ((5409, 5446), 'os.path.join', 'os.path.join', (['models_base_path', '"""hps"""'], {}), "(models_base_path, 'hps')\n", (5421, 5446), False, 'import os\n'), ((4744, 4769), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['param_grid'], {}), '(param_grid)\n', (4757, 4769), False, 'from sklearn.model_selection import ParameterGrid\n'), ((4856, 4870), 'pprint.pprint', 'pprint', (['params'], {}), '(params)\n', (4862, 4870), False, 'from pprint import pprint\n'), ((5242, 5268), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5258, 5268), False, 'import os\n'), ((5627, 5648), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (5636, 5648), False, 'import json\n'), ((1788, 1848), 'math.log', 'math.log', (['(self.seq_len + self.pred_seq_len)', 'self.kernel_size'], {}), '(self.seq_len + self.pred_seq_len, self.kernel_size)\n', (1796, 1848), False, 'import math\n'), ((5510, 5533), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5531, 5533), False, 'import datetime\n')]
|
"""
Tutorial: Two patch Rosenzweig-MacArthur predator-prey model using Symbolic tools
For details, see
"Predator migration in response to prey density: What are the consequences?"
by <NAME> et al, J. Math Biol, Vol. 43, pp. 561-581, (2001)
"""
from __future__ import print_function
from PyDSTool import *
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
# Declare names and initial values for (symbolic) parameters
mu = Par(0.8, 'mu')
k = Par(7, 'k')
D = Par(0.5, 'D')
theta = Par(1, 'theta')
h = Par(0.5, 'h')
# Compute nontrivial boundary equilibrium initial condition from parameters (see reference)
v1_0 = mu*(2*D + mu) / (D*(1-h*mu-theta*h*mu) + mu*(1-h*mu))
v2_0 = 0.0
p1_0 = (1+h*v1_0)*(1-v1_0/k)
p2_0 = (D/(D+mu))*(1+theta*h*v1_0)*(1-v1_0/k)
# Declare symbolic variables
v1 = Var('v1')
v2 = Var('v2')
p1 = Var('p1')
p2 = Var('p2')
# Create Symbolic Quantity objects for definitions
v1rhs = v1*(1-v1/k) - v1*p1/(1+h*v1)
v2rhs = v2*(1-v2/k) - v2*p2/(1+h*v2)
p1rhs = -1*mu*p1 + v1*p1/(1+h*v1) + D*(((1+theta*h*v2)/(1+h*v2))*p2 - ((1+theta*h*v1)/(1+h*v1))*p1)
p2rhs = -1*mu*p2 + v2*p2/(1+h*v2) + D*(((1+theta*h*v1)/(1+h*v1))*p1 - ((1+theta*h*v2)/(1+h*v2))*p2)
# Build Generator
DSargs = args(name='PredatorPrey')
DSargs.pars = [mu, k, D, theta, h]
DSargs.varspecs = args(v1=v1rhs,
v2=v2rhs,
p1=p1rhs,
p2=p2rhs)
# Use eval method to get a float value from the symbolic definitions given in
# terms of parameter values
DSargs.ics = args(v1=v1_0.eval(), v2=v2_0, p1=p1_0.eval(), p2=p2_0.eval())
ode = Generator.Vode_ODEsystem(DSargs)
# Set up continuation class
PC = ContClass(ode)
PCargs = args(name='EQ1', type='EP-C')
PCargs.freepars = ['k']
PCargs.StepSize = 1e-2
PCargs.MaxNumPoints = 50
PCargs.MaxStepSize = 1e-1
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PCargs.verbosity = 2
PC.newCurve(PCargs)
print('Computing curve...')
start = clock()
PC['EQ1'].forward()
print('done in %.3f seconds!' % (clock()-start))
PCargs.name = 'HO1'
PCargs.type = 'H-C2'
PCargs.initpoint = 'EQ1:H1'
PCargs.freepars = ['k','D']
PCargs.MaxNumPoints = 50
PCargs.MaxStepSize = 0.1
PCargs.LocBifPoints = ['ZH']
PCargs.SaveEigen = True
PC.newCurve(PCargs)
print('Computing Hopf curve...')
start = clock()
PC['HO1'].forward()
print('done in %.3f seconds!' % (clock()-start))
PCargs = args(name = 'FO1', type = 'LP-C')
PCargs.initpoint = 'HO1:ZH1'
PCargs.freepars = ['k','D']
PCargs.MaxNumPoints = 25
PCargs.MaxStepSize = 0.1
PCargs.LocBifPoints = 'all'
PCargs.SaveEigen = True
PC.newCurve(PCargs)
# Plot bifurcation diagram
PC.display(('k','D'), stability=True, figure=1)
plt.title('Bifurcation diagram of equilibria in (k,D) parameters')
# D value increases monotonically with k, so let's find the value at k=9.0
# by linear interpolation from the points found by PyCont
hopfs_unparam = PC['HO1'].sol[['D','v1','v2','p1','p2']]
hopfs = Pointset(indepvararray=PC['HO1'].sol['k'], indepvarname='k',
coorddict=hopfs_unparam.todict())
ix0, ix1 = hopfs.find(9)
print("k values found around 9.0 are %.3f, %.3f" % ( hopfs['k'][ix0], hopfs['k'][ix1] ))
# Arbitrarily choose closest lower index to check stability
print("Hopf point found for k = %.3f is stable?" % hopfs['k'][ix0], end=' ')
if PC['HO1'].sol[ix0].labels['H']['stab'] == 'N':
print("No")
else:
print("Yes")
# Create interpolatable curve from the pointset
hopfs_curve = pointset_to_traj(hopfs)
# Interpolate unstable Hopf equilibrium values of parameters/variables at k=9
Hpt = hopfs_curve(9.0)
# extract the variables defined for 'ode' Generator
Hics = Hpt[ode.funcspec.vars]
# perturb slightly off equilibrium
Hics['v2'] += 0.01
# extract the parameter value for D and indicate on bifurcation diagram
plt.plot(9.0, Hpt['D'], 'go')
ode.set(tdata=[0,100],
pars={'k': 9.0, 'D': Hpt['D']},
ics=Hics)
traj = ode.compute('test')
pts = traj.sample()
fig = plt.figure()
try:
ax = fig.gca(projection='3d')
except ValueError:
# pre-v1.0 version of Matplotlib
ax = Axes3D(fig)
ax.plot(pts['v1'], pts['p1'], pts['p2'],
label='Unstable periodic sol starting near eqm')
# to ensure your version accepts singleton points, enclose in extra brackets!
ax.plot(pts[[0]]['v1'], pts[[0]]['p1'], pts[[0]]['p2'], 'go', label='Initial condition')
ax.legend()
ax.set_xlabel('v1')
ax.set_ylabel('p1')
ax.set_zlabel('p2')
# 3D projection (v1,p1,p2) of unstable periodic sol. starting near eqm.
plt.draw()
|
[
"matplotlib.pyplot.title",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure"
] |
[((2735, 2801), 'matplotlib.pyplot.title', 'plt.title', (['"""Bifurcation diagram of equilibria in (k,D) parameters"""'], {}), "('Bifurcation diagram of equilibria in (k,D) parameters')\n", (2744, 2801), True, 'from matplotlib import pyplot as plt\n'), ((3850, 3879), 'matplotlib.pyplot.plot', 'plt.plot', (['(9.0)', "Hpt['D']", '"""go"""'], {}), "(9.0, Hpt['D'], 'go')\n", (3858, 3879), True, 'from matplotlib import pyplot as plt\n'), ((4016, 4028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4026, 4028), True, 'from matplotlib import pyplot as plt\n'), ((4554, 4564), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4562, 4564), True, 'from matplotlib import pyplot as plt\n'), ((4133, 4144), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (4139, 4144), False, 'from mpl_toolkits.mplot3d import Axes3D\n')]
|
from enum import Enum
import numpy as np
def mean_squared_error(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
    """Mean of squared residuals between observed and predicted values.

    With ``axis=None`` the mean is taken over all elements (scalar result);
    otherwise over the given axes with ``keepdims=True``.
    """
    squared_residuals = np.square(np.subtract(observed_value, predicted_value))
    if axis is None:
        return np.mean(squared_residuals)
    return np.mean(squared_residuals, axis=axis, keepdims=True)
def mean_squared_error_derivative(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
    """Return 2 * mean(observed - predicted), optionally over ``axis``
    (with ``keepdims=True``).

    NOTE(review): a textbook dMSE/dpredicted is elementwise
    -2*(observed-predicted)/N; this collapses the residual with a mean first
    and has the opposite sign -- confirm downstream expectations before
    changing anything here.
    """
    residual = np.subtract(observed_value, predicted_value)
    if axis is None:
        mean_residual = np.mean(residual)
    else:
        mean_residual = np.mean(residual, axis=axis, keepdims=True)
    return np.multiply(mean_residual, 2.0)
class LossFunctions:
    """Registry of available loss functions (plain function references)."""
    MSE = mean_squared_error
class LossFunctionDerivatives:
    """Registry of loss-function derivatives, mirroring LossFunctions."""
    MSE_DERIVATIVE = mean_squared_error_derivative
|
[
"numpy.subtract"
] |
[((213, 257), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (224, 257), True, 'import numpy as np\n'), ((303, 347), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (314, 347), True, 'import numpy as np\n'), ((560, 604), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (571, 604), True, 'import numpy as np\n'), ((657, 701), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (668, 701), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
from PIL import ImageFilter
import matplotlib.pyplot as plt
import os
from itertools import permutations
from IPython.display import clear_output
from copy import deepcopy
from collections import namedtuple
# ---------------- Image utilities ----------------
def read_img(filename):
    '''Open an image file and return it as a PIL Image object.

    Fix: the previous docstring claimed the *array* of the image was
    returned; the function actually returns the lazily-loaded ``Image``
    itself, not a numpy array.
    '''
    return Image.open(filename)
def split_img(im_shuffled, nb_lines, nb_cols, margin=(0, 0)):
    '''Returns a dictionary of all the pieces of the puzzle.

    Keys are (i, j) with i the column index (0..nb_cols-1) and j the row
    index (0..nb_lines-1), matching how the rest of this module consumes
    ``cropped`` (e.g. cropped_to_img, mean_grad).

    Fix: the original iterated ``i in range(nb_lines)`` and
    ``j in range(nb_cols)`` while scaling i by the piece *width* and j by
    the piece *height*, which only tiles the image correctly when
    nb_lines == nb_cols. The loop bounds now match the axes they scale.

    Use optional argument margin in order to have more smooth cuts.
    Args:
        - im_shuffled (Image object)
        - nb_lines (int)
        - nb_cols (int)
        - margin ((x_margin, y_margin))
    Returns:
        - cropped (dict)
    '''
    w, h = im_shuffled.size  # w, h = width, height
    # Nominal size of one piece.
    w_piece = (w / nb_cols)
    h_piece = (h / nb_lines)
    cropped = {}
    x_margin, y_margin = margin
    for i in range(nb_cols):       # i indexes columns (x direction)
        for j in range(nb_lines):  # j indexes rows (y direction)
            left = i * w_piece + x_margin / 2
            top = j * h_piece + y_margin / 2
            right = (i + 1) * w_piece - x_margin / 2
            bottom = (j + 1) * h_piece - y_margin / 2
            cropped[(i, j)] = im_shuffled.crop((left, top, right, bottom))
    return cropped
def display_image(img, nb_lines, nb_cols, title='', figsize=(5,6)):
    '''Show the image with one tick per puzzle piece on each axis, making
    piece identification easier.

    Fix: the original produced nb_cols+1 (resp. nb_lines+1) tick locations
    for only nb_cols (nb_lines) labels; matplotlib rejects mismatched
    locations/labels lengths. Ticks are now centred on each piece.

    Args:
        - img (Image object)
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - None
    '''
    plt.figure(figsize=figsize)
    # Centre of each piece: the piece's left/top edge plus half a piece.
    xticks_location = (img.width / nb_cols) / 2 + np.linspace(0, img.width, nb_cols, endpoint=False)
    yticks_location = (img.height / nb_lines) / 2 + np.linspace(0, img.height, nb_lines, endpoint=False)
    plt.xticks(xticks_location, range(nb_cols))
    plt.yticks(yticks_location, range(nb_lines))
    if title:
        plt.title(title)
    plt.imshow(img)
    return
def display_cropped(cropped, nb_lines, nb_cols, title='', figsize=(5,6)):
    '''Assemble the pieces into one image and display it with per-piece ticks.

    Fix: the original forwarded ``title=''`` instead of the caller-supplied
    ``title``, so the argument was silently ignored.

    Args:
        - cropped ({key: image})
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - None
    '''
    img = cropped_to_img(cropped, nb_lines, nb_cols)
    display_image(img, nb_lines, nb_cols, title=title, figsize=figsize)
    return
def save_cropped(cropped):
    '''Save every puzzle piece as '<i>-<j>.jpg' inside the 'cropped' folder,
    where (i, j) are the piece's coordinates in the PIL coordinate system.

    Args:
        - cropped ({key: image})
    Returns:
        - None
    '''
    for (i, j), piece in cropped.items():
        piece.save(os.path.join('cropped', f'{i}-{j}.jpg'))
    print('Images successfully saved.')
    return
# ---------------- Operations on images ----------------
def get_current_permutations(cropped):
    ''' Generator yielding, for every possible arrangement, a dict mapping
    each current position to a permuted position of the shuffled puzzle.
    Args:
        - cropped ({key: image})
    Returns:
        - generator object
    '''
    keys = list(cropped.keys())
    for ordering in permutations(keys):
        yield dict(zip(keys, ordering))
def grad_x(im1, im2):
    '''Return the summed squared pixel difference across the im1/im2
    frontier (im2 must be to the right of im1 in this module's convention).

    Fix: PIL images convert to uint8 arrays, so the original subtraction
    and np.square wrapped modulo 256 and produced wrong gradient
    magnitudes; the frontier rows are now promoted to int64 first.

    Args:
        - im1 (Image object)
        - im2 (Image object)
    Returns:
        - grad_x_val (int)
    NB: numpy and PIL don't share the same coordinate system! '''
    ## Conversion from Image object into numpy arrays
    arr1 = np.array(im1)
    arr2 = np.array(im2)
    min_x = min(arr1.shape[0], arr2.shape[0])
    min_y = min(arr1.shape[1], arr2.shape[1])
    arr1 = arr1[:min_x, :min_y, :]
    arr2 = arr2[:min_x, :min_y, :]
    ## Computation of the horizontal gradient at the frontier
    # Promote to a signed wide dtype to avoid uint8 wraparound/overflow.
    edge1 = arr1[-1, :, :].astype(np.int64)
    edge2 = arr2[0, :, :].astype(np.int64)
    return np.sum(np.square(edge1 - edge2))
def grad_y(im1, im2):
    '''Return the summed squared pixel difference across the im1/im2
    frontier (im2 must be below im1 in this module's convention).

    Fix: same uint8 wraparound/overflow as grad_x -- the frontier columns
    are now promoted to int64 before differencing.

    Args:
        - im1 (Image object)
        - im2 (Image object)
    Returns:
        - grad_y_val (int)
    NB: numpy and PIL don't share the same coordinate system! '''
    ## Conversion into numpy arrays
    arr1 = np.array(im1)
    arr2 = np.array(im2)
    min_x = min(arr1.shape[0], arr2.shape[0])
    min_y = min(arr1.shape[1], arr2.shape[1])
    arr1 = arr1[:min_x, :min_y, :]
    arr2 = arr2[:min_x, :min_y, :]
    ## Computation of the vertical gradient at the frontier
    # Promote to a signed wide dtype to avoid uint8 wraparound/overflow.
    edge1 = arr1[:, 0, :].astype(np.int64)
    edge2 = arr2[:, -1, :].astype(np.int64)
    return np.sum(np.square(edge1 - edge2))
def mean_grad(cropped, nb_lines, nb_cols):
    '''Average of the horizontal and vertical frontier gradients summed over
    every pair of adjacent pieces in the grid.'''
    total = 0
    # Horizontal seams: each piece against its right-hand neighbour.
    for j in range(nb_lines):
        for i in range(nb_cols - 1):
            total += grad_x(cropped[(i, j)], cropped[(i + 1, j)])
    # Vertical seams: each piece against the piece below it.
    for i in range(nb_cols):
        for j in range(nb_lines - 1):
            total += grad_y(cropped[(i, j)], cropped[(i, j + 1)])
    return total / 2
def read_cropped_im(i, j):
    '''Load piece (i, j) from the 'cropped' folder as an Image object.'''
    return Image.open(os.path.join('cropped', f'{i}-{j}.jpg'))
def get_concat_h(im1, im2):
    ''' Return a new RGB image with im2 pasted to the right of im1.'''
    canvas = Image.new('RGB', (im1.width + im2.width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas
def get_concat_v(im1, im2):
    ''' Return a new RGB image with im2 pasted below im1.'''
    canvas = Image.new('RGB', (im1.width, im1.height + im2.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (0, im1.height))
    return canvas
def config_to_img(map_config, nb_lines, nb_cols):
    ''' Assemble an image from `map_config`, loading each mapped piece from
    the 'cropped' folder.

    Strategy: build each row by horizontal concatenation, then stack the
    rows vertically.

    Args:
        - map_config ({(old_coords): (new_coords), ...}): mapping from the
          current configuration to the shuffled puzzle.
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - an Image object
    '''
    # Step 1: concatenate each row left to right.
    rows = []
    for j in range(nb_lines):
        # Start with the left-most piece of the row (* unpacks the key tuple).
        row_img = read_cropped_im(*map_config[(0, j)])
        for i in range(1, nb_cols):
            row_img = get_concat_h(row_img, read_cropped_im(*map_config[(i, j)]))
        rows.append(row_img)
    # Step 2: stack the assembled rows top to bottom.
    result = rows[0]
    for row_img in rows[1:]:
        result = get_concat_v(result, row_img)
    return result
def cropped_to_img(cropped, nb_lines, nb_cols):
    ''' Assemble the pieces of `cropped` into a single image.

    Strategy: build each row by horizontal concatenation, then stack the
    rows vertically.

    Args:
        - cropped ({(x, y): Image Object, ...})
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - an Image object
    '''
    # Step 1: concatenate each row left to right.
    rows = []
    for j in range(nb_lines):
        row_img = cropped[(0, j)]
        for i in range(1, nb_cols):
            row_img = get_concat_h(row_img, cropped[(i, j)])
        rows.append(row_img)
    # Step 2: stack the assembled rows top to bottom.
    result = rows[0]
    for row_img in rows[1:]:
        result = get_concat_v(result, row_img)
    return result
def get_grad_orientation(im_1, im_2, orientation):
    '''Gradient at the limit when im_2 is placed next to im_1 in the given
    direction. Orientation must be in ['N', 'E', 'W', 'S'].

    Args:
        - im_1 (Image object)
        - im_2 (Image object)
        - orientation (str)
    Returns:
        - grad (float)
    '''
    assert orientation in ['N', 'E', 'W', 'S'], 'Given input for orientation not understood.'
    # E/W use the vertical-frontier gradient, S/N the horizontal one;
    # the operand order flips for the mirrored direction.
    dispatch = {
        'E': lambda: grad_y(im_1, im_2),
        'W': lambda: grad_y(im_2, im_1),
        'S': lambda: grad_x(im_1, im_2),
        'N': lambda: grad_x(im_2, im_1),
    }
    return dispatch[orientation]()
def getBestConfig(cropped, nb_lines, nb_cols):
    '''For every piece and each of the four directions ('N', 'E', 'W', 'S'),
    find the other piece with the lowest frontier gradient.

    Args:
        - cropped {key: image}
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - dicBestConfig {piece: {'N': best_N_id, ..., 'grad_N': best_N_grad, ...}}
          where 'grad_<dir>' holds the winning gradient for that direction.
    '''
    best = {}
    for ref_id in cropped:
        best[ref_id] = {}
        for direction in ['N', 'E', 'W', 'S']:
            # Track the lowest-gradient candidate for this direction.
            lowest_grad = np.inf
            lowest_id = None
            for cand_id in cropped:
                if cand_id == ref_id:
                    # A piece cannot neighbour itself.
                    continue
                g = get_grad_orientation(
                    im_1=cropped[ref_id],
                    im_2=cropped[cand_id],
                    orientation=direction)
                if g < lowest_grad:
                    lowest_grad = g
                    lowest_id = cand_id
            best[ref_id][direction] = lowest_id
            best[ref_id]['grad_' + direction] = lowest_grad
    return best
def getOrderedConfigsByConfig(dicBestConfig, orientation, reverse=False):
    '''Return the items of dicBestConfig sorted by the gradient of the
    given orientation.

    Args:
        - dicBestConfig (dict): {piece_ID: {..., 'grad_N': float, ...}}
        - orientation (str): one of 'N', 'E', 'W', 'S'
        - reverse (bool): sort in descending order when True
    Returns:
        - ordered_list [(piece_ID, info) pairs ordered by
          info['grad_' + orientation]]
    '''
    assert orientation in ['N', 'E', 'W', 'S'], 'Given input for orientation not understood.'
    # Fix: removed the unused local 'orientations' list (dead code).
    grad_orientation_key = 'grad_' + orientation
    return sorted(dicBestConfig.items(), key=lambda x: x[1][grad_orientation_key], reverse=reverse)
def getOrderedConfigs(dicBestConfig, reverse=False):
    """Flatten dicBestConfig into (start, end, orientation, score) records
    and sort them by score; orientation is not used as a sort key.

    Args:
        - dicBestConfig (dict)
        - reverse (bool)
    Returns:
        - ordered_list (list): Config namedtuples of the form
          (start, end, orientation, score), ordered by score
    """
    # Named tuple for readable field access on each record.
    Config = namedtuple('Config', ['start', 'end', 'orientation', 'score'])
    records = [
        Config(start, val[direction], direction, val['grad_' + direction])
        for start, val in dicBestConfig.items()
        for direction in ('N', 'E', 'W', 'S')
    ]
    return sorted(records, key=lambda record: record.score, reverse=reverse)
# ---------------- Brute force ----------------
def brute_force(cropped, nb_lines, nb_cols):
    '''Brute-force solver. VERY SLOW!!!
    Renders every possible configuration and saves it as a JPEG in the
    'outputs' folder.

    Args:
        - cropped {dict}
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - None
    '''
    for config_idx, candidate in enumerate(get_current_permutations(cropped)):
        print(f'Current configuration: {config_idx}')
        rendered = config_to_img(candidate, nb_lines, nb_cols)
        destination = os.path.join('outputs', f'{config_idx}.jpg')
        rendered.save(destination)
        # Keep the notebook output to a single progress line.
        clear_output(wait=True)
# ---------------- Manual solve ----------------
def config_switcher(cropped, nb_lines, nb_cols, coords_1, coords_2):
    '''Return a new cropped dictionary in which the two given pieces have
    swapped places. The input dictionary is left untouched.

    Args:
        - cropped
        - nb_lines: unused, kept for a uniform signature
        - nb_cols: unused, kept for a uniform signature
        - coords_1 (2-tuple): 1st piece to move
        - coords_2 (2-tuple): 2nd piece to move
    Returns:
        - new_cropped
    '''
    swapped = deepcopy(cropped)
    piece_a = swapped[coords_1]
    swapped[coords_1] = swapped[coords_2]
    swapped[coords_2] = piece_a
    return swapped
def config_switcher_helper(cropped, nb_lines, nb_cols, coords_1, coords_2):
    '''Display, side by side, the current image and the image obtained
    after the two given pieces have swapped places.

    NOTE(review): the original docstring claimed this returns new_cropped,
    but the function returns None — it is display-only; confirm intent.

    Args:
        - cropped
        - nb_lines
        - nb_cols
        - coords_1 (2-tuple): 1st piece to move
        - coords_2 (2-tuple): 2nd piece to move
    Returns:
        - None
    '''
    plt.figure(figsize=(12, 10))
    plt.subplot(1, 2, 1)
    before = cropped_to_img(cropped, nb_lines, nb_cols)
    # Tick labels centred on each puzzle piece; computed once from the
    # "before" image and reused for both subplots, as in the original.
    x_locs = (before.width / nb_cols) / 2 + np.linspace(0, before.width, nb_cols + 1)
    y_locs = (before.height / nb_lines) / 2 + np.linspace(0, before.height, nb_lines + 1)

    def _apply_ticks():
        # Shared axis decoration for both subplots.
        plt.xticks(x_locs, range(nb_cols))
        plt.yticks(y_locs, range(nb_lines))

    _apply_ticks()
    plt.imshow(before)
    plt.title('Old image')
    plt.subplot(1, 2, 2)
    swapped = config_switcher(cropped, nb_lines, nb_cols, coords_1, coords_2)
    after = cropped_to_img(swapped, nb_lines, nb_cols)
    _apply_ticks()
    plt.imshow(after)
    plt.title('New image')
    return
# ---------------- Backtracking ----------------
def get_next_location(nb_pieces, nb_lines, nb_cols):
    '''Return the coords (x, y) of the next piece to place.

    Completion strategy: fill the grid left to right, then top to
    bottom, so the piece following nb_pieces already-placed ones lands
    at column nb_pieces % nb_cols and row nb_pieces // nb_cols.

    Args:
        - nb_pieces (int): number of pieces already placed
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - (x_new, y_new) (2-tuple of int)
    '''
    # divmod collapses the original "recover previous coords, then step
    # forward" logic: row is the quotient, column is the remainder.
    y_new, x_new = divmod(nb_pieces, nb_cols)
    print(f'Added new piece at: ({x_new}, {y_new})')
    assert 0 <= x_new < nb_cols, 'Error with the x axis!'
    assert 0 <= y_new < nb_lines, 'Error with the y axis!'
    return (x_new, y_new)
def score(config, cropped, nb_lines, nb_cols):
    '''Return the score of a complete configuration: the squared mean
    gradient of the assembled image, divided by 2.

    NOTE(review): the original docstring said "divided by the total
    number of pieces", but the code divides by 2 — confirm which
    normalisation is intended.

    Args:
        - config ({(new_coords): (old_coords), ...})
        - cropped ({coords: Image object, ...})
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - score (float)
    '''
    # mean_grad expects a cropped-style dict {(x, y): Image, ...};
    # get_config_mapped performs that conversion.
    assembled = get_config_mapped(config=config, cropped=cropped)
    return mean_grad(assembled, nb_lines, nb_cols) ** 2 / 2
def get_config_mapped(config, cropped):
    '''Convert a config dictionary into the cropped-dictionary format.

    Args:
        - config ({(new_coords): (old_coords), ...}): current configuration
          (not necessarily completed)
        - cropped ({(0, 0): <PIL.Image.Image>, ...}): every piece of the puzzle
    Returns:
        - {(new_coords): piece, ...} with each piece looked up by its old coords
    '''
    mapped = {}
    for new_coords, old_coords in config.items():
        mapped[new_coords] = cropped[old_coords]
    return mapped
def partial_score(partial_config, cropped, nb_lines, nb_cols):
    '''Compute the gradient score of a (possibly incomplete) configuration.

    Sums the x- and y-gradients over every pair of already-placed
    neighbouring pieces, then returns (sum/2)**2 normalised by the total
    number of cells in the grid.

    Args:
        - partial_config ({(new_coords): (old_coords), ...})
        - cropped ({coords: Image object, ...})
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - float
    '''
    res = 0
    config_mapped = get_config_mapped(config=partial_config, cropped=cropped)
    # Idiom fix: test membership on the dict itself, not on .keys().
    # Gradient wrt to x:
    for j in range(nb_lines):
        for i in range(nb_cols - 1):
            if (i, j) in config_mapped and (i + 1, j) in config_mapped:
                res += grad_x(config_mapped[(i, j)], config_mapped[(i + 1, j)])
    # Gradient wrt to y:
    for i in range(nb_cols):
        for j in range(nb_lines - 1):
            if (i, j) in config_mapped and (i, j + 1) in config_mapped:
                res += grad_y(config_mapped[(i, j)], config_mapped[(i, j + 1)])
    return (res / 2) ** 2 / (nb_lines * nb_cols)
def solve_backtracking(cropped, nb_lines, nb_cols):
    '''
    Solve the puzzle with backtracking.

    In what follows, 'config' is a dictionary with the shape
    {(x, y): (i, j), ...}, i.e. it maps the position being built to a
    piece of the shuffled puzzle.

    Args:
        - cropped ({(0, 0): <PIL.Image.Image>, ...}): dictionary of
          every single piece of the puzzle
        - nb_lines (int)
        - nb_cols (int)
    Returns:
        - (bestScore, bestSol): lowest gradient score found and the
          corresponding complete configuration (np.inf / None when no
          complete configuration was reached)
    '''
    bestScore = np.inf
    bestSol = None
    nb_pieces_total = len(cropped)
    config = {}
    # ------ Auxiliary functions ------
    def is_terminal(config):
        '''True once the configuration places every piece.'''
        return len(config) == nb_pieces_total
    def is_promising(partial_config, bestScore):
        '''True iff the partial configuration scores strictly better
        than the best complete solution found so far.'''
        current_score = partial_score(partial_config, cropped, nb_lines, nb_cols)
        print(f'current_score: {current_score}')
        return current_score < bestScore
    def children(config, cropped, bestScore):
        '''Yield configurations that extend 'config' by one piece.
        Completion strategy: left to right, then top to bottom.'''
        nb_pieces = len(config)
        next_location = get_next_location(nb_pieces=nb_pieces, nb_lines=nb_lines, nb_cols=nb_cols)
        # Perf: hoist the used-pieces set; membership on config.values()
        # was an O(n) scan per candidate piece.
        used = set(config.values())
        remaining_pieces = [coords for coords in cropped.keys() if coords not in used]
        for next_piece in remaining_pieces:
            config_copy = deepcopy(config)
            assert next_location not in config_copy.keys(), 'issue when completing the current config'
            config_copy[next_location] = next_piece
            if is_promising(config_copy, bestScore):
                print('Promising branch.\n')
                yield config_copy
            else:
                print('Not promising branch.\n')
                continue  # get directly to next iteration
    def backtracking(config, cropped, bestScore, bestSol):
        '''
        Recursive search. Returns the best (score, solution) seen in this
        subtree, never worse than the incoming (bestScore, bestSol).
        '''
        # Bug fix: new_bestScore/new_bestSol used to be assigned only in
        # conditional branches, so a terminal config that did not improve
        # the score — or a node with no promising child — raised
        # UnboundLocalError at the return statement.
        new_bestScore, new_bestSol = bestScore, bestSol
        if is_terminal(config):
            print('is_terminal')
            current_score = score(config, cropped, nb_lines, nb_cols)
            print(f'current_score: {current_score}\n')
            if current_score < new_bestScore:
                new_bestScore = current_score
                new_bestSol = deepcopy(config)
                print(f'New bestScore: {new_bestScore}\n')
        else:
            print(f'not terminal, current nb of pieces: {len(config)}')
            # Bug fix: propagate the tightened bound to subsequent
            # recursive calls so pruning benefits from earlier results.
            for new_config in children(config, cropped, new_bestScore):
                new_bestScore, new_bestSol = backtracking(new_config, cropped, new_bestScore, new_bestSol)
        return new_bestScore, new_bestSol
    # ------ Main ------
    return backtracking(config, cropped, bestScore, bestSol)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"PIL.Image.new",
"copy.deepcopy",
"matplotlib.pyplot.imshow",
"itertools.permutations",
"numpy.square",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"numpy.array",
"collections.namedtuple",
"numpy.linspace",
"IPython.display.clear_output",
"os.path.join"
] |
[((380, 400), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (390, 400), False, 'from PIL import Image\n'), ((1631, 1658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1641, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1979, 1994), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1989, 1994), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3362), 'itertools.permutations', 'permutations', (['list_keys'], {}), '(list_keys)\n', (3351, 3362), False, 'from itertools import permutations\n'), ((3779, 3792), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (3787, 3792), True, 'import numpy as np\n'), ((3804, 3817), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (3812, 3817), True, 'import numpy as np\n'), ((4422, 4435), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (4430, 4435), True, 'import numpy as np\n'), ((4447, 4460), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (4455, 4460), True, 'import numpy as np\n'), ((5463, 5516), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im1.width + im2.width, im1.height)'], {}), "('RGB', (im1.width + im2.width, im1.height))\n", (5472, 5516), False, 'from PIL import Image\n'), ((5695, 5749), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im1.width, im1.height + im2.height)'], {}), "('RGB', (im1.width, im1.height + im2.height))\n", (5704, 5749), False, 'from PIL import Image\n'), ((12471, 12533), 'collections.namedtuple', 'namedtuple', (['"""Config"""', "['start', 'end', 'orientation', 'score']"], {}), "('Config', ['start', 'end', 'orientation', 'score'])\n", (12481, 12533), False, 'from collections import namedtuple\n'), ((13934, 13951), 'copy.deepcopy', 'deepcopy', (['cropped'], {}), '(cropped)\n', (13942, 13951), False, 'from copy import deepcopy\n'), ((14457, 14485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (14467, 14485), True, 'import 
matplotlib.pyplot as plt\n'), ((14493, 14513), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (14504, 14513), True, 'import matplotlib.pyplot as plt\n'), ((14882, 14903), 'matplotlib.pyplot.imshow', 'plt.imshow', (['old_image'], {}), '(old_image)\n', (14892, 14903), True, 'import matplotlib.pyplot as plt\n'), ((14908, 14930), 'matplotlib.pyplot.title', 'plt.title', (['"""Old image"""'], {}), "('Old image')\n", (14917, 14930), True, 'import matplotlib.pyplot as plt\n'), ((14940, 14960), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (14951, 14960), True, 'import matplotlib.pyplot as plt\n'), ((15207, 15228), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_image'], {}), '(new_image)\n', (15217, 15228), True, 'import matplotlib.pyplot as plt\n'), ((15233, 15255), 'matplotlib.pyplot.title', 'plt.title', (['"""New image"""'], {}), "('New image')\n", (15242, 15255), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1748), 'numpy.linspace', 'np.linspace', (['(0)', 'img.width', '(nb_cols + 1)'], {}), '(0, img.width, nb_cols + 1)\n', (1721, 1748), True, 'import numpy as np\n'), ((1799, 1839), 'numpy.linspace', 'np.linspace', (['(0)', 'img.height', '(nb_lines + 1)'], {}), '(0, img.height, nb_lines + 1)\n', (1810, 1839), True, 'import numpy as np\n'), ((1957, 1973), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1966, 1973), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2884), 'os.path.join', 'os.path.join', (['"""cropped"""', 'filename'], {}), "('cropped', filename)\n", (2863, 2884), False, 'import os\n'), ((4071, 4112), 'numpy.square', 'np.square', (['(arr1[-1, :, :] - arr2[0, :, :])'], {}), '(arr1[-1, :, :] - arr2[0, :, :])\n', (4080, 4112), True, 'import numpy as np\n'), ((4712, 4753), 'numpy.square', 'np.square', (['(arr1[:, 0, :] - arr2[:, -1, :])'], {}), '(arr1[:, 0, :] - arr2[:, -1, :])\n', (4721, 4753), True, 'import numpy as np\n'), ((5305, 5344), 
'os.path.join', 'os.path.join', (['"""cropped"""', 'f"""{i}-{j}.jpg"""'], {}), "('cropped', f'{i}-{j}.jpg')\n", (5317, 5344), False, 'import os\n'), ((13432, 13465), 'os.path.join', 'os.path.join', (['"""outputs"""', 'filename'], {}), "('outputs', filename)\n", (13444, 13465), False, 'import os\n'), ((13507, 13530), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (13519, 13530), False, 'from IPython.display import clear_output\n'), ((14630, 14674), 'numpy.linspace', 'np.linspace', (['(0)', 'old_image.width', '(nb_cols + 1)'], {}), '(0, old_image.width, nb_cols + 1)\n', (14641, 14674), True, 'import numpy as np\n'), ((14731, 14777), 'numpy.linspace', 'np.linspace', (['(0)', 'old_image.height', '(nb_lines + 1)'], {}), '(0, old_image.height, nb_lines + 1)\n', (14742, 14777), True, 'import numpy as np\n'), ((20394, 20410), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (20402, 20410), False, 'from copy import deepcopy\n'), ((22125, 22141), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (22133, 22141), False, 'from copy import deepcopy\n')]
|
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
import django
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from app.models import User, mailAddress, Mail
from django.db.models import Count, Avg, Q, Case, When
from django.db.models.functions import TruncDate
from django.utils.timezone import datetime
@login_required(login_url="/login/")
def index(request):
    """Dashboard view: high-level statistics about the mail corpus."""
    template = 'index.html'
    total_mails = Mail.objects.count()
    collaborators = User.objects.filter(in_enron=True).count()
    external_contacts = User.objects.filter(in_enron=False).count()
    intern_count = Mail.objects.filter(is_intern=True).count()
    extern_count = Mail.objects.filter(is_intern=False).count()
    ratio = intern_count / extern_count
    context = {
        'email_exchanges': total_mails,
        'num_collaborators': collaborators,
        'extern_contact': external_contacts,
        'intern_exchange': round(ratio, 1),
    }
    return render(request, template, context)
@login_required(login_url="/login/")
def employees(request):
    """Employee leaderboards built from three raw-SQL rankings:
    mci (most internal mails sent), tqr (reply/received ratio) and
    gib (sent-minus-received-reply difference).

    Query-string parameters: start_date, end_date, lines, low_thr, high_thr.
    """
    template = 'employees.html'
    start_date = request.GET.get('start_date')
    if not start_date:
        start_date = datetime(1900, 1, 1)
    end_date = request.GET.get('end_date')
    if not end_date:
        end_date = datetime(2100, 1, 1)
    # Security/consistency fix: 'lines' was interpolated into the raw SQL
    # without validation (unlike couples()/days()); coerce it to int.
    lines = request.GET.get('lines')
    try:
        lines = int(lines)
    except (TypeError, ValueError):
        lines = 5
    low_thr = request.GET.get('low_thr')
    if not low_thr:
        low_thr = 0
    else:
        try:
            low_thr = int(low_thr)
        except ValueError:
            low_thr = 0
    high_thr = request.GET.get('high_thr')
    if not high_thr:
        high_thr = 10**6
    else:
        try:
            high_thr = int(high_thr)
        except ValueError:
            high_thr = 10**6
    # Security fix: the dates come straight from the query string — bind
    # them as SQL parameters instead of interpolating user input into the
    # statement. low_thr/high_thr/lines are validated ints, so their
    # interpolation is safe.
    date_params = [str(start_date), str(end_date)]
    mci = User.objects.raw(f"""SELECT cnt.id, cnt.name, cnt.category, cnt.c
            FROM(
            SELECT u.name AS name, u.category AS category, u.id, count(u.id) AS c
            FROM app_user as u, app_mailaddress as ma, app_mail as m
            WHERE u.id=ma.user_id AND ma.id=m.sender_id
            AND m.is_intern=True
            AND date(m.date) > %s
            AND date(m.date) < %s

            GROUP BY u.id
            ) AS cnt
            WHERE cnt.c > {low_thr} AND cnt.c < {high_thr}
            ORDER BY cnt.c DESC
            LIMIT {lines};""", date_params)
    tqr = User.objects.raw(f""" SELECT u.id, u.name, u.category, s.c AS sent_reply_cnt, r.c AS received_cnt, s.c*100/r.c AS ratio
            FROM app_user AS u,
            (SELECT u.id, count(u.id) AS c
            FROM app_user as u, app_mailaddress as ma, app_mail as m
            WHERE u.id=ma.user_id AND ma.id=m.recipient_id
            AND date(m.date) > %s
            AND date(m.date) < %s 
            GROUP BY u.id
            ) AS r,
            (SELECT u.id, count(u.id) AS c
            FROM app_user as u, app_mailaddress as ma, app_mail as m
            WHERE u.id=ma.user_id AND ma.id=m.sender_id
            AND m.is_reply=True
            AND date(m.date) > %s
            AND date(m.date) < %s 
            GROUP BY u.id
            ) AS s
            WHERE u.id=r.id AND u.id=s.id
            AND s.c*100/r.c < 100
            AND s.c*100/r.c > {low_thr}
            AND s.c*100/r.c < {high_thr}
            ORDER BY ratio DESC
            LIMIT {lines};
            """, date_params * 2)
    gib = User.objects.raw(f""" SELECT u.id, u.name, u.category, s.c AS sent_cnt, r.c AS received_cnt, s.c - r.c AS diff
            FROM app_user AS u,
            (SELECT u.name, u.category, u.id, count(u.id) AS c
            FROM app_user as u, app_mailaddress as ma, app_mail as m
            WHERE u.id=ma.user_id AND ma.id=m.recipient_id
            AND m.is_reply = True
            AND date(m.date) > %s
            AND date(m.date) < %s 
            GROUP BY u.id
            ) AS r,
            (SELECT u.id, count(u.id) AS c
            FROM app_user as u, app_mailaddress as ma, app_mail as m
            WHERE u.id=ma.user_id AND ma.id=m.sender_id
            AND m.is_reply=False
            AND date(m.date) > %s
            AND date(m.date) < %s 
            GROUP BY u.id
            ) AS s
            WHERE u.id=r.id AND u.id=s.id
            AND s.c - r.c > {low_thr}
            AND s.c - r.c < {high_thr}
            ORDER BY diff DESC
            LIMIT {lines};
            """, date_params * 2)
    context = {
            'mci':mci,
            'tqr':tqr,
            'gib':gib,
            'start_date':start_date,
            'end_date':end_date,
            'low_thr':low_thr if low_thr != 0 else '',
            'high_thr':high_thr if high_thr != 10**6 else ''
            }
    return render(request, template, context)
@login_required(login_url="/login/")
def couples(request):
    """Rank (sender, recipient) pairs by the number of internal mails
    they exchanged in the requested date window."""
    template = 'couples.html'
    start_date = request.GET.get('start_date')
    if not start_date:
        start_date = datetime(1900, 1, 1)
    end_date = request.GET.get('end_date')
    if not end_date:
        end_date = datetime(2100, 1, 1)
    # Query-string knobs; fall back to defaults on missing/garbage values.
    try:
        lines = int(request.GET.get('lines'))
    except (TypeError, ValueError):
        lines = 5
    try:
        low_thr = int(request.GET.get('low_thr'))
    except (TypeError, ValueError):
        low_thr = 0
    try:
        high_thr = int(request.GET.get('high_thr'))
    except (TypeError, ValueError):
        high_thr = 10 ** 6
    pairs = Mail.objects.filter(date__gte=start_date, date__lte=end_date, is_intern=1)
    pairs = pairs.values(
        'sender_id__user_id__name', 'sender_id__user_id__category',
        'recipient_id__user_id__name', 'recipient_id__user_id__category',
    )
    pairs = pairs.annotate(dcount=Count('enron_id')).order_by('-dcount')
    couples = pairs.filter(dcount__gte=low_thr, dcount__lte=high_thr)[:lines]
    context = {
        'couples': couples,
        'start_date': start_date,
        'end_date': end_date,
        'low_thr': low_thr if low_thr != 0 else '',
        'high_thr': high_thr if high_thr != 10 ** 6 else '',
    }
    return render(request, template, context)
@login_required(login_url="/login/")
def days(request):
    """List days ranked by the number of mails exchanged, paginated."""
    template = 'days.html'
    start_date = request.GET.get('start_date')
    if not start_date:
        start_date = datetime(1900, 1, 1)
    end_date = request.GET.get('end_date')
    if not end_date:
        end_date = datetime(2100, 1, 1)
    # Minimum mail count per day; defaults to 0 on missing/garbage input.
    try:
        threshold = int(request.GET.get('threshold'))
    except (TypeError, ValueError):
        threshold = 0
    per_day = Mail.objects.annotate(time=TruncDate('date')).values('time')
    per_day = per_day.filter(date__gte=start_date, date__lte=end_date)
    per_day = per_day.annotate(dcount=Count('enron_id')).order_by('-dcount')
    mails_per_day = per_day.filter(dcount__gte=threshold)
    # Page size; defaults to 10 on missing/garbage input.
    try:
        per_page = int(request.GET.get('lines'))
    except (TypeError, ValueError):
        per_page = 10
    paginator = Paginator(mails_per_day, per_page)
    page = request.GET.get('page')
    try:
        mails_per_day = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        mails_per_day = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        mails_per_day = paginator.page(paginator.num_pages)
    context = {
        'days': mails_per_day,
        'start_date': start_date,
        'end_date': end_date,
        'threshold': threshold if threshold != 0 else '',
    }
    return render(request, template, context)
@login_required(login_url="/login/")
def profile(request):
    """Per-employee profile view.

    Context 'code' values: 0 = no name given, -1 = user lookup failed,
    -2 = user is not part of Enron, 1 = success.
    Query-string parameters: name, response_time (1 enables the slow
    average-response-time computation).
    """
    template = 'profile.html'
    name = request.GET.get('name')
    if not name:
        context = {'code': 0}
        return render(request, template, context)
    try:
        user = User.objects.get(name=name)
    # Bug fix: a bare 'except:' swallowed everything, including
    # programming errors and KeyboardInterrupt; only the lookup
    # failures should map to code -1.
    except (User.DoesNotExist, User.MultipleObjectsReturned):
        context = {'code': -1,
                'name': name
                }
        return render(request, template, context)
    if user.in_enron == False:
        context = {'code': -2,
                'name': name
                }
        return render(request, template, context)
    response_time = request.GET.get('response_time')
    if not response_time:
        response_time = 0
    else:
        try:
            response_time = int(response_time)
        except ValueError:
            response_time = 0
    # Mails sent / received per day.
    user_mails = mailAddress.objects.filter(user_id=user.id)
    mails = Mail.objects.filter(Q(sender_id__in=user_mails) | Q(recipient_id__in=user_mails))
    sent_per_day = mails.filter(sender_id__in=user_mails).annotate(time=TruncDate('date'))\
        .values('time').annotate(dcount=Count('enron_id')).aggregate(Avg('dcount'))['dcount__avg']
    received_per_day = mails.filter(recipient_id__in=user_mails).annotate(time=TruncDate('date'))\
        .values('time').annotate(dcount=Count('subject')).aggregate(Avg('dcount'))['dcount__avg']
    # aggregate(Avg(...)) yields None on an empty queryset.
    if sent_per_day is None:
        sent_per_day = 0
    if received_per_day is None:
        received_per_day = 0
    # Average response time (expensive: one query per reply), opt-in only.
    average_response_time = 0
    if response_time == 1:
        replies = mails.filter(sender_id__in=user_mails, is_reply=1)
        number_of_responses = 0
        for mail in replies:
            # NOTE(review): subject[4:] assumes replies carry a 4-char
            # prefix such as 'Re: ' — confirm against the ingestion code.
            previous_mail = mails.filter(sender_id=mail.recipient_id, recipient_id=mail.sender_id,
                    date__lt=mail.date, subject__contains=mail.subject[4:]).order_by('-date')[:1]
            previous_mail = list(previous_mail)
            if previous_mail:
                previous_mail = previous_mail[0]
                number_of_responses += 1
                average_response_time += (mail.date - previous_mail.date).total_seconds()
        if number_of_responses != 0:
            average_response_time /= number_of_responses
    # I/E ratio.
    # NOTE(review): annotate() yields a queryset, not a count — the
    # template presumably uses |length; confirm .count() wasn't intended.
    number_of_internal_mails = mails.filter(is_intern=1).annotate(count=Count('is_intern'))
    number_of_external_mails = mails.filter(is_intern=0).annotate(count=Count('is_intern'))
    # Internal contacts. user.id is bound as a SQL parameter for safety.
    contacts = User.objects.raw("""SELECT u.id, u.name, u.category, u.in_enron, contact.id
            FROM app_user AS u,
            (SELECT m.recipient_id AS id FROM app_user AS u, app_mailaddress AS ma, app_mail AS m
            WHERE m.sender_id=ma.id AND ma.user_id=%s
            GROUP BY m.recipient_id
            ) AS contact
            WHERE u.id=contact.id AND u.in_enron=True;""", [user.id])
    context = {'code': 1,
            'name': name,
            'category': user.category,
            'average_sent': round(sent_per_day, 2),
            'average_received': round(received_per_day, 2),
            'average_response_time': f'{round(average_response_time/3600,2)}h',
            'number_of_internal_mails': number_of_internal_mails,
            'number_of_external_mails': number_of_external_mails,
            'contacts': contacts,
            }
    return render(request, template, context)
|
[
"django.contrib.auth.decorators.login_required",
"app.models.User.objects.get",
"app.models.Mail.objects.count",
"app.models.User.objects.raw",
"app.models.Mail.objects.filter",
"django.utils.timezone.datetime",
"app.models.User.objects.filter",
"django.db.models.functions.TruncDate",
"django.db.models.Q",
"app.models.mailAddress.objects.filter",
"django.core.paginator.Paginator",
"django.shortcuts.render",
"django.db.models.Avg",
"django.db.models.Count"
] |
[((581, 616), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (595, 616), False, 'from django.contrib.auth.decorators import login_required\n'), ((1218, 1253), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (1232, 1253), False, 'from django.contrib.auth.decorators import login_required\n'), ((6650, 6685), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (6664, 6685), False, 'from django.contrib.auth.decorators import login_required\n'), ((8198, 8233), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (8212, 8233), False, 'from django.contrib.auth.decorators import login_required\n'), ((9834, 9869), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/login/"""'}), "(login_url='/login/')\n", (9848, 9869), False, 'from django.contrib.auth.decorators import login_required\n'), ((689, 709), 'app.models.Mail.objects.count', 'Mail.objects.count', ([], {}), '()\n', (707, 709), False, 'from app.models import User, mailAddress, Mail\n'), ((1180, 1214), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (1186, 1214), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2009, 2933), 'app.models.User.objects.raw', 'User.objects.raw', (['f"""SELECT cnt.id, cnt.name, cnt.category, cnt.c\n FROM(\n SELECT u.name AS name, u.category AS category, u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND m.is_intern=True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\'\n\n GROUP BY u.id\n ) AS cnt\n WHERE cnt.c > {low_thr} AND cnt.c 
< {high_thr}\n ORDER BY cnt.c DESC\n LIMIT {lines};"""'], {}), '(\n f"""SELECT cnt.id, cnt.name, cnt.category, cnt.c\n FROM(\n SELECT u.name AS name, u.category AS category, u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND m.is_intern=True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\'\n\n GROUP BY u.id\n ) AS cnt\n WHERE cnt.c > {low_thr} AND cnt.c < {high_thr}\n ORDER BY cnt.c DESC\n LIMIT {lines};"""\n )\n', (2025, 2933), False, 'from app.models import User, mailAddress, Mail\n'), ((2935, 4634), 'app.models.User.objects.raw', 'User.objects.raw', (['f""" SELECT u.id, u.name, u.category, s.c AS sent_reply_cnt, r.c AS received_cnt, s.c*100/r.c AS ratio\n FROM app_user AS u,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.recipient_id\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS r,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND m.is_reply=True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS s\n WHERE u.id=r.id AND u.id=s.id\n AND s.c*100/r.c < 100\n AND s.c*100/r.c > {low_thr}\n AND s.c*100/r.c < {high_thr}\n ORDER BY ratio DESC\n LIMIT {lines};\n """'], {}), '(\n f""" SELECT u.id, u.name, u.category, s.c AS sent_reply_cnt, r.c AS received_cnt, s.c*100/r.c AS ratio\n FROM app_user AS u,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.recipient_id\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS r,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND 
m.is_reply=True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS s\n WHERE u.id=r.id AND u.id=s.id\n AND s.c*100/r.c < 100\n AND s.c*100/r.c > {low_thr}\n AND s.c*100/r.c < {high_thr}\n ORDER BY ratio DESC\n LIMIT {lines};\n """\n )\n', (2951, 4634), False, 'from app.models import User, mailAddress, Mail\n'), ((4640, 6355), 'app.models.User.objects.raw', 'User.objects.raw', (['f""" SELECT u.id, u.name, u.category, s.c AS sent_cnt, r.c AS received_cnt, s.c - r.c AS diff\n FROM app_user AS u,\n (SELECT u.name, u.category, u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.recipient_id\n AND m.is_reply = True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS r,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND m.is_reply=False\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS s\n WHERE u.id=r.id AND u.id=s.id\n AND s.c - r.c > {low_thr}\n AND s.c - r.c < {high_thr}\n ORDER BY diff DESC\n LIMIT {lines};\n """'], {}), '(\n f""" SELECT u.id, u.name, u.category, s.c AS sent_cnt, r.c AS received_cnt, s.c - r.c AS diff\n FROM app_user AS u,\n (SELECT u.name, u.category, u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.recipient_id\n AND m.is_reply = True\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS r,\n (SELECT u.id, count(u.id) AS c\n FROM app_user as u, app_mailaddress as ma, app_mail as m\n WHERE u.id=ma.user_id AND ma.id=m.sender_id\n AND m.is_reply=False\n AND date(m.date) > \'{start_date}\'\n AND date(m.date) < \'{end_date}\' \n GROUP BY u.id\n ) AS s\n WHERE u.id=r.id AND u.id=s.id\n AND s.c - r.c > {low_thr}\n AND s.c - r.c < {high_thr}\n ORDER 
BY diff DESC\n LIMIT {lines};\n """\n )\n', (4656, 6355), False, 'from app.models import User, mailAddress, Mail\n'), ((6612, 6646), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (6618, 6646), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((8160, 8194), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (8166, 8194), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((9796, 9830), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (9802, 9830), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((10723, 10766), 'app.models.mailAddress.objects.filter', 'mailAddress.objects.filter', ([], {'user_id': 'user.id'}), '(user_id=user.id)\n', (10749, 10766), False, 'from app.models import User, mailAddress, Mail\n'), ((12419, 13027), 'app.models.User.objects.raw', 'User.objects.raw', (['f"""SELECT u.id, u.name, u.category, u.in_enron, contact.id\n FROM app_user AS u,\n (SELECT m.recipient_id AS id FROM app_user AS u, app_mailaddress AS ma, app_mail AS m\n WHERE m.sender_id=ma.id AND ma.user_id={user.id}\n GROUP BY m.recipient_id\n ) AS contact\n WHERE u.id=contact.id AND u.in_enron=True;"""'], {}), '(\n f"""SELECT u.id, u.name, u.category, u.in_enron, contact.id\n FROM app_user AS u,\n (SELECT m.recipient_id AS id FROM app_user AS u, app_mailaddress AS ma, app_mail AS m\n WHERE m.sender_id=ma.id AND ma.user_id={user.id}\n GROUP BY m.recipient_id\n ) AS contact\n WHERE u.id=contact.id AND u.in_enron=True;"""\n )\n', (12435, 13027), False, 'from app.models import User, mailAddress, Mail\n'), ((13510, 13544), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (13516, 13544), False, 'from django.shortcuts import render, get_object_or_404, 
redirect\n'), ((1403, 1423), 'django.utils.timezone.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (1411, 1423), False, 'from django.utils.timezone import datetime\n'), ((1504, 1524), 'django.utils.timezone.datetime', 'datetime', (['(2100)', '(1)', '(1)'], {}), '(2100, 1, 1)\n', (1512, 1524), False, 'from django.utils.timezone import datetime\n'), ((6835, 6855), 'django.utils.timezone.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (6843, 6855), False, 'from django.utils.timezone import datetime\n'), ((6938, 6958), 'django.utils.timezone.datetime', 'datetime', (['(2100)', '(1)', '(1)'], {}), '(2100, 1, 1)\n', (6946, 6958), False, 'from django.utils.timezone import datetime\n'), ((8373, 8393), 'django.utils.timezone.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (8381, 8393), False, 'from django.utils.timezone import datetime\n'), ((8476, 8496), 'django.utils.timezone.datetime', 'datetime', (['(2100)', '(1)', '(1)'], {}), '(2100, 1, 1)\n', (8484, 8496), False, 'from django.utils.timezone import datetime\n'), ((9039, 9067), 'django.core.paginator.Paginator', 'Paginator', (['mails_per_day', '(10)'], {}), '(mails_per_day, 10)\n', (9048, 9067), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((10020, 10054), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (10026, 10054), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((10084, 10111), 'app.models.User.objects.get', 'User.objects.get', ([], {'name': 'name'}), '(name=name)\n', (10100, 10111), False, 'from app.models import User, mailAddress, Mail\n'), ((10389, 10423), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (10395, 10423), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((734, 768), 
'app.models.User.objects.filter', 'User.objects.filter', ([], {'in_enron': '(True)'}), '(in_enron=True)\n', (753, 768), False, 'from app.models import User, mailAddress, Mail\n'), ((798, 833), 'app.models.User.objects.filter', 'User.objects.filter', ([], {'in_enron': '(False)'}), '(in_enron=False)\n', (817, 833), False, 'from app.models import User, mailAddress, Mail\n'), ((10223, 10257), 'django.shortcuts.render', 'render', (['request', 'template', 'context'], {}), '(request, template, context)\n', (10229, 10257), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((10799, 10826), 'django.db.models.Q', 'Q', ([], {'sender_id__in': 'user_mails'}), '(sender_id__in=user_mails)\n', (10800, 10826), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((10827, 10857), 'django.db.models.Q', 'Q', ([], {'recipient_id__in': 'user_mails'}), '(recipient_id__in=user_mails)\n', (10828, 10857), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((11029, 11042), 'django.db.models.Avg', 'Avg', (['"""dcount"""'], {}), "('dcount')\n", (11032, 11042), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((11239, 11252), 'django.db.models.Avg', 'Avg', (['"""dcount"""'], {}), "('dcount')\n", (11242, 11252), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((12263, 12281), 'django.db.models.Count', 'Count', (['"""is_intern"""'], {}), "('is_intern')\n", (12268, 12281), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((12355, 12373), 'django.db.models.Count', 'Count', (['"""is_intern"""'], {}), "('is_intern')\n", (12360, 12373), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((864, 899), 'app.models.Mail.objects.filter', 'Mail.objects.filter', ([], {'is_intern': '(True)'}), '(is_intern=True)\n', (883, 899), False, 'from app.models import User, mailAddress, Mail\n'), ((910, 946), 'app.models.Mail.objects.filter', 'Mail.objects.filter', ([], {'is_intern': 
'(False)'}), '(is_intern=False)\n', (929, 946), False, 'from app.models import User, mailAddress, Mail\n'), ((9203, 9231), 'django.core.paginator.Paginator', 'Paginator', (['mails_per_day', '(10)'], {}), '(mails_per_day, 10)\n', (9212, 9231), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((11000, 11017), 'django.db.models.Count', 'Count', (['"""enron_id"""'], {}), "('enron_id')\n", (11005, 11017), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((11211, 11227), 'django.db.models.Count', 'Count', (['"""subject"""'], {}), "('subject')\n", (11216, 11227), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((8872, 8889), 'django.db.models.Count', 'Count', (['"""enron_id"""'], {}), "('enron_id')\n", (8877, 8889), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((7829, 7846), 'django.db.models.Count', 'Count', (['"""enron_id"""'], {}), "('enron_id')\n", (7834, 7846), False, 'from django.db.models import Count, Avg, Q, Case, When\n'), ((7550, 7624), 'app.models.Mail.objects.filter', 'Mail.objects.filter', ([], {'date__gte': 'start_date', 'date__lte': 'end_date', 'is_intern': '(1)'}), '(date__gte=start_date, date__lte=end_date, is_intern=1)\n', (7569, 7624), False, 'from app.models import User, mailAddress, Mail\n'), ((10932, 10949), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""date"""'], {}), "('date')\n", (10941, 10949), False, 'from django.db.models.functions import TruncDate\n'), ((11143, 11160), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""date"""'], {}), "('date')\n", (11152, 11160), False, 'from django.db.models.functions import TruncDate\n'), ((8752, 8769), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""date"""'], {}), "('date')\n", (8761, 8769), False, 'from django.db.models.functions import TruncDate\n')]
|
import datetime
from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey
from sqlalchemy.orm import relation
from db import Base
class CreateCommand(Base):
"""語録を登録するコマンドを管理するModel
"""
__tablename__ = 'create_command'
id = Column(Integer, primary_key=True)
name = Column(Unicode(100), nullable=False, unique=True)
creator = Column(Unicode(100), nullable=False)
ctime = Column(DateTime, default=datetime.datetime.now, nullable=False)
terms = relation('Term', backref='create_commands')
class Term(Base):
"""追加したコマンドに登録する語録を管理するModel
"""
__tablename__ = 'term'
id = Column(Integer, primary_key=True)
create_command = Column(Integer, ForeignKey(
'create_command.id',
onupdate='CASCADE',
ondelete='CASCADE'))
word = Column(Unicode(1024), nullable=False)
creator = Column(Unicode(100), nullable=False)
ctime = Column(DateTime, default=datetime.datetime.now, nullable=False)
|
[
"sqlalchemy.Unicode",
"sqlalchemy.orm.relation",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] |
[((256, 289), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (262, 289), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((414, 477), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.datetime.now', 'nullable': '(False)'}), '(DateTime, default=datetime.datetime.now, nullable=False)\n', (420, 477), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((490, 533), 'sqlalchemy.orm.relation', 'relation', (['"""Term"""'], {'backref': '"""create_commands"""'}), "('Term', backref='create_commands')\n", (498, 533), False, 'from sqlalchemy.orm import relation\n'), ((632, 665), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (638, 665), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((913, 976), 'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.datetime.now', 'nullable': '(False)'}), '(DateTime, default=datetime.datetime.now, nullable=False)\n', (919, 976), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((308, 320), 'sqlalchemy.Unicode', 'Unicode', (['(100)'], {}), '(100)\n', (315, 320), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((372, 384), 'sqlalchemy.Unicode', 'Unicode', (['(100)'], {}), '(100)\n', (379, 384), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((703, 774), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""create_command.id"""'], {'onupdate': '"""CASCADE"""', 'ondelete': '"""CASCADE"""'}), "('create_command.id', onupdate='CASCADE', ondelete='CASCADE')\n", (713, 774), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n'), ((819, 832), 'sqlalchemy.Unicode', 'Unicode', (['(1024)'], {}), '(1024)\n', (826, 832), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, 
ForeignKey\n'), ((871, 883), 'sqlalchemy.Unicode', 'Unicode', (['(100)'], {}), '(100)\n', (878, 883), False, 'from sqlalchemy import Column, Integer, Unicode, DateTime, ForeignKey\n')]
|
#!/usr/bin/env python3
# Write a Shannon entropy calculator: H = -sum(pi * log(pi))
# The values should come from the command line
# E.g. python3 entropy.py 0.4 0.3 0.2 0.1
# Put the probabilities into a new list
# Don't forget to convert them to numbers
import math
import sys
p = sys.argv[1:]
y = len(p)
H = 0
for i in range(y):
p[i] = float(p[i])
for h in p:
H += (h * math.log(h,2))
print(-H)
"""
python3 31entropy.py 0.1 0.2 0.3 0.4
1.846
"""
|
[
"math.log"
] |
[((376, 390), 'math.log', 'math.log', (['h', '(2)'], {}), '(h, 2)\n', (384, 390), False, 'import math\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import weakref
import gc
class SomeClass:
def __init__(self, name):
self.name = name
def __del__(self):
print(f"{self.name} is dying")
def __repr__(self):
return f"SomeClass[{self.name}]"
def __str__(self):
return self.__repr__()
if __name__ == '__main__':
gc.disable()
a = SomeClass('a')
b = SomeClass('b')
a.ref = weakref.proxy(b)
b.ref = weakref.proxy(a)
print(f'a: {a}, b: {b}')
print(f'a.ref: {a.ref}, b.ref: {b.ref}')
a = None
b = None
print('start gc...')
gc.collect()
print('all done')
|
[
"gc.collect",
"gc.disable",
"weakref.proxy"
] |
[((363, 375), 'gc.disable', 'gc.disable', ([], {}), '()\n', (373, 375), False, 'import gc\n'), ((434, 450), 'weakref.proxy', 'weakref.proxy', (['b'], {}), '(b)\n', (447, 450), False, 'import weakref\n'), ((463, 479), 'weakref.proxy', 'weakref.proxy', (['a'], {}), '(a)\n', (476, 479), False, 'import weakref\n'), ((609, 621), 'gc.collect', 'gc.collect', ([], {}), '()\n', (619, 621), False, 'import gc\n')]
|
"""Fake client that polls different API endpoints
"""
import datetime
import logging
import time
import requests
logging.basicConfig(level=logging.DEBUG)
def run_requests():
host = 'http://test-app:5000'
paths = [
'/',
'/',
'/base-test?resp=200',
'/base-test?resp=500',
'/base-test?resp=500',
'/base-test?sleep=1',
'/base-test?sleep=2',
]
logging.info(f'*** Starting Request Batch ***')
for p in paths:
req_url: str = f'{host}{p}'
r = requests.get(req_url)
logging.info(f'Response from {req_url}: {r.status_code}')
logging.info(f'--')
logging.info(f'--')
logging.info(f'--')
sleep_sec = 60
logging.info(f'>>>>>>> sleeping for {sleep_sec}s at {datetime.datetime.now()}')
logging.info(f'...')
logging.info(f'...')
logging.info(f'...')
time.sleep(sleep_sec)
if __name__ == '__main__':
run_requests()
|
[
"logging.basicConfig",
"time.sleep",
"logging.info",
"requests.get",
"datetime.datetime.now"
] |
[((116, 156), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (135, 156), False, 'import logging\n'), ((417, 464), 'logging.info', 'logging.info', (['f"""*** Starting Request Batch ***"""'], {}), "(f'*** Starting Request Batch ***')\n", (429, 464), False, 'import logging\n'), ((627, 646), 'logging.info', 'logging.info', (['f"""--"""'], {}), "(f'--')\n", (639, 646), False, 'import logging\n'), ((651, 670), 'logging.info', 'logging.info', (['f"""--"""'], {}), "(f'--')\n", (663, 670), False, 'import logging\n'), ((675, 694), 'logging.info', 'logging.info', (['f"""--"""'], {}), "(f'--')\n", (687, 694), False, 'import logging\n'), ((803, 823), 'logging.info', 'logging.info', (['f"""..."""'], {}), "(f'...')\n", (815, 823), False, 'import logging\n'), ((828, 848), 'logging.info', 'logging.info', (['f"""..."""'], {}), "(f'...')\n", (840, 848), False, 'import logging\n'), ((853, 873), 'logging.info', 'logging.info', (['f"""..."""'], {}), "(f'...')\n", (865, 873), False, 'import logging\n'), ((878, 899), 'time.sleep', 'time.sleep', (['sleep_sec'], {}), '(sleep_sec)\n', (888, 899), False, 'import time\n'), ((534, 555), 'requests.get', 'requests.get', (['req_url'], {}), '(req_url)\n', (546, 555), False, 'import requests\n'), ((564, 621), 'logging.info', 'logging.info', (['f"""Response from {req_url}: {r.status_code}"""'], {}), "(f'Response from {req_url}: {r.status_code}')\n", (576, 621), False, 'import logging\n'), ((772, 795), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (793, 795), False, 'import datetime\n')]
|
from numpy import random
import numpy as np
import matplotlib.pyplot as plt
import math
### Defining theta
theta = math.pi/4
### Generates count number of random values in the range [0, 1]
def getU(count):
u = []
for i in range(count):
key = random.rand()
u.append(key)
return u
def getX(u):
x = []
for t in u:
res = -theta*(math.log(1-t))
x.append(res)
return x
def getSampleMeanVariance(x):
sum = 0.00
for i in x:
sum += i
avg = sum/(len(x))
cnt = 0.00
for i in x:
cnt += (i-avg)**2
variance = cnt/(len(x)-1)
return avg, variance
def plotCDF(data):
data_size = len(data)
data_set = sorted(set(data))
bins = np.append(data_set, data_set[-1]+1)
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts = counts.astype(float)/data_size
cdf = np.cumsum(counts)
plt.plot(bin_edges[0:-1], cdf, linestyle='--', marker='o', color='b')
plt.ylim((0, 1))
plt.ylabel("CDF")
plt.grid(True)
plt.show()
# Plots y = 1 - e^(-x/theta)
def plotActualDistributionFunction():
a = -1
b = 1/theta
c = 1
x = np.linspace(0, 10, 256, endpoint = True)
y = (a * np.exp(-b*x)) + c
plt.plot(x, y, '-r', label=r'$y = 1 - e^{-x/theta}$')
axes = plt.gca()
axes.set_xlim([x.min(), x.max()])
axes.set_ylim([y.min(), y.max()])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Actual Distribution')
plt.legend(loc='upper left')
plt.show()
def execute(cnt):
print("For input size of : " + str(cnt))
u = getU(cnt)
u.sort()
# print(u)
x = getX(u)
# print(x)
sMean, sVariance = getSampleMeanVariance(x)
# Actual Mean is theta
print("Sample Mean: " + str(sMean) + " " + "Actual Mean: " + str(theta))
print("Abs. Difference : " + str(abs(sMean-theta)))
# Actual Variance is theta^2
print("Sample Variance: " + str(sVariance) + " " + "Actual Variance: " + str(theta**2))
print("Abs. Difference : " + str(abs(sVariance-theta**2)))
print()
plotCDF(x)
def main():
plotActualDistributionFunction()
execute(10)
execute(100)
execute(1000)
execute(10000)
execute(100000)
# execute(1000000)
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.append",
"numpy.histogram",
"numpy.cumsum",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"math.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((649, 686), 'numpy.append', 'np.append', (['data_set', '(data_set[-1] + 1)'], {}), '(data_set, data_set[-1] + 1)\n', (658, 686), True, 'import numpy as np\n'), ((707, 751), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bins', 'density': '(False)'}), '(data, bins=bins, density=False)\n', (719, 751), True, 'import numpy as np\n'), ((802, 819), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (811, 819), True, 'import numpy as np\n'), ((822, 891), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_edges[0:-1]', 'cdf'], {'linestyle': '"""--"""', 'marker': '"""o"""', 'color': '"""b"""'}), "(bin_edges[0:-1], cdf, linestyle='--', marker='o', color='b')\n", (830, 891), True, 'import matplotlib.pyplot as plt\n'), ((893, 909), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (901, 909), True, 'import matplotlib.pyplot as plt\n'), ((911, 928), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (921, 928), True, 'import matplotlib.pyplot as plt\n'), ((930, 944), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (938, 944), True, 'import matplotlib.pyplot as plt\n'), ((947, 957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (955, 957), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1099), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(256)'], {'endpoint': '(True)'}), '(0, 10, 256, endpoint=True)\n', (1072, 1099), True, 'import numpy as np\n'), ((1132, 1185), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-r"""'], {'label': '"""$y = 1 - e^{-x/theta}$"""'}), "(x, y, '-r', label='$y = 1 - e^{-x/theta}$')\n", (1140, 1185), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1205), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1203, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1293), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1288, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1310), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1305, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1344), 'matplotlib.pyplot.title', 'plt.title', (['"""Actual Distribution"""'], {}), "('Actual Distribution')\n", (1321, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1374), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1356, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1385, 1387), True, 'import matplotlib.pyplot as plt\n'), ((248, 261), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (259, 261), False, 'from numpy import random\n'), ((340, 355), 'math.log', 'math.log', (['(1 - t)'], {}), '(1 - t)\n', (348, 355), False, 'import math\n'), ((1112, 1126), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (1118, 1126), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import Backbone, build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork", "ProposalNetwork1"]
class AugmentedConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dk, dv, Nh, shape=0, relative=False, stride=1):
super(AugmentedConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.dk = dk
self.dv = dv
self.Nh = Nh
self.shape = shape
self.relative = relative
self.stride = stride
self.padding = (self.kernel_size - 1) // 2
#print("conv param", padding, stride)
assert self.Nh != 0, "integer division or modulo by zero, Nh >= 1"
assert self.dk % self.Nh == 0, "dk should be divided by Nh. (example: out_channels: 20, dk: 40, Nh: 4)"
assert self.dv % self.Nh == 0, "dv should be divided by Nh. (example: out_channels: 20, dv: 4, Nh: 4)"
assert stride in [1, 2], str(stride) + " Up to 2 strides are allowed."
self.conv_out = nn.Conv2d(self.in_channels, self.out_channels - self.dv, self.kernel_size, stride=stride, padding=self.padding)
#self.conv_out = nn.Conv2d(self.in_channels, self.out_channels , self.kernel_size, stride=stride, padding=self.padding)
self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=self.kernel_size, stride=stride, padding=self.padding)
self.attn_out = nn.Conv2d(self.dv, self.dv, kernel_size=1, stride=1)
if self.relative:
self.key_rel_w = nn.Parameter(torch.randn((2 * self.shape - 1, dk // Nh), requires_grad=True))
self.key_rel_h = nn.Parameter(torch.randn((2 * self.shape - 1, dk // Nh), requires_grad=True))
def forward(self, x):
# Input x
# (batch_size, channels, height, width)
# batch, _, height, width = x.size()
# conv_out
# (batch_size, out_channels, height, width)
x = x.reshape(-1, 5 * x.shape[1], x.shape[2], x.shape[3])
conv_out = self.conv_out(x)
batch, _, height, width = conv_out.size()
#batch, _, height, width = x.size()
#height= width=self.shape
#print(conv_out.size())
# flat_q, flat_k, flat_v
# (batch_size, Nh, height * width, dvh or dkh)
# dvh = dv / Nh, dkh = dk / Nh
# q, k, v
# (batch_size, Nh, height, width, dv or dk)
#print("input to qkv", x.shape)
flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(x, self.dk, self.dv, self.Nh)
logits = torch.matmul(flat_q.transpose(2, 3), flat_k)
if self.relative:
h_rel_logits, w_rel_logits = self.relative_logits(q)
logits += h_rel_logits
logits += w_rel_logits
weights = F.softmax(logits, dim=-1)
# attn_out
# (batch, Nh, height * width, dvh)
attn_out = torch.matmul(weights, flat_v.transpose(2, 3))
attn_out = torch.reshape(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))
#print("attn",attn_out.size())
# combine_heads_2d
# (batch, out_channels, height, width)
#print("input to attn", attn_out.size())
attn_out = self.combine_heads_2d(attn_out)
attn_out = self.attn_out(attn_out)
return torch.cat((conv_out, attn_out), dim=1)
#return conv_out
def compute_flat_qkv(self, x, dk, dv, Nh):
qkv = self.qkv_conv(x)
#print("qkv",qkv.size())
N, _, H, W = qkv.size()
q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)
q = self.split_heads_2d(q, Nh)
k = self.split_heads_2d(k, Nh)
v = self.split_heads_2d(v, Nh)
dkh = dk // Nh
q *= dkh ** -0.5
flat_q = torch.reshape(q, (N, Nh, dk // Nh, H * W))
flat_k = torch.reshape(k, (N, Nh, dk // Nh, H * W))
flat_v = torch.reshape(v, (N, Nh, dv // Nh, H * W))
return flat_q, flat_k, flat_v, q, k, v
def split_heads_2d(self, x, Nh):
batch, channels, height, width = x.size()
ret_shape = (batch, Nh, channels // Nh, height, width)
split = torch.reshape(x, ret_shape)
return split
def combine_heads_2d(self, x):
batch, Nh, dv, H, W = x.size()
ret_shape = (batch, Nh * dv, H, W)
return torch.reshape(x, ret_shape)
def relative_logits(self, q):
B, Nh, dk, H, W = q.size()
q = torch.transpose(q, 2, 4).transpose(2, 3)
rel_logits_w = self.relative_logits_1d(q, self.key_rel_w, H, W, Nh, "w")
rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), self.key_rel_h, W, H, Nh, "h")
return rel_logits_h, rel_logits_w
def relative_logits_1d(self, q, rel_k, H, W, Nh, case):
rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)
rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
rel_logits = self.rel_to_abs(rel_logits)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
rel_logits = torch.unsqueeze(rel_logits, dim=3)
rel_logits = rel_logits.repeat((1, 1, 1, H, 1, 1))
if case == "w":
rel_logits = torch.transpose(rel_logits, 3, 4)
elif case == "h":
rel_logits = torch.transpose(rel_logits, 2, 4).transpose(4, 5).transpose(3, 5)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H * W, H * W))
return rel_logits
def rel_to_abs(self, x):
B, Nh, L, _ = x.size()
col_pad = torch.zeros((B, Nh, L, 1)).to(x)
x = torch.cat((x, col_pad), dim=3)
flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
flat_pad = torch.zeros((B, Nh, L - 1)).to(x)
flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))
final_x = final_x[:, :, :L, L - 1:]
return final_x
#augmented_conv_128 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=2, shape=64)
augmented_conv_64 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=64)
augmented_conv_32 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=32)
augmented_conv_16 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=16)
augmented_conv_8 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=8)
augmented_conv_4 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=4)
#attention with more dk dv = equal to convolution
# augmented_conv_128 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=2, shape=64)
# augmented_conv_64 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=64)
# augmented_conv_32 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=32)
# augmented_conv_16 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=16)
# augmented_conv_8 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=8)
#up_sample = nn.ConvTranspose2d(256, 256, 3,stride=2, padding=1, output_padding=1)
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: Tuple[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs: Tuple[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork1(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
#self.augmentedConv_128 = augmented_conv_128
self.augmentedConv_64 = augmented_conv_64
self.augmentedConv_32 = augmented_conv_32
self.augmentedConv_16 = augmented_conv_16
self.augmentedConv_8 = augmented_conv_8#AugmentedConv()
self.augmentedConv_4 = augmented_conv_4
#self.up_sample = up_sample
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN1).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD1).view(-1, 1, 1))
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
temp_images = ()
for im in images:
temp_images += im.split(3)
images = ImageList.from_tensors(temp_images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
# for key in features.keys():
# print(key,features[key].shape)
feature_fused = {}
feature_fused['p3'] = self.augmentedConv_64(features['p3'])
#print('feature_128.shape up sample',feature_fused['p2'].shape)
feature_fused['p4'] = self.augmentedConv_32(features['p4'])
feature_fused['p5'] = self.augmentedConv_16(features['p5'])
feature_fused['p6'] = self.augmentedConv_8(features['p6'])
feature_fused['p7'] = self.augmentedConv_4(features['p7'])
my_image = images.tensor[3::5] #5 slice
#my_image = images.tensor[4::9]
#print(my_image.shape)
my_image_sizes = [(my_image.shape[-2], my_image.shape[-1]) for im in my_image]
#print(image_sizes)
images = ImageList(my_image,my_image_sizes)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, feature_fused, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
|
[
"detectron2.structures.ImageList.from_tensors",
"torch.jit.is_scripting",
"torch.cat",
"torch.randn",
"detectron2.utils.logger.log_first_n",
"detectron2.utils.visualizer.Visualizer",
"detectron2.utils.events.get_event_storage",
"torch.Tensor",
"torch.zeros",
"torch.split",
"torch.nn.Conv2d",
"detectron2.structures.ImageList",
"torch.einsum",
"torch.unsqueeze",
"torch.reshape",
"numpy.concatenate",
"torch.nn.functional.softmax",
"torch.tensor",
"torch.transpose"
] |
[((1746, 1861), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_channels', '(self.out_channels - self.dv)', 'self.kernel_size'], {'stride': 'stride', 'padding': 'self.padding'}), '(self.in_channels, self.out_channels - self.dv, self.kernel_size,\n stride=stride, padding=self.padding)\n', (1755, 1861), False, 'from torch import nn\n'), ((2019, 2141), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_channels', '(2 * self.dk + self.dv)'], {'kernel_size': 'self.kernel_size', 'stride': 'stride', 'padding': 'self.padding'}), '(self.in_channels, 2 * self.dk + self.dv, kernel_size=self.\n kernel_size, stride=stride, padding=self.padding)\n', (2028, 2141), False, 'from torch import nn\n'), ((2162, 2214), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.dv', 'self.dv'], {'kernel_size': '(1)', 'stride': '(1)'}), '(self.dv, self.dv, kernel_size=1, stride=1)\n', (2171, 2214), False, 'from torch import nn\n'), ((3519, 3544), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3528, 3544), True, 'import torch.nn.functional as F\n'), ((3692, 3768), 'torch.reshape', 'torch.reshape', (['attn_out', '(batch, self.Nh, self.dv // self.Nh, height, width)'], {}), '(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))\n', (3705, 3768), False, 'import torch\n'), ((4040, 4078), 'torch.cat', 'torch.cat', (['(conv_out, attn_out)'], {'dim': '(1)'}), '((conv_out, attn_out), dim=1)\n', (4049, 4078), False, 'import torch\n'), ((4275, 4312), 'torch.split', 'torch.split', (['qkv', '[dk, dk, dv]'], {'dim': '(1)'}), '(qkv, [dk, dk, dv], dim=1)\n', (4286, 4312), False, 'import torch\n'), ((4496, 4538), 'torch.reshape', 'torch.reshape', (['q', '(N, Nh, dk // Nh, H * W)'], {}), '(q, (N, Nh, dk // Nh, H * W))\n', (4509, 4538), False, 'import torch\n'), ((4556, 4598), 'torch.reshape', 'torch.reshape', (['k', '(N, Nh, dk // Nh, H * W)'], {}), '(k, (N, Nh, dk // Nh, H * W))\n', (4569, 4598), False, 'import torch\n'), ((4616, 4658), 'torch.reshape', 'torch.reshape', (['v', 
'(N, Nh, dv // Nh, H * W)'], {}), '(v, (N, Nh, dv // Nh, H * W))\n', (4629, 4658), False, 'import torch\n'), ((4873, 4900), 'torch.reshape', 'torch.reshape', (['x', 'ret_shape'], {}), '(x, ret_shape)\n', (4886, 4900), False, 'import torch\n'), ((5055, 5082), 'torch.reshape', 'torch.reshape', (['x', 'ret_shape'], {}), '(x, ret_shape)\n', (5068, 5082), False, 'import torch\n'), ((5517, 5558), 'torch.einsum', 'torch.einsum', (['"""bhxyd,md->bhxym"""', 'q', 'rel_k'], {}), "('bhxyd,md->bhxym', q, rel_k)\n", (5529, 5558), False, 'import torch\n'), ((5580, 5633), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh * H, W, 2 * W - 1)'], {}), '(rel_logits, (-1, Nh * H, W, 2 * W - 1))\n', (5593, 5633), False, 'import torch\n'), ((5705, 5749), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh, H, W, W)'], {}), '(rel_logits, (-1, Nh, H, W, W))\n', (5718, 5749), False, 'import torch\n'), ((5771, 5805), 'torch.unsqueeze', 'torch.unsqueeze', (['rel_logits'], {'dim': '(3)'}), '(rel_logits, dim=3)\n', (5786, 5805), False, 'import torch\n'), ((6087, 6136), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh, H * W, H * W)'], {}), '(rel_logits, (-1, Nh, H * W, H * W))\n', (6100, 6136), False, 'import torch\n'), ((6288, 6318), 'torch.cat', 'torch.cat', (['(x, col_pad)'], {'dim': '(3)'}), '((x, col_pad), dim=3)\n', (6297, 6318), False, 'import torch\n'), ((6337, 6373), 'torch.reshape', 'torch.reshape', (['x', '(B, Nh, L * 2 * L)'], {}), '(x, (B, Nh, L * 2 * L))\n', (6350, 6373), False, 'import torch\n'), ((6451, 6487), 'torch.cat', 'torch.cat', (['(flat_x, flat_pad)'], {'dim': '(2)'}), '((flat_x, flat_pad), dim=2)\n', (6460, 6487), False, 'import torch\n'), ((6507, 6562), 'torch.reshape', 'torch.reshape', (['flat_x_padded', '(B, Nh, L + 1, 2 * L - 1)'], {}), '(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))\n', (6520, 6562), False, 'import torch\n'), ((11513, 11532), 'detectron2.utils.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (11530, 11532), 
False, 'from detectron2.utils.events import get_event_storage\n'), ((16993, 17056), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['images', 'self.backbone.size_divisibility'], {}), '(images, self.backbone.size_divisibility)\n', (17015, 17056), False, 'from detectron2.structures import ImageList, Instances\n'), ((19763, 19826), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['images', 'self.backbone.size_divisibility'], {}), '(images, self.backbone.size_divisibility)\n', (19785, 19826), False, 'from detectron2.structures import ImageList, Instances\n'), ((22678, 22746), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['temp_images', 'self.backbone.size_divisibility'], {}), '(temp_images, self.backbone.size_divisibility)\n', (22700, 22746), False, 'from detectron2.structures import ImageList, Instances\n'), ((23572, 23607), 'detectron2.structures.ImageList', 'ImageList', (['my_image', 'my_image_sizes'], {}), '(my_image, my_image_sizes)\n', (23581, 23607), False, 'from detectron2.structures import ImageList, Instances\n'), ((5335, 5359), 'torch.transpose', 'torch.transpose', (['q', '(2)', '(3)'], {}), '(q, 2, 3)\n', (5350, 5359), False, 'import torch\n'), ((5915, 5948), 'torch.transpose', 'torch.transpose', (['rel_logits', '(3)', '(4)'], {}), '(rel_logits, 3, 4)\n', (5930, 5948), False, 'import torch\n'), ((11751, 11772), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['img', 'None'], {}), '(img, None)\n', (11761, 11772), False, 'from detectron2.utils.visualizer import Visualizer\n'), ((11978, 11999), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['img', 'None'], {}), '(img, None)\n', (11988, 11999), False, 'from detectron2.utils.visualizer import Visualizer\n'), ((12200, 12244), 'numpy.concatenate', 'np.concatenate', (['(anno_img, prop_img)'], {'axis': '(1)'}), '((anno_img, prop_img), axis=1)\n', (12214, 12244), True, 'import numpy as np\n'), ((14444, 14463), 
'detectron2.utils.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (14461, 14463), False, 'from detectron2.utils.events import get_event_storage\n'), ((2284, 2347), 'torch.randn', 'torch.randn', (['(2 * self.shape - 1, dk // Nh)'], {'requires_grad': '(True)'}), '((2 * self.shape - 1, dk // Nh), requires_grad=True)\n', (2295, 2347), False, 'import torch\n'), ((2391, 2454), 'torch.randn', 'torch.randn', (['(2 * self.shape - 1, dk // Nh)'], {'requires_grad': '(True)'}), '((2 * self.shape - 1, dk // Nh), requires_grad=True)\n', (2402, 2454), False, 'import torch\n'), ((5165, 5189), 'torch.transpose', 'torch.transpose', (['q', '(2)', '(4)'], {}), '(q, 2, 4)\n', (5180, 5189), False, 'import torch\n'), ((6243, 6269), 'torch.zeros', 'torch.zeros', (['(B, Nh, L, 1)'], {}), '((B, Nh, L, 1))\n', (6254, 6269), False, 'import torch\n'), ((6393, 6420), 'torch.zeros', 'torch.zeros', (['(B, Nh, L - 1)'], {}), '((B, Nh, L - 1))\n', (6404, 6420), False, 'import torch\n'), ((16470, 16494), 'torch.jit.is_scripting', 'torch.jit.is_scripting', ([], {}), '()\n', (16492, 16494), False, 'import torch\n'), ((20062, 20161), 'detectron2.utils.logger.log_first_n', 'log_first_n', (['logging.WARN', '"""\'targets\' in the model inputs is now renamed to \'instances\'!"""'], {'n': '(10)'}), '(logging.WARN,\n "\'targets\' in the model inputs is now renamed to \'instances\'!", n=10)\n', (20073, 20161), False, 'from detectron2.utils.logger import log_first_n\n'), ((23795, 23894), 'detectron2.utils.logger.log_first_n', 'log_first_n', (['logging.WARN', '"""\'targets\' in the model inputs is now renamed to \'instances\'!"""'], {'n': '(10)'}), '(logging.WARN,\n "\'targets\' in the model inputs is now renamed to \'instances\'!", n=10)\n', (23806, 23894), False, 'from detectron2.utils.logger import log_first_n\n'), ((9958, 9982), 'torch.tensor', 'torch.tensor', (['pixel_mean'], {}), '(pixel_mean)\n', (9970, 9982), False, 'import torch\n'), ((10048, 10071), 'torch.tensor', 'torch.tensor', 
(['pixel_std'], {}), '(pixel_std)\n', (10060, 10071), False, 'import torch\n'), ((18662, 18686), 'torch.tensor', 'torch.tensor', (['pixel_mean'], {}), '(pixel_mean)\n', (18674, 18686), False, 'import torch\n'), ((18752, 18775), 'torch.tensor', 'torch.tensor', (['pixel_std'], {}), '(pixel_std)\n', (18764, 18775), False, 'import torch\n'), ((21810, 21845), 'torch.Tensor', 'torch.Tensor', (['cfg.MODEL.PIXEL_MEAN1'], {}), '(cfg.MODEL.PIXEL_MEAN1)\n', (21822, 21845), False, 'import torch\n'), ((21904, 21938), 'torch.Tensor', 'torch.Tensor', (['cfg.MODEL.PIXEL_STD1'], {}), '(cfg.MODEL.PIXEL_STD1)\n', (21916, 21938), False, 'import torch\n'), ((6000, 6033), 'torch.transpose', 'torch.transpose', (['rel_logits', '(2)', '(4)'], {}), '(rel_logits, 2, 4)\n', (6015, 6033), False, 'import torch\n')]
|
# Generated by Django 3.2.4 on 2021-06-30 13:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('distriblists', '0002_copy_lists_data'),
]
operations = [
migrations.AlterField(
model_name='distributionlist',
name='approver',
field=models.ForeignKey(blank=True, limit_choices_to={'is_external': False}, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='related_lists_as_approver', to=settings.AUTH_USER_MODEL, verbose_name='Approver'),
),
migrations.AlterField(
model_name='distributionlist',
name='leader',
field=models.ForeignKey(limit_choices_to={'is_external': False}, on_delete=django.db.models.deletion.PROTECT, related_name='related_lists_as_leader', to=settings.AUTH_USER_MODEL, verbose_name='Leader'),
),
migrations.AlterField(
model_name='distributionlist',
name='reviewers',
field=models.ManyToManyField(blank=True, limit_choices_to={'is_external': False}, related_name='related_lists_as_reviewer', to=settings.AUTH_USER_MODEL, verbose_name='Reviewers'),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField"
] |
[((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((483, 720), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'limit_choices_to': "{'is_external': False}", 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""related_lists_as_approver"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Approver"""'}), "(blank=True, limit_choices_to={'is_external': False}, null\n =True, on_delete=django.db.models.deletion.PROTECT, related_name=\n 'related_lists_as_approver', to=settings.AUTH_USER_MODEL, verbose_name=\n 'Approver')\n", (500, 720), False, 'from django.db import migrations, models\n'), ((837, 1042), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'limit_choices_to': "{'is_external': False}", 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""related_lists_as_leader"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Leader"""'}), "(limit_choices_to={'is_external': False}, on_delete=django\n .db.models.deletion.PROTECT, related_name='related_lists_as_leader', to\n =settings.AUTH_USER_MODEL, verbose_name='Leader')\n", (854, 1042), False, 'from django.db import migrations, models\n'), ((1167, 1347), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'limit_choices_to': "{'is_external': False}", 'related_name': '"""related_lists_as_reviewer"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Reviewers"""'}), "(blank=True, limit_choices_to={'is_external': False},\n related_name='related_lists_as_reviewer', to=settings.AUTH_USER_MODEL,\n verbose_name='Reviewers')\n", (1189, 1347), False, 'from django.db import migrations, models\n')]
|
# Generated by Django 2.2.6 on 2020-01-11 16:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_category'),
]
operations = [
migrations.AlterField(
model_name='post',
name='category',
field=models.CharField(choices=[('gadgets', 'Gadgets'), ('machine learning', 'Machine Learning'), ('events', 'Events'), ('not', 'Not')], default='not', max_length=20),
),
]
|
[
"django.db.models.CharField"
] |
[((328, 497), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('gadgets', 'Gadgets'), ('machine learning', 'Machine Learning'), (\n 'events', 'Events'), ('not', 'Not')]", 'default': '"""not"""', 'max_length': '(20)'}), "(choices=[('gadgets', 'Gadgets'), ('machine learning',\n 'Machine Learning'), ('events', 'Events'), ('not', 'Not')], default=\n 'not', max_length=20)\n", (344, 497), False, 'from django.db import migrations, models\n')]
|
import os
import configparser
config = configparser.ConfigParser()
config.read(os.environ['PARACHUTE_CONFIG_FILE'])
|
[
"configparser.ConfigParser"
] |
[((41, 68), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (66, 68), False, 'import configparser\n')]
|
import os
print('1: {}\n1.14: {}\nTrue: {}\nFalse: {}\nHahA: {}\n'.format(type(1), type(1.14), type(True), type(False), type('HahA')))
os.system('pause')
|
[
"os.system"
] |
[((137, 155), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (146, 155), False, 'import os\n')]
|
import sys
sys.path.append('.')
import asyncio
import time
import subprocess
import logging
logging.basicConfig(level=logging.DEBUG)
import pytest
from xwing.mailbox import init_node, start_node, spawn
from xwing.network.transport.socket.client import Client
FRONTEND_ADDRESS = '127.0.0.1:5555'
def setup_module(module):
module.hub_process = subprocess.Popen('bin/xwing')
time.sleep(1)
module.server_process = subprocess.Popen(
['python', 'tests/integration/run_server.py'])
def teardown_module(module):
module.hub_process.kill()
module.server_process.kill()
@pytest.mark.skip()
class TestSocket:
@classmethod
def setup_class(cls):
cls.loop = asyncio.get_event_loop()
cls.client = Client(cls.loop, FRONTEND_ADDRESS)
async def connect(cls):
while True:
try:
cls.connection = await cls.client.connect('server0')
except ConnectionError:
await asyncio.sleep(1)
continue
else:
break
cls.loop.run_until_complete(asyncio.wait_for(connect(cls), 30))
@classmethod
def teardown_class(cls):
cls.connection.close()
def test_send_and_recv_str(self):
async def run(self):
data = 'ping'
await self.connection.send_str(data)
await self.connection.recv_str()
return True
event_loop = asyncio.get_event_loop()
assert event_loop.run_until_complete(run(self))
def test_send_and_recv(self):
async def run(self):
data = b'ping'
await self.connection.send(data)
await self.connection.recv()
return True
event_loop = asyncio.get_event_loop()
assert event_loop.run_until_complete(run(self))
@pytest.mark.skip()
class TestMailbox(object):
def setup_class(self):
init_node()
def test_send_and_recv(self):
async def echo_server(mailbox):
message, pid = await mailbox.recv()
await mailbox.send(pid, message)
async def echo_client(mailbox, pid_server):
await mailbox.send(pid_server, 'hello', mailbox.pid)
await mailbox.recv()
pid = spawn(echo_server)
spawn(echo_client, pid)
start_node()
|
[
"sys.path.append",
"xwing.mailbox.spawn",
"subprocess.Popen",
"asyncio.get_event_loop",
"logging.basicConfig",
"asyncio.sleep",
"xwing.mailbox.init_node",
"time.sleep",
"xwing.mailbox.start_node",
"xwing.network.transport.socket.client.Client",
"pytest.mark.skip"
] |
[((11, 31), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (26, 31), False, 'import sys\n'), ((93, 133), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (112, 133), False, 'import logging\n'), ((598, 616), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (614, 616), False, 'import pytest\n'), ((1863, 1881), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (1879, 1881), False, 'import pytest\n'), ((352, 381), 'subprocess.Popen', 'subprocess.Popen', (['"""bin/xwing"""'], {}), "('bin/xwing')\n", (368, 381), False, 'import subprocess\n'), ((386, 399), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (396, 399), False, 'import time\n'), ((428, 491), 'subprocess.Popen', 'subprocess.Popen', (["['python', 'tests/integration/run_server.py']"], {}), "(['python', 'tests/integration/run_server.py'])\n", (444, 491), False, 'import subprocess\n'), ((698, 722), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (720, 722), False, 'import asyncio\n'), ((744, 778), 'xwing.network.transport.socket.client.Client', 'Client', (['cls.loop', 'FRONTEND_ADDRESS'], {}), '(cls.loop, FRONTEND_ADDRESS)\n', (750, 778), False, 'from xwing.network.transport.socket.client import Client\n'), ((1475, 1499), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1497, 1499), False, 'import asyncio\n'), ((1779, 1803), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1801, 1803), False, 'import asyncio\n'), ((1945, 1956), 'xwing.mailbox.init_node', 'init_node', ([], {}), '()\n', (1954, 1956), False, 'from xwing.mailbox import init_node, start_node, spawn\n'), ((2291, 2309), 'xwing.mailbox.spawn', 'spawn', (['echo_server'], {}), '(echo_server)\n', (2296, 2309), False, 'from xwing.mailbox import init_node, start_node, spawn\n'), ((2318, 2341), 'xwing.mailbox.spawn', 'spawn', (['echo_client', 'pid'], {}), '(echo_client, pid)\n', (2323, 2341), False, 
'from xwing.mailbox import init_node, start_node, spawn\n'), ((2350, 2362), 'xwing.mailbox.start_node', 'start_node', ([], {}), '()\n', (2360, 2362), False, 'from xwing.mailbox import init_node, start_node, spawn\n'), ((996, 1012), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (1009, 1012), False, 'import asyncio\n')]
|
import sys
import numpy as np
file = sys.argv[-1]
with open(file) as f:
cnt = f.readlines()
count = []
distortion = []
calibration = []
linf = []
for line in cnt:
if line.startswith('Adversarial Example Found Successfully:'):
count.append(int(line.split(' ')[-2]))
distortion.append(eval(line.split(' ')[-6]))
elif line.startswith('1 Predicted label'):
calibration.append(eval(line.split(' ')[-3]))
linf.append(eval(line.split(' ')[-2]))
print('len:', len(count))
print('count:', np.mean(count), np.median(count), np.min(count), np.max(count))
print('distortion:', np.mean(distortion), np.median(distortion), np.min(distortion), np.max(distortion))
if len(linf) != 0:
print('calibration:', np.mean(calibration), np.median(calibration), np.min(calibration), np.max(calibration))
print('linf:', np.mean(linf), np.median(linf), np.min(linf), np.max(linf))
|
[
"numpy.median",
"numpy.min",
"numpy.mean",
"numpy.max"
] |
[((493, 507), 'numpy.mean', 'np.mean', (['count'], {}), '(count)\n', (500, 507), True, 'import numpy as np\n'), ((509, 525), 'numpy.median', 'np.median', (['count'], {}), '(count)\n', (518, 525), True, 'import numpy as np\n'), ((527, 540), 'numpy.min', 'np.min', (['count'], {}), '(count)\n', (533, 540), True, 'import numpy as np\n'), ((542, 555), 'numpy.max', 'np.max', (['count'], {}), '(count)\n', (548, 555), True, 'import numpy as np\n'), ((578, 597), 'numpy.mean', 'np.mean', (['distortion'], {}), '(distortion)\n', (585, 597), True, 'import numpy as np\n'), ((599, 620), 'numpy.median', 'np.median', (['distortion'], {}), '(distortion)\n', (608, 620), True, 'import numpy as np\n'), ((622, 640), 'numpy.min', 'np.min', (['distortion'], {}), '(distortion)\n', (628, 640), True, 'import numpy as np\n'), ((642, 660), 'numpy.max', 'np.max', (['distortion'], {}), '(distortion)\n', (648, 660), True, 'import numpy as np\n'), ((707, 727), 'numpy.mean', 'np.mean', (['calibration'], {}), '(calibration)\n', (714, 727), True, 'import numpy as np\n'), ((729, 751), 'numpy.median', 'np.median', (['calibration'], {}), '(calibration)\n', (738, 751), True, 'import numpy as np\n'), ((753, 772), 'numpy.min', 'np.min', (['calibration'], {}), '(calibration)\n', (759, 772), True, 'import numpy as np\n'), ((774, 793), 'numpy.max', 'np.max', (['calibration'], {}), '(calibration)\n', (780, 793), True, 'import numpy as np\n'), ((814, 827), 'numpy.mean', 'np.mean', (['linf'], {}), '(linf)\n', (821, 827), True, 'import numpy as np\n'), ((829, 844), 'numpy.median', 'np.median', (['linf'], {}), '(linf)\n', (838, 844), True, 'import numpy as np\n'), ((846, 858), 'numpy.min', 'np.min', (['linf'], {}), '(linf)\n', (852, 858), True, 'import numpy as np\n'), ((860, 872), 'numpy.max', 'np.max', (['linf'], {}), '(linf)\n', (866, 872), True, 'import numpy as np\n')]
|
from django import forms
from django.contrib.auth.models import User
from captcha.fields import CaptchaField
from .models import *
class ProxyForm(forms.Form):
proxyvalue=forms.CharField(label='代理值',required=False)
class LoginForm(forms.Form):
username=forms.CharField(label='用户名',max_length=100,error_messages={'required': "用户名不能为空"})
password=forms.CharField(label='密码',widget=forms.PasswordInput(),error_messages={'required': "密码不能为空"})
captcha = CaptchaField(label='验证码')
def clean_username(self):
value = self.cleaned_data.get('username')
user=User.objects.filter(username=value)
if len(user) == 0:
raise forms.ValidationError('用户%s不存在' % value)
return value
class PasswdForm(forms.Form):
oldpass=forms.CharField(label='旧密码')
newpass=forms.CharField(label='新密码')
rnewpass=forms.CharField(label='重复新密码')
class ExchangeForm(forms.ModelForm):
apikey=forms.CharField(required=False)
secretkey = forms.CharField(required=False)
class Meta:
model=Exchange
fields=['code','name','status','apikey','secretkey']
def clean(self):
cleaned_data = super(ExchangeForm, self).clean()
statusvalue= cleaned_data.get('status')
apikeyvalue = cleaned_data.get('apikey')
secretkeyvalue = cleaned_data.get('secretkey')
if statusvalue:
if apikeyvalue == '' or secretkeyvalue == '':
raise forms.ValidationError('API_KEY或SECRET_KEY未设置,无法启用此交易所')
return cleaned_data
class CastForm(forms.ModelForm):
class Meta:
model=Cast
fields = ['name', 'minute', 'hour', 'day','exid','symbol','amount','sellpercent']
class ConditionForm(forms.ModelForm):
DCHOICES = (
('buy', '买入'),
('sell', '卖出'),
)
direction=forms.ChoiceField(choices=DCHOICES,widget=forms.Select(attrs={'class':'form-control'}))
class Meta:
model=Condition
fields = ['name','exid','symbol','direction','number','price']
|
[
"django.forms.Select",
"captcha.fields.CaptchaField",
"django.contrib.auth.models.User.objects.filter",
"django.forms.PasswordInput",
"django.forms.ValidationError",
"django.forms.CharField"
] |
[((178, 222), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""代理值"""', 'required': '(False)'}), "(label='代理值', required=False)\n", (193, 222), False, 'from django import forms\n'), ((264, 352), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""用户名"""', 'max_length': '(100)', 'error_messages': "{'required': '用户名不能为空'}"}), "(label='用户名', max_length=100, error_messages={'required':\n '用户名不能为空'})\n", (279, 352), False, 'from django import forms\n'), ((469, 494), 'captcha.fields.CaptchaField', 'CaptchaField', ([], {'label': '"""验证码"""'}), "(label='验证码')\n", (481, 494), False, 'from captcha.fields import CaptchaField\n'), ((776, 804), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""旧密码"""'}), "(label='旧密码')\n", (791, 804), False, 'from django import forms\n'), ((817, 845), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""新密码"""'}), "(label='新密码')\n", (832, 845), False, 'from django import forms\n'), ((859, 889), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""重复新密码"""'}), "(label='重复新密码')\n", (874, 889), False, 'from django import forms\n'), ((939, 970), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (954, 970), False, 'from django import forms\n'), ((987, 1018), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (1002, 1018), False, 'from django import forms\n'), ((590, 625), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'username': 'value'}), '(username=value)\n', (609, 625), False, 'from django.contrib.auth.models import User\n'), ((398, 419), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (417, 419), False, 'from django import forms\n'), ((671, 711), 'django.forms.ValidationError', 'forms.ValidationError', (["('用户%s不存在' % value)"], {}), "('用户%s不存在' % value)\n", (692, 711), False, 'from django import forms\n'), ((1861, 1906), 
'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1873, 1906), False, 'from django import forms\n'), ((1453, 1508), 'django.forms.ValidationError', 'forms.ValidationError', (['"""API_KEY或SECRET_KEY未设置,无法启用此交易所"""'], {}), "('API_KEY或SECRET_KEY未设置,无法启用此交易所')\n", (1474, 1508), False, 'from django import forms\n')]
|
from test_runner import run_test
def sliding_window(string, char_set):
    """Return the length of the smallest substring of *string* that contains
    every character in *char_set*, or -1 if no such substring exists.

    An empty char_set is satisfied by the empty window, so 0 is returned.
    Fixes: the previous version updated best_score on every iteration, even
    while the window was missing required characters, so it could report a
    finite length when no valid window existed (e.g. ("a", {'a','b'}) -> 2),
    and it raised IndexError on an empty char_set.
    """
    if not char_set:
        return 0  # nothing is required: the empty window suffices
    required = len(char_set)
    counts = {}      # occurrences of each required char inside the window
    matched = 0      # how many required chars currently have count > 0
    best_score = float('inf')
    left = 0
    for right, ch in enumerate(string):
        # grow the window by one character on the right
        if ch in char_set:
            counts[ch] = counts.get(ch, 0) + 1
            if counts[ch] == 1:
                matched += 1
        # record the length and shrink from the left only while the
        # window actually covers every character of char_set
        while matched == required:
            best_score = min(best_score, right - left + 1)
            dropped = string[left]
            if dropped in char_set:
                counts[dropped] -= 1
                if counts[dropped] == 0:
                    matched -= 1
            left += 1
    return best_score if best_score != float('inf') else -1
run_test(sliding_window)
|
[
"test_runner.run_test"
] |
[((831, 855), 'test_runner.run_test', 'run_test', (['sliding_window'], {}), '(sliding_window)\n', (839, 855), False, 'from test_runner import run_test\n')]
|
import easycorrector.confusion_model.confusion_correct as confusion_correct
import easycorrector.ngram_model.ngram_correct as ngram_correct
import easycorrector.preprocess.preprocess as preprocess
import easycorrector.base_bert_model.base_bert_correct as base_bert_correct
import easycorrector.chinese_bert_model.chinese_bert_correct as chinese_bert_correct
import easycorrector.csc_pretrain_bert_model.csc_pretrain_bert_correct as csc_pretrain_bert_correct
import easycorrector.preprocess.cut_sentences as cut_sen
from collections import defaultdict
# Correction models that correct() runs over every sentence.
# All but the Chinese-BERT model are currently commented out/disabled.
models = [
    #confusion_correct,
    # ngram_correct,
    # base_bert_correct,
    chinese_bert_correct
    #csc_pretrain_bert_correct
]
def correct(text):
    """Run the correction models over *text* and mark suggested fixes.

    Args:
        text: either a list of sentences or a single string (which is
            preprocessed and split into sentences first).

    Returns:
        [sentences, corrected_sentences], where corrected sentences have
        each flagged character wrapped in "<i>...</i>".
    """
    if isinstance(text, list):
        sentences = [preprocess.preprocess(sen) for sen in text]
    else:
        sentences = cut_sen.cut_sentence(preprocess.preprocess(text))
    corrected = []
    for sen in sentences:
        conf_items = confusion_correct.correct(sen)
        csc_items = csc_pretrain_bert_correct.correct(sen)
        ngram_items = ngram_correct.correct(sen)
        # group suggestions by start offset; concatenation order matters,
        # because the first item at a position wins below
        by_start = defaultdict(list)
        for item in conf_items + ngram_items + csc_items:
            by_start[item.start].append(item)
        pieces = []
        for pos, ch in enumerate(sen):
            if pos in by_start:
                pieces.append("<i>" + by_start[pos][0].replace + "</i>")
            else:
                pieces.append(ch)
        marked = "".join(pieces)
        print(sen, marked)
        corrected.append(marked)
    return [sentences, corrected]
def test_correct():
    """Smoke-test correct() on a long block of Chinese news text."""
    correct('''
    1.前不久,D市一男子武某醉驾被查却免于起诉,这引起了社会的广泛关注。该名男子为何酒驾免罚?难道是打法律的“擦边球”?还是执法者滥用职权无视法律?
    据悉,武某酒后驾车被执勤交警当场查获,经鉴定达到醉驾标准。武某酒后驾车的行为已涉嫌危险驾驶罪,但为抢救其幼女生命情形紧迫所为,主观恶性较小,属犯罪情节轻微。通过多方查证,证实了武某案发当日酒后驾车事件的紧迫性,经研究,依法对武某作出了相对不起诉的决定。
    武某酒驾确实违法了,执法部门也秉公执法了,而相对不起诉则体现了执法新理念,让更多的人体会到法律的温度。其实,法律的最高境界不是无情,而是情与法相对完美的结合,从西周“明德慎罚”到现在的“以人为本”,乃至“未成年人不公开审理”,法律体现了人文关怀和对人格尊严的维护。
    近日,该市交警大队执勤民警在城区巡逻过程中,发现西苑路一辆外地货运车临街停车卸货,车用篷布随意堆放,占据了由北向南行驶车道二分之一的路面,导致该车道车辆只能占用对向车道行驶,道路拥挤,存在安全隐患。
    经了解,该驾驶员常年从事长途货运,这是第一次来到D市,加之正值“五一”假期,以为没有交警执勤,就在路边停车开始卸货。
    交警发现后,第一时间协助清除路面隐患,随后对驾驶员以善意提醒的方式进行教育,详细解释了占用机动车道路的危害和要承担的法律责任,叮嘱驾驶员一定要遵守交通规则,不能方便了自己,却影响了他人的出行安全。
    “平常违章了就交罚款,以为今天交警要处罚我,看到公安交警耐心的向我讲道理,并且还告诉我什么该做什么不该做,让我真正了解了交规交法,这样的教育比直接处罚有用多了”,驾驶员感慨地说道。
    “景区停车位已满,路两侧有临时停车区域,请有序停放。”5月4日上午十时许,D市国家森林公园景区停车场已经饱和,但前来游览的车辆依旧络绎不绝。
    为满足外地游客“五一”假期停车需求,D市公安交警在不影响交通安全的前提下,指挥车辆靠路边临时停车,全力满足景区车辆合理停放需求。清脆响亮的哨声伴着执勤交警挥动的手臂,一辆辆车有序停放、秩序井然。
    “去年听同事说假期森林公园车堵得上不了山,今年带着家人来旅游,没想到能直接开到景区门口,停车场没位置了,给我们安排停在路边,真是太方便了,为D市交警的服务点赞。”王先生高兴的说。
    车被套牌,不同车型相同车牌,遇到这种情况怎么办?D市给出答案。近日,D市交警部门在微信公众号开通了套牌车报案功能,机动车所有人可以通过D市交警微信公众号进行套牌车自助报案。“‘假套牌’报案微信自助办理渠道的开通,实现了群众在家即可通过网上进行报案,减少群众往来报案的人力、物力,缩短了假套牌违法从报案到查处的时间。”D市交警相关负责人介绍,市民只需通过微信公众号,关注“D市交警”,进入“违法事故”—“套牌车报案”,然后根据报案提示,“选择车辆”-“选择违法”-“填写报案证据”-“上传报案人资料”-“上传车辆资料”,即可实现“假套牌”报案。
    “你们交警同志这么耐心的给我们讲道理,下次我再也不违规拉人了,他们也都再也不坐农用车了……”
    5月6日,交警大队在进行路检路查过程中,发现一辆农用车违法载人。
    ''')
def test_correct1():
    """Feed every line of test_data.txt through correct() as one batch.

    Uses a context manager so the file handle is closed (the original
    left the file object from open() unclosed).
    """
    with open("test_data.txt") as fh:
        sentences = list(fh)
    correct(sentences)
def run_models():
    """Print each enabled model's suggested corrections for every line
    of test_data.txt.

    Uses a context manager so the file handle is closed (the original
    left the file object from open() unclosed).
    """
    with open("test_data.txt") as fh:
        for txt in fh:
            print(txt)
            for model in models:
                for item in model.correct(txt):
                    # fall back to a generic "please check" hint when the
                    # model flags a span without a concrete replacement
                    print(model.model_name + ":" + txt[item.start:item.end], "----", item.replace if item.replace else "建议检查")
if __name__ == '__main__':
    # run the multi-model comparison by default
    run_models()
|
[
"easycorrector.preprocess.preprocess.preprocess",
"easycorrector.ngram_model.ngram_correct.correct",
"easycorrector.preprocess.cut_sentences.cut_sentence",
"collections.defaultdict",
"easycorrector.confusion_model.confusion_correct.correct",
"easycorrector.csc_pretrain_bert_model.csc_pretrain_bert_correct.correct"
] |
[((834, 861), 'easycorrector.preprocess.preprocess.preprocess', 'preprocess.preprocess', (['text'], {}), '(text)\n', (855, 861), True, 'import easycorrector.preprocess.preprocess as preprocess\n'), ((882, 908), 'easycorrector.preprocess.cut_sentences.cut_sentence', 'cut_sen.cut_sentence', (['text'], {}), '(text)\n', (902, 908), True, 'import easycorrector.preprocess.cut_sentences as cut_sen\n'), ((981, 1011), 'easycorrector.confusion_model.confusion_correct.correct', 'confusion_correct.correct', (['sen'], {}), '(sen)\n', (1006, 1011), True, 'import easycorrector.confusion_model.confusion_correct as confusion_correct\n'), ((1034, 1072), 'easycorrector.csc_pretrain_bert_model.csc_pretrain_bert_correct.correct', 'csc_pretrain_bert_correct.correct', (['sen'], {}), '(sen)\n', (1067, 1072), True, 'import easycorrector.csc_pretrain_bert_model.csc_pretrain_bert_correct as csc_pretrain_bert_correct\n'), ((1093, 1119), 'easycorrector.ngram_model.ngram_correct.correct', 'ngram_correct.correct', (['sen'], {}), '(sen)\n', (1114, 1119), True, 'import easycorrector.ngram_model.ngram_correct as ngram_correct\n'), ((1134, 1151), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1145, 1151), False, 'from collections import defaultdict\n'), ((765, 791), 'easycorrector.preprocess.preprocess.preprocess', 'preprocess.preprocess', (['sen'], {}), '(sen)\n', (786, 791), True, 'import easycorrector.preprocess.preprocess as preprocess\n')]
|
from django.conf.urls import url
from ocfweb.api import hours
from ocfweb.api import lab
# API routes: lab opening hours and desktop availability.
urlpatterns = [
    url(r'^hours$', hours.get_hours_all, name='hours_all'),
    url(r'^hours/today$', hours.get_hours_today, name='hours_today'),
    url(r'^lab/desktops$', lab.desktop_usage, name='desktop_usage'),
]
|
[
"django.conf.urls.url"
] |
[((111, 164), 'django.conf.urls.url', 'url', (['"""^hours$"""', 'hours.get_hours_all'], {'name': '"""hours_all"""'}), "('^hours$', hours.get_hours_all, name='hours_all')\n", (114, 164), False, 'from django.conf.urls import url\n'), ((171, 234), 'django.conf.urls.url', 'url', (['"""^hours/today$"""', 'hours.get_hours_today'], {'name': '"""hours_today"""'}), "('^hours/today$', hours.get_hours_today, name='hours_today')\n", (174, 234), False, 'from django.conf.urls import url\n'), ((241, 303), 'django.conf.urls.url', 'url', (['"""^lab/desktops$"""', 'lab.desktop_usage'], {'name': '"""desktop_usage"""'}), "('^lab/desktops$', lab.desktop_usage, name='desktop_usage')\n", (244, 303), False, 'from django.conf.urls import url\n')]
|
import time
from snake.utils import logger_levels
class Logger:
    """Static logging facade filtered by a global minimum log level."""

    # messages below this level are suppressed (class-private, name-mangled)
    __log_level = logger_levels.NONE

    @staticmethod
    def set_log_level(level):
        """Set the minimum level a message must reach to be printed."""
        Logger.__log_level = level

    @staticmethod
    def log(level, sender, message):
        """Print *message* tagged with timestamp, level and sender class.

        *sender* may be None, in which case the tag "Game" is used.
        """
        if level >= Logger.__log_level:
            print("[{}][{}][{}] - {}".format(
                time.ctime(),
                level,
                # identity check: 'is not None' instead of '!= None' (PEP 8)
                sender.__class__.__name__ if sender is not None else "Game",
                message
            ))

    @staticmethod
    def log_fps(sender, clock):
        """Log the clock's current frames-per-second at FPS level."""
        msg = "FPS: {}".format(clock.get_fps())
        Logger.log(
            logger_levels.FPS,
            sender,
            msg
        )

    @staticmethod
    def log_debug(sender, message):
        """Log *message* at DEBUG level."""
        Logger.log(
            logger_levels.DEBUG,
            sender,
            message
        )

    @staticmethod
    def log_trace(sender, message):
        """Log *message* at TRACE level."""
        Logger.log(
            logger_levels.TRACE,
            sender,
            message
        )

    @staticmethod
    def log_info(sender, message):
        """Log *message* at INFO level."""
        Logger.log(
            logger_levels.INFO,
            sender,
            message
        )
|
[
"time.ctime"
] |
[((346, 358), 'time.ctime', 'time.ctime', ([], {}), '()\n', (356, 358), False, 'import time\n')]
|
#
# (C) Copyright 2012 <NAME> <<EMAIL>>
# (C) Copyright 2011 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""XFacebookPlatform authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 4752 <http://www.ietf.org/rfc/rfc4752.txt>`__
"""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import logging
from .core import ClientAuthenticator, Response, Success
from .core import sasl_mechanism
import time, urllib
logger = logging.getLogger("pyxmpp2.sasl.xfb")
@sasl_mechanism(name='X-FACEBOOK-PLATFORM', secure=False, preference=100)
class XFacebookPlatformClientAuthenticator(ClientAuthenticator):
    """Provides client-side XFacebookPlatform authentication."""
    def __init__(self):
        self.access_token = None
        self.api_key = None
        ClientAuthenticator.__init__(self)

    @classmethod
    def are_properties_sufficient(cls, properties):
        """Return True when both Facebook credentials are available."""
        return ('facebook_access_token' in properties
                and 'facebook_api_key' in properties)

    def start(self, properties):
        """Begin authentication; this mechanism sends an empty initial
        response and waits for the server's challenge."""
        self.access_token = properties['facebook_access_token']
        self.api_key = properties['facebook_api_key']
        return Response(None)

    def challenge(self, challenge):
        """Answer the server challenge with the url-encoded Facebook
        platform parameters (nonce, method, credentials, call_id, v)."""
        # local import keeps the module working on both Python 2 and 3:
        # urllib.urlencode moved to urllib.parse.urlencode in Python 3,
        # so the original module-level urllib.urlencode failed on py3
        try:
            from urllib import urlencode          # Python 2
        except ImportError:
            from urllib.parse import urlencode    # Python 3
        # challenge format: key1=val1&key2=val2&...
        # assumes the challenge is a str here - TODO confirm decoding on py3
        in_params = dict([part.split('=') for part in challenge.split('&')])
        out_params = {}
        out_params['nonce'] = in_params['nonce']
        out_params['method'] = in_params['method']
        out_params['access_token'] = self.access_token
        out_params['api_key'] = self.api_key
        # millisecond-resolution call id, as in the original implementation
        out_params['call_id'] = float(round(time.time() * 1000))
        out_params['v'] = '1.0'
        data = urlencode(out_params)
        return Response(data)

    def finish(self, data):
        """Complete the exchange; any server data means success here."""
        return Success(None)
|
[
"urllib.urlencode",
"logging.getLogger",
"time.time"
] |
[((1111, 1148), 'logging.getLogger', 'logging.getLogger', (['"""pyxmpp2.sasl.xfb"""'], {}), "('pyxmpp2.sasl.xfb')\n", (1128, 1148), False, 'import logging\n'), ((2346, 2374), 'urllib.urlencode', 'urllib.urlencode', (['out_params'], {}), '(out_params)\n', (2362, 2374), False, 'import time, urllib\n'), ((2278, 2289), 'time.time', 'time.time', ([], {}), '()\n', (2287, 2289), False, 'import time, urllib\n')]
|
from typing import List, Any
from ply.lex import LexToken
from windyquery.ctx import Ctx
from windyquery.validator import ValidationError
from ._base import Base, StartInsertToken
TOKEN = 'INSERT'
class InsertToken(LexToken):
    """A synthetic lex token of type INSERT carrying prepared clause data."""
    def __init__(self, value):
        self.type, self.value = TOKEN, value
        # not produced by a real lexer, so position info is meaningless
        self.lineno = self.lexpos = 0
class Insert(Base):
    """Builder mixin that validates and queues an INSERT clause."""

    def insert(self, columns: List[str], values: List[Any]):
        """Validate *columns* and *values* and append an InsertToken.

        Raises:
            UserWarning: when column or value validation fails.
        """
        try:
            sqlColumns = self.validator.validate_insert_columns(columns)
            sqlValues = []
            args = []
            for row in values:
                # each row gets its own ctx so parameter numbering continues
                # from the current offset
                ctx = Ctx(self.paramOffset, [])
                sqlValues.append(
                    self.validator.validate_insert_values(row, ctx))
                self.paramOffset += len(ctx.args)
                args += ctx.args
        except ValidationError as err:
            raise UserWarning(f'invalid INSERT: {err}') from None
        # build the dedup key from a *sorted copy*; the original called
        # columns.sort(), mutating the caller's list as a side effect
        key = ','.join(sorted(columns))
        self.append(InsertToken(
            {'columns': sqlColumns, 'values': ', '.join(sqlValues), 'params': args, 'key': key}))
        self.add_start(StartInsertToken())
|
[
"windyquery.ctx.Ctx"
] |
[((634, 659), 'windyquery.ctx.Ctx', 'Ctx', (['self.paramOffset', '[]'], {}), '(self.paramOffset, [])\n', (637, 659), False, 'from windyquery.ctx import Ctx\n')]
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: william
@contact: <EMAIL>
@site: http://www.xiaolewei.com
@file: catcher.py
@time: 12/04/2018 18:32
"""
from dc.core import db, config
import influxdb
class Catcher(object):
    """Holds the MySQL and InfluxDB client connections used by catchers."""

    def __init__(self):
        # MySQL client, configured from app settings
        self._db = db.get_mysql_client(config.get('app.db.mysql'))
        # InfluxDB client, configured from app settings
        influx_cfg = config.get('app.db.influxdb')
        self._influxdb = influxdb.InfluxDBClient(
            host=influx_cfg['host'],
            port=influx_cfg['port'],
            username=influx_cfg['user'],
            password=influx_cfg['password'],
            database=influx_cfg['database'],
        )
|
[
"dc.core.config.get",
"influxdb.InfluxDBClient"
] |
[((337, 366), 'dc.core.config.get', 'config.get', (['"""app.db.influxdb"""'], {}), "('app.db.influxdb')\n", (347, 366), False, 'from dc.core import db, config\n'), ((392, 530), 'influxdb.InfluxDBClient', 'influxdb.InfluxDBClient', ([], {'host': "cfg['host']", 'port': "cfg['port']", 'username': "cfg['user']", 'password': "cfg['password']", 'database': "cfg['database']"}), "(host=cfg['host'], port=cfg['port'], username=cfg[\n 'user'], password=cfg['password'], database=cfg['database'])\n", (415, 530), False, 'import influxdb\n'), ((295, 321), 'dc.core.config.get', 'config.get', (['"""app.db.mysql"""'], {}), "('app.db.mysql')\n", (305, 321), False, 'from dc.core import db, config\n')]
|
import numpy as np
import pytest
from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, \
reset_variable_tracker
def _runs_are_the_same(var_gen_1, var_gen_2, use_assert=False):
    """Replay two variable streams and report whether run 2 matches run 1.

    With use_assert=True the tracker raises AssertionError on mismatch;
    otherwise this returns False on the first mismatch and True when the
    second run reproduced the first exactly.
    """
    delete_vars(['_test_random_var_32r5477w32'])
    for run, gen in enumerate((var_gen_1, var_gen_2)):
        reset_variable_tracker()
        for value in gen:
            if use_assert:
                assert_variable_matches_between_runs(value, '_test_random_var_32r5477w32')
                continue
            its_a_match = variable_matches_between_runs(value, '_test_random_var_32r5477w32')
            if run == 0:
                # the first run only records values; no verdict yet
                assert its_a_match is None
            elif not its_a_match:
                return False
    return True
def test_variable_matches_between_runs():
    """Same seed in two runs matches; shared-state or diverging runs do not."""
    seed = 1234
    rng_a = np.random.RandomState(seed)
    rng_b = np.random.RandomState(seed)
    assert _runs_are_the_same(
        (rng_a.randn(3, 4) for _ in range(5)),
        (rng_b.randn(3, 4) for _ in range(5)),
    )
    # one shared RNG: the second run consumes different draws
    shared_rng = np.random.RandomState(seed)
    assert not _runs_are_the_same(
        (shared_rng.randn(3, 4) for _ in range(5)),
        (shared_rng.randn(3, 4) for _ in range(5)),
    )
    assert _runs_are_the_same(iter(range(5)), iter(range(5)))
    # second run diverges at the last element
    assert not _runs_are_the_same(
        (i for i in range(5)),
        (i if i < 4 else 7 for i in range(5)),
    )
def test_assert_variable_matches_between_runs():
    """assert mode: mismatches raise AssertionError instead of returning False."""
    seed = 1234
    rng_a = np.random.RandomState(seed)
    rng_b = np.random.RandomState(seed)
    _runs_are_the_same(
        (rng_a.randn(3, 4) for _ in range(5)),
        (rng_b.randn(3, 4) for _ in range(5)),
        use_assert=True,
    )
    # one shared RNG: the second run consumes different draws
    shared_rng = np.random.RandomState(seed)
    with pytest.raises(AssertionError):
        _runs_are_the_same(
            (shared_rng.randn(3, 4) for _ in range(5)),
            (shared_rng.randn(3, 4) for _ in range(5)),
            use_assert=True,
        )
    _runs_are_the_same(iter(range(5)), iter(range(5)), use_assert=True)
    # second run diverges at the last element
    with pytest.raises(AssertionError):
        _runs_are_the_same(
            (i for i in range(5)),
            (i if i < 4 else 7 for i in range(5)),
            use_assert=True,
        )
if __name__ == '__main__':
    # allow running the checks directly, without pytest
    test_variable_matches_between_runs()
    test_assert_variable_matches_between_runs()
|
[
"artemis.general.nondeterminism_hunting.delete_vars",
"numpy.random.RandomState",
"artemis.general.nondeterminism_hunting.variable_matches_between_runs",
"pytest.raises",
"artemis.general.nondeterminism_hunting.reset_variable_tracker",
"artemis.general.nondeterminism_hunting.assert_variable_matches_between_runs"
] |
[((268, 312), 'artemis.general.nondeterminism_hunting.delete_vars', 'delete_vars', (["['_test_random_var_32r5477w32']"], {}), "(['_test_random_var_32r5477w32'])\n", (279, 312), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((891, 918), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (912, 918), True, 'import numpy as np\n'), ((978, 1005), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (999, 1005), True, 'import numpy as np\n'), ((1107, 1134), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1128, 1134), True, 'import numpy as np\n'), ((1574, 1601), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1595, 1601), True, 'import numpy as np\n'), ((1661, 1688), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1682, 1688), True, 'import numpy as np\n'), ((1800, 1827), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1821, 1827), True, 'import numpy as np\n'), ((375, 399), 'artemis.general.nondeterminism_hunting.reset_variable_tracker', 'reset_variable_tracker', ([], {}), '()\n', (397, 399), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((1931, 1960), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1944, 1960), False, 'import pytest\n'), ((2227, 2256), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2240, 2256), False, 'import pytest\n'), ((465, 535), 'artemis.general.nondeterminism_hunting.assert_variable_matches_between_runs', 'assert_variable_matches_between_runs', (['v', '"""_test_random_var_32r5477w32"""'], {}), "(v, '_test_random_var_32r5477w32')\n", 
(501, 535), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((582, 645), 'artemis.general.nondeterminism_hunting.variable_matches_between_runs', 'variable_matches_between_runs', (['v', '"""_test_random_var_32r5477w32"""'], {}), "(v, '_test_random_var_32r5477w32')\n", (611, 645), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n')]
|
import os
import sys
# CLI arguments: dir1 holds the source files, dir2 is searched for matches
dir1 = sys.argv[1]
dir2 = sys.argv[2]
def fn_matchingfile(inputfile, comparedir):
    """Walk *comparedir* and log every file whose name equals *inputfile*.

    Matches are appended to matchingfile.txt. The source path is built from
    the module-level ``dir1`` (first CLI argument) — presumably the
    directory *inputfile* came from; verify against the caller.
    """
    for dirName, subdirList, fileList in os.walk(comparedir):
        for fname in fileList:
            if fname == inputfile:
                # 'a' (append): the original mode 'wa' is not a valid mode
                # string and raised ValueError; append also keeps matches
                # from earlier calls instead of overwriting them
                with open('matchingfile.txt', 'a') as f:
                    # trailing newline so records are not run together
                    f.write('Source File: {}\{} \t Matching File: {}\{}\n'.format(dir1, inputfile, dirName, fname))
# look up every file from dir1 inside dir2
for eachFile in os.listdir(dir1):
    fn_matchingfile(eachFile,dir2)
|
[
"os.walk",
"os.listdir"
] |
[((469, 485), 'os.listdir', 'os.listdir', (['dir1'], {}), '(dir1)\n', (479, 485), False, 'import os\n'), ((146, 165), 'os.walk', 'os.walk', (['comparedir'], {}), '(comparedir)\n', (153, 165), False, 'import os\n')]
|
# Crie um programa que leia o ano de nascimento de sete pessoas. No final, mostre a quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.
import datetime
# Read the birth year of seven people and count how many have already
# reached adulthood (18+) and how many are still minors.
current_year = datetime.date.today().year
adults = 0
minors = 0
for _ in range(7):
    birth_year = int(input('Digite o ano de nascimento: '))
    if current_year - birth_year < 18:
        minors += 1
    else:
        adults += 1
print('Tivemos {} maior de idade'.format(adults))
print('Tivemos {} menor de idade'.format(minors))
|
[
"datetime.date.today"
] |
[((183, 204), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (202, 204), False, 'import datetime\n')]
|
import random
import string
# TODO: Method naming rule: verb
def random_digit_with_number(length_of_values: int=7) -> str:
    """Return a cryptographically random alphanumeric string.

    Args:
        length_of_values: number of characters to generate (default 7).
    """
    alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
    secure_rng = random.SystemRandom()
    return ''.join(secure_rng.choice(alphabet) for _ in range(length_of_values))
# TODO: Method naming rule: verb
def random_number(length_of_values: int=6) -> str:
    """Return a cryptographically random string of decimal digits.

    Args:
        length_of_values: number of digits to generate (default 6).
    """
    secure_rng = random.SystemRandom()
    return ''.join(secure_rng.choice(string.digits) for _ in range(length_of_values))
|
[
"random.SystemRandom"
] |
[((230, 251), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (249, 251), False, 'import random\n'), ((467, 488), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (486, 488), False, 'import random\n')]
|
"""jia URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from courses.views import LessonCatalog, LessonRetrieve,SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve
app_name = 'courses'
# Catalog (list) and detail routes for lessons, sublessons, courses, quizes.
urlpatterns = [
    path('lessons/', LessonCatalog.as_view(), name="allessons"),
    path('lesson/details/<int:pk>/', LessonRetrieve.as_view(), name="lesson-details"),
    path('sublessons/', SubLessonCatalog.as_view(), name="asublessons"),
    path('sublesson/details/<int:pk>/', SubLessonRetrieve.as_view(), name="sublesson-details"),
    path('courses/', CourseCatalog.as_view(), name="courses"),
    # fixed: the name contained a stray leading apostrophe ("'course-details"),
    # which would break reverse('courses:course-details') / {% url %} lookups
    path('course/details/<int:pk>/', CourseRetrieve.as_view(), name="course-details"),
    path('quizes/', QuizCatalog.as_view(), name="quizes"),
    path('quiz/details/<int:pk>/', QuizRetrieve.as_view(), name="quiz-details"),
]
|
[
"courses.views.CourseCatalog.as_view",
"courses.views.LessonRetrieve.as_view",
"courses.views.QuizCatalog.as_view",
"courses.views.CourseRetrieve.as_view",
"courses.views.QuizRetrieve.as_view",
"courses.views.SubLessonRetrieve.as_view",
"courses.views.SubLessonCatalog.as_view",
"courses.views.LessonCatalog.as_view"
] |
[((868, 891), 'courses.views.LessonCatalog.as_view', 'LessonCatalog.as_view', ([], {}), '()\n', (889, 891), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((949, 973), 'courses.views.LessonRetrieve.as_view', 'LessonRetrieve.as_view', ([], {}), '()\n', (971, 973), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1023, 1049), 'courses.views.SubLessonCatalog.as_view', 'SubLessonCatalog.as_view', ([], {}), '()\n', (1047, 1049), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1112, 1139), 'courses.views.SubLessonRetrieve.as_view', 'SubLessonRetrieve.as_view', ([], {}), '()\n', (1137, 1139), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1189, 1212), 'courses.views.CourseCatalog.as_view', 'CourseCatalog.as_view', ([], {}), '()\n', (1210, 1212), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1268, 1292), 'courses.views.CourseRetrieve.as_view', 'CourseRetrieve.as_view', ([], {}), '()\n', (1290, 1292), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1339, 1360), 'courses.views.QuizCatalog.as_view', 'QuizCatalog.as_view', ([], {}), '()\n', (1358, 1360), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n'), ((1413, 1435), 'courses.views.QuizRetrieve.as_view', 
'QuizRetrieve.as_view', ([], {}), '()\n', (1433, 1435), False, 'from courses.views import LessonCatalog, LessonRetrieve, SubLessonCatalog, SubLessonRetrieve, CourseCatalog, CourseRetrieve, QuizCatalog, QuizRetrieve\n')]
|
import unittest
def _makeRootAndUser():
    """Build a minimal Acquisition tree (root with acl_users and two
    role-managed contexts) plus a Manager user wrapped in acl_users.

    Returns:
        (root, user) — *user* is acquisition-wrapped in root.acl_users.
    """
    from Acquisition import Explicit
    from Acquisition import Implicit
    from AccessControl.rolemanager import RoleManager
    # a context whose roles are managed by RoleManager
    class DummyContext(Implicit, RoleManager):
        __roles__ = ('Manager',)
    # a user that always reports the Manager role and full permission
    class DummyUser(Explicit):
        def getRoles(self):
            return ('Manager',)
        def getRolesInContext(self, context):
            return ('Manager',)
        def has_permission(self, permission, context):
            return True
    class DummyAclUsers(Explicit):
        def getUser(self, user_id):
            user = DummyUser()
            return user.__of__(self)
        def absolute_url(self, relative=0):
            return 'acl_users'
    class DummyRoot(Explicit):
        acl_users = DummyAclUsers()
    root = DummyRoot()
    root.acl_users = DummyAclUsers()
    root.context1 = DummyContext()
    root.context2 = DummyContext()
    # wrap the user in the acl_users folder so acquisition checks pass
    user = DummyUser().__of__(root.acl_users)
    return root, user
class TestRoleManager(unittest.TestCase):
    """Unit tests for AccessControl.rolemanager.RoleManager local-role
    and role-validation APIs, using the dummy tree from _makeRootAndUser."""
    def tearDown(self):
        # drop any security manager installed by a test
        from AccessControl.SecurityManagement import noSecurityManager
        noSecurityManager()
    def test_interfaces(self):
        # RoleManager must fully implement IRoleManager
        from zope.interface.verify import verifyClass
        from AccessControl.interfaces import IRoleManager
        from AccessControl.rolemanager import RoleManager
        verifyClass(IRoleManager, RoleManager)
    def test_manage_getUserRolesAndPermissions(self):
        # the call must not unwrap the user's acquisition context
        from AccessControl.ImplPython import verifyAcquisitionContext
        from AccessControl.SecurityManagement import getSecurityManager
        from AccessControl.SecurityManagement import newSecurityManager
        root, user = _makeRootAndUser()
        newSecurityManager(None, user)
        root.context1.manage_getUserRolesAndPermissions('dummy_user')
        user = getSecurityManager().getUser()
        self.assertTrue(verifyAcquisitionContext(user, root.context2, ()))
    def test_has_local_roles(self):
        root, user = _makeRootAndUser()
        self.assertFalse(root.context1.has_local_roles())
    def test_get_local_roles(self):
        # local roles come back as a tuple of (userid, roles-tuple) pairs
        root, user = _makeRootAndUser()
        root.context1.__ac_local_roles__ = {'user1': ['Role1']}
        roles = root.context1.get_local_roles()
        self.assertEqual(roles, (
            ('user1', ('Role1',)),
        ))
    def test_manage_addLocalRoles(self):
        root, user = _makeRootAndUser()
        root.context1.manage_addLocalRoles('user1', ['Role1'])
        roles = root.context1.get_local_roles_for_userid('user1')
        self.assertEqual(roles, ('Role1',))
    def test_manage_setLocalRoles(self):
        # setLocalRoles replaces (not merges) the user's local roles
        root, user = _makeRootAndUser()
        root.context1.__ac_local_roles__ = {'user1': ('Role1',)}
        root.context1.manage_setLocalRoles('user1', ['Role2'])
        roles = root.context1.get_local_roles_for_userid('user1')
        self.assertEqual(roles, ('Role2',))
    def test_manage_delLocalRoles(self):
        root, user = _makeRootAndUser()
        root.context1.__ac_local_roles__ = {'user1': ('Role1',)}
        root.context1.manage_delLocalRoles(['user1'])
        roles = root.context1.get_local_roles_for_userid('user1')
        self.assertEqual(roles, ())
    def test_valid_roles(self):
        from AccessControl.rolemanager import RoleManager
        root, user = _makeRootAndUser()
        # default case, __ac_roles__ not overridden
        self.assertEqual(set(root.context1.valid_roles()),
                         set(RoleManager.__ac_roles__))
        # forcing our own roles
        root.context1.__ac_roles__ = ('Role2', 'Role1')
        roles = root.context1.valid_roles()
        self.assertEqual(roles, ('Role1', 'Role2'))
    def test_validate_roles(self):
        from AccessControl.rolemanager import RoleManager
        root, user = _makeRootAndUser()
        # default case, __ac_roles__ not overridden
        self.assertTrue(root.context1.validate_roles(RoleManager.__ac_roles__))
        self.assertFalse(root.context1.validate_roles(('Role1', 'Role2')))
        # forcing our own roles
        root.context1.__ac_roles__ = ('Role2', 'Role1')
        validator = root.context1.validate_roles
        self.assertFalse(validator(RoleManager.__ac_roles__))
        self.assertTrue(validator(('Role1', 'Role2')))
    def test_userdefined_roles(self):
        root, user = _makeRootAndUser()
        # default case, __ac_roles__ not overridden
        self.assertEqual(root.context1.userdefined_roles(), ())
        # forcing our own roles
        root.context1.__ac_roles__ = ('Role2', 'Role1')
        self.assertEqual(
            ('Role2', 'Role1'),
            root.context1.userdefined_roles(),
        )
|
[
"zope.interface.verify.verifyClass",
"AccessControl.SecurityManagement.noSecurityManager",
"AccessControl.SecurityManagement.newSecurityManager",
"AccessControl.ImplPython.verifyAcquisitionContext",
"AccessControl.SecurityManagement.getSecurityManager"
] |
[((1135, 1154), 'AccessControl.SecurityManagement.noSecurityManager', 'noSecurityManager', ([], {}), '()\n', (1152, 1154), False, 'from AccessControl.SecurityManagement import noSecurityManager\n'), ((1367, 1405), 'zope.interface.verify.verifyClass', 'verifyClass', (['IRoleManager', 'RoleManager'], {}), '(IRoleManager, RoleManager)\n', (1378, 1405), False, 'from zope.interface.verify import verifyClass\n'), ((1723, 1753), 'AccessControl.SecurityManagement.newSecurityManager', 'newSecurityManager', (['None', 'user'], {}), '(None, user)\n', (1741, 1753), False, 'from AccessControl.SecurityManagement import newSecurityManager\n'), ((1894, 1943), 'AccessControl.ImplPython.verifyAcquisitionContext', 'verifyAcquisitionContext', (['user', 'root.context2', '()'], {}), '(user, root.context2, ())\n', (1918, 1943), False, 'from AccessControl.ImplPython import verifyAcquisitionContext\n'), ((1839, 1859), 'AccessControl.SecurityManagement.getSecurityManager', 'getSecurityManager', ([], {}), '()\n', (1857, 1859), False, 'from AccessControl.SecurityManagement import getSecurityManager\n')]
|
from __future__ import print_function
import time
from base import valDict, malicious
from sdk.actions import (
GetBlockHeight,
GetFrozenMap,
)
from sdk.cmd_call import (
KillNode,
)
from sdk.rpc_call import (
node_1,
)
def test_release():
    """End-to-end check that a killed validator ends up in the frozen map."""
    print("Starting release test")
    # no validator may be frozen before the node is killed
    frozen = GetFrozenMap()
    assert len(frozen) == 0, 'Frozen validator found'
    assert KillNode(node_1) is True, 'Failed to kill node %s' % node_1
    # wait until the chain advances past the kill height + 4 blocks
    current = GetBlockHeight()
    check_height = current + 4
    print("Waiting height %d to proceed (current: %s)" % (
        check_height, current,
    ))
    while check_height >= current:
        current = GetBlockHeight()
        time.sleep(1)
    print("Height %s ready" % check_height)
    # the killed validator must now appear in the frozen map
    frozen = GetFrozenMap()
    assert valDict['1']['address'] in frozen, 'Validator %s not frozen' % malicious
    print("Validator Frozen successfully passed!")
if __name__ == "__main__":
test_release()
|
[
"sdk.cmd_call.KillNode",
"time.sleep",
"sdk.actions.GetFrozenMap",
"sdk.actions.GetBlockHeight"
] |
[((349, 363), 'sdk.actions.GetFrozenMap', 'GetFrozenMap', ([], {}), '()\n', (361, 363), False, 'from sdk.actions import GetBlockHeight, GetFrozenMap\n'), ((433, 449), 'sdk.cmd_call.KillNode', 'KillNode', (['node_1'], {}), '(node_1)\n', (441, 449), False, 'from sdk.cmd_call import KillNode\n'), ((572, 588), 'sdk.actions.GetBlockHeight', 'GetBlockHeight', ([], {}), '()\n', (586, 588), False, 'from sdk.actions import GetBlockHeight, GetFrozenMap\n'), ((900, 914), 'sdk.actions.GetFrozenMap', 'GetFrozenMap', ([], {}), '()\n', (912, 914), False, 'from sdk.actions import GetBlockHeight, GetFrozenMap\n'), ((766, 782), 'sdk.actions.GetBlockHeight', 'GetBlockHeight', ([], {}), '()\n', (780, 782), False, 'from sdk.actions import GetBlockHeight, GetFrozenMap\n'), ((791, 804), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (801, 804), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from setuptools import setup
# setuptools package metadata for the flask-neomodel extension
setup(
    name='flask-neomodel',
    version='0.1',
    description='Flask extension for OGM on neo4j python driver',
    author="<NAME>",
    author_email='<EMAIL>',
    # url='',
    license='MIT',
    packages=['.'],
    # package_data={
    #     main_package: [
    #         'logging.ini',
    #         'logging_tests.ini'
    #     ]
    # },
    python_requires='>=3.4',
    install_requires=[
        'flask', # 1.0.2
        'neomodel' # 3.2.8
    ],
    classifiers=[
        'Programming Language :: Python',
        'Intended Audience :: Developers',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['flask', 'neo4j', 'models', 'neomodel']
)
|
[
"setuptools.setup"
] |
[((55, 674), 'setuptools.setup', 'setup', ([], {'name': '"""flask-neomodel"""', 'version': '"""0.1"""', 'description': '"""Flask extension for OGM on neo4j python driver"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['.']", 'python_requires': '""">=3.4"""', 'install_requires': "['flask', 'neomodel']", 'classifiers': "['Programming Language :: Python', 'Intended Audience :: Developers',\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6']", 'keywords': "['flask', 'neo4j', 'models', 'neomodel']"}), "(name='flask-neomodel', version='0.1', description=\n 'Flask extension for OGM on neo4j python driver', author='<NAME>',\n author_email='<EMAIL>', license='MIT', packages=['.'], python_requires=\n '>=3.4', install_requires=['flask', 'neomodel'], classifiers=[\n 'Programming Language :: Python', 'Intended Audience :: Developers',\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], keywords=['flask', 'neo4j',\n 'models', 'neomodel'])\n", (60, 674), False, 'from setuptools import setup\n')]
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import tensorflow as tf
from adnc.model.utils import layer_norm
@pytest.fixture()
def session():
with tf.Session() as sess:
yield sess
tf.reset_default_graph()
@pytest.fixture()
def np_rng():
seed = np.random.randint(1, 999)
return np.random.RandomState(seed)
def test_layer_norm(session, np_rng):
np_weights = np_rng.normal(0, 1, [64, 128])
weights = tf.constant(np_weights, dtype=tf.float32)
weights_ln = layer_norm(weights, 'test')
session.run(tf.global_variables_initializer())
weights_ln = session.run(weights_ln)
assert weights_ln.shape == (64, 128)
|
[
"adnc.model.utils.layer_norm",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"pytest.fixture",
"tensorflow.Session",
"tensorflow.constant",
"numpy.random.RandomState",
"numpy.random.randint"
] |
[((751, 767), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (765, 767), False, 'import pytest\n'), ((864, 880), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (878, 880), False, 'import pytest\n'), ((837, 861), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (859, 861), True, 'import tensorflow as tf\n'), ((906, 931), 'numpy.random.randint', 'np.random.randint', (['(1)', '(999)'], {}), '(1, 999)\n', (923, 931), True, 'import numpy as np\n'), ((943, 970), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (964, 970), True, 'import numpy as np\n'), ((1073, 1114), 'tensorflow.constant', 'tf.constant', (['np_weights'], {'dtype': 'tf.float32'}), '(np_weights, dtype=tf.float32)\n', (1084, 1114), True, 'import tensorflow as tf\n'), ((1132, 1159), 'adnc.model.utils.layer_norm', 'layer_norm', (['weights', '"""test"""'], {}), "(weights, 'test')\n", (1142, 1159), False, 'from adnc.model.utils import layer_norm\n'), ((792, 804), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (802, 804), True, 'import tensorflow as tf\n'), ((1177, 1210), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1208, 1210), True, 'import tensorflow as tf\n')]
|
from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level
from floodsystem.datafetcher import *
from floodsystem.plot import plot_water_level_with_fit
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.utils import sorted_by_key
from floodsystem.station import MonitoringStation
def run():
stations = build_station_list()
update_water_levels(stations)
severe_old = stations_level_over_threshold(stations,1.5)
high_old = stations_level_over_threshold(stations,1.2)
moderate_old = stations_level_over_threshold(stations,0.9)
low_old = stations_level_over_threshold(stations,0.6)
hs = high_old[len(severe_old):]
ms = moderate_old[len(high_old):]
ls = low_old[len(moderate_old):]
severe_towns = []
severe = []
high_towns = []
high = []
low_town = []
low = []
moderate_town = []
moderate = []
for i in severe_old:
if i[0].town == None:
pass
elif i[0].town in severe_towns:
pass
else:
severe.append(i[0].town)
for j in hs:
if j[0].town == None:
pass
elif j[0].town in severe_towns or high_towns:
pass
else:
high.append(j[0].town)
for k in ms:
if k[0].town == None:
pass
elif k[0].town in severe_towns or high_towns or moderate_town:
pass
else:
moderate.append(k[0].town)
for l in ls:
if l[0].town == None:
pass
elif l[0].town in severe_towns or high_towns or moderate_town or low_town:
pass
else:
low.append(l[0].town)
severe.sort()
high.sort()
moderate.sort()
low.sort()
print("SEVERE RISK")
print("\n")
for a in severe:
if a != None:
print("TOWN NAME: ",a)
print("\n")
print("HIGH RISK")
print("\n")
for b in high:
if b != None:
print("TOWN NAME: ",b)
print("\n")
print("MODERATE RISK")
print("\n")
for c in moderate:
if c != None:
print("TOWN NAME: ",c)
print("\n")
print("LOW RISK")
print("\n")
for d in low:
if d != None:
print("TOWN NAME: ",d)
print("\n")
if __name__ == "__main__":
print("*** Task 2G: CUED Part IA Flood Warning System ***")
run()
|
[
"floodsystem.stationdata.build_station_list",
"floodsystem.flood.stations_level_over_threshold",
"floodsystem.stationdata.update_water_levels"
] |
[((378, 398), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (396, 398), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((403, 432), 'floodsystem.stationdata.update_water_levels', 'update_water_levels', (['stations'], {}), '(stations)\n', (422, 432), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((450, 494), 'floodsystem.flood.stations_level_over_threshold', 'stations_level_over_threshold', (['stations', '(1.5)'], {}), '(stations, 1.5)\n', (479, 494), False, 'from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level\n'), ((509, 553), 'floodsystem.flood.stations_level_over_threshold', 'stations_level_over_threshold', (['stations', '(1.2)'], {}), '(stations, 1.2)\n', (538, 553), False, 'from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level\n'), ((572, 616), 'floodsystem.flood.stations_level_over_threshold', 'stations_level_over_threshold', (['stations', '(0.9)'], {}), '(stations, 0.9)\n', (601, 616), False, 'from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level\n'), ((630, 674), 'floodsystem.flood.stations_level_over_threshold', 'stations_level_over_threshold', (['stations', '(0.6)'], {}), '(stations, 0.6)\n', (659, 674), False, 'from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level\n')]
|
import socket
from pyadept.strutil import split_data
def create_server_socket(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_pair = (host, port)
s.bind(socket_pair)
return s
def start_server(srv_socket, on_accept, on_exit=None, max_conn=10):
srv_socket.listen(max_conn)
while True:
try:
conn, addr = srv_socket.accept()
on_accept(conn, addr)
except KeyboardInterrupt:
print('\nStopping the server due to keyborard interrupt')
if on_exit is not None:
on_exit()
break
def create_client_socket():
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def create_connected_client_socket(remote_host, remote_port):
s = create_client_socket()
dest_pair = (remote_host, remote_port)
s.connect(dest_pair)
return s
def socket_send_string(socket, msg):
data = msg.encode()
socket.sendall(data)
def socket_send_bytes(socket, buff):
socket.sendall(buff)
def read_messages(socket, delimiter=b'\r\n', buffer_size=2048, prefix=b''):
'''
Reads data from socket, where sequences of bytes separated by the delimiter
constitute separate messages. Returns a tuple (messages, rest), where
messages correspond to the a list of messages and rest corresponds to
the remaining byte string. Possible return compinations are the following:
(messages, rest) -- one or more complete messages are received + the rest of bytes
(messages, None) -- a complete set of messages is received
(None, rest) -- a single sequence of bytes is received (without delimiter)
(None, None) -- peer has closed its socket
:param socket: a TCP socket object
:param delimiter: a bytes object used as a delimiter between messages
:param buffer_size: size of the buffer used for reading from the socket
:return: a tuple of messages and rest of bytes
'''
data = prefix + socket.recv(buffer_size)
return split_data(data, delimiter)
def read_complete_messages(socket, delimiter=b'\n', buffer_size=2048):
'''
Reads data from socket, where sequences of bytes separated by the delimiter
constitute separate messages. Continue reading from the socket until a set
of complete messages is obtained (the correspinding list of bytes object is returned)
or the peer has closed its socket (the function returns None)
:param socket: a TCP socket object
:param delimiter: a bytes object used as a delimiter between messages
:param buffer_size: size of the buffer used for reading from the socket
:return: a list of messages or None
'''
def perform_read(data=b''):
messages, rest = read_messages(socket, delimiter, buffer_size, prefix=data)
if messages is None and rest is None:
return None
if messages is None:
return perform_read(rest)
if rest is None:
return messages
return messages + perform_read(rest)
return perform_read()
|
[
"pyadept.strutil.split_data",
"socket.socket",
"socket.recv",
"socket.sendall"
] |
[((103, 152), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (116, 152), False, 'import socket\n'), ((721, 770), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (734, 770), False, 'import socket\n'), ((1018, 1038), 'socket.sendall', 'socket.sendall', (['data'], {}), '(data)\n', (1032, 1038), False, 'import socket\n'), ((1082, 1102), 'socket.sendall', 'socket.sendall', (['buff'], {}), '(buff)\n', (1096, 1102), False, 'import socket\n'), ((2081, 2108), 'pyadept.strutil.split_data', 'split_data', (['data', 'delimiter'], {}), '(data, delimiter)\n', (2091, 2108), False, 'from pyadept.strutil import split_data\n'), ((2044, 2068), 'socket.recv', 'socket.recv', (['buffer_size'], {}), '(buffer_size)\n', (2055, 2068), False, 'import socket\n')]
|
# -*- coding: utf-8 -*-
import time
import threading
import logging
from uuid import uuid1
from hashlib import md5
from django.core.cache import cache
from django.db import IntegrityError
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
class MiddlewareMixin(object):
pass
from user_agents import parse as parse_ua
import statsd
from . import models
from . import conf
logger = logging.getLogger('metrics')
request_id_keys = (
'HTTP_ACCEPT_CHARSET',
'HTTP_ACCEPT',
'HTTP_ACCEPT_ENCODING',
'HTTP_ACCEPT_LANGUAGE',
'HTTP_CONNECTION',
'HTTP_USER_AGENT',
'REMOTE_ADDR',
)
def id_request(request):
"""Generate a uniquish ID for a given request.
"""
key = '|'.join([
request.META.get(k, '')
for k in request_id_keys
])
return md5(uuid1().get_hex() + key).hexdigest()
class LocalStatsd(threading.local):
def __init__(self):
client = self.client = statsd.StatsClient(
host = conf.HOST,
port = conf.PORT,
prefix = conf.PREFIX,
)
try:
self.pipeline = client.pipeline()
except AttributeError:
# In case we're using an older statsd version.
self.pipeline = client
class MetricsMiddleware(MiddlewareMixin):
"""Middleware to capture basic metrics about a request.
Includes:
- Performance timing
- Last-seen data for authenticated users.
"""
scope = LocalStatsd()
def process_request(self, request):
request.statsd = self.scope.pipeline
request.zesty = self.scope
try:
if conf.TIME_RESPONSES:
self.start_timing(request)
except:
logger.exception('Exception occurred while logging to statsd.')
def process_exception(self, request, exception):
try:
if hasattr(self.scope, 'client'):
self.scope.pipeline.incr('view.exceptions')
view_name = (getattr(self.scope, 'view_name', 'UNKNOWN') +
'.exceptions')
self.scope.pipeline.incr(view_name)
except:
logger.exception('Exception occurred while logging to statsd.')
def process_view(self, request, view_func, view_args, view_kwargs):
if conf.TIME_RESPONSES:
self.gather_view_data(request, view_func)
def process_response(self, request, response):
if conf.TRACK_USER_ACTIVITY:
self.update_last_seen_data(request)
if conf.TIME_RESPONSES:
try:
self.stop_timing(request)
except:
logger.exception('Exception occurred while logging to statsd.')
return response
def start_timing(self, request):
"""Start performance timing.
"""
self.scope.request_start = time.time()
def gather_view_data(self, request, view_func):
"""Discover the view name.
"""
# View name is defined as module.view
# (e.g. django.contrib.auth.views.login)
name = view_func.__module__
# CBV specific
if hasattr(view_func, '__name__'):
name = '%s.%s' % (name, view_func.__name__)
elif hasattr(view_func, '__class__'):
name = '%s.%s' % (name, view_func.__class__.__name__)
method = request.method.lower()
if request.is_ajax():
method += '_ajax'
name = '%s.%s' % (name, method)
self.scope.id = id_request(request)
self.scope.agent = parse_ua(request.META.get('HTTP_USER_AGENT', ''))
self.scope.view_name = "view." + name
def stop_timing(self, request):
"""Stop performance timing.
"""
now = time.time()
started = getattr(self.scope, 'request_start', now)
time_elapsed = now - started
if hasattr(self.scope, 'client'):
client = self.scope.pipeline
view_name = getattr(self.scope, 'view_name', 'UNKNOWN')
if time_elapsed:
client.timing(
view_name,
time_elapsed,
conf.TIMING_SAMPLE_RATE)
client.timing(
'view.aggregate-response-time',
time_elapsed,
conf.TIMING_SAMPLE_RATE)
client.incr(view_name + '.requests')
client.incr('view.requests')
logger.info("Processed %s.%s in %ss", conf.PREFIX, view_name, time_elapsed)
try:
client.send()
except AttributeError:
# Client isn't a pipeline, data already sent.
pass
except IndexError:
# Nothing to send.
pass
logger.debug("Sent stats to %s:%s", conf.HOST, conf.PORT)
agent = getattr(self.scope, "agent", None)
rid = getattr(self.scope, "rid", None)
if agent and rid:
data = {
'started': started,
'server_time': time_elapsed,
'agent': agent,
'view_name': view_name,
}
cache.set('request:' + rid, data, 5 * 60)
# Other visit data
def update_last_seen_data(self, request):
"""Update the user's LastSeenData profile.
"""
try:
user = request.user
except AttributeError:
# No user, so nothing to do here.
return
if user.is_authenticated():
try:
data = models.LastSeenData.objects.get(user=user)
except models.LastSeenData.DoesNotExist:
data = models.LastSeenData(user=user)
try:
data.update(request)
except IntegrityError:
# User probably got created in a concurrent request?
pass
except:
logger.exception("Couldn't update user LastSeenData:")
|
[
"statsd.StatsClient",
"django.core.cache.cache.set",
"time.time",
"uuid.uuid1",
"logging.getLogger"
] |
[((427, 455), 'logging.getLogger', 'logging.getLogger', (['"""metrics"""'], {}), "('metrics')\n", (444, 455), False, 'import logging\n'), ((971, 1041), 'statsd.StatsClient', 'statsd.StatsClient', ([], {'host': 'conf.HOST', 'port': 'conf.PORT', 'prefix': 'conf.PREFIX'}), '(host=conf.HOST, port=conf.PORT, prefix=conf.PREFIX)\n', (989, 1041), False, 'import statsd\n'), ((2879, 2890), 'time.time', 'time.time', ([], {}), '()\n', (2888, 2890), False, 'import time\n'), ((3765, 3776), 'time.time', 'time.time', ([], {}), '()\n', (3774, 3776), False, 'import time\n'), ((5221, 5262), 'django.core.cache.cache.set', 'cache.set', (["('request:' + rid)", 'data', '(5 * 60)'], {}), "('request:' + rid, data, 5 * 60)\n", (5230, 5262), False, 'from django.core.cache import cache\n'), ((841, 848), 'uuid.uuid1', 'uuid1', ([], {}), '()\n', (846, 848), False, 'from uuid import uuid1\n')]
|
#!/usr/bin/python
# Refactor by <NAME>
# This is refactor script from https://github.com/aruba/aruba-ansible-modules/blob/master/aruba_module_installer/aruba_module_installer.py
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from subprocess import check_output
from shutil import copytree, copyfile, rmtree
from os.path import dirname, realpath, exists, isdir
from os import remove
from sys import exit
from re import search
import errno
COLORRED = "\033[0;31m{0}\033[00m"
SW_PATHS = {'module': 'modules/network/comware'}
CMD = 'ansible --version'
SRC_PATH = dirname(realpath(__file__))+'/library/'
ANS_PATH = ''
def define_arguments():
description = ('This tool installs all files/directories required by '
'Comware7,'
'\n\n'
'Requirements:'
'\n\t- Linux OS only'
'\n\t- Ansible release version 2.5 or later installed'
'\n\t- Python 2.7 or 3.5+ installed'
)
epilog = ('Directories added:'
'\n\t- <ansible_module_path>/modules/network/comware'
)
parser = ArgumentParser(description=description,
formatter_class=RawDescriptionHelpFormatter,
epilog=epilog)
parser.add_argument('-r', '--remove', required=False,
help=('remove all files & directories installed '
'by this script.'),
action='store_true')
parser.add_argument('--reinstall', required=False,
help=('remove all files & directories installed '
'by this script. Then re-install.'),
action='store_true')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--switch', required=False,
help=('only install files/directories required for '
'Comware.'
),
action='store_true')
return parser.parse_args()
def find_module_path():
global CMD, COLORRED
output = check_output(CMD, shell=True).strip()
re_path = search(r"ansible python module location = (?P<path>\S+)",
output.decode('utf-8'))
re_version = search(r"ansible\s(?P<version>\d\S+\d)", output.decode('utf-8'))
if re_path and re_version:
re_version = re_version.groupdict()['version']
re_path = re_path.groupdict()['path']
# Validate Ansible version is supported
if '2.5' <= re_version <= '2.9.9':
return re_path+'/'
else:
exit(COLORRED.format('There was an issue with your '
'ansible version: {}\n'
'The Aruba Modules support Ansible release '
'versions 2.5 or later.').format(re_version))
else:
exit(COLORRED.format('There was an issue finding your '
'ansible version.\n'
'Please run \'ansible --version\' from bash'
', resolve any errors, and verify version'
' is release version 2.5 or later.'))
def install_sw_modules():
global SW_PATHS, SRC_PATH, COLORRED, ANS_PATH
# Copy each directory and file to ansible module location
for source, path in SW_PATHS.items():
# If directories or files exist already, do nothing
if exists(ANS_PATH+path):
print(COLORRED.format('{} already exists'
' at {}...\n'.format(path, ANS_PATH+path)))
else:
print('Copying {} to {}...\n'.format(path, ANS_PATH+path))
if isdir(SRC_PATH+path):
copytree(SRC_PATH+path, ANS_PATH+path)
else:
copyfile(SRC_PATH+path, ANS_PATH+path)
if __name__ == "__main__":
args = define_arguments()
try:
ANS_PATH = find_module_path()
if args.remove:
remove_modules()
elif args.reinstall:
remove_modules()
install_sw_modules()
install_wlan_modules()
elif args.switch:
install_sw_modules()
else:
install_sw_modules()
except (OSError, IOError) as e:
if (e[0] == errno.EACCES):
print(e)
if args.remove:
exit(COLORRED.format("You need root permissions to execute "
"this script against "
"these files/directories.\n\n"
"re-run the installer using\n"
"sudo python"
"comware install module -r"))
else:
exit(COLORRED.format("You need root permissions to execute "
"this script against "
"these files/directories.\n\n"
"re-run the installer using\n"
"sudo python "
"comware installer.py"))
else:
raise e
|
[
"argparse.ArgumentParser",
"os.path.isdir",
"subprocess.check_output",
"os.path.realpath",
"os.path.exists",
"shutil.copyfile",
"shutil.copytree"
] |
[((1158, 1262), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'description', 'formatter_class': 'RawDescriptionHelpFormatter', 'epilog': 'epilog'}), '(description=description, formatter_class=\n RawDescriptionHelpFormatter, epilog=epilog)\n', (1172, 1262), False, 'from argparse import ArgumentParser, RawDescriptionHelpFormatter\n'), ((593, 611), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (601, 611), False, 'from os.path import dirname, realpath, exists, isdir\n'), ((3567, 3590), 'os.path.exists', 'exists', (['(ANS_PATH + path)'], {}), '(ANS_PATH + path)\n', (3573, 3590), False, 'from os.path import dirname, realpath, exists, isdir\n'), ((2186, 2215), 'subprocess.check_output', 'check_output', (['CMD'], {'shell': '(True)'}), '(CMD, shell=True)\n', (2198, 2215), False, 'from subprocess import check_output\n'), ((3822, 3844), 'os.path.isdir', 'isdir', (['(SRC_PATH + path)'], {}), '(SRC_PATH + path)\n', (3827, 3844), False, 'from os.path import dirname, realpath, exists, isdir\n'), ((3860, 3902), 'shutil.copytree', 'copytree', (['(SRC_PATH + path)', '(ANS_PATH + path)'], {}), '(SRC_PATH + path, ANS_PATH + path)\n', (3868, 3902), False, 'from shutil import copytree, copyfile, rmtree\n'), ((3933, 3975), 'shutil.copyfile', 'copyfile', (['(SRC_PATH + path)', '(ANS_PATH + path)'], {}), '(SRC_PATH + path, ANS_PATH + path)\n', (3941, 3975), False, 'from shutil import copytree, copyfile, rmtree\n')]
|