id stringlengths 3 8 | content stringlengths 100 981k |
|---|---|
11505069 | import torch
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import OneCycleLR, ReduceLROnPlateau
from yacs.config import CfgNode
from typing import Union
def build_optimizer(model: torch.nn.Module, opti_cfg: CfgNode) -> Optimizer:
    """
    Build an optimizer for the given model from a config node.

    :param model: model whose parameters will be optimized (already on device)
    :param opti_cfg: optimizer config providing NAME, BASE_LR and, for SGD,
        an SGD sub-node with MOMENTUM and NESTEROV
    :return: a configured torch Optimizer
    """
    params = model.parameters()
    name = opti_cfg.NAME
    base_lr = opti_cfg.BASE_LR
    if name == 'adam':
        return torch.optim.Adam(params, lr=base_lr)
    if name == 'sgd':
        sgd = opti_cfg.SGD
        return torch.optim.SGD(params,
                               lr=base_lr,
                               momentum=sgd.MOMENTUM,
                               nesterov=sgd.NESTEROV)
    raise Exception('invalid optimizer, available choices adam/sgd')
def build_scheduler(optimizer: Optimizer, scheduler_cfg: CfgNode):
    """
    Build a learning-rate scheduler from a config node.

    :param optimizer: the optimizer whose learning rate will be scheduled
    :param scheduler_cfg: scheduler config; NAME selects the scheduler type
    :return: a scheduler instance, or None when NAME == 'unchange'
    :raises Exception: if NAME is not one of the supported scheduler names
    """
    # BUG FIX: MultiStepLR was referenced but never imported at module level,
    # which made the 'multi_steps' branch raise NameError.
    from torch.optim.lr_scheduler import MultiStepLR

    scheduler_type = scheduler_cfg.NAME
    if scheduler_type == 'unchange':
        # No schedule: keep the optimizer's learning rate constant.
        return None
    elif scheduler_type == 'multi_steps':
        gamma = scheduler_cfg.LR_REDUCE_GAMMA
        milestones = scheduler_cfg.MULTI_STEPS_LR_MILESTONES
        return MultiStepLR(optimizer, milestones, gamma=gamma, last_epoch=-1)
    elif scheduler_type == 'reduce_on_plateau':
        gamma = scheduler_cfg.LR_REDUCE_GAMMA
        return ReduceLROnPlateau(optimizer, patience=5, factor=gamma)
    elif scheduler_type == 'OneCycleLR':
        # BUG FIX: steps_per_epoch and epochs were previously undefined free
        # variables (NameError at runtime); read them from the config node,
        # matching how every other OneCycleLR parameter is obtained.
        return OneCycleLR(optimizer,
                          max_lr=scheduler_cfg.MAX_LR,
                          steps_per_epoch=scheduler_cfg.STEPS_PER_EPOCH,
                          epochs=scheduler_cfg.EPOCHS,
                          pct_start=scheduler_cfg.PCT_START,
                          anneal_strategy=scheduler_cfg.ANNEAL_STRATEGY,
                          div_factor=scheduler_cfg.DIV_FACTOR,
                          cycle_momentum=True)
    else:
        raise Exception('scheduler name invalid, choices are unchange/multi_steps/reduce_on_plateau/OneCycleLR')
|
11505116 | from typing import NoReturn, Union, Optional
from pandas import DataFrame
try:
import fbprophet as fbp
except ModuleNotFoundError:
raise ModuleNotFoundError("Could not find fbprophet. Install with: pip install fbprophet")
from timeatlas.abstract import AbstractBaseModel
from timeatlas.config.constants import (
COMPONENT_VALUES,
COMPONENT_META_CI_UPPER,
COMPONENT_META_CI_LOWER,
MODEL_TYPE_UNIVARIATE,
MODEL_TYPE_MULTIVARIATE
)
from timeatlas.time_series import TimeSeries
from timeatlas.time_series_dataset import TimeSeriesDataset
class Prophet(AbstractBaseModel):
    """TimeAtlas adapter around the fbprophet forecasting model.

    Supports univariate fitting on a TimeSeries and multivariate fitting on a
    TimeSeriesDataset, where one component is the target and the remaining
    components are added as extra regressors.
    """

    def __init__(self):
        super().__init__()
        self.model = fbp.Prophet()
        # Component index of the target series when fitting a
        # TimeSeriesDataset (multivariate); None for univariate fits.
        self.y = None

    def fit(self, ts: Union[TimeSeries, TimeSeriesDataset],
            y: Optional[int] = None) -> NoReturn:
        """
        Fit a Prophet model to a time series. If given a TimeSeriesDataset, the
        optional argument y must be given to indicate on which component the
        model should be fitted to.

        Args:
            ts: TimeSeries or TimeSeriesDataset to fit
            y: Optional int of the component index in a TimeSeriesDataset

        Returns:
            NoReturn

        Raises:
            ValueError: if ts is neither a TimeSeries nor a TimeSeriesDataset
        """
        super().fit(ts)
        # Prepare the model
        if isinstance(ts, TimeSeries):
            self.type = MODEL_TYPE_UNIVARIATE
            df = self.__prepare_time_series_for_prophet(self.X_train)
        elif isinstance(ts, TimeSeriesDataset):
            assert y is not None, "For multivariate prediction, the y " \
                                  "argument must be given."
            self.type = MODEL_TYPE_MULTIVARIATE
            self.y = y
            df = self.__prepare_time_series_dataset_for_prophet(
                self.X_train,
                self.y
            )
            # Add all components except y as extra regressor
            regressors = df.columns.to_list()
            regressors.remove("y")
            regressors.remove("ds")
            for r in regressors:
                self.model.add_regressor(r)
        else:
            # BUG FIX: the ValueError was previously created but never raised,
            # letting execution fall through to model.fit() with `df` unbound.
            raise ValueError('The fit method accepts only TimeSeries or '
                             'TimeSeriesDataset as argument')
        self.model.fit(df)

    def predict(self, horizon: Union[str, TimeSeries, TimeSeriesDataset],
                freq: str = None) \
            -> TimeSeries:
        """
        Predict over the given horizon.

        Args:
            horizon: a frequency string, a TimeSeries (univariate models), or
                a TimeSeriesDataset (multivariate models)
            freq: frequency string, used only when horizon is a string

        Returns:
            TimeSeries of predicted values with confidence-interval columns

        Raises:
            ValueError: if horizon's type does not match the fitted model type
        """
        super().predict(horizon)
        # Prepare the data
        if self.type == MODEL_TYPE_UNIVARIATE:
            if isinstance(horizon, str):
                future = self.make_future_dataframe(horizon, freq)
                metadata = None
            elif isinstance(horizon, TimeSeries):
                future = self.__prepare_time_series_for_prophet(horizon.empty())
                metadata = horizon.metadata
            else:
                # BUG FIX: previously `future`/`metadata` stayed unbound here
                # and the code crashed later with NameError instead of a
                # clear error.
                raise ValueError("horizon argument type isn't recognized")
        elif self.type == MODEL_TYPE_MULTIVARIATE:
            if isinstance(horizon, TimeSeriesDataset):
                # Blank out the target component; Prophet predicts it.
                horizon[:, self.y] = horizon[:, self.y].empty()
                future = self.__prepare_time_series_dataset_for_prophet(
                    horizon, self.y)
                metadata = horizon[:, self.y].data[self.y].metadata
            else:
                # BUG FIX: the ValueError was created but never raised.
                raise ValueError("horizon argument type isn't recognized")
        # Predict
        forecast = self.model.predict(future)
        forecast.rename(columns={"yhat": COMPONENT_VALUES,
                                 "yhat_lower": COMPONENT_META_CI_LOWER,
                                 "yhat_upper": COMPONENT_META_CI_UPPER},
                        inplace=True)
        df = forecast[[COMPONENT_VALUES,
                       COMPONENT_META_CI_LOWER,
                       COMPONENT_META_CI_UPPER]]
        # NOTE(review): assigning to .index on this column selection may emit
        # a pandas SettingWithCopyWarning; behavior kept as-is.
        df.index = forecast["ds"]
        # Register the prediction plot
        ts = TimeSeries(df, metadata)
        return ts

    @staticmethod
    def __prepare_time_series_for_prophet(ts: TimeSeries):
        """Convert a TimeSeries into Prophet's expected (ds, y) DataFrame."""
        df = ts.to_df().copy()
        df["ds"] = df.index
        df = df.reset_index(drop=True)
        df = df.rename(columns={"values": "y"})
        df.columns = df.columns.astype(str)
        return df

    @staticmethod
    def __prepare_time_series_dataset_for_prophet(tsd: TimeSeriesDataset,
                                                  y: int):
        """Convert a TimeSeriesDataset into a Prophet DataFrame where component
        `y` becomes the target column and the rest keep their names."""
        df = tsd.to_df().copy()
        df["ds"] = df.index
        df = df.reset_index(drop=True)
        df = df.rename(columns={y: "y"})
        df.columns = df.columns.astype(str)
        return df

    def make_future_dataframe(self, horizon: str, freq: str = None):
        """Build an empty future DataFrame with a 'ds' column spanning
        `horizon` at frequency `freq` (index built by the base class)."""
        index = self.make_future_index(horizon, freq)
        df = DataFrame(data=index.to_series(), columns=["ds"])
        df = df.reset_index(drop=True)
        return df
|
11505118 | import unittest
import gtirb
from gtirb_helpers import add_code_block, add_text_section, create_test_module
from pprinter_helpers import (
PPrinterTest,
can_mock_binaries,
interesting_lines,
run_binary_pprinter_mock,
run_asm_pprinter,
asm_lines,
)
@unittest.skipUnless(can_mock_binaries(), "cannot mock binaries")
class WindowsBinaryPrinterTests(PPrinterTest):
    """Tests of the PE binary printer that intercept the external tool
    invocations (ml64.exe / lib.exe) via mocks and assert on their
    command-line arguments."""

    def test_windows_subsystem_gui(self):
        """A WINDOWS_GUI module must be assembled with /SUBSYSTEM:windows."""
        # This tests the changes in MR 346.
        ir, m = create_test_module(
            file_format=gtirb.Module.FileFormat.PE,
            isa=gtirb.Module.ISA.X64,
            binary_type=["EXEC", "EXE", "WINDOWS_GUI"],
        )
        _, bi = add_text_section(m)
        block = add_code_block(bi, b"\xC3")  # single RET instruction
        m.entry_point = block
        tools = list(run_binary_pprinter_mock(ir))
        self.assertEqual(len(tools), 1)
        self.assertEqual(tools[0].name, "ml64.exe")
        self.assertIn("/SUBSYSTEM:windows", tools[0].args)

    def test_windows_subsystem_console(self):
        """A WINDOWS_CUI module must be assembled with /SUBSYSTEM:console."""
        # This tests the changes in MR 346.
        ir, m = create_test_module(
            file_format=gtirb.Module.FileFormat.PE,
            isa=gtirb.Module.ISA.X64,
            binary_type=["EXEC", "EXE", "WINDOWS_CUI"],
        )
        _, bi = add_text_section(m)
        block = add_code_block(bi, b"\xC3")
        m.entry_point = block
        tools = list(run_binary_pprinter_mock(ir))
        self.assertEqual(len(tools), 1)
        self.assertEqual(tools[0].name, "ml64.exe")
        self.assertIn("/SUBSYSTEM:console", tools[0].args)

    def test_windows_dll(self):
        """A DLL module (no entry point set) must be assembled with /DLL."""
        ir, m = create_test_module(
            file_format=gtirb.Module.FileFormat.PE,
            isa=gtirb.Module.ISA.X64,
            binary_type=["EXEC", "DLL", "WINDOWS_CUI"],
        )
        _, bi = add_text_section(m)
        add_code_block(bi, b"\xC3")
        tools = list(run_binary_pprinter_mock(ir))
        self.assertEqual(len(tools), 1)
        self.assertEqual(tools[0].name, "ml64.exe")
        self.assertIn("/DLL", tools[0].args)

    def test_windows_defs(self):
        """PE import entries must produce a lib.exe run with a /DEF: file
        listing the imported library and symbols."""
        ir, m = create_test_module(
            file_format=gtirb.Module.FileFormat.PE,
            isa=gtirb.Module.ISA.X64,
            binary_type=["EXEC", "EXE", "WINDOWS_CUI"],
        )
        # (address, ordinal, name, dll); ordinal -1 means import by name.
        m.aux_data["peImportEntries"].data.append(
            (0, -1, "GetMessageW", "USER32.DLL")
        )
        # Scan all mocked tool runs for the lib.exe invocation.
        for tool in run_binary_pprinter_mock(ir):
            if tool.name == "lib.exe":
                def_arg = next(
                    (arg for arg in tool.args if arg.startswith("/DEF:")), None
                )
                self.assertIsNotNone(def_arg, "no /DEF in lib invocation")
                self.assertIn("/MACHINE:X64", tool.args)
                # The /DEF: argument carries the def-file path after the prefix.
                with open(def_arg[5:], "r") as f:
                    lines = interesting_lines(f.read())
                    self.assertEqual(
                        lines,
                        ['LIBRARY "USER32.DLL"', "EXPORTS", "GetMessageW"],
                    )
                break
        else:
            # for/else: reached only if no lib.exe run was seen.
            self.fail("did not see a lib.exe execution")
class WindowsBinaryPrinterTests_NoMock(PPrinterTest):
    """Assembly-printer tests that do not need mocked binary tools."""

    def test_windows_includelib(self):
        """Library references must be emitted as INCLUDELIB directives with a
        .lib extension, never with the original .DRV/.DLL extension."""
        ir, m = create_test_module(
            file_format=gtirb.Module.FileFormat.PE,
            isa=gtirb.Module.ISA.X64,
            binary_type=["EXEC", "EXE", "WINDOWS_CUI"],
        )
        _, bi = add_text_section(m)
        # NOTE: the parentheses are redundant — these append plain strings,
        # not tuples.
        m.aux_data["libraries"].data.append(("WINSPOOL.DRV"))
        m.aux_data["libraries"].data.append(("USER32.DLL"))
        asm = run_asm_pprinter(ir)
        self.assertContains(asm_lines(asm), ["INCLUDELIB WINSPOOL.lib"])
        self.assertContains(asm_lines(asm), ["INCLUDELIB USER32.lib"])
        self.assertNotContains(asm_lines(asm), ["INCLUDELIB WINSPOOL.DRV"])
        self.assertNotContains(asm_lines(asm), ["INCLUDELIB USER32.DLL"])
|
11505147 | import pytest
import numpy as np
from emukit.multi_fidelity.convert_lists_to_array import \
convert_x_list_to_array, convert_y_list_to_array, convert_xy_lists_to_arrays
def test_convert_x_list_to_array():
    """Per-fidelity X arrays are stacked vertically with the fidelity index
    appended as an extra final column."""
    x_list = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    x_array = convert_x_list_to_array(x_list)
    expected_output = np.array([[1, 0, 0], [2, 1, 0], [3, 2, 1], [4, 5, 1]])
    assert np.array_equal(x_array, expected_output)
def test_convert_y_list_to_array():
    """Per-fidelity Y arrays are simply stacked vertically (no index column)."""
    y_list = [np.array([[0.0], [1.0]]), np.array([[2.0], [5.0]])]
    y_array = convert_y_list_to_array(y_list)
    expected_output = np.array([[0.], [1.0], [2.], [5.]])
    assert np.array_equal(y_array, expected_output)
def test_convert_xy_lists_to_arrays():
    """The combined converter must agree with the two single converters."""
    x_list = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([[0.0], [1.0]]), np.array([[2.0], [5.0]])]
    x_array, y_array = convert_xy_lists_to_arrays(x_list, y_list)
    expected_y = np.array([[0.], [1.0], [2.], [5.]])
    expected_x = np.array([[1, 0, 0], [2, 1, 0], [3, 2, 1], [4, 5, 1]])
    assert np.array_equal(y_array, expected_y)
    assert np.array_equal(x_array, expected_x)
def test_convert_y_list_to_array_fails_with_1d_input():
    """1-D arrays (instead of 2-D column vectors) must be rejected."""
    y_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0])]
    with pytest.raises(ValueError):
        convert_y_list_to_array(y_list)
def test_convert_x_list_to_array_fails_with_1d_input():
    """1-D X arrays must be rejected."""
    x_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0])]
    with pytest.raises(ValueError):
        convert_x_list_to_array(x_list)
def test_convert_xy_lists_to_arrays_fails_with_different_number_of_fidelities():
    """x_list has 2 fidelities, y_list has 3 — lengths must match."""
    x_list = [np.array([[1, 0], [2, 1]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0]), np.array([3, 6])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(x_list, y_list)
def test_convert_xy_lists_to_arrays_fails_with_different_number_of_points_at_fidelity():
    """X and Y must have the same number of points at each fidelity level."""
    x_list = [np.array([[1, 0], [2, 1], [3, 4]]), np.array([[3, 2], [4, 5]])]
    y_list = [np.array([0.0, 1.0]), np.array([2.0, 5.0])]
    with pytest.raises(ValueError):
        convert_xy_lists_to_arrays(x_list, y_list)
|
11505154 | from .classic_league import ClassicLeague
from .fixture import Fixture
from .gameweek import Gameweek
from .h2h_league import H2HLeague
from .player import Player
from .team import Team
from .user import User
__all__ = ("ClassicLeague", "Fixture", "Gameweek", "H2HLeague", "Player",
"Team", "User")
|
11505163 | import unittest
import bottle
from tools import api
class TestRoute(unittest.TestCase):
    """Tests bottle.Route's ability to see through decorator wrappers."""

    @api('0.12')
    def test_callback_inspection(self):
        """get_undecorated_callback must unwrap decorated callbacks (including
        decorators built by a factory) and expose the original signature."""
        def x(a, b): pass

        # A plain decorator: w closes over f.
        def d(f):
            def w():
                return f()
            return w

        route = bottle.Route(None, None, None, d(x))
        self.assertEqual(route.get_undecorated_callback(), x)
        self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))

        # A decorator *factory*: one extra closure level to see through.
        def d2(foo):
            def d(f):
                def w():
                    return f()
                return w
            return d

        route = bottle.Route(None, None, None, d2('foo')(x))
        self.assertEqual(route.get_undecorated_callback(), x)
        self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
|
11505187 | import torch
import copy
import torch.nn as nn
from torch.autograd import Variable
import utils.io as io
import utils.pytorch_layers as pytorch_layers
from exp.hoi_classifier.models.verb_given_object_appearance import \
VerbGivenObjectAppearanceConstants, VerbGivenObjectAppearance
from exp.hoi_classifier.models.verb_given_human_appearance import \
VerbGivenHumanAppearanceConstants, VerbGivenHumanAppearance
from exp.hoi_classifier.models.verb_given_boxes_and_object_label import \
VerbGivenBoxesAndObjectLabelConstants, VerbGivenBoxesAndObjectLabel
from exp.hoi_classifier.models.verb_given_human_pose import \
VerbGivenHumanPoseConstants, VerbGivenHumanPose
from exp.hoi_classifier.models.scatter_verbs_to_hois import \
ScatterVerbsToHoisConstants, ScatterVerbsToHois
import pdb
class MTL(nn.Module):
    """Multi-task head producing per-cluster verb scores and a soft
    cluster-assignment weight for each sample.

    forward(feats) with feats of shape (B, 512) returns:
      - output1: raw scores of shape (B, 117*num_cluster)
      - output2: softmax cluster weights of shape (B, num_cluster)
    """

    def __init__(self):
        super(MTL, self).__init__()
        input_size = 512
        num_cluster = 32  # 45+1
        # 117 or 600 ------------------(1) For MoE
        self.classifier = nn.Linear(input_size, 117*num_cluster)
        # self.classifier = nn.Linear(input_size,(117-num_cluster+2)*num_cluster)# 117 or 600-----(2) For Anchor
        self.embedding = nn.Linear(input_size, num_cluster)

    def forward(self, feats):
        # FIX: softmax without an explicit `dim` is deprecated and relied on
        # implicit-dim inference; dim=1 matches the implicit behavior for the
        # 2-D (B, num_cluster) input.
        output2 = nn.functional.softmax(self.embedding(feats), dim=1)
        output1 = self.classifier(feats)  # B*(117*G)
        return output1, output2
class ScatterClusterToHois(nn.Module):
    """Maps per-group (cluster) scores to per-verb scores via a JSON lookup.

    NOTE(review): despite the class name, this scatters group scores onto
    verbs (gid2verb), not onto HOIs — confirm the intended naming.
    """
    def __init__(self,json_file):
        super(ScatterClusterToHois,self).__init__()
        # gid2verb: list/array where entry i gives the group id for verb i
        # (used as a column index below) — presumably; verify the JSON layout.
        self.gid2verb = io.load_json_object(json_file)
    def forward(self,group_scores):
        # Column gather: verb_scores[:, j] = group_scores[:, gid2verb[j]]
        verb_scores = group_scores[:,self.gid2verb]
        return verb_scores
class HoiClassifierConstants(io.JsonSerializableClass):
    """Configuration object selecting which factor modules the HoiClassifier
    instantiates, plus feature flags shared by those factors."""

    # NOTE(review): these constants objects are created once at class-body
    # evaluation and shared by every HoiClassifierConstants instance — any
    # mutation (e.g. in create_factor) affects all instances.
    FACTOR_NAME_TO_MODULE_CONSTANTS = {
        'verb_given_object_app': VerbGivenObjectAppearanceConstants(),
        'verb_given_human_app': VerbGivenHumanAppearanceConstants(),
        'verb_given_boxes_and_object_label': VerbGivenBoxesAndObjectLabelConstants(),
        'verb_given_human_pose': VerbGivenHumanPoseConstants(),
    }

    def __init__(self):
        super(HoiClassifierConstants,self).__init__()
        # Which appearance factors to use (see selected_factor_names for the
        # precedence between these three flags).
        self.verb_given_appearance = True
        self.verb_given_human_appearance = True
        self.verb_given_object_appearance = True
        self.verb_given_boxes_and_object_label = True
        self.verb_given_human_pose = True
        # Use Faster-RCNN detection probabilities in the final HOI score.
        self.rcnn_det_prob = True
        self.use_object_label = True
        self.use_log_feat = True
        self.scatter_verbs_to_hois = ScatterVerbsToHoisConstants()

    @property
    def selected_factor_constants(self):
        """Dict of {factor name: constants object} for the enabled factors."""
        factor_constants = {}
        for factor_name in self.selected_factor_names:
            const = self.FACTOR_NAME_TO_MODULE_CONSTANTS[factor_name]
            factor_constants[factor_name] = const
        return factor_constants

    @property
    def selected_factor_names(self):
        """Names of enabled factors.

        Note the elif chain: verb_given_appearance implies both appearance
        factors and takes precedence over the two individual flags.
        """
        factor_names = []
        if self.verb_given_appearance:
            factor_names.append('verb_given_object_app')
            factor_names.append('verb_given_human_app')
        elif self.verb_given_human_appearance:
            factor_names.append('verb_given_human_app')
        elif self.verb_given_object_appearance:
            factor_names.append('verb_given_object_app')
        if self.verb_given_boxes_and_object_label:
            factor_names.append('verb_given_boxes_and_object_label')
        if self.verb_given_human_pose:
            factor_names.append('verb_given_human_pose')
        return factor_names
class HoiClassifier(nn.Module,io.WritableToFile):
    """Combines per-factor verb scores, a clustering head (MTL), and RCNN
    detection probabilities into HOI probabilities.

    NOTE(review): this forward contains several commented-out experiment
    variants ("MoE" vs "anchors"); the active configuration is the MoE one.
    """

    FACTOR_NAME_TO_MODULE = {
        'verb_given_object_app': VerbGivenObjectAppearance,
        'verb_given_human_app': VerbGivenHumanAppearance,
        'verb_given_boxes_and_object_label': VerbGivenBoxesAndObjectLabel,
        'verb_given_human_pose': VerbGivenHumanPose,
    }

    def __init__(self,const):
        super(HoiClassifier,self).__init__()
        self.const = copy.deepcopy(const)
        # Experiment switch: route verb scores through the MTL cluster head.
        self.USE_cluster = True
        self.FC = MTL()
        self.sigmoid = pytorch_layers.get_activation('Sigmoid')
        self.scatter_verbs_to_hois = ScatterVerbsToHois(
            self.const.scatter_verbs_to_hois)
        # Instantiate one sub-module per enabled factor.
        for name, const in self.const.selected_factor_constants.items():
            self.create_factor(name,const)

    def create_factor(self,factor_name,factor_const):
        """Build one factor module and attach it as an attribute.

        NOTE(review): factor_const comes from a class-level shared dict on
        HoiClassifierConstants, so these mutations are visible globally.
        """
        if factor_name in ['verb_given_boxes_and_object_label','verb_given_human_pose']:
            factor_const.use_object_label = self.const.use_object_label
        if factor_name in ['verb_given_boxes_and_object_label']:
            factor_const.use_log_feat = self.const.use_log_feat
        factor = self.FACTOR_NAME_TO_MODULE[factor_name](factor_const)
        setattr(self,factor_name,factor)

    def forward(self,feats):
        """Returns (prob_vec, factor_scores, embedding); prob_vec['hoi'] is
        the product of human, object and verb probabilities."""
        factor_scores = {}
        embedding = {}
        any_verb_factor = False
        verb_factor_scores = 0
        # Average the scores from all verb factors.
        for factor_name in self.const.selected_factor_names:
            module = getattr(self,factor_name)
            factor_scores[factor_name] = module(feats)
            if 'verb_given' in factor_name:
                any_verb_factor = True
                verb_factor_scores += factor_scores[factor_name]
        verb_factor_scores = verb_factor_scores/len(self.const.selected_factor_names)
        # MTL head: raw per-cluster scores + soft cluster weights.
        verb_factor_scores,embedding = self.FC(verb_factor_scores)
        if any_verb_factor:
            verb_prob = self.sigmoid(verb_factor_scores)
            if self.USE_cluster:
                # NOTE(review): must match num_cluster hard-coded in MTL.
                NUM_of_CLUSTER = 32#45+1
                #self.scatter_cluster_to_hois = ScatterClusterToHois(f'anchor_only/gid2cid{NUM_of_CLUSTER-1}.json')#-------(*) only for anchors!!!
                cluster_weight= embedding
                cluster_weight = cluster_weight.unsqueeze(1)# B,1,G
                #verb_prob = verb_prob.reshape(-1,NUM_of_CLUSTER,117-NUM_of_CLUSTER+2)# B,G,117#-(1) for anchors
                verb_prob = verb_prob.reshape(-1,NUM_of_CLUSTER,117)# B,G,117#------------------(2) for MoE
                # Mixture-of-experts: weight each cluster's verb probs.
                verb_prob = torch.bmm(cluster_weight,verb_prob).squeeze()
                #verb_prob = self.scatter_cluster_to_hois(torch.cat((embedding[:,1:-1],verb_prob),1))#-------(*) only for anchors!!!
                assert(verb_prob.shape[1]==117)
            # NOTE(review): if USE_cluster were False, verb_prob_vec would be
            # unbound here — this path assumes USE_cluster is True.
            verb_prob_vec = self.scatter_verbs_to_hois(verb_prob)
        else:
            # No verb factor enabled: neutral verb probability of 1.
            verb_prob_vec = 0*feats['human_prob_vec'] + 1
        if self.const.rcnn_det_prob:
            human_prob_vec = feats['human_prob_vec']
            object_prob_vec = feats['object_prob_vec']
        else:
            human_prob_vec = 0*feats['human_prob_vec'] + 1
            object_prob_vec = 0*feats['object_prob_vec'] + 1
        prob_vec = {
            'human': human_prob_vec,
            'object': object_prob_vec,
            'verb': verb_prob_vec,
        }
        prob_vec['hoi'] = \
            prob_vec['human'] * \
            prob_vec['object'] * \
            prob_vec['verb']
        return prob_vec, factor_scores,embedding
|
11505190 | import functools
import warnings
"""
https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
by user2357112 supports Monica
"""
def deprecated_alias(**aliases):
    """
    Decorator factory that lets a function keep accepting deprecated
    keyword-argument names, transparently mapping each alias onto its
    replacement before the call.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            rename_kwargs(func.__name__, kwargs, aliases)
            return func(*args, **kwargs)
        return inner
    return decorator
def rename_kwargs(func_name, kwargs, aliases):
    """
    Rewrite deprecated keyword-argument names in ``kwargs`` in place.

    :param func_name: name of the calling function, used in messages
    :param kwargs: keyword-argument dict to mutate
    :param aliases: mapping of {deprecated_name: new_name}
    :raises TypeError: if both a deprecated name and its replacement are passed
    """
    for alias, new in aliases.items():
        if alias in kwargs:
            if new in kwargs:
                raise TypeError('{} received both {} and {}'.format(
                    func_name, alias, new))
            # FIX: stacklevel=3 attributes the warning to the caller of the
            # decorated function instead of this helper, which is what users
            # need to locate the deprecated call site.
            warnings.warn('{} is deprecated; use {}'.format(alias, new),
                          DeprecationWarning, stacklevel=3)
            kwargs[new] = kwargs.pop(alias)
|
11505202 | from typing import (
Any,
Callable,
Iterable,
NewType,
Tuple,
)
from dataclasses import dataclass
# Elements: name, out_kind, data
#
# out_kind is the type of data:
# - "data" for generic
# - "ssz" for SSZ encoded bytes
# - "meta" for generic data to collect into a meta data dict.
TestCasePart = NewType("TestCasePart", Tuple[str, str, Any])
@dataclass
class TestCase(object):
    """One generated test case, identified by its position in the
    fork/preset/runner/handler/suite hierarchy; case_fn lazily produces the
    case's output parts."""
    fork_name: str
    preset_name: str
    runner_name: str
    handler_name: str
    suite_name: str
    case_name: str
    # Called to generate the case's (name, out_kind, data) parts.
    case_fn: Callable[[], Iterable[TestCasePart]]
@dataclass
class TestProvider(object):
    """A source of test cases plus its one-time setup hook."""
    # Prepares the context for the provider as a whole, as opposed to per-test-case changes.
    prepare: Callable[[], None]
    # Retrieves an iterable of cases, called after prepare()
    make_cases: Callable[[], Iterable[TestCase]]
|
11505263 | from django.conf import settings
from django.views.generic.base import TemplateView
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from apps.search_head_api.models import Site, CaptureNode, SearchInfo
from apps.stats_api.views import GroupsByStat
from apps.stats_api.models import Stats, StatsInterface
from braces.views import LoginRequiredMixin
import datetime
import pytz
__author__ = 'pflarr'
class UserView(LoginRequiredMixin, TemplateView):
    """Base view for all pages that require an authenticated user."""
    pass
class DashboardView(UserView):
    """Main dashboard: lists sites, capture nodes, interfaces and the
    stat-grouping choices used by the dashboard widgets."""
    template_name = 'dashboard.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Get the capture nodes with stats in the db.
        context['sites'] = [site for site in Site.objects.all()]
        context['capture_nodes'] = [node for node in CaptureNode.objects.all()]
        context['interfaces'] = [(r.id, r.name) for r in StatsInterface.objects.all()]
        # Grouping / stat-type choices come from the stats API serializer.
        context['bystat_groups'] = GroupsByStat.GetSerializer.GROUPINGS.items()
        context['bystat_stat_types'] = GroupsByStat.GetSerializer.STAT_TYPES.items()
        return context
class MyTasksView(UserView):
    """Per-user task list page; no extra context beyond the template."""
    template_name = 'my_tasks.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        return context
class SearchView(UserView):
    """Search form page. When given an existing search_id, loads that search;
    otherwise pre-fills a new SearchInfo covering the last hour."""
    template_name = "search.html"

    def get_context_data(self, **kwargs):
        """Build the template context.

        :param kwargs: may contain 'search_id', the pk of an existing search
        :raises Http404: when search_id is given but does not exist
        """
        context = super().get_context_data(**kwargs)
        context['search'] = None
        if 'search_id' in kwargs and kwargs['search_id'] is not None:
            try:
                search = SearchInfo.objects.get(id=kwargs['search_id'])
            except SearchInfo.DoesNotExist:
                raise Http404
        else:
            # Default search window: the last hour, in UTC.
            now = pytz.UTC.localize(datetime.datetime.utcnow())
            search = SearchInfo(start=now-datetime.timedelta(hours=1),
                                end=now,
                                proto=0)
        context['search'] = search
        context['search_protos'] = settings.SEARCH_TRANSPORT_PROTOS.items()
        return context
# TODO: The permissions on each search result need to be checked. Right now they're served
# directly from /media, which makes anyone able to get to them. This is bad.
class SearchResultView(UserView):
    """Redirects a search id either to the flow-result page or to the
    produced pcap file, depending on the search type."""
    template_name = None

    def dispatch(self, request, *args, **kwargs):
        """Eventually this will give a page that displays a list of results for each site.

        For now, just redirect either to a Pcap file, or to a SearchView populated with
        the flow results (depending on the search type).

        :raises Http404: when the search id does not exist
        :raises RuntimeError: when the search has an unknown type
        """
        try:
            search = SearchInfo.objects.get(id=args[0])
        except SearchInfo.DoesNotExist:
            # No search, no page.
            raise Http404
        if search.type == search.T_FLOW:
            # Redirect to a flow result page.
            # BUG FIX: reverse()'s second positional parameter is `urlconf`,
            # not the URL arguments — the id must be passed via `args=`.
            return HttpResponseRedirect(
                reverse('search_head_gui:search', args=[search.id]))
        elif search.type == search.T_PCAP:
            # NOTE(review): FileField.url is normally a property, not a
            # callable — confirm `search.file.url()` against the model before
            # changing it.
            return HttpResponseRedirect(search.file.url())
        else:
            raise RuntimeError("Invalid search type.")
|
11505277 | import pytest
import rcpy.clock as clock
import time
def test1():
    """Exercise rcpy.clock.Clock: the action runs once per period, toggle()
    pauses/resumes the clock, and stop() ends it.

    NOTE(review): the sleep(1.1*period) timing margins make this test
    inherently sensitive to scheduler jitter.
    """
    class MyAction(clock.Action):
        # Counts how many times the clock fired.
        def __init__(self):
            self.count = 0
        def run(self):
            self.count += 1

    action = MyAction()
    obj = clock.Clock(action)
    obj.start()
    # The action has not run yet at start.
    assert action.count == 0
    time.sleep(1.1*obj.period)
    assert action.count == 1
    time.sleep(1.1*obj.period)
    assert action.count == 2
    # toggle() pauses: no new runs while paused.
    obj.toggle()
    time.sleep(1.1*obj.period)
    assert action.count == 2
    # toggle() again resumes the clock.
    obj.toggle()
    time.sleep(1.1*obj.period)
    assert action.count == 3
    obj.stop()

if __name__ == '__main__':
    test1()
|
11505355 | from biicode.common.settings.version import Version
from biicode.common.settings.tool_info import ToolInfo
from biicode.common.settings.fixed_string import FixedString
import copy
from biicode.common.settings.tools import Architecture
import platform
import os
class OSFamily(FixedString):
    """Constrained string naming an operating-system family."""
    # Allowed values. NOTE(review): OSInfo.platforms produces lowercase names
    # ('windows', ...) — presumably FixedString normalizes case; confirm.
    values = {'Windows', 'Linux', 'MacOS', 'Java'}
class OSInfo(ToolInfo):
    """Store and capture information about running operating system."""

    # Serialization schema: extend the base ToolInfo schema with the two
    # fields this class adds.
    smart_serial = copy.copy(ToolInfo.smart_serial)
    smart_serial['family'] = ('family', OSFamily, None)
    smart_serial['arch'] = ('arch', Architecture, None)

    # Mapping from sys/platform identifiers to our four OS family names.
    platforms = {'windows': 'windows',
                 'win32': 'windows',
                 'linux': 'linux',
                 'linux2': 'linux',
                 'cygwin': 'linux',
                 'darwin': 'macos',
                 'os2': 'windows',
                 'os2emx': 'windows',
                 'riscos': 'linux',
                 'atheos': 'linux',
                 'java': 'java'
                 }

    @staticmethod
    def is_win():
        return OSInfo.family() == 'Windows'

    @staticmethod
    def is_mac():
        family = OSInfo.family()
        return family == 'MacOS'

    @staticmethod
    def is_linux():
        return OSInfo.family() == 'Linux'

    @staticmethod
    def is_debian_based_linux():
        # NOTE(review): platform.linux_distribution() was removed in
        # Python 3.8 — this raises AttributeError on modern interpreters.
        if not OSInfo.is_linux():
            return False
        tmp = platform.linux_distribution()[0]
        return tmp == "debian" or tmp == "Ubuntu"

    @staticmethod
    def is_redhat_based_linux():
        # Same linux_distribution() caveat as above.
        if not OSInfo.is_linux():
            return False
        tmp = platform.linux_distribution()[0]
        return tmp == "Fedora" or tmp == "CentOS"

    @staticmethod
    def is_rpi():
        # Raspberry Pi detection: machine field of uname starts with 'arm'.
        return 'arm' == os.uname()[4][:3]

    @staticmethod
    def family():
        # NOTE(review): this local `os` shadows the imported os module inside
        # this function — harmless here but a latent hazard.
        os = platform.system().lower()
        return OSFamily(OSInfo.platforms[os])

    @staticmethod
    def architecture():
        return Architecture(platform.architecture()[0].lower())

    @staticmethod
    def capture():
        """ Capture current OS information
        """
        result = OSInfo()
        result.family = OSInfo.family()
        # NOTE(review): these comparisons use lowercase names while the
        # is_*() helpers compare against capitalized ones — presumably
        # OSFamily equality is case-insensitive; confirm in FixedString.
        if result.family == 'windows':
            # i.e: subfamily = '7', version = "6.66612"
            result.subfamily = platform.release()
            result.version = Version(platform.version())
        elif result.family == 'linux':
            result.subfamily = platform.linux_distribution()[0]
            result.version = Version(platform.linux_distribution()[1])
        elif result.family == 'macos':
            result.subfamily = None
            result.version = Version(platform.mac_ver()[0])
        elif result.family == 'java':
            result.subfamily = " ".join(platform.java_ver()[2])
            result.version = Version(platform.release())
        else:
            result.subfamily = None  # Default value is none in ToolInfo
            result.version = Version()  # Default value is Version() in ToolInfo
        result.arch = OSInfo.architecture()
        return result
|
11505397 | import errno
import subprocess
import sys
from subprocess import PIPE, STDOUT
from cookiecutter import utils
from cookiecutter.exceptions import FailedHookException
EXIT_SUCCESS = 0
def run_script(script_path, cwd="."):
    """Execute a script from a working directory.

    :param script_path: Absolute path to the script to run.
    :param cwd: The directory to run the script from.
    :raises FailedHookException: if the script exits non-zero or cannot be run.
    """
    run_thru_shell = sys.platform.startswith("win")
    if script_path.endswith(".py"):
        script_command = [sys.executable, script_path]
    else:
        script_command = [script_path]
        utils.make_executable(script_path)
    try:
        proc = subprocess.Popen(
            script_command, shell=run_thru_shell, cwd=cwd, stderr=STDOUT, stdout=PIPE
        )
        # BUG FIX: calling proc.wait() before reading the PIPE can deadlock
        # when the child produces more output than the pipe buffer holds;
        # communicate() drains the pipe while waiting.
        stdout, _ = proc.communicate()
        output = stdout.decode()
        exit_status = proc.returncode
        if exit_status != EXIT_SUCCESS:
            raise FailedHookException(
                "Hook script failed (exit status: {})\n {}".format(exit_status, output)
            )
        # Print the output from the hooks so that we can capture the information
        # for the UI
        print(output)
    except OSError as os_error:
        # ENOEXEC: the kernel refused to execute the file (no shebang / empty).
        if os_error.errno == errno.ENOEXEC:
            raise FailedHookException(
                "Hook script failed, might be an " "empty file or missing a shebang"
            )
        raise FailedHookException("Hook script failed (error: {})".format(os_error))
|
11505405 | import pathlib
from mara_pipelines.commands.sql import ExecuteSQL
from mara_pipelines.pipelines import Pipeline, Task
# E-commerce ETL pipeline: preprocess raw tables, build the ec_dim star
# schema, constrain it, then atomically swap it into place.
pipeline = Pipeline(
    id="e_commerce",
    description="Builds the e-commerce cubes and datasets",
    base_path=pathlib.Path(__file__).parent,
    labels={"Schema": "ec_dim"})

# Runs before everything else: drop and recreate the working schemas.
pipeline.add_initial(
    Task(id="initialize_schemas",
         description="Recreates the schemas of the pipeline",
         commands=[
             ExecuteSQL(sql_file_name="recreate_schemas.sql")
         ]))

# --- Preprocessing stage -------------------------------------------------
pipeline.add(
    Task(id="preprocess_customer",
         description="Preprocess customers and consolidate the data to a single record per customer with "
                     "a unique ID",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_customer.sql")
         ]))

pipeline.add(
    Task(id="preprocess_order",
         description="Preprocess orders to get correct unique customer ID",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_order.sql")
         ]),
    upstreams=["preprocess_customer"])

pipeline.add(
    Task(id="preprocess_order_item",
         description="Preprocess order items to get correct unique customer ID",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_order_item.sql")
         ]),
    upstreams=["preprocess_order"])

pipeline.add(
    Task(id="preprocess_product",
         description="Preprocess products with product category in English",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_product.sql")
         ]))

pipeline.add(
    Task(id="preprocess_seller",
         description="Preprocess sellers and compute the seller's first order",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_seller.sql")
         ]))

pipeline.add(
    Task(id="preprocess_zip_code",
         description="Preprocess and collect zip codes from all data",
         commands=[
             ExecuteSQL(sql_file_name="preprocess_zip_code.sql")
         ]),
    upstreams=["preprocess_seller", "preprocess_customer"])

# --- Transform stage: build the dim tables -------------------------------
pipeline.add(
    Task(id="transform_zip_code",
         description="Creates the zip_code dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_zip_code.sql")
         ]),
    upstreams=["preprocess_zip_code"])

pipeline.add(
    Task(id="transform_seller",
         description="Creates the seller dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_seller.sql")
         ]),
    upstreams=["preprocess_order_item", "preprocess_seller"])

pipeline.add(
    Task(id="transform_order",
         description="Creates the order dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_order.sql")
         ]),
    upstreams=["preprocess_order"])

# NOTE(review): transform_customer depends on order_item/product but not on
# preprocess_customer — confirm this upstream list is intentional.
pipeline.add(
    Task(id="transform_customer",
         description="Creates the customer dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_customer.sql")
         ]),
    upstreams=["preprocess_order_item", "preprocess_product"])

pipeline.add(
    Task(id="transform_order_item",
         description="Creates the order_item dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_order_item.sql")
         ]),
    upstreams=["preprocess_order_item"])

pipeline.add(
    Task(id="transform_product",
         description="Creates the product dim table",
         commands=[
             ExecuteSQL(sql_file_name="transform_product.sql")
         ]),
    upstreams=["preprocess_product", "preprocess_order_item"])

# Add FK constraints only after all dim tables exist.
pipeline.add(
    Task(id="constrain_tables",
         description="Adds foreign key constrains between the dim tables",
         commands=[
             ExecuteSQL(sql_file_name="constrain_tables.sql", echo_queries=False)
         ]),
    upstreams=["transform_seller",
               "transform_customer",
               "transform_order_item",
               "transform_order",
               "transform_product",
               "transform_zip_code"])

# Runs last: atomically replace the live schema with the freshly built one.
pipeline.add_final(
    Task(id="replace_schema",
         description="Replaces the current ec_dim schema with the contents of ec_dim_next",
         commands=[
             ExecuteSQL(sql_statement="SELECT util.replace_schema('ec_dim', 'ec_dim_next');")
         ]))
|
11505465 | import argparse
import os
import sys
import json
import scipy.misc as misc
import numpy as np
# Input directory given on the command line; expected to contain result.json
# and a parts_render/ sub-directory.
in_dir = sys.argv[1]
# write the file header and footer
html_head = '<html><head><meta charset="UTF-8"><title>Simple Viewer</title>' + \
            '<style>table {table-layout: fixed; }th, td { width: 100px; }</style></head><body>'
html_tail = '</body></html>'
def gen_html_for_tree_hier(html_fn, tree_hier, parts_render_dir):
    """Render a part hierarchy as an HTML table, one tree level per row,
    each part cell showing its rendered image and metadata label.

    :param html_fn: output HTML file path
    :param tree_hier: nested dict with 'id' and optional 'children' keys
    :param parts_render_dir: unused here; image paths are built relative to
        the HTML file and metadata is read from the global in_dir
    """
    fout = open(html_fn, 'w')
    fout.write(html_head+'\n')
    # node_level: level -> list of node ids; node_loc: node id -> column index.
    node_level = {}; node_loc = {}; all_nodes = [];

    def find_level_loc(cur_tree_hier, cur_level, cur_loc):
        """Recursively assign each node a level and a column; returns the
        horizontal width consumed by this subtree."""
        node_id = cur_tree_hier['id']
        all_nodes.append(node_id)
        if 'children' in cur_tree_hier:
            child_nodes = cur_tree_hier['children']
        else:
            child_nodes = []
        if cur_level not in node_level.keys():
            node_level[cur_level] = []
        node_level[cur_level].append(node_id)
        if len(child_nodes) == 0:
            # A leaf occupies one column.
            return 1
        else:
            old_cur_loc = cur_loc
            for child_node in child_nodes:
                child_loc = find_level_loc(child_node, cur_level+1, cur_loc)
                node_loc[child_node['id']] = cur_loc
                # +1 leaves an empty column between sibling subtrees.
                cur_loc += child_loc + 1
            return cur_loc - old_cur_loc

    root_node = tree_hier['id']
    node_loc[root_node] = 0
    find_level_loc(tree_hier, 0, 0)
    max_level = max(node_level.keys())
    fout.write('<table>')
    tot_parts = 0
    for level_id in range(max_level+1):
        fout.write('<tr>')
        # Map column index -> node id for this level.
        cur_level_node_locs = {node_loc[item]: item for item in node_level[level_id]}
        cur_level_locs_dict = cur_level_node_locs.keys()
        tot_parts += len(cur_level_locs_dict)
        max_loc = max(cur_level_locs_dict)
        for width_id in range(max_loc+1):
            if width_id in cur_level_locs_dict:
                # Image path is relative to the HTML file; metadata label is
                # the first line of the matching .txt file.
                cur_part_img = os.path.join('parts_render/', str(cur_level_node_locs[width_id])+'.png')
                cur_meta_file = os.path.join(in_dir, 'parts_render/', str(cur_level_node_locs[width_id])+'.txt')
                with open(cur_meta_file, 'r') as fin:
                    meta = fin.readlines()[0].rstrip();
                fout.write('<td><p>%s</p><a href="%s"><img src="%s" width="100px" height="100px"/></a></td>'%(meta, cur_part_img, cur_part_img))
            else:
                # Empty spacer cell.
                fout.write('<td></td>')
        fout.write('</tr>')
    fout.write('</table>')
    fout.write(html_tail)
    fout.close()
# Entry point: read the hierarchy JSON and emit tree_hier.html next to it.
model_path = in_dir
tree_hier_json = os.path.join(model_path, 'result.json')
parts_render_dir = os.path.join(model_path, 'parts_render')
with open(tree_hier_json, 'r') as fin:
    # result.json holds a list; the first element is the root hierarchy.
    tree_hier = json.load(fin)[0]
html_fn = os.path.join(model_path, 'tree_hier.html')
gen_html_for_tree_hier(html_fn, tree_hier, parts_render_dir)
|
11505484 | from collections import defaultdict
class Solution(object):
    def candy(self, ratings):
        """
        Minimum candies so every child gets >= 1 and any child rated higher
        than an adjacent child gets more candies than that neighbor.

        Replaces the original graph + front-of-list stack approach (which
        used list.pop(0)/insert(0, ...) — O(n) each, worst-case quadratic)
        with the classic two-pass greedy, O(n) time and O(n) space.

        :type ratings: List[int]
        :rtype: int
        """
        n = len(ratings)
        candies = [1] * n
        # Left-to-right: enforce the constraint against the left neighbor.
        for i in range(1, n):
            if ratings[i] > ratings[i - 1]:
                candies[i] = candies[i - 1] + 1
        # Right-to-left: enforce the constraint against the right neighbor
        # without breaking the left-neighbor one (hence the max).
        for i in range(n - 2, -1, -1):
            if ratings[i] > ratings[i + 1]:
                candies[i] = max(candies[i], candies[i + 1] + 1)
        return sum(candies)
|
11505485 | from enum import Enum
from .attribute import boolean_html_attribute, html_attribute, enum_attribute
from .element import Element
class Preload(Enum):
    """Allowed values of the HTML media ``preload`` attribute."""

    NONE = "none"          # hint: preload nothing
    METADATA = "metadata"  # hint: fetch metadata only
    AUTO = "auto"          # hint: the browser may preload the whole resource
class Video(Element):
    """Builder for an HTML ``<video>`` element.

    Attribute descriptors come from the project's attribute helpers:
    ``controls`` is a boolean attribute, ``poster`` a plain string
    attribute, and ``preload`` is restricted to the ``Preload`` enum.
    """

    controls = boolean_html_attribute("controls")
    poster = html_attribute("poster")
    preload = enum_attribute("preload", Preload)

    def __init__(self, src: str) -> None:
        """Create a ``<video>`` element with the given ``src`` URL."""
        super().__init__("video")
        self.set_attribute("src", src)
|
11505494 | from invoicing.repository.base_repository import BaseRepository
class CompanyRepository(BaseRepository):
    """Repository scoped to the 'company' entity; all behavior is inherited."""

    def __init__(self):
        # BaseRepository takes the entity name it operates on.
        super().__init__('company')
|
11505499 | import FWCore.ParameterSet.Config as cms
# File: caloTowers.cfi
# Author: <NAME>
# Date: 03.04.2008
#
# Fill validation histograms for caloTowers.
# Assumes caloTowers are in event.
from DQMOffline.JetMET.caloTowers_cfi import *
# DQM sequence running the scheme-B calo-tower analyzer imported from
# DQMOffline.JetMET.caloTowers_cfi above.
analyzecaloTowersDQM = cms.Sequence(towerSchemeBAnalyzer)
|
11505568 | from random import choice
# Available moves for rock-paper-scissors-lizard-spock.
actions = ["rock", "paper", "scissors", "lizard", "spock"]
# Computer picks uniformly at random.
computer_action = choice(actions)
user_input = str(input("What is your act (rock, paper, scissors, lizard, spock): "))
if (user_input in actions):
    # NOTE(review): if the input is invalid, user_action is never bound and a
    # later reference to it would raise NameError — confirm intended handling.
    user_action = user_input
def check_actions(computer_act, user_act):
computer_points, user_points = 0, 0
if user_act == actions[0]:
if computer_act == actions[0]:
print("Both same.")
elif computer_act == actions[1]:
print("Paper Covers Rock.")
computer_points = computer_points + 1
elif computer_act == actions[2]:
print("Rock crushes Scissors.")
user_points = user_points + 1
elif computer_act == actions[3]:
print("Rock crushes Lizard.")
user_points = user_points + 1
elif computer_act == actions[4]:
print("Spock smashes Scissors.")
computer_points = computer_points + 1
elif user_act == actions[1]:
if computer_act == actions[0]:
print("paper covers rock.")
user_points = user_points + 1
elif computer_act == actions[1]:
print("Both same.")
elif computer_act == actions[2]:
print("Scissors cuts Paper.")
computer_points = computer_points + 1
elif computer_act == actions[3]:
print("Lizard eats Paper")
computer_points = computer_points + 1
elif computer_act == actions[4]:
print("Paper disproves Spock.")
user_points = user_points + 1
elif user_act == actions[2]:
if computer_act == actions[0]:
print("Rock crushes Scissors.")
computer_points = computer_points + 1
elif computer_act == actions[1]:
print("Scissors cuts Paper.")
user_points = user_points + 1
elif computer_act == actions[2]:
print("Both same.")
elif computer_act == actions[3]:
print("Scissors cuts Lizard")
user_points = user_points + 1
elif computer_act == actions[4]:
print("Spock brokes Scissors.")
computer_points = computer_points + 1 |
11505616 | from project.infrastructure.drivers.rabbitmq.connector import RabbitMq
class RabbitMqAdapter(RabbitMq):
    """
    RabbitMQ adapter class
    """

    def __init__(self) -> None:
        super().__init__()

    async def get_buildinfo(self) -> bool:
        """
        Check whether a connection to the broker can be established
        successfully.

        Opens a connection, records its open/closed state, then closes it.

        Returns:
            bool: True if the freshly opened connection was open (broker
            reachable), False otherwise.
        """
        connection = await self.connection()
        is_closed = connection.is_closed
        await connection.close()
        return not is_closed
|
11505674 | from datetime import datetime, timedelta
import pytz
# https://github.com/halcy/Mastodon.py/
from mastodon import Mastodon
import config
def process():
    """Log in to the configured Mastodon instance and purge old toots.

    All credentials and connection settings come from the local ``config``
    module; rate-limit hits raise (``ratelimit_method="throw"``).
    """
    api = Mastodon(
        client_id=config.CLIENT_KEY,
        client_secret=config.CLIENT_SECRET,
        access_token=config.ACCESS_TOKEN,
        api_base_url=config.API_URL,
        ratelimit_method="throw"
    )
    # BUG FIX: the password was a redacted placeholder (`<PASSWORD>`), which is
    # not even valid Python syntax; read it from config like the other
    # credentials (name assumed to mirror config.USERNAME — verify in config).
    api.log_in(
        username=config.USERNAME,
        password=config.PASSWORD,
        scopes=["read", "write"]
    )
    toots = fetch_toots(api)
    purge_old_toots(api, toots)
def fetch_toots(api):
    """Return the most recent home-timeline toots (one page)."""
    # limit is 40 per API
    return api.timeline_home(limit=40)
def purge_old_toots(api, toots):
    """Delete every toot older than ``config.DAYS_TO_KEEP`` days.

    Prints one dot per deleted toot and a final FINISHED marker.
    """
    # BUG FIX: the cutoff was previously built from naive *local* time
    # (datetime.today()) and then merely labeled UTC via replace(tzinfo=...),
    # shifting the cutoff by the local UTC offset. Use an aware UTC "now" so
    # the comparison with the API's aware created_at timestamps is correct.
    max_keep_date = datetime.now(pytz.utc) - timedelta(days=config.DAYS_TO_KEEP)
    for toot in toots:
        if toot.created_at < max_keep_date:
            api.status_delete(toot.id)
            print(".", end="")
    print("FINISHED")
if __name__ == "__main__":
    # Run the purge when executed as a script.
    process()
|
11505684 | import unittest
import numpy as np
from laserchicken import load, keys
from laserchicken.feature_extractor.median_feature_extractor import MedianFeatureExtractor
from laserchicken.test_tools import create_point_cloud
class TestMedianZFeatureExtractor(unittest.TestCase):
    """Tests for MedianFeatureExtractor on the raw z attribute (AHN2 fixture)."""

    def test_height_stats(self):
        # Median z over a fixed neighborhood of point indices into the
        # testdata/AHN2.las point cloud; expected value was recorded from a
        # known-good run.
        pc_in = load("testdata/AHN2.las")
        neighborhood = [89664, 23893, 30638, 128795, 62052, 174453, 29129, 17127, 128215, 29667, 116156, 119157, 98591,
                        7018,
                        61494, 65194, 117931, 62971, 10474, 90322]
        median_z = self.extractor.extract(pc_in, [neighborhood], None, None, None)[0]
        np.testing.assert_allclose(median_z, 0.69999997377395629)

    def test_height_stats_without_neighbors(self):
        # An empty neighborhood must produce NaN rather than raising.
        pc_in = load("testdata/AHN2.las")
        neighborhood = []
        median_z = self.extractor.extract(pc_in, [neighborhood], pc_in, None, None)[0]
        assert np.isnan(median_z)

    def test_default_provides_correct(self):
        # The extractor must advertise the 'median_z' feature name.
        feature_names = self.extractor.provides()
        self.assertIn('median_z', feature_names)

    def setUp(self):
        # Fresh extractor (default raw-z data key) for each test.
        self.extractor = MedianFeatureExtractor()
class TestMedianNormZFeatureExtractor(unittest.TestCase):
    """MedianFeatureExtractor configured for the normalized-height attribute."""

    def test_use_norm_z(self):
        """The median must be taken over normalized z, not the raw z column."""
        xy = np.array([0, 0, 0])
        raw_z = np.array([2, 2, 2])
        norm_z = np.array([3, 4, 6])
        cloud = create_point_cloud(xy, xy, raw_z, normalized_z=norm_z)
        # One neighborhood covering all three points; median(3, 4, 6) == 4.
        result = self.extractor.extract(cloud, [[0, 1, 2]], None, None, None)
        np.testing.assert_almost_equal(result, 4)

    def test_default_provides_correct(self):
        """provides() must advertise the normalized-height feature name."""
        self.assertIn('median_normalized_height', self.extractor.provides())

    def setUp(self):
        # Point the extractor at the normalized-height data key.
        self.extractor = MedianFeatureExtractor(data_key=keys.normalized_height)
|
11505687 | from allmychanges.management.commands.send_digests2 import Command as BaseCommand
class Command(BaseCommand):
    # Weekly variant of the digest-sending management command; only the
    # reporting period differs from the base command.
    period = 'week'
|
11505689 | from server import app
from server.www.base import mobile_request
from server.logic import sync as logic_sync
# Reuse the Flask app's logger for this module.
logger = app.logger
@app.route("/sync", methods=['POST'])
@mobile_request
def sync_data(user_id, sync_token=None, sync_items=None, need_pull=True, **kwargs):
    """Mobile sync endpoint; delegates to the sync logic layer.

    BUG FIX: ``sync_items`` previously defaulted to a shared mutable list
    (``[]``), which is a classic Python pitfall — any mutation downstream
    would leak between requests. A ``None`` sentinel is substituted with a
    fresh list per call, so callers (and the default behavior) are unchanged.
    """
    if sync_items is None:
        sync_items = []
    return logic_sync.sync_data(user_id, sync_token, sync_items, need_pull)
@app.route("/logs/sync", methods=['POST'])
@mobile_request
def sync_event_log(user_id, log_items, **kwargs):
    # Forward client event-log entries to the sync logic layer.
    return logic_sync.sync_event_log(user_id, log_items)
|
11505723 | assert None is None
# Exercises identity and equality semantics of the None singleton.
y = None
x = None
assert x is y  # every None reference is the same singleton object

def none():
    # Falls off the end of the body, so implicitly returns None.
    pass

def none2():
    # Explicitly returns None.
    return None

assert none() is none()
assert none() is x
assert none() is none2()
assert str(None) == 'None'
assert repr(None) == 'None'
assert type(None)() is None  # calling NoneType yields the same singleton
# Rich comparisons against other types are NotImplemented, not False.
assert None.__eq__(3) is NotImplemented
assert None.__ne__(3) is NotImplemented
assert None.__eq__(None) is True
assert None.__ne__(None) is False
|
11505734 | from contentbase.auditor import (
AuditFailure,
audit_checker,
)
from .conditions import rfa
# Assays performed against a specific target; experiments of these types
# require `target` (and, for most, antibody) metadata.
targetBasedAssayList = [
    'ChIP-seq',
    'RNA Bind-n-Seq',
    'ChIA-PET',
    'RIP Array',
    'RIP-seq',
    'MeDIP-seq',
    'iCLIP',
    'shRNA knockdown followed by RNA-seq',
]
# Assays that must list at least one experiment in `possible_controls`.
controlRequiredAssayList = [
    'ChIP-seq',
    'RNA Bind-n-Seq',
    'RIP-seq',
    'RAMPAGE',
    'CAGE',
    'shRNA knockdown followed by RNA-seq'
]
# Sequencing-based assays.
seq_assays = [
    'RNA-seq',
    'ChIP-seq',
    'RNA Bind-n-Seq',
    'MeDIP-seq',
    'RNA-PET',
    'DNA-PET',
    'ChIA-PET',
    'CAGE',
    'RAMPAGE',
    'RIP-seq',
]
# Non-sequencing assays (arrays, mass spec, etc.).
non_seq_assays = [
    'RNA profiling by array assay',
    'DNA methylation profiling by array assay',
    'Genotype',
    'comparative genomic hybridization by array',
    'RIP-chip',
    'protein sequencing by tandem mass spectrometry assay',
    'microRNA profiling by array assay',
    'Switchgear',
    '5C',
]
@audit_checker('experiment', frame='object')
def audit_experiment_release_date(value, system):
    '''
    A released experiment must carry a release date.
    This should eventually be enforced by the schema.
    '''
    if value['status'] != 'released':
        return
    if 'date_released' in value:
        return
    detail = 'Experiment {} is released and requires a value in date_released'.format(value['@id'])
    raise AuditFailure('missing date_released', detail, level='DCC_ACTION')
@audit_checker('experiment', frame=['replicates'])
def audit_experiment_replicated(value, system):
    '''
    Experiments at or near release should have more than one biological
    replicate; wranglers should check with the lab before release otherwise.
    '''
    if value['status'] not in ('released', 'release ready', 'ready for review'):
        return
    bio_rep_numbers = {rep['biological_replicate_number'] for rep in value['replicates']}
    if len(bio_rep_numbers) > 1:
        return
    if value['status'] == 'released':
        detail = 'Experiment {} has only one biological replicate and is released. Check for proper annotation of this state in the metadata'.format(value['@id'])
        raise AuditFailure('unreplicated experiment', detail, level='DCC_ACTION')
    if value['status'] in ('ready for review', 'release ready'):
        detail = 'Experiment {} has only one biological replicate, more than one is typically expected before release'.format(value['@id'])
        raise AuditFailure('unreplicated experiment', detail, level='WARNING')
@audit_checker('experiment', frame='object')
def audit_experiment_description(value, system):
    '''
    Descriptions should read like phrases containing the experimental
    variables; as a first approximation, flag suspicious punctuation.
    '''
    if value['status'] == 'deleted':
        return
    if 'description' not in value:
        return
    forbidden = ('=', ':', '!', ';')
    for character in value['description']:
        if character in forbidden:
            detail = 'Experiment {} has odd character(s) in the description'.format(value['@id'])
            raise AuditFailure('malformed description', detail, level='WARNING')
@audit_checker('experiment', frame=['replicates', 'replicates.library'])
def audit_experiment_documents(value, system):
    '''
    Experiments should have documents attached — protocol documents or some
    sort of document, either directly or on a replicate's library.
    '''
    if value['status'] in ['deleted', 'replaced', 'proposed', 'preliminary']:
        return
    # If the experiment has documents, we are good.
    # BUG FIX: value.get('documents') returns None when the field is absent,
    # which made len() raise TypeError; default to an empty list.
    if len(value.get('documents', [])) > 0:
        return
    # If there are no replicates to check yet, why bother
    if 'replicates' not in value:
        return
    lib_docs = 0
    for rep in value['replicates']:
        if 'library' in rep:
            lib_docs += len(rep['library']['documents'])
    # If there are no library documents anywhere, then we say something
    if lib_docs == 0:
        detail = 'Experiment {} has no attached documents'.format(value['@id'])
        raise AuditFailure('missing documents', detail, level='WARNING')
@audit_checker('experiment', frame='object')
def audit_experiment_assay(value, system):
    '''
    Experiments should have assays with valid ontology term ids and names
    that are a valid synonym.
    '''
    if value['status'] == 'deleted':
        return
    if 'assay_term_id' not in value:
        detail = 'Experiment {} is missing assay_term_id'.format(value['@id'])
        yield AuditFailure('missing assay information', detail, level='ERROR')
        return
    # This should be a dependancy
    if 'assay_term_name' not in value:
        detail = 'Experiment {} is missing assay_term_name'.format(value['@id'])
        yield AuditFailure('missing assay information', detail, level='ERROR')
        return
    # This should be a dependancy
    ontology = system['registry']['ontology']
    term_id = value.get('assay_term_id')
    term_name = value.get('assay_term_name')
    if term_id.startswith('NTR:'):
        detail = 'Assay_term_id is a New Term Request ({} - {})'.format(term_id, term_name)
        yield AuditFailure('NTR assay', detail, level='DCC_ACTION')
        return
    if term_id not in ontology:
        detail = 'Assay_term_id {} is not found in cached version of ontology'.format(term_id)
        # BUG FIX: previously yielded the bare term_id as the failure detail,
        # discarding the human-readable message built on the line above.
        yield AuditFailure('assay_term_id not in ontology', detail, level='DCC_ACTION')
        return
    ontology_term_name = ontology[term_id]['name']
    # Many terms differ only by a trailing ' assay'; accept that variant too.
    modifed_term_name = term_name + ' assay'
    if (ontology_term_name != term_name and term_name not in ontology[term_id]['synonyms']) and \
        (ontology_term_name != modifed_term_name and
            modifed_term_name not in ontology[term_id]['synonyms']):
        detail = 'Experiment has a mismatch between assay_term_name "{}" and assay_term_id "{}"'.format(
            term_name,
            term_id,
        )
        yield AuditFailure('mismatched assay_term_name', detail, level='DCC_ACTION')
        return
@audit_checker('experiment', frame=['replicates.antibody', 'target', 'replicates.antibody.targets'])
def audit_experiment_target(value, system):
    '''
    Certain assay types (ChIP-seq, ...) require valid targets and the replicate's
    antibodies should match.
    '''
    if value['status'] in ['deleted', 'proposed']:
        return
    if value.get('assay_term_name') not in targetBasedAssayList:
        return
    if 'target' not in value:
        detail = '{} experiments require a target'.format(value['assay_term_name'])
        yield AuditFailure('missing target', detail, level='ERROR')
        return
    target = value['target']
    # Control targets (e.g. input) are exempt from antibody matching.
    if 'control' in target['investigated_as']:
        return
    # Some assays don't need antibodies
    if value['assay_term_name'] in ['RNA Bind-n-Seq', 'shRNA knockdown followed by RNA-seq']:
        return
    # Check that target of experiment matches target of antibody
    for rep in value['replicates']:
        if 'antibody' not in rep:
            detail = 'Replicate {} in a {} assay requires an antibody'.format(
                rep['@id'],
                value['assay_term_name']
            )
            yield AuditFailure('missing antibody', detail, level='ERROR')
        else:
            antibody = rep['antibody']
            if 'recombinant protein' in target['investigated_as']:
                # Tagged (recombinant) targets: the antibody must recognize a
                # tag, and the experiment target's tag prefix (text before the
                # first '-') must appear among the antibody's target labels.
                prefix = target['label'].split('-')[0]
                unique_antibody_target = set()
                unique_investigated_as = set()
                for antibody_target in antibody['targets']:
                    label = antibody_target['label']
                    unique_antibody_target.add(label)
                    for investigated_as in antibody_target['investigated_as']:
                        unique_investigated_as.add(investigated_as)
                if 'tag' not in unique_investigated_as:
                    # NOTE(review): message wording looks garbled ("is not to
                    # tagged protein") but is runtime output, so kept as-is.
                    detail = '{} is not to tagged protein'.format(antibody['@id'])
                    yield AuditFailure('not tagged antibody', detail, level='ERROR')
                else:
                    if prefix not in unique_antibody_target:
                        detail = '{} is not found in target for {}'.format(
                            prefix,
                            antibody['@id']
                        )
                        yield AuditFailure('mismatched tag target', detail, level='ERROR')
            else:
                # Plain targets: at least one antibody target name must match
                # the experiment target name exactly.
                target_matches = False
                for antibody_target in antibody['targets']:
                    if target['name'] == antibody_target.get('name'):
                        target_matches = True
                if not target_matches:
                    detail = '{} is not found in target list for antibody {}'.format(
                        target['name'],
                        antibody['@id']
                    )
                    yield AuditFailure('mismatched target', detail, level='ERROR')
@audit_checker('experiment', frame=['target', 'possible_controls'])
def audit_experiment_control(value, system):
    '''
    Certain assay types (ChIP-seq, ...) require possible controls with a matching biosample.
    Of course, controls do not require controls.
    '''
    if value['status'] in ['deleted', 'proposed']:
        return
    # Currently controls are only be required for ChIP-seq
    if value.get('assay_term_name') not in controlRequiredAssayList:
        return
    # We do not want controls
    if 'target' in value and 'control' in value['target']['investigated_as']:
        return
    if value['possible_controls'] == []:
        detail = '{} experiments require a value in possible_control'.format(
            value['assay_term_name']
        )
        raise AuditFailure('missing possible_controls', detail, level='NOT_COMPLIANT')
    # Each control's biosample term must match the experiment's.
    for control in value['possible_controls']:
        if control.get('biosample_term_id') != value.get('biosample_term_id'):
            # BUG FIX: the experiment's term name was read with a direct index
            # (value['biosample_term_name']) while every other field here uses
            # .get(); a missing field would raise KeyError instead of
            # reporting the mismatch. Use .get() consistently.
            detail = 'Control {} is for {} but experiment is done on {}'.format(
                control['@id'],
                control.get('biosample_term_name'),
                value.get('biosample_term_name'))
            raise AuditFailure('mismatched control', detail, level='ERROR')
@audit_checker('experiment', frame=['target', 'possible_controls', 'replicates', 'replicates.antibody', 'possible_controls.replicates', 'possible_controls.replicates.antibody', 'possible_controls.target'], condition=rfa('ENCODE3'))
def audit_experiment_ChIP_control(value, system):
    # ENCODE3 ChIP-seq experiments must include at least one input
    # (non-mock-IP) control among their possible_controls.
    if value['status'] in ['deleted', 'proposed', 'preliminary', 'replaced', 'revoked']:
        return
    # Currently controls are only be required for ChIP-seq
    if value.get('assay_term_name') != 'ChIP-seq':
        return
    # We do not want controls
    if 'target' in value and 'control' in value['target']['investigated_as']:
        return
    if not value['possible_controls']:
        return
    num_IgG_controls = 0
    for control in value['possible_controls']:
        if ('target' not in control) or ('control' not in control['target']['investigated_as']):
            detail = 'Experiment {} is ChIP-seq but its control {} is not linked to a target with investigated.as = control'.format(
                value['@id'],
                control['@id'])
            raise AuditFailure('invalid possible_control', detail, level='ERROR')
        if not control['replicates']:
            continue
        # A control whose first replicate carries an antibody is counted as a
        # mock-IP (IgG) control.
        if 'antibody' in control['replicates'][0]:
            num_IgG_controls += 1
    # If all of the possible_control experiments are mock IP control experiments
    if num_IgG_controls == len(value['possible_controls']):
        if value.get('assay_term_name') == 'ChIP-seq':
            # The binding group agreed that ChIP-seqs all should have an input control.
            # NOTE(review): `control` here is whatever the loop above left
            # bound (the last control), so the message names an arbitrary
            # control — confirm this is intended.
            detail = 'Experiment {} is ChIP-seq and requires at least one input control, as agreed upon by the binding group. {} is not an input control'.format(
                value['@id'],
                control['@id'])
            raise AuditFailure('missing input control', detail, level='NOT_COMPLIANT')
@audit_checker('experiment', frame=['replicates', 'replicates.library'])
def audit_experiment_spikeins(value, system):
    '''
    Long (>200) RNA-seq experiments should declare the spike-ins used by each
    replicate's library.
    '''
    if value['status'] in ('deleted', 'replaced'):
        return
    if value.get('assay_term_name') != 'RNA-seq':
        return
    for replicate in value['replicates']:
        library = replicate.get('library')
        if library is None:
            continue
        if library.get('size_range') != '>200':
            # Only long-RNA libraries need spike-ins declared.
            continue
        spikes = library.get('spikeins_used')
        if spikes is None or spikes == []:
            detail = 'Library {} is in an RNA-seq experiment and has size_range >200. It requires a value for spikeins_used'.format(library['@id'])
            yield AuditFailure('missing spikeins_used', detail, level='NOT_COMPLIANT')
# Informattional if ENCODE2 and release error if ENCODE3
@audit_checker('experiment', frame='object')
def audit_experiment_biosample_term(value, system):
    '''
    The biosample term, id and type information should be present and
    concordant with library biosamples.
    Exception: RNA Bind-n-Seq
    '''
    if value['status'] in ['deleted', 'replaced']:
        return
    if value.get('assay_term_name') == 'RNA Bind-n-Seq':
        return
    ontology = system['registry']['ontology']
    term_id = value.get('biosample_term_id')
    term_type = value.get('biosample_type')
    term_name = value.get('biosample_term_name')
    if 'biosample_type' not in value:
        detail = '{} is missing biosample_type'.format(value['@id'])
        yield AuditFailure('missing biosample_type', detail, level='ERROR')
    if 'biosample_term_name' not in value:
        detail = '{} is missing biosample_term_name'.format(value['@id'])
        yield AuditFailure('missing biosample_term_name', detail, level='ERROR')
    # The type and term name should be put into dependancies
    if term_id is None:
        detail = '{} is missing biosample_term_id'.format(value['@id'])
        yield AuditFailure('missing biosample_term_id', detail, level='ERROR')
    elif term_id.startswith('NTR:'):
        detail = '{} has an NTR biosample {} - {}'.format(value['@id'], term_id, term_name)
        yield AuditFailure('NTR biosample', detail, level='DCC_ACTION')
    elif term_id not in ontology:
        detail = '{} has term_id {} which is not in ontology'.format(value['@id'], term_id)
        # BUG FIX: previously yielded the bare term_id as the failure detail,
        # discarding the message built on the line above.
        yield AuditFailure('term_id not in ontology', detail, level='DCC_ACTION')
    else:
        ontology_name = ontology[term_id]['name']
        if ontology_name != term_name and term_name not in ontology[term_id]['synonyms']:
            detail = '{} has a biosample mismatch {} - {} but ontology says {}'.format(
                value['@id'],
                term_id,
                term_name,
                ontology_name
            )
            yield AuditFailure('mismatched biosample_term_name', detail, level='ERROR')
    # Each replicate's library biosample must agree with the experiment-level
    # type and term name.
    for rep in value['replicates']:
        if 'library' not in rep:
            continue
        lib = rep['library']
        if 'biosample' not in lib:
            detail = '{} is missing biosample, expecting one of type {}'.format(
                lib['@id'],
                term_name
            )
            yield AuditFailure('missing biosample', detail, level='NOT_COMPLIANT')
            continue
        biosample = lib['biosample']
        bs_type = biosample.get('biosample_type')
        bs_name = biosample.get('biosample_term_name')
        if bs_type != term_type:
            detail = '{} has mismatched biosample_type, {} - {}'.format(
                lib['@id'],
                term_type,
                bs_type
            )
            yield AuditFailure('mismatched biosample_type', detail, level='ERROR')
        if bs_name != term_name:
            detail = '{} has mismatched biosample_term_name, {} - {}'.format(
                lib['@id'],
                term_name,
                bs_name
            )
            yield AuditFailure('mismatched biosample_term_name', detail, level='ERROR')
@audit_checker(
    'experiment',
    frame=[
        'target',
        'replicates',
        'replicates.antibody',
        'replicates.antibody.lot_reviews.organisms',
        'replicates.library',
        'replicates.library.biosample',
        'replicates.library.biosample.organism',
    ],
    condition=rfa('ENCODE3', 'modERN'))
def audit_experiment_antibody_eligible(value, system):
    '''Check that biosample in the experiment is eligible for new data for the given antibody.'''
    if value['status'] in ['deleted', 'proposed']:
        return
    if value.get('assay_term_name') not in targetBasedAssayList:
        return
    if 'target' not in value:
        return
    target = value['target']
    # Control targets need no antibody eligibility.
    if 'control' in target['investigated_as']:
        return
    if value['assay_term_name'] in ['RNA Bind-n-Seq', 'shRNA knockdown followed by RNA-seq']:
        return
    for rep in value['replicates']:
        if 'antibody' not in rep:
            continue
        if 'library' not in rep:
            continue
        antibody = rep['antibody']
        lib = rep['library']
        if 'biosample' not in lib:
            continue
        biosample = lib['biosample']
        organism = biosample['organism']['name']
        if 'histone modification' in target['investigated_as']:
            # Histone antibodies: eligibility is organism-wide, recorded on a
            # lot review with the wildcard biosample term NTR:00000000.
            for lot_review in antibody['lot_reviews']:
                if (lot_review['status'] == 'eligible for new data') and (lot_review['biosample_term_id'] == 'NTR:00000000'):
                    organism_match = False
                    for lot_organism in lot_review['organisms']:
                        if organism == lot_organism['name']:
                            organism_match = True
                    if not organism_match:
                        detail = '{} is not eligible for {}'.format(antibody["@id"], organism)
                        yield AuditFailure('not eligible antibody', detail, level='NOT_COMPLIANT')
                else:
                    # Lot review is either not eligible or not organism-wide.
                    detail = '{} is not eligible for {}'.format(antibody["@id"], organism)
                    yield AuditFailure('not eligible antibody', detail, level='NOT_COMPLIANT')
        else:
            # Non-histone targets: the (biosample term, organism) pair must be
            # among the antibody's eligible lot-review combinations.
            biosample_term_id = value['biosample_term_id']
            biosample_term_name = value['biosample_term_name']
            experiment_biosample = (biosample_term_id, organism)
            eligible_biosamples = set()
            for lot_review in antibody['lot_reviews']:
                if lot_review['status'] == 'eligible for new data':
                    for lot_organism in lot_review['organisms']:
                        eligible_biosample = (lot_review['biosample_term_id'], lot_organism['name'])
                        eligible_biosamples.add(eligible_biosample)
            if experiment_biosample not in eligible_biosamples:
                detail = '{} is not eligible for {} in {}'.format(antibody["@id"], biosample_term_name, organism)
                yield AuditFailure('not eligible antibody', detail, level='NOT_COMPLIANT')
|
11505782 | from datetime import datetime
from django.conf import settings
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, DetailView, FormView, CreateView, View
from django.views.generic.edit import FormMixin
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.urls import reverse
from .forms import LoginForm, RegisterForm, ReactivateEmailForm
from .models import EmailActivation, News
from stock_bridge.mixins import (
AnonymousRequiredMixin,
RequestFormAttachMixin,
NextUrlMixin,
LoginRequiredMixin,
CountNewsMixin
)
from market.models import InvestmentRecord
# Project user model (may be a custom swapped model).
User = get_user_model()
# Game window boundaries, made timezone-aware from settings.
START_TIME = timezone.make_aware(getattr(settings, 'START_TIME'))
STOP_TIME = timezone.make_aware(getattr(settings, 'STOP_TIME'))
@login_required
def cancel_loan(request):
    """Superuser-only: deduct the entire outstanding loan from every user."""
    if not request.user.is_superuser:
        return redirect('home')
    for account in User.objects.all():
        account.cancel_loan()
    return HttpResponse('Loan Deducted', status=200)
@login_required
def deduct_interest(request):
    """Superuser-only: deduct accrued loan interest from every user."""
    if not request.user.is_superuser:
        return redirect('home')
    for account in User.objects.all():
        account.deduct_interest()
    return HttpResponse('Interest Deducted', status=200)
class NewsView(LoginRequiredMixin, CountNewsMixin, ListView):
    # Lists only active news items for logged-in users.
    template_name = 'accounts/news.html'
    queryset = News.objects.filter(is_active=True)
class LoanView(LoginRequiredMixin, CountNewsMixin, View):
    """Issue a loan or pay an installment, but only while the market is open."""

    def get(self, request, *args, **kwargs):
        context = {'user': request.user}
        return render(request, 'accounts/loan.html', context)

    def post(self, request, *args, **kwargs):
        now = timezone.make_aware(datetime.now())
        if not (START_TIME <= now <= STOP_TIME):
            # Transactions are only accepted during game hours.
            messages.info(request, 'The market is closed!')
            return redirect('account:loan')
        mode = request.POST.get('mode')
        user = request.user
        if mode == 'issue':
            if user.issue_loan():
                messages.success(request, 'Loan has been issued.')
            else:
                messages.error(request, 'You can issue loan only 1 time!')
        elif mode == 'pay':
            if user.pay_installment():
                messages.success(request, 'Installment paid!')
            else:
                messages.error(
                    request,
                    'Minimum installment amount has to be INR 5,000 and you should have sufficient balance.'
                )
        return redirect('account:loan')
class ProfileView(LoginRequiredMixin, CountNewsMixin, DetailView):
    """Show a user's own profile with net worth and investment records."""
    template_name = 'accounts/profile.html'

    def dispatch(self, request, *args, **kwargs):
        # Only the user himself can view his own profile.
        if request.user.username != kwargs.get('username'):
            return redirect('/')
        return super(ProfileView, self).dispatch(request, *args, **kwargs)

    def get_object(self, *args, **kwargs):
        """Resolve the profile owner from the URL; 404 if unknown."""
        username = self.kwargs.get('username')
        instance = User.objects.filter(username=username).first()
        if instance is None:
            raise Http404('User not found')
        return instance

    def get_context_data(self, *args, **kwargs):
        """Attach the requesting user's net worth and investments to the context."""
        context = super(ProfileView, self).get_context_data(*args, **kwargs)
        qs = InvestmentRecord.objects.filter(user=self.request.user)
        # PERF: exists() issues a cheap EXISTS query instead of COUNT(*);
        # same truth value as count() >= 1.
        if qs.exists():
            context['net_worth'] = InvestmentRecord.objects.calculate_net_worth(self.request.user)
            context['investments'] = qs
        return context
class LeaderBoardView(CountNewsMixin, View):
    """Rank all users by net worth (desc), tie-broken by coefficient of variation (asc)."""
    template_name = 'accounts/leaderboard.html'

    def get(self, request, *args, **kwargs):
        data = []
        for user in User.objects.all():
            net_worth = InvestmentRecord.objects.calculate_net_worth(user)
            data.append((user.username, user.full_name, net_worth, user.coeff_of_variation))
        data = sorted(data, key=lambda d: (-d[2], d[3]))
        # CONSISTENCY FIX: render via the declared template_name attribute
        # instead of a duplicated hard-coded literal.
        return render(request, self.template_name, {'data': data})
class AccountEmailActivateView(FormMixin, View):
    """Confirm an email-activation key, or re-send an activation link."""
    success_url = '/login/'
    form_class = ReactivateEmailForm
    key = None  # activation key from the URL, stashed for form_invalid()

    def get(self, request, key=None, *args, **kwargs):
        # Try to confirm the key; fall through to the error page (with the
        # re-send form) when the key is missing, unknown, or expired.
        self.key = key
        if key is not None:
            qs = EmailActivation.objects.filter(key__iexact=key)
            confirm_qs = qs.confirmable()
            if confirm_qs.count() == 1:  # Not confirmed but confirmable
                obj = confirm_qs.first()
                obj.activate()
                messages.success(request, 'Your email has been confirmed! Please login to continue.')
                return redirect('login')
            else:
                activated_qs = qs.filter(activated=True)
                if activated_qs.exists():
                    # Already confirmed earlier: offer a password reset.
                    # NOTE(review): "reset you password" typo is runtime
                    # output, left unchanged here.
                    reset_link = reverse('password_reset')
                    msg = """Your email has already been confirmed.
                    Do you want to <a href="{link}">reset you password</a>?""".format(link=reset_link)
                    messages.success(request, mark_safe(msg))
                    return redirect('login')
        context = {'form': self.get_form(), 'key': key}  # get_form() works because of the mixin
        return render(request, 'registration/activation_error.html', context)

    def post(self, request, *args, **kwargs):
        # create a form to receive an email
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    def form_valid(self, form):
        # Create and send a fresh activation for the submitted address.
        # NOTE(review): assumes form validation guarantees email_exists()
        # returns a non-empty queryset; otherwise obj.user raises — confirm.
        msg = 'Activation link sent. Please check your email.'
        messages.success(self.request, msg)
        email = form.cleaned_data.get('email')
        obj = EmailActivation.objects.email_exists(email).first()
        user = obj.user
        new_activation = EmailActivation.objects.create(user=user, email=email)
        new_activation.send_activation()
        return super(AccountEmailActivateView, self).form_valid(form)

    def form_invalid(self, form):
        """
        This method had to be explicitly written because this view uses the basic django "View" class.
        If it had used some other view like ListView etc. Django would have handled it automatically.
        """
        context = {'form': form, 'key': self.key}
        return render(self.request, 'registration/activation_error.html', context)
class LoginView(AnonymousRequiredMixin, RequestFormAttachMixin, NextUrlMixin, FormView):
    """Login form view; on success, follows the sanitized ``next`` URL."""
    form_class = LoginForm
    template_name = 'accounts/login.html'
    success_url = '/'
    default_url = '/'
    default_next = '/'

    def form_valid(self, form):
        # The form performs authentication itself and reports the outcome
        # through cleaned_data.
        response = form.cleaned_data
        if response.get('success'):
            return redirect(self.get_next_url())
        messages.warning(self.request, mark_safe(response.get('message')))
        return redirect('login')
class RegisterView(AnonymousRequiredMixin, CreateView):
    # Signup view for anonymous visitors.
    form_class = RegisterForm
    template_name = 'accounts/register.html'
    success_url = '/login/'

    def form_valid(self, form):
        # Let CreateView persist the new user first, then redirect with a
        # notice. NOTE(review): the verification email itself is presumably
        # sent by the form/model layer — confirm in RegisterForm.
        super(RegisterView, self).form_valid(form)
        messages.success(self.request, 'Verification link sent! Please check your email.')
        return redirect(self.success_url)
|
11505785 | import torch
import numpy as np
def blaugment9to15(x, bl, blr, num_bone=15):
    '''
    this function convert 9 blr to 15 blr, and apply to bl
    x: b x joints x 3 input pose (joint 0 treated as root)
    bl: b x joints-1 x 1
    blr: b x 9 x 1
    out: pose3d b x joints x 3
    num_bone is unused; kept for interface compatibility.
    '''
    # Fixed 9->15 expansion matrix: per the rows below, bones 4-6 reuse the
    # ratios of bones 1-3 and bones 13-15 reuse those of bones 10-12
    # (mirrored limbs share one ratio), so 9 ratios cover 15 bones.
    blr9to15 = torch.Tensor([
        [1, 0, 0, 0, 0, 0, 0, 0, 0],  # 1
        [0, 1, 0, 0, 0, 0, 0, 0, 0],  # 2
        [0, 0, 1, 0, 0, 0, 0, 0, 0],  # 3
        [1, 0, 0, 0, 0, 0, 0, 0, 0],  # 4
        [0, 1, 0, 0, 0, 0, 0, 0, 0],  # 5
        [0, 0, 1, 0, 0, 0, 0, 0, 0],  # 6
        [0, 0, 0, 1, 0, 0, 0, 0, 0],  # 7
        [0, 0, 0, 0, 1, 0, 0, 0, 0],  # 8
        [0, 0, 0, 0, 0, 1, 0, 0, 0],  # 9
        [0, 0, 0, 0, 0, 0, 1, 0, 0],  # 10
        [0, 0, 0, 0, 0, 0, 0, 1, 0],  # 11
        [0, 0, 0, 0, 0, 0, 0, 0, 1],  # 12
        [0, 0, 0, 0, 0, 0, 1, 0, 0],  # 13
        [0, 0, 0, 0, 0, 0, 0, 1, 0],  # 14
        [0, 0, 0, 0, 0, 0, 0, 0, 1],  # 15
    ]).transpose(1, 0)  # 9 x 15 matrix
    blr9to15 = blr9to15.to(blr.device)
    blr9to15 = blr9to15.repeat([blr.size(0), 1, 1]).view(blr.size(0), 9, 15)
    blr_T = blr.permute(0, 2, 1).contiguous()
    blr_15_T = torch.matmul(blr_T, blr9to15)
    blr_15 = blr_15_T.permute(0, 2, 1).contiguous()  # back to N x 15 x 1
    # convert 3d pose to root relative
    root = x[:, :1, :] * 1.0
    x = x - x[:, :1, :]
    # extract length, unit bone vec
    bones_unit = get_bone_unit_vecbypose3d(x)
    # prepare a bone length list for augmentation: new_len = bl * (1 + ratio)
    bones_length = torch.mul(bl, blr_15) + bl  # res
    modifyed_bone = bones_unit * bones_length
    # convert bone vec back to pose3d
    out = get_pose3dbyBoneVec(modifyed_bone)
    return out + root  # return the pose with position information.
def get_pose3dbyBoneVec(bones, num_joints=16):
    '''
    convert bone vect to pose3d, inverse function of get_bone_vector
    :param bones: N x (num_joints - 1) x 3 bone vectors
    :param num_joints: joints in the skeleton (row count of the matrix below)
    :return: N x num_joints x 3 root-relative 3D pose
    '''
    # Each row accumulates (negated) bone vectors along the kinematic chain
    # from the root (joint 0) to that joint; row comments give parent/child.
    Ctinverse = torch.Tensor([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 basement
        [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 1
        [-1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 1 2
        [-1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 2 3
        [0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 4
        [0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 4 5
        [0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 5 6
        [0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 7
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, 0, 0, 0],  # 7 8
        [0, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0],  # 8 9
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, 0, 0, 0, 0, 0],  # 8 10
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, -1, 0, 0, 0, 0],  # 10 11
        [0, 0, 0, 0, 0, 0, -1, -1, 0, -1, -1, -1, 0, 0, 0],  # 11 12
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, 0, 0],  # 8 13
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0],  # 13 14
        [0, 0, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, -1],  # 14 15
    ]).transpose(1, 0)
    Ctinverse = Ctinverse.to(bones.device)
    C = Ctinverse.repeat([bones.size(0), 1, 1]).view(-1, num_joints - 1, num_joints)
    bonesT = bones.permute(0, 2, 1).contiguous()
    pose3d = torch.matmul(bonesT, C)
    pose3d = pose3d.permute(0, 2, 1).contiguous()  # back to N x 16 x 3
    return pose3d
def get_BoneVecbypose3d(x, num_joints=16):
    '''
    Convert 3D joint positions to bone vectors.

    Each bone vector is the difference between the two joint positions it
    connects (parent minus child / child minus parent as encoded below).

    :param x: 3D pose, N x num_joints x 3
    :return: bone vectors, N x (num_joints - 1) x 3
    '''
    # Difference matrix: each column picks out one bone as a signed pair of
    # joints (+1 / -1).  Transposed to map 16 joints to 15 bones.
    diff_matrix = torch.Tensor([
        [1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 1
        [0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 1 2
        [0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 2 3
        [1, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 4
        [0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 4 5
        [0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # 5 6
        [1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],  # 0 7
        [0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0],  # 7 8
        [0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0],  # 8 9
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0, 0],  # 8 10
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0],  # 10 11
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0],  # 11 12
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0],  # 8 13
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0],  # 13 14
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1],  # 14 15
    ]).transpose(1, 0)
    diff_matrix = diff_matrix.to(x.device)
    batch = x.size(0)
    diffs = diff_matrix.repeat([batch, 1, 1]).view(-1, num_joints, num_joints - 1)
    # 16 x 3 -> 3 x 16 layout so the batched matmul forms all differences
    pose_t = x.permute(0, 2, 1).contiguous()
    bones_t = torch.matmul(pose_t, diffs)
    return bones_t.permute(0, 2, 1)  # back to N x 15 x 3
def get_bone_lengthbypose3d(x, bone_dim=2):
    '''
    Compute per-bone lengths from a 3D pose.

    :param x: 3D pose, N x num_joints x 3
    :param bone_dim: axis of the bone-vector tensor holding the x/y/z
        coordinates (default 2 for the N x num_bones x 3 layout)
    :return: bone lengths, N x num_bones x 1
    '''
    bonevec = get_BoneVecbypose3d(x)
    # BUG FIX: bone_dim was declared but ignored (the axis was hard-coded
    # to 2); the default keeps the original behavior.
    bones_length = torch.norm(bonevec, dim=bone_dim, keepdim=True)
    return bones_length
def get_bone_unit_vecbypose3d(x, num_joints=16, bone_dim=2):
    # Return unit-length bone direction vectors for a 3D pose
    # (N x num_joints x 3 -> N x 15 x 3).
    # NOTE(review): num_joints and bone_dim are currently unused here.
    bonevec = get_BoneVecbypose3d(x)
    bonelength = get_bone_lengthbypose3d(x)
    # NOTE(review): no epsilon in the division — a zero-length bone yields
    # NaN/inf; confirm inputs never contain coincident joints.
    bone_unitvec = bonevec / bonelength
    return bone_unitvec
def get_discriminator_accuracy(prediction, label):
    '''
    Compute discriminator accuracy for tensorboard logging.

    A prediction counts as correct when it lies within 0.5 of its label,
    i.e. it falls on the right side of the 0.5 decision threshold.

    :param prediction: tensor of scores, Bs x Score
    :param label: tensor of targets, same shape
    :return: fraction of correct predictions
    '''
    # move to CPU numpy before doing any arithmetic
    pred_np = prediction.cpu().detach().numpy()
    label_np = label.cpu().detach().numpy()
    # 1 where |prediction - label| <= 0.5, else 0
    correct = np.where(np.abs(pred_np - label_np) > 0.5, 0, 1)
    return np.sum(correct) / label_np.shape[0]
import copy
# To store data in a pool and sample from it when it is full
# Shrivastava et al’s strategy
class Sample_from_Pool(object):
    """Replay pool of generated samples (Shrivastava et al.'s strategy).

    Items are stored until the pool is full; afterwards each incoming item
    either passes straight through or (with probability 0.5) is swapped with
    a random stored item, whose copy is returned in its place.
    """

    def __init__(self, max_elements=4096):
        self.max_elements = max_elements  # pool capacity
        self.cur_elements = 0             # number of items currently stored
        self.items = []

    def __call__(self, in_items):
        out_items = []
        for item in in_items:
            if self.cur_elements < self.max_elements:
                # pool not yet full: store the item and pass it through
                self.items.append(item)
                self.cur_elements = self.cur_elements + 1
                out_items.append(item)
            elif np.random.ranf() > 0.5:
                # pool full: swap the new item with a random stored one
                swap_idx = np.random.randint(0, self.max_elements)
                out_items.append(copy.copy(self.items[swap_idx]))
                self.items[swap_idx] = item
            else:
                out_items.append(item)
        return out_items
|
11505787 | import modules.internals
def jpeg_artifacts(source: Image, radius: int) -> Image:
    """Thin wrapper delegating to the internal image-processing implementation.

    :param source: input image
    :param radius: strength parameter passed straight through
    :return: the processed image
    """
    # BUG FIX: the call referenced modules.images_internal, but only
    # modules.internals is imported at the top of this file, so the original
    # raised AttributeError at runtime.
    return modules.internals.jpeg_artifacts(source, radius)
11505800 | import cPickle as pickle
import matplotlib.pyplot as plt
# Load previously pickled weights and display the first one as an image.
# BUG FIX: the pickle file is binary, so open it in 'rb' mode (plain 'r'
# breaks on Windows and under Python 3); a context manager also closes the
# file deterministically.
with open('weights.bin', 'rb') as f:
    weights = pickle.load(f)
print(weights[0])
plt.imshow(weights[0])
plt.show()
|
11505826 | from .ble_attribute_abstract import BleAttributeAbstract
from .ble_helper import BleHelper
class BleDescriptor(BleAttributeAbstract):
    """A BLE descriptor attached to a characteristic on a peripheral.

    Wraps the obniz peripheral protocol messages for reading and writing a
    descriptor value.  (The commented-out JavaScript that was ported here has
    been removed as dead code.)
    """

    def __init__(self, obj):
        """
        :param obj: descriptor definition dict; the optional "permissions"
            entry may be a single value or a list and is normalized to a list.
        """
        super().__init__(obj)
        self.permissions = obj.get("permissions", [])
        if type(self.permissions) is not list:
            self.permissions = [self.permissions]

    @property
    def parent_name(self):
        """Name of the attribute level that contains a descriptor."""
        return "characteristic"

    def write(self, data_array, need_response=False):
        """Send a write request for this descriptor's value.

        :param data_array: list of byte values to write
        :param need_response: accepted for API symmetry; not forwarded
        """
        self.get_characteristic().get_service().peripheral.obniz.send(
            {
                "ble": {
                    "peripheral": {
                        "write_descriptor": {
                            "service_uuid": BleHelper.uuid_filter(
                                self.get_characteristic().get_service().uuid
                            ),
                            "characteristic_uuid": BleHelper.uuid_filter(
                                self.get_characteristic().uuid
                            ),
                            "descriptor_uuid": self.uuid,
                            "data": data_array,
                        }
                    }
                }
            }
        )

    def read(self):
        """Send a read request for this descriptor's value."""
        self.get_characteristic().get_service().peripheral.obniz.send(
            {
                "ble": {
                    "peripheral": {
                        "read_descriptor": {
                            "service_uuid": BleHelper.uuid_filter(
                                self.get_characteristic().get_service().uuid
                            ),
                            "characteristic_uuid": BleHelper.uuid_filter(
                                self.get_characteristic().uuid
                            ),
                            "descriptor_uuid": self.uuid,
                        }
                    }
                }
            }
        )
|
11505861 | import string
import chainer
import cupy as cp
class HashMap(object):
    """GPU hash table mapping int32 key vectors to dense indices, per batch.

    Built once from ``data`` (batch x num_points x dim, int32, on GPU);
    :meth:`find` then maps query vectors to the dense index assigned at
    construction, or -1 when absent.  Uses open addressing with linear
    probing, capped at 100 probes per key.
    """
    def __init__(self, data, table_size=2 ** 24):
        # NOTE(review): xp is computed but never used below.
        xp = chainer.cuda.get_array_module(data)
        # multiplier of the rolling polynomial hash
        self.hash_factor = 2531011
        self.batch_size, self.num_points, self.dim = data.shape
        self.table_size = table_size
        # slot -> dense index (-1 marks an empty slot)
        self.indices = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size,), 'int32')) - 1
        # slot -> stored key vector
        self.values = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size, self.dim), 'int32'))
        # dense index -> key vector, in insertion order
        self.value_list = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size, self.dim), 'int32'))
        # max number of distinct keys over the batch; set by init_keys
        self.size = None
        self.init_keys(data)
    def init_keys(self, data):
        # Insert every key vector of `data` into the table, one CUDA thread
        # per key.  `used`/`written` implement a per-slot claim-then-publish
        # protocol; `count` assigns dense indices per batch element; `ok`
        # is set to -1 if any key exhausts the 100-probe limit.
        data = cp.ascontiguousarray(data)
        used = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size), 'int32'))
        written = cp.ascontiguousarray(cp.zeros((self.batch_size, self.table_size), 'int32'))
        count = cp.ascontiguousarray(cp.zeros((self.batch_size,), 'int32'))
        ok = cp.zeros((1,), 'int32')
        # NOTE(review): data.size / self.dim is float division under Python 3;
        # cp.arange accepts it, but integer // would be cleaner — confirm.
        loop_indices = cp.arange(data.size / self.dim).astype('int32')
        chainer.cuda.elementwise(
            'int32 j, raw int32 data, raw int32 indices, raw int32 values, ' +
            'raw int32 value_list, raw int32 used, raw int32 written, raw int32 count, raw int32 ok',
            '',
            string.Template('''
                int* value_init;
                int* value;
                value_init = &data[i * ${dim}];
                int bn = i / ${num_points};

                /* compute initial key */
                unsigned int key = 0;
                value = value_init;
                for (int k = 0; k < ${dim}; k++) key = (key + *value++) * ${hash_factor};
                key = key % ${table_size};

                for (int l = 0; l < 100; l++) {
                    /* check if the key is used */
                    int ret;
                    ret = used[bn * ${table_size} + key];
                    if (ret == 0) ret = atomicExch(&used[bn * ${table_size} + key], 1);
                    if (ret == 0) {
                        /* register true key */
                        int* value_ref = &values[(bn * ${table_size} + key) * ${dim}];
                        value = value_init;
                        for (int k = 0; k < ${dim}; k++) *value_ref++ = *value++;
                        written[bn * ${table_size} + key] = 1;
                        int num = atomicAdd(&count[bn], 1);
                        indices[bn * ${table_size} + key] = num;
                        value_ref = &value_list[(bn * ${table_size} + num) * ${dim}];
                        value = value_init;
                        for (int k = 0; k < ${dim}; k++) *value_ref++ = *value++;
                        break;
                    } else {
                        bool match = true;
                        while (atomicAdd(&written[bn * ${table_size} + key], 0) == 0) {}
                        int* value_ref = &values[(bn * ${table_size} + key) * ${dim}];
                        value = value_init;
                        for (int k = 0; k < ${dim}; k++) if (*value_ref++ != *value++) match = false;
                        if (match) {
                            break;
                        } else {
                            key = (key + 1) % ${table_size};
                        }
                    }
                    if (l == 99) {
                        ok[0] = -1;
                    }
                }
            ''').substitute(
                table_size=self.table_size,
                hash_factor=self.hash_factor,
                num_points=self.num_points,
                dim=self.dim,
            ),
            'kernel',
        )(loop_indices, data, self.indices, self.values, self.value_list, used, written, count, ok)
        self.size = int(count.max())
        if int(ok[0]) < 0:
            raise Exception
    def find(self, data):
        # Look up each query vector; returns an int32 array of dense indices
        # with -1 for keys that were never inserted.  Raises if any lookup
        # exceeds the 100-probe limit.
        ret = cp.ascontiguousarray(cp.zeros(data.shape[:-1], 'int32')) - 1
        data = cp.ascontiguousarray(data)
        loop_indices = cp.arange(data.size / self.dim).astype('int32')
        ok = cp.zeros((1,), 'int32')
        chainer.cuda.elementwise(
            'int32 j, raw int32 data, raw int32 indices, raw int32 values, raw int32 ret, raw int32 ok',
            '',
            string.Template('''
                /*  */
                int* value = &data[j * ${dim}];
                int bn = i / ${num_points};

                /* compute initial key */
                unsigned int key = 0;
                for (int k = 0; k < ${dim}; k++) key = (key + value[k]) * ${hash_factor};
                key = key % ${table_size};

                for (int l = 0; l < 100; l++) {
                    if (indices[bn * ${table_size} + key] < 0) {
                        ret[j] = -1;
                        break;
                    }
                    bool match = true;
                    for (int k = 0; k < ${dim}; k++)
                        if (values[(bn * ${table_size} + key) * ${dim} + k] != value[k])
                            match = false;
                    if (match) {
                        ret[j] = indices[bn * ${table_size} + key];
                        break;
                    } else {
                        key = (key + 1) % ${table_size};
                    }
                    if (l == 99) {
                        ok[0] = -1;
                    }
                }
            ''').substitute(
                table_size=self.table_size,
                hash_factor=self.hash_factor,
                num_points=data.shape[1],
                dim=self.dim,
            ),
            'function',
        )(loop_indices, data, self.indices, self.values, ret, ok)
        if int(ok[0]) < 0:
            raise Exception
        return ret
|
11505872 | from nose_parameterized import parameterized, param
from unittest import TestCase
from algotrader.trading.context import ApplicationContext
from algotrader.trading.event import EventLogger
from algotrader.utils.market_data import *
from tests import config
# (feed provider id, subscription types) pairs used to parameterize the test
params = [
    param('CSV', ['Bar.Yahoo.Time.D1']),
    param('PandasWeb', ['Bar.Google.Time.D1']),
    param('PandasWeb', ['Bar.Yahoo.Time.D1'])
]
class FeedTest(TestCase):
    """Integration test: replay historical bars through each feed provider
    and check that only Bar events (no trades/quotes) are emitted."""

    @parameterized.expand(params)
    def test_loaded_bar(self, feed_id, subscription_types):
        # fresh application context and the feed under test
        app_context = ApplicationContext(config=config)
        app_context.start()
        feed = app_context.provider_mgr.get(feed_id)
        feed.start(app_context)
        # logger.setLevel(logging.DEBUG)
        eventLogger = EventLogger()
        eventLogger.start(app_context)
        # NOTE(review): the instrument id below looks like a redacted
        # placeholder — confirm the intended symbol.
        instruments = app_context.ref_data_mgr.get_insts_by_ids(["SP<EMAIL>"])
        for sub_req in build_subscription_requests(feed_id, instruments,
                                                   subscription_types,
                                                   20100101,
                                                   20170101):
            feed.subscribe_mktdata(sub_req)
        # bars should have been replayed; trades/quotes were not subscribed
        self.assertTrue(eventLogger.count[Bar] > 0)
        self.assertTrue(eventLogger.count[Trade] == 0)
        self.assertTrue(eventLogger.count[Quote] == 0)
|
11505907 | import pkgutil, importlib
class PluginManager:
    """Loads plugins from the ``ip_plugins`` package and fans calls out to them.

    Accessing an attribute named ``do_<name>`` returns a dispatcher function
    that invokes ``<name>`` on every loaded plugin.
    """

    def __init__(self):
        # plugin instances returned by each module's load() hook
        self.loaded = []

    def loadplugins(self, caller):
        """Import every module in ip_plugins (except 'plugin') and keep the
        plugin object its load(caller) hook returns, if any."""
        modules = [name for _, name, _ in pkgutil.iter_modules(['ip_plugins'])]
        modules.remove("plugin")
        for m in modules:
            m = importlib.import_module("." + m, "ip_plugins")
            p = m.load(caller)
            if p:
                self.loaded.append(p)

    def __getattr__(self, attr, *args, **kwargs):
        """For 'do_<name>' attributes return a dispatcher over all plugins;
        anything else falls back to normal attribute lookup."""
        if not attr.startswith("do_"):
            return object.__getattribute__(self, attr)
        fname = attr[3:]
        funcs = []
        for p in self.loaded:
            try:
                funcs.append(p.__getattribute__(fname))
            except AttributeError:
                print("WARNING: Plugin %s has no method '%s'" % (type(p), fname))

        def func(*args, **kwargs):
            # BUG FIX: the original returned inside the loop, so only the
            # FIRST plugin was ever invoked.  Call every plugin; return the
            # first result (None when no plugin provides the method), which
            # preserves the original return value.
            results = [f(*args, **kwargs) for f in funcs]
            return results[0] if results else None

        return func
|
11505923 | import pandas as pd
import os
def dataset_stats(data_set_path):
    """Write summary statistics for a churn dataset to a sibling
    ``*_summarystats.csv`` file.

    Loads the CSV (first two columns as a MultiIndex), augments describe()
    output with skew, 1%/99% quantiles and the fraction of nonzero entries,
    and saves one row per data column.

    :param data_set_path: path to the dataset .csv file
    """
    assert os.path.isfile(data_set_path), '"{}" is not a valid dataset path'.format(data_set_path)
    data = pd.read_csv(data_set_path, index_col=[0, 1])
    if 'is_churn' in data:
        data['is_churn'] = data['is_churn'].astype(float)
    stats = data.describe().transpose()
    stats['skew'] = data.skew()
    stats['1%'] = data.quantile(q=0.01)
    stats['99%'] = data.quantile(q=0.99)
    # fraction of rows with a nonzero value, per column
    stats['nonzero'] = data.astype(bool).sum(axis=0) / data.shape[0]
    column_order = ['count', 'nonzero', 'mean', 'std', 'skew', 'min',
                    '1%', '25%', '50%', '75%', '99%', 'max']
    stats = stats[column_order]
    # '%' is awkward in downstream tooling; use 'pct' instead
    stats.columns = stats.columns.str.replace("%", "pct")
    save_path = data_set_path.replace('.csv', '_summarystats.csv')
    stats.to_csv(save_path, header=True)
    print('Saving results to %s' % save_path)
|
11505959 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from astropy.cosmology import Planck13
from astropy import units as u
from astropy import constants
#========================================================
#MODIFIABLE HEADER (make this a function later with argv)
z = 0.001
run = '/home/desika.narayanan/pd/examples/gadget/mw_zoom/example.135.rtout.sed'
#========================================================
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Load the radiative-transfer output and get the SED for all viewing angles.
m = ModelOutput(run)
wav,flux = m.get_sed(inclination='all',aperture=-1)
# Shift the rest-frame wavelengths (micron) to the observed frame.
wav = np.asarray(wav)*u.micron #wav is in micron
wav *= (1.+z)
# Convert luminosity (erg/s) to flux density at the luminosity distance.
flux = np.asarray(flux)*u.erg/u.s
dl = Planck13.luminosity_distance(z)
dl = dl.to(u.cm)
# BUG FIX: use np.pi rather than the truncated constant 3.14 in the
# inverse-square dilution factor.
flux /= (4.*np.pi*dl**2.)
# Divide by frequency to get a per-Hz flux density, then express in mJy.
nu = constants.c.cgs/(wav.to(u.cm))
nu = nu.to(u.Hz)
flux /= nu
flux = flux.to(u.mJy)
for i in range(flux.shape[0]):
    ax.loglog(wav,flux[i,:])
ax.set_xlabel(r'$\lambda$ [$\mu$m]')
ax.set_ylabel('Flux (mJy)')
ax.set_ylim([1,1e8])
ax.set_xlim(0.05,15000)
ax.grid()
fig.savefig('./sed.png')
|
11505997 | from polyphony import testbench
def f(d:list):
    # Return the first element of the list (polyphony-synthesizable helper).
    return d[0]
def g(d:list):
    # Forward to f; exercises list-argument passing through a call chain.
    return f(d)
def special03(x):
    # Returns data1[0] + data2[0] == 5.
    # NOTE(review): parameter x is immediately overwritten below and never
    # read — confirm the signature is required by the test harness.
    data1 = [1, 2, 3]
    data2 = [4, 5, 6]
    x = g(data1)
    y = g(data2)
    return x + y
@testbench
def test():
    # The argument is ignored by special03 (see note there).
    assert 5 == special03(1)
# run the testbench at import/synthesis time
test()
|
11506011 | class Request:
__slots__ = ["path", "method", "db", "route"]
    def __init__(self, environ, db):
        """Capture method, normalized path and db handle from a WSGI environ.

        :param environ: WSGI environ mapping
        :param db: database handle kept for request handlers
        """
        self.method = environ["REQUEST_METHOD"].upper()
        # strip only the first leading slash so nested paths keep separators
        self.path = get_str_from_wsgi(environ, "PATH_INFO", "/").replace("/", "", 1)
        self.db = db
        # filled in later by the router
        self.route = None
def get_str_from_wsgi(environ, key, default):
    """Fetch *key* from a WSGI environ and re-decode it as native text.

    WSGI string values arrive latin-1-decoded; round-tripping through
    iso-8859-1 recovers the properly decoded string.
    """
    raw = environ.get(key, default)
    return raw.encode("iso-8859-1").decode()
|
11506071 | import torch
import torch.nn as nn
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before running the wrapped module."""

    def __init__(self, dim, fn):
        super(PreNorm, self).__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class Attention(nn.Module):
    """Multi-head self-attention block (ViT-style).

    Input and output are (B, N, C) token sequences; C must be divisible by
    num_heads.
    """
    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0
    ):
        super(Attention, self).__init__()
        assert (
            dim % num_heads == 0
        ), "Embedding dimension should be divisible by number of heads"
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # scale queries by 1/sqrt(head_dim)
        self.scale = head_dim ** -0.5
        # one projection producing q, k and v together
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, C // heads)
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        # make torchscript happy (cannot use tensor as tuple)
        q, k, v = qkv[0], qkv[1], qkv[2]
        # scaled dot-product attention over the token axis
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        # merge heads back into the channel dimension
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class FeedForward(nn.Module):
    """
    Implementation of MLP for transformer.

    With revised=False this is the standard ViT MLP (Linear-GELU-Dropout-
    Linear); with revised=True it is the Conv1d/BatchNorm variant from
    "Scaled ReLU", which operates channels-first internally.
    """
    def __init__(self, dim, hidden_dim, dropout_rate=0.0, revised=False):
        super(FeedForward, self).__init__()
        if not revised:
            """
            Original: https://arxiv.org/pdf/2010.11929.pdf
            """
            self.net = nn.Sequential(
                nn.Linear(dim, hidden_dim),
                nn.GELU(),
                nn.Dropout(p=dropout_rate),
                nn.Linear(hidden_dim, dim),
            )
        else:
            """
            Scaled ReLU: https://arxiv.org/pdf/2109.03810.pdf
            """
            self.net = nn.Sequential(
                nn.Conv1d(dim, hidden_dim, kernel_size=1, stride=1),
                nn.BatchNorm1d(hidden_dim),
                nn.GELU(),
                nn.Dropout(p=dropout_rate),
                nn.Conv1d(hidden_dim, dim, kernel_size=1, stride=1),
                nn.BatchNorm1d(dim),
                nn.GELU(),
            )
        self.revised = revised
        self._init_weights()
    def _init_weights(self):
        # NOTE(review): only Linear *biases* are re-initialized here; weights
        # keep the default init, and the revised (Conv1d) branch is untouched
        # because its children are not nn.Linear — confirm this is intended.
        for name, module in self.net.named_children():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.bias, std=1e-6)
    def forward(self, x):
        if self.revised:
            # Conv1d expects (B, C, N); permute in and back out
            x = x.permute(0, 2, 1)
            x = self.net(x)
            x = x.permute(0, 2, 1)
        else:
            x = self.net(x)
        return x
class OutputLayer(nn.Module):
    """Classification head for a transformer encoder.

    Pools the token sequence (CLS token if cls_head, otherwise mean over
    tokens) and maps it to num_classes logits, optionally through an
    intermediate representation layer with Tanh.
    """
    def __init__(
        self,
        embedding_dim,
        num_classes=1000,
        representation_size=None,
        cls_head=False,
    ):
        super(OutputLayer, self).__init__()
        self.num_classes = num_classes
        modules = []
        if representation_size:
            # optional pre-logits projection + Tanh, as in the original ViT
            modules.append(nn.Linear(embedding_dim, representation_size))
            modules.append(nn.Tanh())
            modules.append(nn.Linear(representation_size, num_classes))
        else:
            modules.append(nn.Linear(embedding_dim, num_classes))
        self.net = nn.Sequential(*modules)
        if cls_head:
            self.to_cls_token = nn.Identity()
        self.cls_head = cls_head
        self.num_classes = num_classes
        self._init_weights()
    def _init_weights(self):
        # zero-init the final classifier layer (identified by its output
        # size matching num_classes)
        for name, module in self.net.named_children():
            if isinstance(module, nn.Linear):
                if module.weight.shape[0] == self.num_classes:
                    nn.init.zeros_(module.weight)
                    nn.init.zeros_(module.bias)
    def forward(self, x):
        if self.cls_head:
            # use the first (CLS) token as the sequence representation
            x = self.to_cls_token(x[:, 0])
        else:
            """
            Scaling Vision Transformer: https://arxiv.org/abs/2106.04560
            """
            x = torch.mean(x, dim=1)
        return self.net(x)
|
11506127 | from os.path import basename
class UserSelect(object):
    """A user-select field value: a (code, name) pair."""

    def __init__(self, code="", name=""):
        self.code = code
        self.name = name

    def serialize(self):
        """Return the API payload for this value, or None when no code is set."""
        if not self.code:
            return None
        return {"code": self.code}

    @classmethod
    def deserialize(cls, json_body):
        """Build a UserSelect from an API response dict."""
        body = json_body
        return UserSelect(body["code"], body["name"])
class File(object):
    """A file attachment field value, with download/upload helpers for the
    cybozu.com file API."""

    # endpoint template; formatted with the account's subdomain
    API_ROOT = "https://{0}.cybozu.com/k/v1/file.json"
    def __init__(self, content_type="", file_key="", name="", size=0.0):
        self.content_type = content_type
        self.file_key = file_key
        self.name = name
        self.size = size
        # raw downloaded bytes, cached by download(cache_enable=True)
        self.file = None
    def serialize(self):
        # Only the fileKey is sent back to the API; None when not uploaded.
        if self.file_key:
            return {
                "fileKey": self.file_key
            }
        else:
            return None
    @classmethod
    def deserialize(cls, json_body):
        """Build a File from an API response dict (size arrives as a string)."""
        j = json_body
        return File(
            j["contentType"],
            j["fileKey"],
            j["name"],
            float(j["size"])
        )
    def download(self, api, cache_enable=False):
        """Fetch this file's content via the API.

        :param api: client exposing account.domain and _request()
        :param cache_enable: reuse previously downloaded bytes when True
        :return: raw bytes, or None on a failed request
        """
        if cache_enable and self.file:
            return self.file
        url = self.API_ROOT.format(api.account.domain)
        r = api._request("GET", url, params_or_data={"fileKey": self.file_key})
        file = None
        if r.ok:
            file = r.content
            self.file = file
            self.content_type = r.headers.get("content-type")
        return file
    @classmethod
    def upload(cls, file_or_path, api, file_name="", mime_type=""):
        """Upload a file (path or file-like object) and return a File holding
        the server-assigned fileKey, or None on failure.

        :param file_or_path: filesystem path (str) or open binary file object
        :param api: client exposing account.domain and _request()
        :param file_name: override for the uploaded name
        :param mime_type: optional explicit content type
        """
        url = cls.API_ROOT.format(api.account.domain)
        def _upload(kfile):
            # derive the name from the file object unless explicitly given
            if file_name:
                n = file_name
            else:
                n = "" if not hasattr(kfile, "name") else basename(kfile.name)
            f = {"file": (n, kfile, mime_type) if mime_type else (n, kfile)}
            r = api._request("FILE", url, params_or_data=f)
            return n, r
        resp = None
        if isinstance(file_or_path, str):
            with open(file_or_path, "rb") as f:
                name, resp = _upload(f)
        else:
            name, resp = _upload(file_or_path)
        uploaded = None
        if resp.ok:
            body = resp.json()
            if "fileKey" in body:
                uploaded = File(name=name, file_key=body["fileKey"])
        else:
            print(resp.json())
        return uploaded
|
11506136 | from collections import OrderedDict
from io import BytesIO
import numpy
from numpy.testing import assert_raises, assert_equal
from PIL import Image
from picklable_itertools.extras import partition_all
from six.moves import zip
from fuel import config
from fuel.datasets.base import IndexableDataset
from fuel.schemes import ShuffledScheme, SequentialExampleScheme
from fuel.streams import DataStream
from fuel.transformers.image import (ImagesFromBytes,
MinimumImageDimensions,
RandomFixedSizeCrop,
Random2DRotation)
def reorder_axes(shp):
    """Convert an (H, W[, C]) image shape tuple to channels-first (C, H, W).

    Grayscale 2D shapes get a singleton channel; other ranks pass through.
    """
    if len(shp) == 3:
        return (shp[-1],) + shp[:-1]
    if len(shp) == 2:
        return (1,) + shp
    return shp
class ImageTestingMixin(object):
    """Shared setup: builds a sequential example stream and a shuffled
    batch stream (batch size 2) over self.dataset."""
    def common_setup(self):
        ex_scheme = SequentialExampleScheme(self.dataset.num_examples)
        self.example_stream = DataStream(self.dataset,
                                         iteration_scheme=ex_scheme)
        self.batch_size = 2
        scheme = ShuffledScheme(self.dataset.num_examples,
                                batch_size=self.batch_size)
        self.batch_stream = DataStream(self.dataset, iteration_scheme=scheme)
class TestImagesFromBytes(ImageTestingMixin):
    """Tests for the ImagesFromBytes transformer: decoding encoded image
    bytes (various PIL modes/formats) into channels-first arrays."""
    def setUp(self):
        rng = numpy.random.RandomState(config.default_seed)
        # expected decoded shapes, in (H, W[, C]) order
        self.shapes = [
            (10, 12, 3),
            (9, 8, 4),
            (12, 14, 3),
            (4, 7),
            (9, 8, 4),
            (7, 9, 3)
        ]
        pil1 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[0])
                               .astype('uint8'), mode='RGB')
        pil2 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[1])
                               .astype('uint8'), mode='CMYK')
        pil3 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[2])
                               .astype('uint8'), mode='RGB')
        pil4 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[3])
                               .astype('uint8'), mode='L')
        pil5 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[4])
                               .astype('uint8'), mode='RGBA')
        pil6 = Image.fromarray(rng.random_integers(0, 255,
                                                   size=self.shapes[5])
                               .astype('uint8'), mode='YCbCr')
        source1 = [pil1, pil2, pil3]
        source2 = [pil4, pil5, pil6]
        # encode every image to bytes in a different container format
        bytesio1 = [BytesIO() for _ in range(3)]
        bytesio2 = [BytesIO() for _ in range(3)]
        formats1 = ['PNG', 'JPEG', 'BMP']
        formats2 = ['GIF', 'PNG', 'JPEG']
        for s, b, f in zip(source1, bytesio1, formats1):
            s.save(b, format=f)
        for s, b, f in zip(source2, bytesio2, formats2):
            s.save(b, format=f)
        self.dataset = IndexableDataset(
            OrderedDict([('source1', [b.getvalue() for b in bytesio1]),
                         ('source2', [b.getvalue() for b in bytesio2])]),
            axis_labels={'source1': ('batch', 'bytes'),
                         'source2': ('batch', 'bytes')})
        self.common_setup()
    def test_images_from_bytes_example_stream(self):
        # decoded examples should come back channels-first
        stream = ImagesFromBytes(self.example_stream,
                                 which_sources=('source1', 'source2'),
                                 color_mode=None)
        s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
        s1_shape = set(s.shape for s in s1)
        s2_shape = set(s.shape for s in s2)
        actual_s1 = set(reorder_axes(s) for s in self.shapes[:3])
        actual_s2 = set(reorder_axes(s) for s in self.shapes[3:])
        assert actual_s1 == s1_shape
        assert actual_s2 == s2_shape
    def test_images_from_bytes_batch_stream(self):
        stream = ImagesFromBytes(self.batch_stream,
                                 which_sources=('source1', 'source2'),
                                 color_mode=None)
        s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
        # flatten the per-batch lists of examples
        s1 = sum(s1, [])
        s2 = sum(s2, [])
        s1_shape = set(s.shape for s in s1)
        s2_shape = set(s.shape for s in s2)
        actual_s1 = set(reorder_axes(s) for s in self.shapes[:3])
        actual_s2 = set(reorder_axes(s) for s in self.shapes[3:])
        assert actual_s1 == s1_shape
        assert actual_s2 == s2_shape
    def test_images_from_bytes_example_stream_convert_rgb(self):
        # forcing RGB should give 3 channels regardless of source mode
        stream = ImagesFromBytes(self.example_stream,
                                 which_sources=('source1'),
                                 color_mode='RGB')
        s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
        actual_s1_gen = (reorder_axes(s) for s in self.shapes[:3])
        actual_s1 = set((3,) + s[1:] for s in actual_s1_gen)
        s1_shape = set(s.shape for s in s1)
        assert actual_s1 == s1_shape
    def test_images_from_bytes_example_stream_convert_l(self):
        # forcing grayscale should give a single channel
        stream = ImagesFromBytes(self.example_stream,
                                 which_sources=('source2'),
                                 color_mode='L')
        s1, s2 = list(zip(*list(stream.get_epoch_iterator())))
        actual_s2_gen = (reorder_axes(s) for s in self.shapes[3:])
        actual_s2 = set((1,) + s[1:] for s in actual_s2_gen)
        s2_shape = set(s.shape for s in s2)
        assert actual_s2 == s2_shape
    def test_axis_labels(self):
        # only transformed sources should get image axis labels
        stream = ImagesFromBytes(self.example_stream,
                                 which_sources=('source2',))
        assert stream.axis_labels['source1'] == ('bytes',)
        assert stream.axis_labels['source2'] == ('channel', 'height',
                                                 'width')
        bstream = ImagesFromBytes(self.batch_stream,
                                  which_sources=('source1',))
        assert bstream.axis_labels['source1'] == ('batch', 'channel', 'height',
                                                  'width')
        assert bstream.axis_labels['source2'] == ('batch', 'bytes')
    def test_bytes_type_exception(self):
        # non-bytes input must raise TypeError
        stream = ImagesFromBytes(self.example_stream,
                                 which_sources=('source2',))
        assert_raises(TypeError, stream.transform_source_example, 54321,
                      'source2')
class TestMinimumDimensions(ImageTestingMixin):
    """Tests for the MinimumImageDimensions transformer (upscaling images
    below a minimum height/width)."""

    def setUp(self):
        rng = numpy.random.RandomState(config.default_seed)
        source1 = []
        source2 = []
        source3 = []
        # (H, W) shapes; source2 holds the transposed shapes, source3 adds
        # a leading 3-channel axis
        self.shapes = [(5, 9), (4, 6), (4, 3), (6, 4), (2, 5), (4, 8), (8, 3)]
        for i, shape in enumerate(self.shapes):
            source1.append(rng.normal(size=shape))
            source2.append(rng.normal(size=shape[::-1]))
            source3.append(rng.random_integers(0, 255, size=(3,) + shape)
                           .astype('uint8'))
        self.dataset = IndexableDataset(OrderedDict([('source1', source1),
                                                     ('source2', source2),
                                                     ('source3', source3)]),
                                        axis_labels={'source1':
                                                     ('batch', 'channel',
                                                      'height', 'width'),
                                                     'source3':
                                                     ('batch', 'channel',
                                                      'height', 'width')})
        self.common_setup()

    def test_minimum_dimensions_example_stream(self):
        stream = MinimumImageDimensions(self.example_stream, (4, 5),
                                        which_sources=('source1',
                                                       'source3'))
        it = stream.get_epoch_iterator()
        for example, shp in zip(it, self.shapes):
            # transformed sources meet the minimum; source2 is untouched
            assert example[0].shape[0] >= 4 and example[0].shape[1] >= 5
            assert (example[1].shape[1] == shp[0] and
                    example[1].shape[0] == shp[1])
            assert example[2].shape[0] == 3
            assert example[2].shape[1] >= 4 and example[2].shape[2] >= 5

    def test_minimum_dimensions_batch_stream(self):
        stream = MinimumImageDimensions(self.batch_stream, (4, 5),
                                        which_sources=('source1',))
        it = stream.get_epoch_iterator()
        for batch, shapes in zip(it, partition_all(self.batch_size,
                                                   self.shapes)):
            # BUG FIX: these assertions previously tested bare generator
            # expressions, which are always truthy; wrap them in all() so
            # the conditions are actually evaluated.
            assert all(example.shape[0] >= 4 and example.shape[1] >= 5
                       for example in batch[0])
            assert all(example.shape[1] == shp[0] and
                       example.shape[0] == shp[1]
                       for example, shp in zip(batch[1], shapes))

    def test_axes_exception(self):
        # >3-dimensional input is unsupported
        stream = MinimumImageDimensions(self.example_stream, (4, 5),
                                        which_sources=('source1',))
        assert_raises(NotImplementedError,
                      stream.transform_source_example,
                      numpy.empty((2, 3, 4, 2)),
                      'source1')

    def test_resample_exception(self):
        # unknown resampling mode must be rejected at construction
        assert_raises(ValueError,
                      MinimumImageDimensions, self.example_stream, (4, 5),
                      resample='notarealresamplingmode')
class TestFixedSizeRandomCrop(ImageTestingMixin):
    """Tests for RandomFixedSizeCrop: repeated cropping should eventually
    cover every pixel of the source images."""
    def setUp(self):
        # source1: fixed-shape ndarray batch where each pixel value is unique
        source1 = numpy.zeros((9, 3, 7, 5), dtype='uint8')
        source1[:] = numpy.arange(3 * 7 * 5, dtype='uint8').reshape(3, 7, 5)
        shapes = [(5, 9), (6, 8), (5, 6), (5, 5), (6, 4), (7, 4),
                  (9, 4), (5, 6), (6, 5)]
        source2 = []
        biggest = 0
        num_channels = 2
        for shp in shapes:
            biggest = max(biggest, shp[0] * shp[1] * 2)
            ex = numpy.arange(shp[0] * shp[1] * num_channels).reshape(
                (num_channels,) + shp).astype('uint8')
            source2.append(ex)
        self.source2_biggest = biggest
        axis_labels = {'source1': ('batch', 'channel', 'height', 'width'),
                       'source2': ('batch', 'channel', 'height', 'width')}
        self.dataset = IndexableDataset(OrderedDict([('source1', source1),
                                                     ('source2', source2)]),
                                        axis_labels=axis_labels)
        self.common_setup()
    def test_ndarray_batch_source(self):
        # Make sure that with enough epochs we sample everything.
        stream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
                                     which_sources=('source1',))
        seen_indices = numpy.array([], dtype='uint8')
        for i in range(30):
            for batch in stream.get_epoch_iterator():
                assert batch[0].shape[1:] == (3, 5, 4)
                assert batch[0].shape[0] in (1, 2)
                seen_indices = numpy.union1d(seen_indices, batch[0].flatten())
            if 3 * 7 * 5 == len(seen_indices):
                break
        else:
            # never covered all pixels within 30 epochs
            assert False
    def test_list_batch_source(self):
        # Make sure that with enough epochs we sample everything.
        stream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
                                     which_sources=('source2',))
        seen_indices = numpy.array([], dtype='uint8')
        for i in range(30):
            for batch in stream.get_epoch_iterator():
                for example in batch[1]:
                    assert example.shape == (2, 5, 4)
                    seen_indices = numpy.union1d(seen_indices,
                                                 example.flatten())
                assert len(batch[1]) in (1, 2)
            if self.source2_biggest == len(seen_indices):
                break
        else:
            # never covered the largest example within 30 epochs
            assert False
    def test_format_exceptions(self):
        # wrong-rank inputs must be rejected
        estream = RandomFixedSizeCrop(self.example_stream, (5, 4),
                                      which_sources=('source2',))
        bstream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
                                      which_sources=('source2',))
        assert_raises(ValueError, estream.transform_source_example,
                      numpy.empty((5, 6)), 'source2')
        assert_raises(ValueError, bstream.transform_source_batch,
                      [numpy.empty((7, 6))], 'source2')
        assert_raises(ValueError, bstream.transform_source_batch,
                      [numpy.empty((8, 6))], 'source2')
    def test_window_too_big_exceptions(self):
        # crop window larger than the image must be rejected
        stream = RandomFixedSizeCrop(self.example_stream, (5, 4),
                                     which_sources=('source2',))
        assert_raises(ValueError, stream.transform_source_example,
                      numpy.empty((3, 4, 2)), 'source2')
        bstream = RandomFixedSizeCrop(self.batch_stream, (5, 4),
                                      which_sources=('source1',))
        assert_raises(ValueError, bstream.transform_source_batch,
                      numpy.empty((5, 3, 4, 2)), 'source1')
class TestRandom2DRotation(ImageTestingMixin):
    """Tests for Random2DRotation, checking exact pixel output for a fixed
    RNG seed on ndarray, object-array and list batch sources."""
    def setUp(self):
        # source1: uniform ndarray batch; source2/source3: ragged examples
        # as an object array and as a plain list
        source1 = numpy.zeros((2, 3, 4, 6), dtype='uint8')
        source1[:] = numpy.arange(3 * 4 * 6, dtype='uint8').reshape((3, 4, 6))
        source2 = numpy.empty(2, dtype=object)
        source2[0] = numpy.arange(3 * 4 * 6, dtype='uint8').reshape((3, 4, 6))
        source2[1] = numpy.arange(3 * 4 * 7, dtype='uint8').reshape((3, 4, 7))
        source3 = [source2[0], source2[1]]
        self.source1 = source1
        self.source2 = source2
        self.source3 = source3
        axis_labels = {'source1': ('batch', 'channel', 'height', 'width'),
                       'source2': ('batch', 'channel', 'height', 'width'),
                       'source3': ('batch', 'channel', 'height', 'width')}
        self.dataset = \
            IndexableDataset(OrderedDict([('source1', source1),
                                          ('source2', source2),
                                          ('source3', source3)]),
                             axis_labels=axis_labels)
        self.common_setup()
    def test_format_exceptions(self):
        # wrong-rank inputs must be rejected
        estream = Random2DRotation(self.example_stream,
                                   which_sources=('source2',))
        bstream = Random2DRotation(self.batch_stream,
                                   which_sources=('source2',))
        assert_raises(ValueError, estream.transform_source_example,
                      numpy.empty((5, 6)), 'source2')
        assert_raises(ValueError, bstream.transform_source_batch,
                      [numpy.empty((7, 6))], 'source2')
        assert_raises(ValueError, bstream.transform_source_batch,
                      [numpy.empty((8, 6))], 'source2')
    def test_maximum_rotation_invalid_exception(self):
        # rotation must lie in (0, pi)
        assert_raises(ValueError, Random2DRotation, self.example_stream,
                      maximum_rotation=0.0,
                      which_sources=('source2',))
        assert_raises(ValueError, Random2DRotation, self.example_stream,
                      maximum_rotation=3.1416,
                      which_sources=('source2',))
    def test_invalid_resample_exception(self):
        assert_raises(ValueError, Random2DRotation, self.example_stream,
                      resample='nonexisting')
    def test_random_2D_rotation_example_stream(self):
        maximum_rotation = 0.5
        rng = numpy.random.RandomState(123)
        estream = Random2DRotation(self.example_stream,
                                   maximum_rotation,
                                   rng=rng,
                                   which_sources=('source1',))
        # the C x X x Y image should have equal rotation for all c in C
        out = estream.transform_source_example(self.source1[0], 'source1')
        expected = numpy.array([[[0, 1, 2, 3, 4, 11],
                                 [6, 7, 8, 9, 10, 11],
                                 [12, 13, 14, 15, 16, 17],
                                 [12, 19, 20, 21, 22, 23]],
                                [[24, 25, 26, 27, 28, 35],
                                 [30, 31, 32, 33, 34, 35],
                                 [36, 37, 38, 39, 40, 41],
                                 [36, 43, 44, 45, 46, 47]],
                                [[48, 49, 50, 51, 52, 59],
                                 [54, 55, 56, 57, 58, 59],
                                 [60, 61, 62, 63, 64, 65],
                                 [60, 67, 68, 69, 70, 71]]], dtype="uint8")
        assert_equal(out, expected)
    def test_random_2D_rotation_batch_stream(self):
        rng = numpy.random.RandomState(123)
        bstream = Random2DRotation(self.batch_stream,
                                   maximum_rotation=0.5,
                                   rng=rng,
                                   which_sources=('source1',))
        # each C x X x Y image should have equal rotation for all c in C
        out = bstream.transform_source_batch(self.source1, 'source1')
        expected = numpy.array([[[[0, 1, 2, 3, 4, 11],
                                  [6, 7, 8, 9, 10, 11],
                                  [12, 13, 14, 15, 16, 17],
                                  [12, 19, 20, 21, 22, 23]],
                                 [[24, 25, 26, 27, 28, 35],
                                  [30, 31, 32, 33, 34, 35],
                                  [36, 37, 38, 39, 40, 41],
                                  [36, 43, 44, 45, 46, 47]],
                                 [[48, 49, 50, 51, 52, 59],
                                  [54, 55, 56, 57, 58, 59],
                                  [60, 61, 62, 63, 64, 65],
                                  [60, 67, 68, 69, 70, 71]]],
                                [[[6, 1, 2, 3, 4, 5],
                                  [12, 7, 8, 9, 10, 5],
                                  [18, 13, 14, 15, 16, 11],
                                  [18, 19, 20, 21, 22, 17]],
                                 [[30, 25, 26, 27, 28, 29],
                                  [36, 31, 32, 33, 34, 29],
                                  [42, 37, 38, 39, 40, 35],
                                  [42, 43, 44, 45, 46, 41]],
                                 [[54, 49, 50, 51, 52, 53],
                                  [60, 55, 56, 57, 58, 53],
                                  [66, 61, 62, 63, 64, 59],
                                  [66, 67, 68, 69, 70, 65]]]], dtype='uint8')
        assert_equal(out, expected)
        # object-array and list sources should rotate identically for the
        # same seed
        expected = \
            [numpy.array([[[0, 1, 2, 3, 4, 11],
                           [6, 7, 8, 9, 10, 11],
                           [12, 13, 14, 15, 16, 17],
                           [12, 19, 20, 21, 22, 23]],
                          [[24, 25, 26, 27, 28, 35],
                           [30, 31, 32, 33, 34, 35],
                           [36, 37, 38, 39, 40, 41],
                           [36, 43, 44, 45, 46, 47]],
                          [[48, 49, 50, 51, 52, 59],
                           [54, 55, 56, 57, 58, 59],
                           [60, 61, 62, 63, 64, 65],
                           [60, 67, 68, 69, 70, 71]]], dtype='uint8'),
             numpy.array([[[7, 1, 2, 3, 4, 5, 0],
                           [14, 8, 9, 10, 11, 12, 6],
                           [21, 15, 16, 17, 18, 19, 13],
                           [0, 22, 23, 24, 25, 26, 20]],
                          [[35, 29, 30, 31, 32, 33, 0],
                           [42, 36, 37, 38, 39, 40, 34],
                           [49, 43, 44, 45, 46, 47, 41],
                           [0, 50, 51, 52, 53, 54, 48]],
                          [[63, 57, 58, 59, 60, 61, 0],
                           [70, 64, 65, 66, 67, 68, 62],
                           [77, 71, 72, 73, 74, 75, 69],
                           [0, 78, 79, 80, 81, 82, 76]]], dtype='uint8')]
        rng = numpy.random.RandomState(123)
        bstream = Random2DRotation(self.batch_stream,
                                   maximum_rotation=0.5,
                                   rng=rng,
                                   which_sources=('source2',))
        out = bstream.transform_source_batch(self.source2, 'source2')
        assert_equal(out[0], expected[0])
        assert_equal(out[1], expected[1])
        rng = numpy.random.RandomState(123)
        bstream = Random2DRotation(self.batch_stream,
                                   maximum_rotation=0.5,
                                   rng=rng,
                                   which_sources=('source3',))
        out = bstream.transform_source_batch(self.source3, 'source3')
        assert_equal(out[0], expected[0])
        assert_equal(out[1], expected[1])
|
11506162 | from django.conf.urls import url, include
# Root URLconf: delegate all routing to django-mako-plus.
urlpatterns = [
    # adds all DMP-enabled apps
    url('', include('django_mako_plus.urls')),
]
|
11506182 | import requests
import json
import random
import os
import stanza
# Sentence splitter used below to strip questions out of agent replies.
nlp = stanza.Pipeline(lang='en', processors='tokenize')
# Registry of locally running dialogue agents: name -> interact endpoint.
agent_pool = {"plato": "http://127.0.0.1:8082/interact", "blender": "http://127.0.0.1:8080/interact", "dialoflow": "http://127.0.0.1:8089/interact", "dialogpt": "http://127.0.0.1:8086/interact"}
# Conversations keyed by session userid; each value is a list of output lines.
data = {}
start_utterance = "hello"
class PlatoAgent:
    """HTTP client for the locally running Plato dialogue service."""

    def __init__(self, userid):
        self.userid = userid
        self.url = agent_pool["plato"]

    def act(self, text, replace=False):
        """Send `text` to the agent and return its reply utterance."""
        payload = json.dumps({"userID": self.userid, "text": text, "replace": replace})
        response = requests.post(self.url, data=payload)
        return json.loads(response.text)['body']['utterance']
class DialoFlowAgent:
    """HTTP client for the locally running DialoFlow dialogue service."""

    def __init__(self, userid):
        self.userid = userid
        self.url = agent_pool["dialoflow"]

    def act(self, text, replace=False):
        """Send `text` to the agent and return its reply utterance."""
        payload = json.dumps({"userID": self.userid, "text": text, "replace": replace})
        response = requests.post(self.url, data=payload)
        return json.loads(response.text)['body']['utterance']
class DialoGPTAgent:
    """HTTP client for the locally running DialoGPT dialogue service."""

    def __init__(self, userid):
        self.userid = userid
        self.url = agent_pool["dialogpt"]

    def act(self, text, replace=False):
        """Send `text` to the agent and return its reply utterance."""
        payload = json.dumps({"userID": self.userid, "text": text, "replace": replace})
        response = requests.post(self.url, data=payload)
        return json.loads(response.text)['body']['utterance']
class BlenderAgent:
    """HTTP client for the Blender service, which speaks a plain-text protocol."""

    def __init__(self, userid):
        self.userid = userid
        self.url = agent_pool["blender"]

    def act(self, text, replace=False):
        """Send `text` to the agent and return its reply utterance."""
        # A trailing '*' presumably tells the server to replace the previous
        # turn -- confirm against the Blender server implementation.
        suffix = "*" if replace else ""
        payload = (text + self.userid + suffix).encode("utf-8")
        response = requests.post(self.url, data=payload)
        return json.loads(response.text)["text"]
def gen_q(text):
    """Ask the local question-generation service for questions about `text`."""
    response = requests.post("http://127.0.0.1:8084/gen",
                             data=json.dumps({"text": text}))
    return json.loads(response.text)['body']['text']
def nli(res, res_gold):
    """Score response `res` against `res_gold` via the local NLI service."""
    response = requests.post("http://127.0.0.1:8085/NLI",
                             data=json.dumps({"res": res, "res_gold": res_gold}))
    return json.loads(response.text)['nli_score']
PLAY_NUM = 1000  # number of self-play conversations to collect
TURN = 15  # dialogue turns per conversation
METHOD = "GEN"  # probing-method tag; also used as the output directory name
agent_name_pool = list(agent_pool.keys())
for i in range(PLAY_NUM):
    # Random session id; the two agents get adjacent derived ids.
    userid = random.randrange(100000, 999997)
    userid1 = str(userid+1)
    userid2 = str(userid+2)
    if userid in data.keys():
        continue
    else:
        data[userid] = []
    # Pick two (possibly identical) agent types at random.
    agent1_name = random.choice(agent_name_pool)
    if agent1_name == "plato":
        agent1 = PlatoAgent(userid1)
    elif agent1_name == "blender":
        agent1 = BlenderAgent(userid1)
    elif agent1_name == "dialogpt":
        agent1 = DialoGPTAgent(userid1)
    elif agent1_name == "dialoflow":
        agent1 = DialoFlowAgent(userid1)
    agent2_name = random.choice(agent_name_pool)
    if agent2_name == "plato":
        agent2 = PlatoAgent(userid2)
    elif agent2_name == "blender":
        agent2 = BlenderAgent(userid2)
    elif agent2_name == "dialogpt":
        agent2 = DialoGPTAgent(userid2)
    elif agent2_name == "dialoflow":
        agent2 = DialoFlowAgent(userid2)
    r1 = None
    r2 = None
    questions = []  # NOTE(review): never used below -- candidate for removal
    for j in range(TURN):
        if j == 0:
            # Seed the conversation with the fixed opener.
            r1 = start_utterance
            data[userid].append(r1+"\n")
        else:
            r1 = agent1.act(r2)
            print(r1)
            data[userid].append(r1+"\n")
        r2 = agent2.act(r1)
        print(r2)
        data[userid].append(r2+"\n")
        # Keep only the non-question sentences of agent2's reply before
        # generating a probe question about it.
        doc = nlp(r2)
        clean_r2 = []
        for k in doc.sentences:
            if "?" not in k.text:
                clean_r2.append(k.text)
        if len(clean_r2) == 0:
            continue
        q = gen_q(" ".join(clean_r2))
        print(" ".join(clean_r2), q)
        if len(q) > 0 and j > 0:
            # NOTE(review): random.choice(q) followed by q[0]/q[1] implies
            # gen_q returns a list of (question, answer) pairs -- confirm with
            # the /gen service; were it a plain string this would pick chars.
            q = random.choice(q)
            if len(q[0].strip()) > 0:
                data[userid].append("\t" + METHOD + ": " + q[0] + "\n")
                # Ask the probe question, replacing agent2's previous turn.
                temp_r2 = agent2.act(q[0], replace=True)
                score = " ".join([str(x) for x in nli(temp_r2, q[1])])
                print(METHOD, temp_r2, score)
                data[userid].append("\t" + METHOD + ": " + temp_r2 + "\t" + q[1] + "\t" + score + "\n")
    # NOTE(review): assumes the METHOD directory already exists; os.mkdir
    # creates only the leaf directory -- confirm, or switch to os.makedirs.
    if not os.path.exists(METHOD + "/" + agent1_name + "_" + agent2_name):
        os.mkdir(METHOD + "/" + agent1_name + "_" + agent2_name)
    with open(METHOD + "/" + agent1_name + "_" + agent2_name + '/' + str(userid), "w") as f:
        f.writelines(data[userid])
|
11506199 | from enum import Enum
class SupportedModels(Enum):
    """Model types this service supports; mapped to handlers in mapper.py."""
    UNKNOWN = 0
    PATIENT = 1
    OBSERVATION = 2
    PROCEDURE = 3
    APPOINTMENT = 4
    # When changing this don't forget to change mapper.py as well
11506241 | import io
import os
import pytest
import json
from CommonServerPython import DemistoException
from test_data import input_data
from unittest import mock
# Base URL used by every mocked endpoint in this suite.
BASE_URL = 'https://mocked_url/v1/'
# Relative endpoint paths, keyed by a readable name.
URL_SUFFIX = {
    "REPORTS": "reports",
    "PROGRAMS": "me/programs"
}
def util_load_json(path):
    """Load and parse a JSON test fixture from `path` (UTF-8)."""
    # `open` is the Python 3 spelling of the legacy `io.open`, and
    # json.load streams from the file object instead of read()-then-loads.
    with open(path, mode='r', encoding='utf-8') as f:
        return json.load(f)
@pytest.fixture()
def client():
    # Client configured to fetch a single report at a time from the
    # "checker_program_h1b" program, starting from a fixed first-fetch time.
    from HackerOne import Client
    return Client("https://mocked_url/v1", False, False, auth=("user", "user123"), max_fetch=1,
                  first_fetch="2020-09-07T04:59:51Z",
                  program_handle=["checker_program_h1b"], severity="", state="", filters="")
def test_test_module_when_valid_response_is_returned(client, requests_mock):
    """
    Test test_module function for success cases.
    Given
    - A valid response
    When
    - The status code returned is 200
    Then
    - Ensure test module should return success
    """
    from HackerOne import test_module
    # Mock the programs endpoint that test_module probes for connectivity.
    requests_mock.get(BASE_URL + URL_SUFFIX["PROGRAMS"], status_code=200, json="{}")
    assert test_module(client) == 'ok'
def test_test_module_when_isfetch_is_true(requests_mock, client):
    """
    Test test_module function when isFetch is True.
    Given
    - A valid response
    When
    - The status code returned is 200 and is_fetch is true
    Then
    - Ensure test module should return success
    """
    from HackerOne import test_module
    # With fetching enabled, test_module also exercises the reports endpoint.
    requests_mock.get(BASE_URL + URL_SUFFIX["PROGRAMS"], json={"page_size": 1}, status_code=200)
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json={"filter[program][]": ["abc"]}, status_code=200)
    assert test_module(client) == 'ok'
def test_test_module_when_authentication_error_is_returned(requests_mock, client):
    """
    Test test_module function for failure cases.
    Given
    - an error status code
    When
    - the user can't be authenticated
    Then
    - raise DemistoException
    """
    from HackerOne import test_module
    # A 400 from the programs endpoint must surface as DemistoException.
    requests_mock.get(BASE_URL + URL_SUFFIX["PROGRAMS"], status_code=400, json={})
    with pytest.raises(DemistoException):
        test_module(client)
@pytest.mark.parametrize("status_code, error_msg, expected_error_message", input_data.exception_handler_params)
def test_exception_handler_json(status_code, error_msg, expected_error_message, client):
    """
    To test exception handler in various http error code.
    Given
    - a dictionary containing http error code.
    When
    - initializing client.
    Then
    - raise DemistoException
    """
    # Fake a JSON error response so exception_handler takes the JSON branch.
    mocked_response = mock.Mock()
    mocked_response.status_code = status_code
    mocked_response.json.return_value = error_msg
    mocked_response.headers = {'Content-Type': "application/json"}
    with pytest.raises(DemistoException) as err:
        client.exception_handler(mocked_response)
    assert str(err.value) == expected_error_message
def test_exception_handler_not_json(client):
    """
    To test exception handler in various http error code.
    Given
    - 423 error code
    When
    - initializing client.
    Then
    - raise DemistoException
    """
    status_code = 423
    # HTML body exercises the non-JSON fallback path of exception_handler.
    error_msg = "<html>\n<head><title>423 Invalid</title></head>\n<body>\n<center><h1>423 Invalid</h1></center>" \
                "\n<hr><center>nginx</center>\n</body>\n</html>"
    expected_error_message = "Unable to retrieve the data based on arguments."
    mocked_response = mock.Mock()
    mocked_response.status_code = status_code
    mocked_response.json.return_value = error_msg
    mocked_response.headers = {'Content-Type': "text/html"}
    with pytest.raises(DemistoException) as err:
        client.exception_handler(mocked_response)
    assert str(err.value) == expected_error_message
def test_hackerone_program_list_command_when_valid_response_is_returned(client, requests_mock):
    """
    Test case scenario for successful execution of hackerone-program-list command.
    Given:
    - command arguments for list program command
    When:
    - Calling `hackerone-program-list` command
    Then:
    - Returns the response data
    """
    from HackerOne import hackerone_program_list_command
    # Fixtures: raw API response, expected context, expected human-readable.
    response = util_load_json(
        os.path.join("test_data", "program/program_command_response.json"))
    requests_mock.get(BASE_URL + URL_SUFFIX["PROGRAMS"], json=response, status_code=200)
    context_output = util_load_json(
        os.path.join("test_data", "program/program_command_context.json"))
    with open(os.path.join("test_data", "program/program_command_readable_output.md"), 'r') as f:
        readable_output = f.read()
    # Execute
    command_response = hackerone_program_list_command(client, {})
    # Assert
    assert command_response.outputs_prefix == 'HackerOne.Program'
    assert command_response.outputs_key_field == "id"
    assert command_response.outputs == context_output
    assert command_response.readable_output == readable_output
def test_hackerone_program_list_command_when_empty_response_is_returned(client, requests_mock):
    """
    Test case scenario for successful execution of hackerone-program-list command with an empty response.
    Given:
    - command arguments for list program command
    When:
    - Calling `hackerone-program-list` command
    Then:
    - Returns no records for the given input arguments
    """
    from HackerOne import hackerone_program_list_command
    # Empty data set should produce the "no programs" readable message.
    expected_response = {"data": [], "links": []}
    requests_mock.get(BASE_URL + URL_SUFFIX["PROGRAMS"], status_code=200, json=expected_response)
    readable_output = "No programs were found for the given argument(s)."
    # Execute
    command_response = hackerone_program_list_command(client, {})
    # Assert
    assert command_response.readable_output == readable_output
@pytest.mark.parametrize("args, expected_error", input_data.invalid_args_for_program_list)
def test_hackerone_program_list_command_when_invalid_args_provided(client, args, expected_error):
    """
    Test case scenario when invalid arguments are provided.
    Given:
    - invalid command arguments for list program command
    When
    - Calling `hackerone-program-list`
    Then:
    - Returns the response message of invalid input arguments
    """
    from HackerOne import hackerone_program_list_command
    # Argument validation must fail before any HTTP call is made.
    with pytest.raises(ValueError) as err:
        hackerone_program_list_command(client, args)
    assert str(err.value) == expected_error
@pytest.mark.parametrize("args, expected_params", input_data.report_list_args)
def test_validate_report_list_args_when_valid_args_are_provided(args, expected_params):
    """
    Test case scenario when report list valid arguments are provided.
    Given:
    - valid command arguments for list report command
    When
    - Calling `prepare_report_list_args`
    Then:
    - Returns the expected params.
    """
    from HackerOne import prepare_report_list_args
    # Pure argument-to-params translation; no client or HTTP mocking needed.
    assert prepare_report_list_args(args) == expected_params
def test_hackerone_report_list_command_when_empty_response_is_returned(client, requests_mock):
    """
    Test case scenario for successful execution of hackerone-report-list command with an empty response.
    Given:
    - command arguments for list report command
    When:
    - Calling `hackerone-report-list` command
    Then:
    - Returns no records for the given input arguments
    """
    from HackerOne import hackerone_report_list_command
    # Empty data set should produce the "no reports" readable message.
    expected_response = {"data": [], "links": []}
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=expected_response, status_code=200)
    readable_output = "No reports were found for the given argument(s)."
    # Execute
    command_response = hackerone_report_list_command(client, {"program_handle": "abc"})
    # Assert
    assert command_response.readable_output == readable_output
def test_hackerone_report_list_command_when_valid_response_is_returned(client, requests_mock):
    """
    Test case scenario for successful execution of hackerone-report-list command.
    Given:
    - command arguments for list report command
    When:
    - Calling `hackerone-report-list` command
    Then:
    - Returns the response data
    """
    from HackerOne import hackerone_report_list_command
    # Fixtures: raw API response, expected context, expected human-readable.
    response = util_load_json(
        os.path.join("test_data", "report/report_command_response.json"))
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=response, status_code=200)
    context_output = util_load_json(
        os.path.join("test_data", "report/report_command_context.json"))
    with open(os.path.join("test_data", "report/report_command_readable_output.md"), 'r') as f:
        readable_output = f.read()
    # Execute
    command_response = hackerone_report_list_command(client, {"program_handle": "abc"})
    # Assert
    assert command_response.outputs_prefix == 'HackerOne.Report'
    assert command_response.outputs_key_field == "id"
    assert command_response.outputs == context_output
    assert command_response.readable_output == readable_output
@pytest.mark.parametrize("args, expected_error", input_data.invalid_args_for_report_list)
def test_hackerone_report_list_command_when_invalid_args_provided(client, args, expected_error):
    """
    Test case scenario when invalid arguments for report list command are provided.
    Given:
    - invalid command arguments for list report command
    When
    - Calling `hackerone-report-list`
    Then:
    - Returns the response message of invalid input arguments
    """
    from HackerOne import hackerone_report_list_command
    # Argument validation must fail before any HTTP call is made.
    with pytest.raises(ValueError) as err:
        hackerone_report_list_command(client, args)
    assert str(err.value) == expected_error
def test_fetch_incident_when_empty_result_is_returned(client, requests_mock):
    """
    test case scenario when the results are empty.
    Given:
    - Fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Returns empty response for first time
    """
    from HackerOne import fetch_incidents
    # With no new reports, last_run must be carried over unchanged.
    last_run = {'current_created_at': '2020-09-07T04:59:51', 'next_page': 2}
    expected_response = {"data": [], "links": []}
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=expected_response, status_code=200)
    fetched_incidents = fetch_incidents(client, last_run)
    expected_next_run = {'current_created_at': '2020-09-07T04:59:51', 'next_page': 2}
    assert fetched_incidents == (expected_next_run, [])
def test_fetch_incident_when_valid_result_is_returned(client, requests_mock):
    """
    test case scenario when the results are valid on fetching for the first time.
    Given:
    - Fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Ensure that the incidents returned are as expected.
    """
    from HackerOne import fetch_incidents
    # First run: empty last_run, one report fixture returned by the API.
    last_run = {}
    incident_data = util_load_json(
        os.path.join("test_data", "incident/raw_response.json"))
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=incident_data, status_code=200)
    fetched_incidents = fetch_incidents(client, last_run)
    next_run = {'next_page': 1,
                'next_created_at': '2021-08-09T13:41:38.039Z',
                'report_ids': ['1295856']}
    incidents = [
        {
            "name": incident_data.get("data")[0].get("attributes").get("title"),
            "occurred": incident_data.get("data")[0].get("attributes").get("created_at"),
            "rawJSON": json.dumps(incident_data.get("data")[0])
        }
    ]
    assert fetched_incidents == (next_run, incidents)
def test_fetch_incident_when_getting_already_fetched_report(client, requests_mock):
    """
    test case scenario when the results are valid on fetching for the first time.
    Given:
    - Fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Ensure that these reports are already fetched previously.
    """
    from HackerOne import fetch_incidents
    incident_data = util_load_json(
        os.path.join("test_data", "incident/raw_response.json"))
    # last_run already contains the report id returned by the API, so the
    # fetch must dedupe it and only advance the page counter.
    last_run = {'next_page': 1, 'next_created_at': '2021-08-09T13:41:38.039Z',
                'report_ids': ['1295856']}
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=incident_data, status_code=200)
    fetched_incidents = fetch_incidents(client, last_run)
    next_run = {'next_page': 2, 'next_created_at': '2021-08-09T13:41:38.039Z',
                'report_ids': ['1295856']}
    assert fetched_incidents == (next_run, [])
def test_fetch_incident_when_report_ids_should_be_replaced(client, requests_mock):
    """
    Test case scenario when report ids are replaced
    Given:
    - Fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Ensure that the report ids are replaced.
    """
    from HackerOne import fetch_incidents
    incident_data = util_load_json(
        os.path.join("test_data", "incident/raw_response.json"))
    # last_run holds a stale report id; the fetch should replace it with the
    # id of the newly returned report and reset pagination.
    last_run = {'next_page': 2,
                'next_created_at': '2020-09-07T04:59:51Z',
                'report_ids': ['1295852']}
    requests_mock.get(BASE_URL + URL_SUFFIX["REPORTS"], json=incident_data, status_code=200)
    fetched_incidents = fetch_incidents(client, last_run)
    next_run = {'next_page': 1,
                'next_created_at': '2021-08-09T13:41:38.039Z',
                'report_ids': ['1295856']}
    incidents = [
        {
            "name": incident_data.get("data")[0].get("attributes").get("title"),
            "occurred": incident_data.get("data")[0].get("attributes").get("created_at"),
            "rawJSON": json.dumps(incident_data.get("data")[0])
        }
    ]
    assert fetched_incidents == (next_run, incidents)
@pytest.mark.parametrize("max_fetch, first_fetch, program_handle, severity, state, filters, page, expected_params",
                         input_data.valid_params_for_fetch_incidents)
def test_fetch_incident_when_valid_params_are_provided(max_fetch, first_fetch, program_handle, severity, state, filters,
                                                       page,
                                                       expected_params):
    """
    test case scenario when valid parameters are provided for fetching the incidents.
    Given:
    - Valid fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Prepare params to fetch incidents
    """
    from HackerOne import prepare_fetch_incidents_parameters
    # Pure parameter-building helper; no client or HTTP mocking needed.
    assert prepare_fetch_incidents_parameters(max_fetch, first_fetch, program_handle, severity, state,
                                              filters, page) == expected_params
@pytest.mark.parametrize("max_fetch, program_handle,filters, expected_error_msg",
                         input_data.invalid_params_for_fetch_incidents)
def test_fetch_incident_when_invalid_params_are_provided(max_fetch, program_handle, filters,
                                                         expected_error_msg):
    """
    test case scenario when invalid parameters are provided for fetching the incidents.
    Given:
    - Invalid fetch incident parameters
    When:
    - Fetching incidents.
    Then:
    - Returns error for invalid arguments
    """
    from HackerOne import validate_fetch_incidents_parameters
    # Validation helper must reject bad parameters with a precise message.
    with pytest.raises(ValueError) as err:
        validate_fetch_incidents_parameters(max_fetch, program_handle, filters)
    assert str(err.value) == expected_error_msg
|
11506287 | import module
import config as CFG
import re
import zlib
# Raw string so backslash classes like \s and \w reach the regex engine
# verbatim instead of relying on Python leaving unknown escapes intact.
# Matches: exec [-o [filename]] "cmd1" ["cmd2" ...] (with trailing space).
REGEX = re.compile(r'^exec(\s+-o(\s+[\w.]+)?)?\s+(("[^"]+")\s+)+$')
EXEC = 'exec'
RECON = 'recon'
# Help text shared by `exec` and `recon`; log files land under CFG.ARCHIVE_DIR.
USAGE = """Execute commands on target.
usage: exec [-o [filename]] "cmd1" ["cmd2" "cmd3" ...]
\nExecute given commands and optionally log to file with optional filename.
\noptions:
-h\t\tshow help
-o filename\twrite results to file in {}/'.""".format(CFG.ARCHIVE_DIR)
@module.server_handler(EXEC)
def server_exec(server, argv):
    """Validate an `exec' command line and dispatch it to the client.

    Prints USAGE and returns on -h/--help, on a malformed command line,
    or when preprocessing fails.
    """
    # extra space is for regex (the pattern requires a trailing separator)
    # print(X) is identical to `print X` for one argument in Python 2 and
    # keeps this handler Python-3 compatible.
    if len(argv) < 2 or argv[1] in ('-h', '--help') or not REGEX.match(' '.join(argv) + ' '):
        print(USAGE)
        return
    try:
        preproc = preprocess(argv)
    except Exception:
        print(USAGE)
        return
    server.generic(*preproc)
@module.client_handler(EXEC)
def client_exec(client, inp):
    """Handle server `exec' command.

    Execute specially formatted input string and return specially formatted
    response.
    """
    # Strip the leading `exec` token; the rest is the quoted command string.
    client.s.send(execute(client, ' '.join(inp.split()[1:])))
@module.server_handler(RECON)
def server_recon(server, argv):
    """Dispatch a `recon' run, optionally logging to a file (-o [filename]).

    print(X) is identical to `print X` for one argument in Python 2 and
    keeps this handler Python-3 compatible.
    """
    if '-h' in argv or '--help' in argv:
        print(USAGE)
        return
    argc = len(argv)
    if argc == 1:
        server.generic(RECON)
    elif '-o' in argv:
        if argc == 2:
            # "recon -o": log to a default filename
            server.generic(RECON, True)
        elif argc == 3:
            # "recon -o filename"
            server.generic(RECON, True, argv[2])
        else:
            print(USAGE)
    else:
        print(USAGE)
@module.client_handler(RECON)
def client_recon(client, inp):
    """Run a fixed battery of recon commands and send the compressed output."""
    # Fall back to `ip addr` when ifconfig is absent on the target.
    ipcmd = 'ip addr' if 'no' in client.cmd_exec('which ifconfig') else 'ifconfig'
    exec_str = '"whoami" "id" "uname -a" "lsb_release -a" "{}" "w" "who -a"'.format(ipcmd)
    client.s.send(zlib.compress(execute(client, exec_str)))
def execute(client, exec_str):
    """Run each quoted command in `exec_str` on the client and collate output."""
    sections = []
    for cmd in parse_exec_cmds(exec_str):
        cmd_output = client.cmd_exec(cmd)
        sections.append('='*20 + '\n\n$ {}\n{}\n'.format(cmd, cmd_output))
    return ''.join(sections)
def preprocess(argv):
    """Parse a posh `exec' command line.

    Args:
        argv: tokenized `exec' command line,
            e.g. ['exec', '-o', 'file', '"ls"'].  Consumed -o tokens are
            removed from the list in place.

    Returns:
        Tuple (command string, write flag, output filename or None),
        suitable for expansion into self.generic() parameters.

    Raises:
        Exception: if -o is given with nothing after it.
    """
    write_file = None
    write_flag = argv[1] == '-o'
    if write_flag:
        if len(argv) == 2:
            # it was just "exec -o" -- nothing to execute
            raise Exception('exec -o requires at least one quoted command')
        if '"' not in argv[2]:
            # token after -o is a filename, not the first quoted command
            write_file = argv[2]
            del argv[2]
        del argv[1]
    return ' '.join(argv), write_flag, write_file
def parse_exec_cmds(inp):
    """Parse string provided by server `exec' command.

    Convert a space delimited string of quoted commands, for example
    ("ls -l" "cat /etc/passwd"), into a list of command strings.

    Returns:
        List of commands to execute.
    """
    cmds = []
    remaining = inp
    # The server-side regex guarantees that every '" "' separator sits
    # exactly between two quoted commands, so peeling from the left is safe.
    while remaining.count('"') > 2:
        split_at = remaining.find('" "') + 2
        cmds.append(remaining[:split_at - 1][1:-1])
        remaining = remaining[split_at:]
    cmds.append(remaining[1:-1])
    return cmds
|
11506333 | from torchero.utils.data.cross_fold_validation import (CrossFoldValidation,
train_test_split)
from torchero.utils.data.datasets import *
|
11506334 | from __future__ import absolute_import, division, print_function
from fractions import Fraction
import sys
sys.path.insert(0, '../')
from constructs import *
from expression import *
def test_affine():
    """Check isAffine() on expressions and conditions over vars/params."""
    N = Parameter(UInt, "N")
    x = Variable(UInt, "x")
    y = Variable(UInt, "y")

    # Products of variables are non-affine; sums/scalar multiples are affine.
    assert(isAffine(x + y) == True)
    assert(isAffine(3) == True)
    assert(isAffine(x*y) == False)
    assert(isAffine(-x + N + 3*y) == True)
    assert(isAffine(2*x + N/2 + 3*y) == True)

    c1 = Condition(x, '<', 2*y)
    c2 = Condition(x, '>', 2-y)
    c3 = Condition(x, '>=', x*y)
    c4 = Condition(x + 2*N, '<=', y + N)
    c5 = Condition(x*N, '!=', y)
    # A condition is affine iff both of its sides are affine.
    assert(isAffine(c1) == True)
    assert(isAffine(c2) == True)
    assert(isAffine(c3) == False)
    assert(isAffine(c4) == True)
    assert(isAffine(c5) == False)
def test_coeff():
    """Check get_affine_var_and_param_coeff() coefficient extraction."""
    N = Parameter(UInt, "N")
    x = Variable(UInt, "x")
    y = Variable(UInt, "y")

    coeff = get_affine_var_and_param_coeff(1+x)
    assert(coeff[x] == 1)
    coeff = get_affine_var_and_param_coeff(1+x +y)
    assert(coeff[x] == 1 and coeff[y] == 1)
    # Constants and non-affine expressions yield an empty coefficient map.
    coeff = get_affine_var_and_param_coeff(3)
    assert(coeff == {})
    coeff = get_affine_var_and_param_coeff(N*x + y)
    assert(coeff == {})
    coeff = get_affine_var_and_param_coeff(x*y)
    assert(coeff == {})
    # Coefficients are accumulated across distributed/negated terms.
    coeff = get_affine_var_and_param_coeff(2*(x*3+y +N +x + y -5)
                                           + 3*(-x) + 4*(-y) + N)
    assert(coeff[x] == 5 and coeff[y] == 0 and coeff[N] == 3)
|
11506346 | import copy
import datetime
import json
import logging
from functools import wraps
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import backoff
import dateutil
import dateutil.tz
import requests
from requests.structures import CaseInsensitiveDict
from annofabapi.models import Task, TaskHistory, TaskHistoryShort, TaskPhase
#########################################
# Private Method
#########################################
def _raise_for_status(response: requests.Response) -> None:
    """
    Raise ``requests.exceptions.HTTPError`` when the HTTP status code is an error.

    ``response.text`` is appended to the HTTPError message so that the cause
    of the failure is visible when the error is raised.

    Args:
        response: Response

    Raises:
        requests.exceptions.HTTPError:
    """
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        http_error_msg = f"{e.args[0]} , {response.text}"
        e.args = (http_error_msg,)
        raise e
def _log_error_response(arg_logger: logging.Logger, response: requests.Response) -> None:
    """
    Log request/response details when the HTTP status is 400 or above.

    Args:
        arg_logger: logger
        response: Response
    """
    RequestBodyHeader = Union[Dict[str, Any], CaseInsensitiveDict]

    def mask_key(d: RequestBodyHeader, key: str) -> RequestBodyHeader:
        # Replace the value of `key` with a mask so secrets never hit the log.
        if key in d:
            d[key] = "***"
        return d

    def mask_password(d: RequestBodyHeader) -> RequestBodyHeader:
        d = mask_key(d, "password")
        d = mask_key(d, "old_password")
        d = mask_key(d, "new_password")
        return d

    if 400 <= response.status_code < 600:
        headers = copy.deepcopy(response.request.headers)
        arg_logger.debug("status_code = %s, response.text = %s", response.status_code, response.text)
        arg_logger.debug("request.url = %s %s", response.request.method, response.request.url)
        # Mask the Authorization header so it is not written to the log
        mask_key(headers, "Authorization")
        arg_logger.debug("request.headers = %s", headers)
        # Mask password-related fields in the request body before logging
        if response.request.body is None or response.request.body == "":
            dict_request_body = {}
        else:
            dict_request_body = json.loads(response.request.body)
        arg_logger.debug("request.body = %s", mask_password(dict_request_body))
def _download(url: str, dest_path: str) -> requests.Response:
    """
    Save the body of an HTTP GET response to a file (i.e. download it).

    Args:
        url: URL to download from
        dest_path: path of the destination file

    Returns:
        the Response obtained from accessing the URL
    """
    response = requests.get(url)
    _raise_for_status(response)

    # Create intermediate directories as needed before writing.
    p = Path(dest_path)
    p.parent.mkdir(parents=True, exist_ok=True)

    with open(dest_path, "wb") as f:
        f.write(response.content)

    return response
#########################################
# Public Method
#########################################
def str_now() -> str:
    """
    Get the current date and time in ISO 8601 extended format.

    Returns:
        the current date and time in ISO 8601 format
    """
    return to_iso8601_extension(datetime.datetime.now())
def to_iso8601_extension(d: datetime.datetime, tz: Optional[datetime.tzinfo] = None) -> str:
    """
    Convert a datetime.datetime to an ISO 8601 extended-format string such as
    ``2019-05-08T10:00:00.000+09:00``.

    Args:
        d: datetime object
        tz: timezone object. If None, the local timezone is used.

    Returns:
        the date and time in ISO 8601 extended format
    """
    if tz is None:
        tz = dateutil.tz.tzlocal()
    d = d.astimezone(tz)
    return d.isoformat(timespec="milliseconds")
def get_task_history_index_skipped_acceptance(task_history_list: List[TaskHistory]) -> List[int]:
    """
    Return the (0-based) indexes of task histories whose acceptance phase was skipped.

    Args:
        task_history_list: list of task histories

    Returns:
        0-based indexes of histories where the acceptance phase was skipped.
        Returns an empty list when acceptance was never skipped.
    """
    index_list = []
    for index, history in enumerate(task_history_list):
        # A candidate skipped-acceptance history has no assignee, zero labor
        # time, and both start and end timestamps set.
        if not (
            TaskPhase(history["phase"]) == TaskPhase.ACCEPTANCE
            and history["account_id"] is None
            and history["accumulated_labor_time_milliseconds"] == "PT0S"
            and history["started_datetime"] is not None
            and history["ended_datetime"] is not None
        ):
            continue

        if index + 1 < len(task_history_list):
            # There is a following history
            next_history = task_history_list[index + 1]
            if TaskPhase(next_history["phase"]) in [TaskPhase.ANNOTATION, TaskPhase.INSPECTION]:
                # Submission cancelled during the annotation or inspection
                # phase (the next history belongs to an earlier phase)
                pass
            else:
                # Acceptance was skipped
                index_list.append(index)
        else:
            # No following history
            index_list.append(index)

    return index_list
def get_task_history_index_skipped_inspection(task_history_list: List[TaskHistory]) -> List[int]:
    """
    Return the (0-based) indexes of task histories whose inspection phase was skipped.

    Args:
        task_history_list: list of task histories

    Returns:
        0-based indexes of histories where the inspection phase was skipped.
        Returns an empty list when inspection was never skipped.
    """
    index_list = []
    for index, history in enumerate(task_history_list):
        # A candidate skipped-inspection history has no assignee, zero labor
        # time, and both start and end timestamps set.
        if not (
            TaskPhase(history["phase"]) == TaskPhase.INSPECTION
            and history["account_id"] is None
            and history["accumulated_labor_time_milliseconds"] == "PT0S"
            and history["started_datetime"] is not None
            and history["ended_datetime"] is not None
        ):
            continue

        if index + 1 < len(task_history_list):
            # There is a following history
            next_history = task_history_list[index + 1]
            if TaskPhase(next_history["phase"]) in [TaskPhase.ANNOTATION, TaskPhase.INSPECTION]:
                # Submission cancelled during the annotation or inspection
                # phase (the next history belongs to an earlier phase)
                pass
            else:
                # Inspection was skipped
                index_list.append(index)
        else:
            # No following history
            index_list.append(index)

    return index_list
def get_number_of_rejections(task_histories: List[TaskHistoryShort], phase: TaskPhase, phase_stage: int = 1) -> int:
    """
    Count, from the task histories, how many times the task was rejected
    (sent back) in the given phase.

    Args:
        task_histories: task histories
        phase: the phase in which the task was rejected
            (TaskPhase.INSPECTION or TaskPhase.ACCEPTANCE)
        phase_stage: the phase stage in which the task was rejected. Defaults to 1.

    Returns:
        number of rejections
    """
    if phase not in [TaskPhase.INSPECTION, TaskPhase.ACCEPTANCE]:
        # User-facing error message (Japanese) -- intentionally unchanged.
        raise ValueError("引数'phase'には、'TaskPhase.INSPECTION'か'TaskPhase.ACCEPTANCE'を指定してください。")

    rejections_by_phase = 0
    for i, history in enumerate(task_histories):
        if not (history["phase"] == phase.value and history["phase_stage"] == phase_stage and history["worked"]):
            continue
        # A worked history in the target phase followed by a return to the
        # annotation phase counts as one rejection.
        if i + 1 < len(task_histories) and task_histories[i + 1]["phase"] == TaskPhase.ANNOTATION.value:
            rejections_by_phase += 1
    return rejections_by_phase
def can_put_annotation(task: Task, my_account_id: str) -> bool:
    """
    Return whether annotations on this task can be updated via the
    `put_annotation` API without changing the task's state.

    Updating is allowed when no phase has ever had an assignee, or when the
    task is currently assigned to the calling user.

    Args:
        task: the target task
        my_account_id: account ID of the logged-in user

    Returns:
        True if `put_annotation` can be executed without changing task state.
    """
    # NOTE: assumes the logged-in user is a project owner.
    never_assigned = len(task["histories_by_phase"]) == 0
    return never_assigned or task["account_id"] == my_account_id
#########################################
# Public Method: Decorator
#########################################
def my_backoff(function):
    """
    Retry when the HTTP status code is 429 or 5XX, for at most 5 minutes.
    """

    @wraps(function)
    def wrapped(*args, **kwargs):
        def fatal_code(e):
            """
            Decide whether to give up retrying.

            Retry on 5xx status codes and on Too Many Requests (429);
            do not retry on any other 4XX.
            https://requests.kennethreitz.org/en/master/user/quickstart/#errors-and-exceptions

            Args:
                e: exception

            Returns:
                True: give up (do not retry), False: retry
            """
            if isinstance(e, requests.exceptions.HTTPError):
                if e.response is None:
                    return True
                code = e.response.status_code
                return 400 <= code < 500 and code != 429
            else:
                # Retry (e.g. connection-level errors)
                return False

        return backoff.on_exception(
            backoff.expo,
            (requests.exceptions.RequestException, ConnectionError),
            jitter=backoff.full_jitter,
            max_time=300,
            giveup=fatal_code,
        )(function)(*args, **kwargs)

    return wrapped
def ignore_http_error(status_code_list: List[int]):
    """
    Decorator that swallows HTTPErrors whose status code is in the given list
    (the wrapped function then returns None).

    Args:
        status_code_list: list of HTTP status codes to ignore
    """

    def decorator(function):
        @wraps(function)
        def wrapped(*args, **kwargs):
            annofabapi_logger_level = logging.getLogger("annofabapi").level
            backoff_logger_level = logging.getLogger("backoff").level

            try:
                # Raise log levels so no unnecessary messages are emitted
                logging.getLogger("annofabapi").setLevel(level=logging.INFO)
                logging.getLogger("backoff").setLevel(level=logging.CRITICAL)

                return function(*args, **kwargs)

            except requests.exceptions.HTTPError as e:
                if e.response.status_code in status_code_list:
                    return None
                else:
                    raise e
            finally:
                # Restore the original logger levels
                logging.getLogger("annofabapi").setLevel(level=annofabapi_logger_level)
                logging.getLogger("backoff").setLevel(level=backoff_logger_level)

        return wrapped

    return decorator
allow_404_error = ignore_http_error(status_code_list=[requests.codes.not_found])
"""
Not Found Error(404)を無視して処理するデコレータ。
リソースの存在確認などに利用する。
"""
|
11506424 | import sys
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score
import pandas as pd
from collections import namedtuple
import subprocess
import argparse
import os
import json
import shutil
import glob
from datetime import date, timedelta
from tempfile import TemporaryDirectory
#################### CMD Arguments ####################
# TF1-style flag definitions; values are read via FLAGS.<name> at runtime.
FLAGS = tf.app.flags.FLAGS
# model param
tf.app.flags.DEFINE_boolean(
    "transform", False, "whether to transform entity embeddings")
tf.app.flags.DEFINE_boolean("use_context", False,
                            "whether to transform context embeddings")
tf.app.flags.DEFINE_boolean("use_entity", True,
                            "whether to transform entity embeddings")
tf.app.flags.DEFINE_integer("max_click_history", 8,
                            "number of sampled click history for each user")
tf.app.flags.DEFINE_integer(
    "n_filters", 128, "number of filters for each size in KCNN")
tf.app.flags.DEFINE_list(
    'filter_sizes', [1, 2], 'list of filter sizes, e.g., --filter_sizes 2 3')
tf.app.flags.DEFINE_float('l2_weight', 0.001, 'weight of l2 regularization')
tf.app.flags.DEFINE_string('attention_activation', 'relu',
                           'activation method for attention module')
tf.app.flags.DEFINE_string('KGE', 'TransE',
                           'knowledge graph embedding method, please ensure that the specified input file exists')
tf.app.flags.DEFINE_integer('entity_dim', 128,
                            'dimension of entity embeddings, please ensure that the specified input file exists')
tf.app.flags.DEFINE_integer('word_dim', 300,
                            'dimension of word embeddings, please ensure that the specified input file exists')
tf.app.flags.DEFINE_integer('max_title_length', 16,
                            'maximum length of news titles, should be in accordance with the input datasets')
tf.app.flags.DEFINE_integer('attention_layer_sizes', 100,
                            'layer sizes of attention module')
tf.app.flags.DEFINE_list('layer_sizes', [100],
                         'layer size for final prediction score layer')
tf.app.flags.DEFINE_list('activation', ['sigmoid'],
                         'activation method for attention module')
# training param
tf.app.flags.DEFINE_integer("perform_shuffle", 0, "perform shuffle data")
tf.app.flags.DEFINE_integer("num_epochs", 10, "Number of epochs")
tf.app.flags.DEFINE_integer("batch_size", 128, "Number of batch size")
tf.app.flags.DEFINE_integer("log_steps", 1000, "save summary every steps")
tf.app.flags.DEFINE_float("learning_rate", 0.001, "learning rate")
tf.app.flags.DEFINE_float("embed_l1", 0.00000, "L1 regularization for embeddings")
tf.app.flags.DEFINE_float("layer_l1", 0.00000, "L1 regularization for nn layers")
tf.app.flags.DEFINE_float("embed_l2", 0.00001, "L2 regularization for embeddings")
# tf.app.flags.DEFINE_float("layer_l2", 0.00003, "L2 regularization for nn layers")
tf.app.flags.DEFINE_float("layer_l2", 0.001, "L2 regularization for nn layers")
tf.app.flags.DEFINE_float("cross_l1", 0.00000, "cross L1 regularization")
tf.app.flags.DEFINE_float("cross_l2", 0.00000, "corss L2 regularization")
tf.app.flags.DEFINE_string("loss_type", 'log_loss',
                           "loss type {square_loss, log_loss}")
tf.app.flags.DEFINE_string(
    "optimizer", 'Adam', "optimizer type {Adam, Adagrad, GD, Momentum}")
tf.app.flags.DEFINE_string("data_dir", '', "data dir")
tf.app.flags.DEFINE_string("dt_dir", '', "data dt partition")
tf.app.flags.DEFINE_string("model_dir", '', "model check point dir")
tf.app.flags.DEFINE_string("servable_model_dir", '',
                           "export servable model for TensorFlow Serving")
tf.app.flags.DEFINE_string(
    "task_type", 'train', "task type {train, infer, eval, export}")
tf.app.flags.DEFINE_boolean("clear_existing_model",
                            False, "clear existing model or not")
tf.app.flags.DEFINE_string(
    "checkpointPath", '', "checkpoint path during training ")
tf.app.flags.DEFINE_float(
    "loss_weight", '1.0', "weight for pos sample")
tf.app.flags.DEFINE_list('dropout', [0.0], 'dropout parameters of training stage')
# Container for one dataset split; fields hold aligned arrays/tensors.
Data = namedtuple('Data', ['size', 'clicked_words',
                           'clicked_entities', 'news_words', 'news_entities', 'labels'])
class DKN(object):
    """Deep Knowledge-aware Network (DKN) for news click prediction.

    Graph is assembled in three stages:
      * ``_kims_cnn``             -- KCNN encoder over word/entity embeddings
                                     of one news title;
      * ``_build_pair_attention`` -- attention of the user's click history
                                     over the candidate article;
      * ``_build_dkn``            -- DNN scoring head producing a logit.

    ``_build_train`` (called separately by ``model_fn``) attaches the loss
    and an Adam optimizer.
    """
    # def __init__(self, params, feature=None, labels=None, hparams=hparams):
    def __init__(self, params, feature=None, labels=None):
        """Build the inference graph.

        Args:
            params (dict): model hyper-parameters (see ``model_params`` in
                ``main``).
            feature (dict): batch tensors from ``input_fn`` -- already
                embedding-looked-up word/entity matrices.
            labels: click-label tensor (may be None at serving time).
        """
        # prepare train/test data
        # print(params)
        self.train_data = None
        self.test_data = None
        seed = 30
        init_value = 0.1
        self.initializer = tf.random_uniform_initializer(
            -init_value, init_value, seed=seed)
        # NOTE(review): ``n_filters_total`` is never read again -- the rest
        # of the class uses ``self.num_filters_total``, which is set inside
        # ``_kims_cnn``.  Kept as-is; candidate for removal.
        self.n_filters_total = params["n_filters"] * len(params["filter_sizes"])
        self.reg_params = [] # for computing regularization loss
        self.layer_params = []
        self.embed_params = []
        self.cross_params = []
        self._build_inputs(params, feature, labels)
        # # build raw model
        # self._build_model(params)
        # build ms implement model
        # self.hparams=hparams
        self.params=params
        self._build_ms_model(params)
        # self._build_train(params)
    def _build_inputs(self, params, feature=None, labels=None):
        """Bind the input tensors from ``feature``/``labels`` to attributes."""
        self.clicked_words = feature["click_words"]
        self.clicked_entities = feature["click_entities"]
        self.news_words = feature["news_words"]
        self.news_entities = feature["news_entities"]
        self.labels = labels
        # Debug logging of the bound input tensors.
        print("!!!!!!!!!!verify input shape")
        print("!!!!!!!!!!clicked words {}".format(self.clicked_words))
        print("!!!!!!!!!!clicked entities {}".format(self.clicked_entities))
        print("!!!!!!!!!!news words {}".format(self.news_words))
        print("!!!!!!!!!!news entities {}".format(self.news_entities))
    def _build_ms_model(self, params):
        """Build the Microsoft-recommenders-style DKN graph.

        Optionally loads context embeddings, optionally applies a dense
        "transform" layer, then delegates to ``_build_graph`` and converts
        the logit into a click probability.
        """
        with tf.name_scope('embedding'):
            if FLAGS.data_dir == '':
                # SageMaker convention: training-channel path from the env.
                raw_dir = os.environ.get('SM_CHANNEL_TRAIN')
            else:
                raw_dir = os.path.join(FLAGS.data_dir, 'train')
            # word_embs = np.load(
            #     os.path.join(raw_dir,'word_embeddings_' + str(params["word_dim"]) + '.npy'))
            # entity_embs = np.load(os.path.join(raw_dir,'entity_embeddings_' +
            #     params["KGE"] + '_' + str(params["entity_dim"]) + '.npy'))
            # # word_embs = np.load(os.path.join(raw_dir, 'word_embeddings.npy'))
            # # entity_embs = np.load(os.path.join(
            # #     raw_dir, 'entity_embeddings.npy'))
            # self.word_embeddings = tf.Variable(
            #     word_embs, trainable=False, dtype=np.float32, name='word')
            # # self.word_embeddings = word_embs
            # self.entity_embeddings = tf.Variable(
            #     entity_embs, trainable=False, dtype=np.float32, name='entity')
            # self.entity_embeddings = entity_embs
            # self.reg_params.append(self.word_embeddings)
            # self.reg_params.append(self.entity_embeddings)
            # print("run here 1!")
            # print(params["use_context"])
            if params["use_context"]:
                print("run here 2.1!")
                context_embs = np.load(os.path.join(raw_dir,'context_embeddings_' +
                    params["KGE"] + '_' + str(params["entity_dim"]) + '.npy'))
                self.context_embeddings = tf.Variable(
                    context_embs, trainable=False, dtype=np.float32, name='context')
                # self.reg_params.append(self.context_embeddings)
            # print("run here 2.2!")
            # NOTE(review): with transform=True this reads
            # ``self.entity_embeddings``, which is only created in the
            # commented-out block above -> AttributeError.  ``main``
            # currently hard-codes transform=False, so this path is dormant.
            if params["transform"]:
                # print("run here 3.1!")
                self.entity_embeddings = tf.layers.dense(
                    self.entity_embeddings, units=params["entity_dim"], activation=tf.nn.tanh, name='transformed_entity',
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(params["l2_weight"]))
                # print("run here 3.2!")
                if params["use_context"]:
                    print("run here transform context")
                    self.context_embeddings = tf.layers.dense(
                        self.context_embeddings, units=params["entity_dim"], activation=tf.nn.tanh,
                        name='transformed_context', kernel_regularizer=tf.contrib.layers.l2_regularizer(params["l2_weight"]))
        # print("build graph")
        self.logit = tf.reshape(self._build_graph(), [-1])
        # print("build output")
        self.output = tf.sigmoid(self.logit)
    def _build_graph(self):
        """Compute dropout keep-probabilities and build the DKN logit."""
        params = self.params
        # print("params {}".format(params))
        # Keep-probabilities for train vs. test; dropout does not appear to
        # be applied downstream in this file -- kept for parity with the
        # reference implementation.
        self.keep_prob_train = 1 - np.array(params["dropout"])
        self.keep_prob_test = np.ones_like(params["dropout"])
        with tf.compat.v1.variable_scope("DKN") as scope:
            logit = self._build_dkn()
        return logit
    def _build_dkn(self):
        """The main function to create DKN's logic.
        Returns:
            obj: Prediction score made by the DKN model.
        """
        params = self.params
        click_news_embed_batch, candidate_news_embed_batch = self._build_pair_attention(
            self.news_words,
            self.news_entities,
            self.clicked_words,
            self.clicked_entities,
            self.params,
        )
        # Concatenate user embedding and candidate-article embedding as the
        # DNN input.
        nn_input = tf.concat(
            [click_news_embed_batch, candidate_news_embed_batch], axis=1
        )
        dnn_channel_part = 2
        # NOTE(review): ``self.num_filters_total`` is set inside
        # ``_kims_cnn`` (invoked via ``_build_pair_attention`` above), so it
        # exists here -- but the ordering dependency is fragile.
        last_layer_size = dnn_channel_part * self.num_filters_total
        layer_idx = 0
        hidden_nn_layers = []
        hidden_nn_layers.append(nn_input)
        with tf.compat.v1.variable_scope(
            "nn_part", initializer=self.initializer
        ) as scope:
            # Stack of fully connected layers, sizes from params["layer_sizes"].
            for idx, layer_size in enumerate(params["layer_sizes"]):
                curr_w_nn_layer = tf.compat.v1.get_variable(
                    name="w_nn_layer" + str(layer_idx),
                    shape=[last_layer_size, layer_size],
                    dtype=tf.float32,
                )
                curr_b_nn_layer = tf.compat.v1.get_variable(
                    name="b_nn_layer" + str(layer_idx),
                    shape=[layer_size],
                    dtype=tf.float32,
                )
                curr_hidden_nn_layer = tf.compat.v1.nn.xw_plus_b(
                    hidden_nn_layers[layer_idx], curr_w_nn_layer, curr_b_nn_layer
                )
                # if hparams.enable_BN is True:
                #     curr_hidden_nn_layer = tf.layers.batch_normalization(
                #         curr_hidden_nn_layer,
                #         momentum=0.95,
                #         epsilon=0.0001,
                #         training=self.is_train_stage,
                #     )
                # NOTE(review): ``activation`` is looked up but never
                # applied -- the layer below is hard-wired to sigmoid.
                activation = params["activation"][idx]
                # curr_hidden_nn_layer = self._active_layer(
                #     logit=curr_hidden_nn_layer, activation=activation
                # )
                curr_hidden_nn_layer = tf.nn.sigmoid(curr_hidden_nn_layer)
                hidden_nn_layers.append(curr_hidden_nn_layer)
                layer_idx += 1
                last_layer_size = layer_size
                # self.layer_params.append(curr_w_nn_layer)
                # self.layer_params.append(curr_b_nn_layer)
            # Final linear projection to a single logit per example.
            w_nn_output = tf.compat.v1.get_variable(
                name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32
            )
            b_nn_output = tf.compat.v1.get_variable(
                name="b_nn_output", shape=[1], dtype=tf.float32
            )
            # self.layer_params.append(w_nn_output)
            # self.layer_params.append(b_nn_output)
            nn_output = tf.compat.v1.nn.xw_plus_b(
                hidden_nn_layers[-1], w_nn_output, b_nn_output
            )
            return nn_output
    def _build_pair_attention(
        self,
        candidate_word_batch,
        candidate_entity_batch,
        click_word_batch,
        click_entity_batch,
        params,
    ):
        """This function learns the candidate news article's embedding and user embedding.
        User embedding is generated from click history and also depends on the candidate news article via attention mechanism.
        Article embedding is generated via KCNN module.
        Args:
            candidate_word_batch (obj): tensor word indices for constructing news article
            candidate_entity_batch (obj): tensor entity values for constructing news article
            click_word_batch (obj): tensor word indices for constructing user clicked history
            click_entity_batch (obj): tensor entity indices for constructing user clicked history
            params (obj): global hyper-parameters
        Returns:
            click_field_embed_final_batch: user embedding
            news_field_embed_final_batch: candidate news article embedding
        """
        doc_size = params["max_title_length"]
        attention_hidden_sizes = params["attention_layer_sizes"]
        # clicked_words = tf.reshape(click_word_batch, shape=[-1, doc_size])
        # clicked_entities = tf.reshape(click_entity_batch, shape=[-1, doc_size])
        clicked_words = click_word_batch
        clicked_entities = click_entity_batch
        with tf.compat.v1.variable_scope(
            "attention_net", initializer=self.initializer
        ) as scope:
            # use kims cnn to get conv embedding; AUTO_REUSE shares the KCNN
            # weights between candidate and clicked titles.
            with tf.compat.v1.variable_scope(
                "kcnn", initializer=self.initializer, reuse=tf.compat.v1.AUTO_REUSE
            ) as cnn_scope:
                news_field_embed = self._kims_cnn(
                    candidate_word_batch, candidate_entity_batch, params
                )
                click_field_embed = self._kims_cnn(
                    clicked_words, clicked_entities, params
                )
                # Un-flatten the history back to (batch, history, features).
                click_field_embed = tf.reshape(
                    click_field_embed,
                    shape=[
                        -1,
                        params["max_click_history"],
                        params["n_filters"] * len(params["filter_sizes"]),
                    ],
                )
            avg_strategy = False
            if avg_strategy:
                # Simple mean over the click history instead of attention.
                click_field_embed_final = tf.reduce_mean(
                    click_field_embed, axis=1, keepdims=True
                )
            else:
                # Broadcast the candidate embedding across the history axis,
                # concatenate with each clicked-article embedding, and score
                # each pair with a small 2-layer attention MLP.
                news_field_embed = tf.expand_dims(news_field_embed, 1)
                news_field_embed_repeat = tf.add(
                    tf.zeros_like(click_field_embed), news_field_embed
                )
                attention_x = tf.concat(
                    axis=-1, values=[click_field_embed, news_field_embed_repeat]
                )
                attention_x = tf.reshape(
                    attention_x, shape=[-1, self.num_filters_total * 2]
                )
                attention_w = tf.compat.v1.get_variable(
                    name="attention_hidden_w",
                    shape=[self.num_filters_total * 2, attention_hidden_sizes],
                    dtype=tf.float32,
                )
                attention_b = tf.compat.v1.get_variable(
                    name="attention_hidden_b",
                    shape=[attention_hidden_sizes],
                    dtype=tf.float32,
                )
                curr_attention_layer = tf.compat.v1.nn.xw_plus_b(
                    attention_x, attention_w, attention_b
                )
                # NOTE(review): ``activation`` is looked up but not applied;
                # the hidden layer is hard-wired to relu.
                activation = params["attention_activation"]
                curr_attention_layer = tf.nn.relu(curr_attention_layer)
                attention_output_w = tf.compat.v1.get_variable(
                    name="attention_output_w",
                    shape=[attention_hidden_sizes, 1],
                    dtype=tf.float32,
                )
                attention_output_b = tf.compat.v1.get_variable(
                    name="attention_output_b", shape=[1], dtype=tf.float32
                )
                attention_weight = tf.compat.v1.nn.xw_plus_b(
                    curr_attention_layer, attention_output_w, attention_output_b
                )
                attention_weight = tf.reshape(
                    attention_weight, shape=[-1, params["max_click_history"], 1]
                )
                # Softmax over the history axis, then attention-weighted sum
                # of the clicked-article embeddings -> user embedding.
                norm_attention_weight = tf.nn.softmax(attention_weight, axis=1)
                click_field_embed_final = tf.reduce_sum(
                    tf.multiply(click_field_embed, norm_attention_weight),
                    axis=1,
                    keepdims=True,
                )
                # if attention_w not in self.layer_params:
                #     self.layer_params.append(attention_w)
                # if attention_b not in self.layer_params:
                #     self.layer_params.append(attention_b)
                # if attention_output_w not in self.layer_params:
                #     self.layer_params.append(attention_output_w)
                # if attention_output_b not in self.layer_params:
                #     self.layer_params.append(attention_output_b)
            self.news_field_embed_final_batch = tf.squeeze(news_field_embed)
            click_field_embed_final_batch = tf.squeeze(click_field_embed_final)
        return click_field_embed_final_batch, self.news_field_embed_final_batch
    def _kims_cnn(self, word, entity, params):
        """The KCNN module. KCNN is an extension of traditional CNN that incorporates symbolic knowledge from
        a knowledge graph into sentence representation learning.
        Args:
            word (obj): word indices for the sentence.
            entity (obj): entity indices for the sentence. Entities are aligned with words in the sentence.
            params (obj): global hyper-parameters.
        Returns:
            obj: Sentence representation.
        """
        # kims cnn parameter
        filter_sizes = params["filter_sizes"]
        num_filters = params["n_filters"]
        dim = params["word_dim"]
        # NOTE(review): inputs arrive pre-embedded from input_fn, so the
        # embedding lookups here are bypassed.
        # embedded_chars = tf.nn.embedding_lookup(self.word_embeddings, word)
        embedded_chars = word
        if params["use_entity"] and params["use_context"]:
            entity_embedded_chars = tf.nn.embedding_lookup(
                self.entity_embeddings, entity
            )
            context_embedded_chars = tf.nn.embedding_lookup(
                self.context_embeddings, entity
            )
            concat = tf.concat(
                [embedded_chars, entity_embedded_chars, context_embedded_chars], axis=-1
            )
            print("concat is {}".format(concat))
        elif params["use_entity"]:
            # entity_embedded_chars = tf.nn.embedding_lookup(
            #     self.entity_embeddings, entity
            # )
            entity_embedded_chars = entity
            concat = tf.concat([embedded_chars, entity_embedded_chars], axis=-1)
        else:
            concat = embedded_chars
        # Add a channel dimension for conv2d: (batch, title, dim, 1).
        concat_expanded = tf.expand_dims(concat, -1)
        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.compat.v1.variable_scope(
                "conv-maxpool-%s" % filter_size, initializer=self.initializer
            ):
                # Convolution Layer
                if params["use_entity"] and params["use_context"]:
                    filter_shape = [filter_size, dim + params["entity_dim"] * 2, 1, num_filters]
                elif params["use_entity"]:
                    filter_shape = [filter_size, dim + params["entity_dim"], 1, num_filters]
                else:
                    filter_shape = [filter_size, dim, 1, num_filters]
                W = tf.compat.v1.get_variable(
                    name="W" + "_filter_size_" + str(filter_size),
                    shape=filter_shape,
                    dtype=tf.float32,
                    initializer=tf.contrib.layers.xavier_initializer(uniform=False),
                )
                b = tf.compat.v1.get_variable(
                    name="b" + "_filter_size_" + str(filter_size),
                    shape=[num_filters],
                    dtype=tf.float32,
                )
                # if W not in self.layer_params:
                #     self.layer_params.append(W)
                # if b not in self.layer_params:
                #     self.layer_params.append(b)
                conv = tf.nn.conv2d(
                    concat_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv",
                )
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool2d(
                    h,
                    ksize=[1, params["max_title_length"] - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="pool",
                )
                pooled_outputs.append(pooled)
        # Combine all the pooled features
        # self.num_filters_total is the kims cnn output dimension
        self.num_filters_total = num_filters * len(filter_sizes)
        h_pool = tf.concat(pooled_outputs, axis=-1)
        h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
        return h_pool_flat
    def _l2_loss(self):
        """L2 regularization over embed_params and layer_params.

        NOTE(review): both lists are never populated in this file, so this
        currently evaluates to a zero tensor.
        """
        l2_loss = tf.zeros([1], dtype=tf.float32)
        # embedding_layer l2 loss
        for param in self.embed_params:
            l2_loss = tf.add(
                l2_loss, tf.multiply(self.params["embed_l2"], tf.nn.l2_loss(param))
            )
        params = self.layer_params
        for param in params:
            l2_loss = tf.add(
                l2_loss, tf.multiply(self.params["layer_l2"], tf.nn.l2_loss(param))
            )
        return l2_loss
    def _l1_loss(self):
        """L1 regularization over embed_params and layer_params.

        NOTE(review): both lists are never populated in this file, so this
        currently evaluates to a zero tensor.
        """
        l1_loss = tf.zeros([1], dtype=tf.float32)
        # embedding_layer l2 loss
        for param in self.embed_params:
            l1_loss = tf.add(
                l1_loss, tf.multiply(self.params["embed_l1"], tf.norm(param, ord=1))
            )
        params = self.layer_params
        for param in params:
            l1_loss = tf.add(
                l1_loss, tf.multiply(self.params["layer_l1"], tf.norm(param, ord=1))
            )
        return l1_loss
    def _cross_l_loss(self):
        """Construct L1-norm and L2-norm on cross network parameters for loss function.
        Returns:
            obj: Regular loss value on cross network parameters.
        """
        cross_l_loss = tf.zeros([1], dtype=tf.float32)
        for param in self.cross_params:
            cross_l_loss = tf.add(
                cross_l_loss, tf.multiply(self.params["cross_l1"], tf.norm(param, ord=1))
            )
            cross_l_loss = tf.add(
                cross_l_loss, tf.multiply(self.params["cross_l2"], tf.norm(param, ord=2))
            )
        return cross_l_loss
    def _build_train(self, params):
        """Attach loss (sigmoid cross-entropy + regularizers) and optimizer."""
        with tf.name_scope('train'):
            self.base_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.logit,
                    labels=self.labels
                )
            )
            self.l2_loss = tf.Variable(tf.constant(
                0., dtype=tf.float32), trainable=False)
            # for param in self.reg_params:
            #     self.l2_loss = tf.add(
            #         self.l2_loss, params["l2_weight"] * tf.nn.l2_loss(param))
            if params["transform"]:
                self.l2_loss = tf.add(
                    self.l2_loss, tf.compat.v1.losses.get_regularization_loss())
            # self.loss = self.base_loss + self.l2_loss
            # self.loss = self.base_loss
            # self.embed_regular_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            # NOTE(review): the param lists feeding these regularizers are
            # never populated, so regular_loss is currently zero.
            self.regular_loss = self._l2_loss() + self._l1_loss() + self._cross_l_loss()
            self.loss = tf.add(self.base_loss, self.regular_loss)
            # self.loss = tf.add(self.loss, self.embed_regular_loss)
            # NOTE(review): this optimizer is unused by model_fn, which
            # builds its own train_op.
            self.optimizer = tf.compat.v1.train.AdamOptimizer(
                FLAGS.learning_rate).minimize(self.loss)
def input_fn(filenames='', channel='training', batch_size=32, num_epochs=1, perform_shuffle=False):
    """tf.data input pipeline: parse TSV lines into embedded feature tensors.

    Expected tab-separated columns (inferred from the parsing below -- TODO
    confirm against the data-prep job):
      0: user_id, 1: news words "[...]", 2: news entities "[...]",
      3: label, 4: clicked words (history items joined by '-'),
      5: clicked entities (same layout as 4).

    Word/entity embeddings are loaded from .npy files and looked up here, so
    the model receives dense float tensors rather than id tensors.

    Args:
        filenames: path(s) passed to tf.data.TextLineDataset.
        channel: unused; kept for interface compatibility.
        batch_size: batch size (drop_remainder=True).
        num_epochs: dataset repeat count when > 1.
        perform_shuffle: whether to shuffle lines before parsing.
    Returns:
        (features dict, labels) tensors from a one-shot iterator.
    """
    # print('Parsing', filenames)
    max_title_length = FLAGS.max_title_length
    if FLAGS.data_dir == '':
        # SageMaker convention: training-channel path from the environment.
        raw_dir = os.environ.get('SM_CHANNEL_TRAIN')
    else:
        raw_dir = os.path.join(FLAGS.data_dir, 'train')
    def decode_txt(line):
        # Parse one TSV line into an index-feature dict and a float label.
        # print("test line {}".format(line))
        max_click_history = FLAGS.max_click_history
        max_title_length = FLAGS.max_title_length
        line = tf.expand_dims(line, axis=0)
        # print("test more axis line {}".format(line))
        columns = tf.string_split(line, '\t')
        # print("test collumns {}".format(columns))
        user_id = tf.strings.to_number(columns.values[0], out_type=tf.int32)
        label = tf.strings.to_number(columns.values[3], out_type=tf.float32)
        ids = []
        # click history to be added
        # Columns 1-2: strip '[' / ']' and split on ',' to recover the
        # fixed-length id list for the candidate news title.
        for i in range(1, 3):
            raw1 = tf.string_split([columns.values[i]], '[').values
            raw2 = tf.string_split(raw1, ']').values
            sparse_modify_tensor = tf.string_split(raw2, ',')
            # sparse_modify_tensor = tf.string_split([columns.values[i]], ',')
            modify_tensor = tf.reshape(
                sparse_modify_tensor.values, [max_title_length])
            # ids.append(tf.squeeze(modify_tensor))
            ids.append(modify_tensor)
            ids[i-1] = tf.strings.to_number(ids[i-1], out_type=tf.int32)
        click_ids = []
        # Columns 4-5: history items are joined by '-'; an empty history is
        # encoded as '""' and replaced by an all-zero matrix.
        for i in range(4, 6):
            # raw1 = tf.string_split([columns.values[i]], '[').values
            # raw2 = tf.string_split(raw1, ']').values
            # sparse_modify_tensor = tf.string_split(raw2, '-')
            sparse_modify_tensor = tf.string_split([columns.values[i]], '-')
            def judge(sparse):
                # True when the history column holds the empty marker '""'.
                empty = tf.constant('""')
                return tf.math.equal(sparse.values[0], empty)
            def org(max_click_history, max_title_length):
                # Zero-filled placeholder for an empty click history.
                return tf.zeros([max_click_history, max_title_length], tf.int32)
            def update(sparse, max_click_history, max_title_length):
                # Parse the '-'-joined "[...]" items, pad with all-zero rows,
                # and slice down to exactly (max_click_history, title_len).
                # NOTE(review): two_d_t / update_indices are unused, and the
                # inner loop variable i shadows the enclosing loop's i.
                two_d_t = []
                update_indices = []
                t_list = []
                for i in range(max_title_length):
                    t_list.append('0')
                base_t = tf.constant([t_list])
                raw1_t = tf.string_split(sparse.values, '[')
                raw2_t = tf.string_split(raw1_t.values, ']')
                string_t = tf.string_split(raw2_t.values, ',').values
                string_t = tf.reshape(string_t, [-1, max_title_length])
                for j in range(max_click_history):
                    string_t = tf.concat([string_t, base_t], 0)
                return tf.strings.to_number(tf.slice(string_t, [0, 0], [max_click_history, max_title_length], 'debug_slice_zay'), tf.int32)
            click_ids.append(tf.cond(judge(sparse_modify_tensor), lambda: org(max_click_history, max_title_length),
                lambda: update(sparse_modify_tensor, max_click_history, max_title_length)))
        feat = {"user_id": user_id, "news_words": ids[0], "news_entities": ids[1],
            "click_words": click_ids[0], "click_entities": click_ids[1]}
        return feat, label
    dataset = tf.data.TextLineDataset(filenames)
    # dataset = dataset.skip(1)
    if perform_shuffle:
        dataset = dataset.shuffle(buffer_size=1024*1024)
    dataset = dataset.map(
        decode_txt, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if num_epochs > 1:
        dataset = dataset.repeat(num_epochs)
    dataset = dataset.batch(
        batch_size, drop_remainder=True)  # Batch size to use
    dataset = dataset.cache()
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
    batch_features_index, batch_labels = iterator.get_next()
    # Load the pre-trained embedding tables as frozen variables.
    # NOTE(review): context_embeddings is loaded but never used in the
    # lookups below -- confirm whether context features were meant here.
    word_embs = np.load(os.path.join(raw_dir,'word_embeddings_' + str(FLAGS.word_dim) + '.npy'))
    entity_embs = np.load(os.path.join(raw_dir,'entity_embeddings_' +
        FLAGS.KGE + '_' + str(FLAGS.entity_dim) + '.npy'))
    context_embs = np.load(os.path.join(raw_dir,'context_embeddings_' +
        FLAGS.KGE + '_' + str(FLAGS.entity_dim) + '.npy'))
    word_embeddings = tf.Variable(word_embs, trainable=False, dtype=np.float32, name='word')
    entity_embeddings = tf.Variable(entity_embs, trainable=False, dtype=np.float32, name='entity')
    context_embeddings = tf.Variable(context_embs, trainable=False, dtype=np.float32, name='context')
    batch_features = {}
    # Click-history ids are flattened to (batch*history, title_len) before
    # lookup; the model reshapes them back in _build_pair_attention.
    batch_features['click_words'] = tf.nn.embedding_lookup(word_embeddings, tf.reshape(batch_features_index["click_words"],[-1,max_title_length]))
    batch_features['click_entities'] = tf.nn.embedding_lookup(entity_embeddings, tf.reshape(batch_features_index["click_entities"],[-1,max_title_length]))
    batch_features['news_words'] = tf.nn.embedding_lookup(word_embeddings, batch_features_index["news_words"])
    batch_features['news_entities'] = tf.nn.embedding_lookup(entity_embeddings, batch_features_index["news_entities"])
    return batch_features, batch_labels
def model_fn(features, labels, mode, params):
    """Build Model function f(x) for the tf.estimator.Estimator.

    Constructs the DKN inference graph and returns an EstimatorSpec for the
    requested mode (PREDICT / EVAL / TRAIN).

    Args:
        features: feature tensors produced by ``input_fn``.
        labels: click-label tensor (None at serving time).
        mode: a ``tf.estimator.ModeKeys`` value.
        params: model hyper-parameter dict (``model_params`` from ``main``).
    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    dkn_model = DKN(params, features, labels)
    pred = dkn_model.output
    predictions = {"prob": pred}
    export_outputs = {
        tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(predictions)}
    # PREDICT: no labels available, so skip loss construction entirely.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs=export_outputs)
    # ------build loss------
    dkn_model._build_train(params)
    loss = dkn_model.loss
    auc_metric = tf.compat.v1.metrics.auc(labels, pred)
    eval_metric_ops = {
        "auc": auc_metric
    }
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            eval_metric_ops=eval_metric_ops)
    # Consistency fix: use the tf.compat.v1 endpoints like the rest of the
    # file instead of the bare TF1 ``tf.train.*`` aliases, which no longer
    # exist under a TF2 runtime.
    train_op = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate).minimize(
        loss, global_step=tf.compat.v1.train.get_or_create_global_step())
    if mode == tf.estimator.ModeKeys.TRAIN:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op)
def main(_):
    """Entry point: build the Estimator and run the requested task.

    Task selection comes from FLAGS.task_type: 'train' (per-epoch eval and a
    final servable-model export), 'eval', 'infer', or 'export'.
    """
    print("check input params: ")
    print(sys.argv)
    # Default data partition: yesterday's date, formatted YYYYMMDD.
    if FLAGS.dt_dir == "":
        FLAGS.dt_dir = (date.today() + timedelta(-1)).strftime('%Y%m%d')
    print('task_type ', FLAGS.task_type)
    print('model_dir ', FLAGS.model_dir)
    print('checkpoint_dir ', FLAGS.checkpointPath)
    print('data_dir ', FLAGS.data_dir)
    print('dt_dir ', FLAGS.dt_dir)
    print('num_epochs ', FLAGS.num_epochs)
    print('batch_size ', FLAGS.batch_size)
    print('loss_type ', FLAGS.loss_type)
    print('optimizer ', FLAGS.optimizer)
    print('learning_rate ', FLAGS.learning_rate)
    print('embed_l2 ', FLAGS.embed_l2)
    print('layer_l2 ', FLAGS.layer_l2)
    print('shuffle ', FLAGS.perform_shuffle)
    print('use_context ', FLAGS.use_context)
    # check train/test path
    # SageMaker channels when no explicit data_dir is given.
    if FLAGS.data_dir == '':
        train_data_dir = os.environ.get('SM_CHANNEL_TRAIN')
        eval_data_dir = os.environ.get('SM_CHANNEL_EVAL')
    else:
        train_data_dir = os.path.join(FLAGS.data_dir, 'train')
        eval_data_dir = os.path.join(FLAGS.data_dir, 'val')
    print("train dir is {}".format(train_data_dir))
    print("eval dir is {}".format(eval_data_dir))
    # tr_files = os.path.join(train_data_dir, 'train.csv')
    tr_files = glob.glob("%s/*.csv" % train_data_dir)
    print("tr_files:", tr_files)
    # va_files = os.path.join(eval_data_dir, 'validation.csv')
    va_files = glob.glob("%s/*.csv" % eval_data_dir)
    print("va_files:", va_files)
    # NOTE(review): te_files stays None, so task_type='infer' would fail --
    # confirm whether inference input was ever wired up.
    te_files = None
    print("te_files:", te_files)
    if FLAGS.clear_existing_model:
        try:
            shutil.rmtree(FLAGS.model_dir)
        except Exception as e:
            print(e, "at clear_existing_model")
        else:
            print("existing model cleaned at %s" % FLAGS.model_dir)
    # NOTE(review): "transform" and "use_context" are hard-coded here even
    # though a use_context flag is printed above -- confirm intended.
    model_params = {
        "transform": False,
        "use_entity": FLAGS.use_entity,
        "use_context": False,
        "max_click_history": FLAGS.max_click_history,
        "n_filters": FLAGS.n_filters,
        "filter_sizes": FLAGS.filter_sizes,
        # "SEED": FLAGS.SEED,
        "KGE": FLAGS.KGE,
        "entity_dim": FLAGS.entity_dim,
        "word_dim": FLAGS.word_dim,
        "max_title_length": FLAGS.max_title_length,
        "l2_weight": FLAGS.l2_weight,
        "layer_sizes": FLAGS.layer_sizes,
        "loss_weight": FLAGS.loss_weight,
        "dropout": FLAGS.dropout,
        "activation": FLAGS.activation,
        "attention_layer_sizes": FLAGS.attention_layer_sizes,
        "attention_activation": FLAGS.attention_activation,
        "embed_l1": FLAGS.embed_l1,
        "layer_l1": FLAGS.layer_l1,
        "embed_l2": FLAGS.embed_l2,
        "layer_l2": FLAGS.layer_l2
    }
    # Grow GPU memory on demand rather than reserving it all up front.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    print("sagemaker mode building ...")
    dkn_estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.checkpointPath,
        params=model_params, config=tf.estimator.RunConfig().replace(session_config=config))
    if FLAGS.task_type == 'train':
        """
        train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_fn(tr_files, channel='training', num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size), hooks=[bcast_hook])
        eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_fn(va_files, channel='evaluation', num_epochs=1, batch_size=FLAGS.batch_size), steps=None, start_delay_secs=1000, throttle_secs=1200)
        tf.estimator.train_and_evaluate(NCF, train_spec, eval_spec)
        """
        # Manual epoch loop: one train pass, then one eval pass per epoch.
        i = 1
        for _ in range(FLAGS.num_epochs):
            train_result = dkn_estimator.train(input_fn=lambda: input_fn(
                tr_files, num_epochs=1, batch_size=FLAGS.batch_size, perform_shuffle=FLAGS.perform_shuffle))
            print("finish train, start eval")
            eval_result = dkn_estimator.evaluate(input_fn=lambda: input_fn(
                va_files, num_epochs=1, batch_size=FLAGS.batch_size))
            print("sagemaker mode epoch %d test_auc is %.4f" %
                (i, eval_result['auc']))
            i = i + 1
    elif FLAGS.task_type == 'eval':
        dkn_estimator.evaluate(input_fn=lambda: input_fn(
            va_files, num_epochs=1, batch_size=FLAGS.batch_size))
    elif FLAGS.task_type == 'infer':
        preds = dkn_estimator.predict(input_fn=lambda: input_fn(
            te_files, num_epochs=1, batch_size=FLAGS.batch_size), predict_keys="prob")
    if FLAGS.task_type == 'export' or FLAGS.task_type == 'train':
        # Serving signature takes pre-embedded float tensors, matching the
        # shapes produced by input_fn's embedding lookups.
        # NOTE(review): tf.placeholder / export_savedmodel are TF1-only
        # aliases (elsewhere this file uses tf.compat.v1) -- confirm runtime.
        feature_spec = {
            'click_words': tf.placeholder(
                dtype=tf.float32, shape=[None, model_params["max_title_length"], model_params["word_dim"]], name='click_words'),
            'click_entities': tf.placeholder(
                dtype=tf.float32, shape=[None, model_params["max_title_length"], model_params["entity_dim"]], name='click_entities'),
            'news_words': tf.placeholder(
                dtype=tf.float32, shape=[None, model_params["max_title_length"], model_params["word_dim"]], name='news_words'),
            'news_entities': tf.placeholder(
                dtype=tf.float32, shape=[None, model_params["max_title_length"], model_params["entity_dim"]], name='news_entities')
        }
        serving_input_receiver_fn_no_embed = tf.estimator.export.build_raw_serving_input_receiver_fn(
            feature_spec)
        dkn_estimator.export_savedmodel(FLAGS.servable_model_dir,
            serving_input_receiver_fn_no_embed)
        print("finish saving model!")
if __name__ == "__main__":
    # Standard TF1 entry point: set log verbosity, then tf.app.run parses
    # FLAGS and dispatches to main(argv).
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    tf.compat.v1.app.run()
11506435 | from google.protobuf.json_format import ParseDict
from ..query import QueryVisitor
from ..protobuf import query_pb2
class Exporter(QueryVisitor):
    """Serialises a query tree into a ``query_pb2.Node`` message.

    Maintains a stack of protobuf nodes; the top of the stack is the node
    currently being filled in while the visitor walks the query.
    """

    def __init__(self, node):
        self.stack = [node]

    def _current(self):
        # Protobuf node that newly visited items are appended to.
        return self.stack[-1]

    def visit_field(self, obj):
        pb_field = self._current().items.add().field
        pb_field.name = obj.name
        if obj.options is not None:
            ParseDict(obj.options, pb_field.options)

    def visit_link(self, obj):
        pb_link = self._current().items.add().link
        pb_link.name = obj.name
        if obj.options is not None:
            ParseDict(obj.options, pb_link.options)
        # Descend into the linked node with its protobuf counterpart on top
        # of the stack; always pop, even if a nested visit raises.
        self.stack.append(pb_link.node)
        try:
            self.visit(obj.node)
        finally:
            self.stack.pop()

    def visit_node(self, obj):
        for child in obj.fields:
            self.visit(child)
def export(query):
    """Return *query* serialised as a ``query_pb2.Node`` protobuf message."""
    root = query_pb2.Node()
    exporter = Exporter(root)
    exporter.visit(query)
    return root
|
11506572 | import os
import json
import random
import discord
import aiohttp
import asyncio
import zipfile
import aiofiles
import operator
import datetime
import pyttanko
import numpy as np
from PIL import Image
import scipy
from scipy import cluster
from bs4 import BeautifulSoup
import matplotlib as mpl
mpl.use('Agg') # for non gui
from matplotlib import ticker
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageFilter
from cogs.osu.osu_utils import map_utils, web_utils, utils, owoAPI
from cogs.osu.osu_utils.chunks import chunks
async def plot_profile(user, color = 'blue'):
    """Render a user's profile graphs composited onto their cover image.

    Top panel: rank history over the last 90 days (inverted y axis, since a
    lower rank number is better).  Bottom panel: replays watched and monthly
    play counts on twin y axes.  The rendered figure gets a drop shadow and
    is pasted over the user's blurred, darkened cover image.

    Args:
        user: osu! API user dict; must contain 'rank_history',
            'replays_watched_counts' and 'monthly_playcounts'.
        color: primary matplotlib color (RGB tuple or color name).
    Returns:
        (discord.File, attachment URL) pair for embedding in a message.
    """
    rank_data = user['rank_history']["data"]
    replays_watched_counts = user["replays_watched_counts"]
    monthly_playcounts = user["monthly_playcounts"]
    # Pure black lines would vanish on the darkened background; lighten them.
    if color == (0.0, 0.0, 0.0):
        color = (.8, .8, .8)
    other_color = (1, 0.647, 0)  # orange for the secondary series
    # If the two series are too close in luminance, switch the secondary to
    # pink so they stay distinguishable.
    if abs(luminance(other_color) - luminance(color)) < .1:
        other_color = (1, 0.4, 0.667)
    base = datetime.datetime.today()
    date_list = [base - datetime.timedelta(days=x) for x in range(0, 89)]
    date_list = date_list[::-1]
    fig = plt.figure(figsize=(8, 4))
    plt.rcParams['text.antialiased'] = True
    ax = fig.add_subplot(211)
    plt.style.use('ggplot')
    ax.plot(date_list, rank_data[:-1], color=color, linewidth=3.0, antialiased=True, label='Rank (90 days)')
    ax.tick_params(axis='y', colors=color, labelcolor = color)
    ax.yaxis.label.set_color(color)
    ax.grid(color='w', linestyle='-', axis='y', linewidth=1)
    ax.legend(loc='best')
    # Invert the rank axis with 15% padding on both sides.
    rank_range = max(rank_data) - min(rank_data)
    plt.ylim(max(rank_data) + int(.15*rank_range), min(rank_data) - int(.15*rank_range))
    plt.xticks([])
    ax1 = fig.add_subplot(212)
    dates = []
    watched = []
    playcounts = []
    for point in replays_watched_counts:
        dates.append(point['start_date'])
        watched.append(point['count'])
    dates_list_replay = [datetime.datetime.strptime(date, '%Y-%m-%d').date() for date in dates]
    dates = []
    for point in monthly_playcounts:
        dates.append(point['start_date'])
        playcounts.append(point['count'])
    dates_list_playcount = [datetime.datetime.strptime(date, '%Y-%m-%d').date() for date in dates]
    xlabels = [dt.strftime('%m/%y') for dt in dates_list_playcount]
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m/%y'))
    lns1 = ax1.plot(dates_list_replay, watched, '-', color=color, linewidth=3.0, label='Replays Watched')
    # Make the y-axis label, ticks and tick labels match the line color.
    ax1.tick_params('y', colors=color)
    ax2 = ax1.twinx()
    lns2 = ax2.plot(dates_list_playcount, playcounts, '-', color=other_color, linewidth=3.0, label='Play Hist.')
    ax2.tick_params('y', colors=other_color)
    ax2.tick_params('x', colors=(255, 255, 255))
    # One combined legend for both twin axes.
    lns = lns1 + lns2
    labs = [l.get_label() for l in lns]
    ax2.legend(lns, labs, loc='best')
    ax1.grid(False)
    fig.tight_layout()
    # NOTE(review): random ids in [0, 50] can collide between concurrent
    # calls; consider uuid4 if clobbered temp files become a problem.
    img_id = random.randint(0, 50)
    foreground_filepath = "cogs/osu/temp/graph_{}.png".format(img_id)
    fig.savefig(foreground_filepath, transparent=True)
    plt.close()
    # download background image, use another as default
    if 'cover' in user and 'url' in user['cover']:
        bg_url = user['cover']['url']
    else:
        bg_url = 'https://i.imgur.com/dm47q3B.jpg'
    filepath = os.path.join(
        'cogs','osu','temp','profile_bg_{}.png'.format(img_id))
    # Bug fix: download the selected bg_url. Previously this always read
    # user['cover']['url'], which raised KeyError for users without a cover
    # image, making the fallback URL dead code.
    await web_utils.download_file(bg_url, filepath)
    background = Image.open(filepath).convert('RGBA')
    foreground = Image.open(foreground_filepath).convert('RGBA')
    dropshadow = foreground.copy()
    # Drop shadow: every non-transparent plot pixel becomes black, the copy
    # is blurred, then the original plot is composited back on top.
    datas = foreground.getdata()
    new_data = list()
    for item in datas:
        if item[3] != 0:
            new_data.append((0,0,0,255))
        else:
            new_data.append(item)
    dropshadow.putdata(new_data)
    dropshadow = dropshadow.filter(ImageFilter.GaussianBlur(10))
    dropshadow = Image.alpha_composite(dropshadow, foreground)
    foreground = dropshadow
    # Scale the cover so its short side fills the 800x400 target, then
    # center-crop to exactly the target size.
    target_size = (800, 400)
    min_side = min(background.width, background.height)
    scale_factor = target_size[0]/min_side
    background = background.resize(
        (round(background.width * scale_factor),
        round(background.height * scale_factor)), Image.ANTIALIAS)
    center = (round(background.width/2), round(background.height/2))
    upper_left = (round(center[0] - target_size[0]/2),
        round(center[1] - target_size[1]/2))
    bottom_right = (round(center[0] + target_size[0]/2),
        round(center[1] + target_size[1]/2))
    background = background.crop((
        upper_left[0], upper_left[1],
        bottom_right[0], bottom_right[1]))
    # Blur and darken the cover so the graphs stay readable on top of it.
    background = background.filter(ImageFilter.GaussianBlur(10))
    background = ImageEnhance.Brightness(background).enhance(0.50)
    background.paste(foreground, (0, 0), foreground)
    background.save(filepath, transparent=True)
    discord_file = discord.File(filepath, filename="profile_{}.png".format(img_id))
    url = 'attachment://' + "profile_{}.png".format(img_id)
    return discord_file, url
async def draw_score(ctx, userinfo, userrecent, beatmap_info, gamemode,
        beatmap_image_file=None, bmp_chunks=None, api_name='Bancho'):
    """Render a 1500x500 score card for a single play and upload it to Discord.

    Composites the beatmap background, rank badge, hit-count tags, mods,
    pp values, map stats (AR/OD/CS/HP, BPM, length) and a star-rating graph
    onto one canvas, saves it to a temp PNG and sends it on the context's
    channel.

    :param ctx: discord command context (message/channel/author/guild)
    :param userinfo: dict with at least 'username'
    :param userrecent: dict describing the play (hit counts, score, mods,
        'rank', 'date', optional 'pp')
    :param beatmap_info: dict of beatmap metadata (title, version, *_mod
        stats, 'pp_mod', 'extra_info', ...)
    :param gamemode: 0=standard, 1=taiko, 2=catch, 3=mania
    :param beatmap_image_file: file/path for the full map background image
    :param bmp_chunks: pre-computed per-time difficulty chunks for the graph
    :param api_name: server/API label (currently unused in the drawing)
    """
    # NOTE(review): ids come from a 51-value range, so concurrent invocations
    # can overwrite each other's temp files -- confirm this is acceptable
    img_id = random.randint(0, 50)
    try:
        # prefix-command style context exposes ctx.message.*
        channel = ctx.message.channel
        user = ctx.message.author
        server = ctx.message.guild
    except:
        # interaction-style context exposes the attributes directly
        channel = ctx.channel
        user = ctx.author
        server = ctx.guild
    font_folder_root = os.path.join(os.path.abspath(os.getcwd()),
        'cogs/osu/resources/fonts/')
    osu_folder_root = os.path.join(os.path.abspath(os.getcwd()),
        'cogs/osu/resources/')
    # get information for display
    full_title = beatmap_info['artist'] + ' - ' + beatmap_info['title']
    version = beatmap_info['version']
    play_date = userrecent['date']
    score = "{:,}".format(int(userrecent['score']))
    acc_num = utils.calculate_acc(userrecent, gamemode)
    acc = str(round(acc_num, 2))
    # no-choke (full-combo) accuracy; computed but not currently rendered
    fc_acc_num = utils.no_choke_acc(userrecent, gamemode)
    fc_acc = str(round(fc_acc_num, 2))
    totalhits = (int(userrecent['count_50']) + int(userrecent['count_100']) +
        int(userrecent['count_300']) + int(userrecent['count_miss']))
    combo = int(userrecent['max_combo'])
    try:
        max_combo = int(beatmap_info['max_combo'])
    except:
        # some modes/maps report no max combo
        max_combo = None
    stars_float = float(beatmap_info['stars_mod'])
    # one decimal for 10*+ star maps so the number fits the layout
    if stars_float > 10:
        map_stars = str(round(stars_float, 1))
    else:
        map_stars = str(round(stars_float, 2))
    map_creator = beatmap_info['creator']
    mods = utils.fix_mod_list(utils.num_to_mod(userrecent['enabled_mods']))
    # recompute length/bpm under rate-changing mods (DT/NC x1.5, HT x2/3)
    m1, s1, bpm_mod = utils.calc_time(beatmap_info['total_length'], beatmap_info['bpm'], 1)
    if 'DT' in mods or 'NC' in mods:
        m1, s1, bpm_mod = utils.calc_time(beatmap_info['total_length'], beatmap_info['bpm'], 1.5)
    elif 'HT' in mods:
        m1, s1, bpm_mod = utils.calc_time(beatmap_info['total_length'], beatmap_info['bpm'], (2/3))
    map_time = '{}:{}'.format(m1, str(s1).zfill(2))
    bpm = '{}'.format(round(bpm_mod))
    ar = str(round(beatmap_info['ar_mod'], 1))
    od = str(round(beatmap_info['od_mod'], 1))
    cs = str(round(beatmap_info['cs_mod'], 1))
    hp = str(round(beatmap_info['hp_mod'], 1))
    # try_num = int(userrecent['attempt'])
    rank = str(userrecent['rank']).upper()
    data = userrecent['date']
    # prefer the server-reported pp; fall back to the locally computed value
    if 'pp' in userrecent and userrecent['pp'] is not None and \
        int(userrecent['pp']) != 0:
        performance = round(userrecent['pp'])
    else:
        performance = round(beatmap_info['extra_info']['play_pp'])
    performance_max = round(beatmap_info['pp_mod'][2]) # for 100%
    # per-gamemode ordering of hit-count keys; None = slot unused in layout
    if gamemode == 0:
        score_hits = ['count_300', 'count_geki', 'count_100', 'count_katu', 'count_50', 'count_miss']
    elif gamemode == 1:
        score_hits = ['count_300', 'count_geki', 'count_100', 'count_katu', 'count_miss']
    elif gamemode == 2:
        score_hits = ['count_300', 'count_miss', 'count_100', None, 'count_50', None]
        # score_hits = ['count_300', 'count_geki', 'count_100', 'count_miss']
    elif gamemode == 3:
        score_hits = ['count_300', 'count_geki', 'count_100', 'count_katu', 'count_50', 'count_miss']
    num_score = []
    for hit_type in score_hits:
        if not hit_type:
            num_score.append(None)
        else:
            num_score.append(userrecent[hit_type])
    score_hits = num_score
    # print('SCORE HITS', score_hits)
    diff_name = _determine_emote_name(beatmap_info)
    username = userinfo['username']
    # draw image
    filename = 'cogs/osu/temp/score_{}.png'.format(img_id)
    # set canvas
    width = 1500
    height = 500
    width_center = width/2
    height_center = height/2
    default_color = (45, 45, 45, 230)
    canvas = Image.new('RGBA', (width, height), default_color)
    # get background image
    # background_filepath = 'test_images/background_' + str(bg_num) + '.jpg'
    # background_image = Image.open(background_filepath).convert('RGBA')
    background_image = Image.open(beatmap_image_file).convert('RGBA')
    # await get_full_map_image(beatmap_info['beatmapset_id'])
    # scale so the image covers the canvas, then center-crop horizontally
    resize_ratio = max(width/background_image.width,
        height/background_image.height)
    background_image = background_image.resize(
        (round(resize_ratio*background_image.width),
        round(resize_ratio*background_image.height)))
    left_bound = round(background_image.width - width)/2
    right_bound = background_image.width - left_bound
    background_image = background_image.crop(box=(left_bound,0,right_bound,height))
    background_image = background_image.resize((width, height), Image.ANTIALIAS)
    background_image = background_image.filter(ImageFilter.GaussianBlur(10))
    canvas.paste(background_image)
    # get rank image
    rank_left = 865
    rank_top = 120
    rank_width = 250
    rank_height = 250
    rank_filepath = os.path.join(osu_folder_root, f'ranks/{rank}.png')
    rank_image = Image.open(rank_filepath).convert('RGBA')
    resize_ratio = min(rank_width/rank_image.width,
        rank_height/rank_image.height)
    rank_image = rank_image.resize((round(resize_ratio*rank_image.width),
        round(resize_ratio*rank_image.height)), Image.ANTIALIAS)
    rank_canvas = Image.new('RGBA', (width, height))
    rank_canvas.paste(rank_image, (rank_left, rank_top))
    # generate graph
    # NOTE(review): color/percentage/graph below appear unused -- the real
    # graph is built later by get_draw_score_graph_image; confirm dead code
    color = (0, 200, 0, 255)
    percentage = 75
    graph = Image.new('RGBA', (240, 75))
    # set drawing canvas
    process = Image.new('RGBA', (width, height), default_color)
    draw = ImageDraw.Draw(process)
    text_canvas = Image.new('RGBA', (width, height))
    ## draw boxes
    # sidebar dims
    sidebar_width = 25
    vert_padding = 18
    horiz_padding = 15
    box_color = (40, 40, 40, 230)
    # title box
    main_left = sidebar_width + 1
    main_right = 1145
    title_box_top = vert_padding
    title_box_left = main_left
    title_box_bottom = 120
    title_box_right = main_right - horiz_padding
    draw.rectangle([(title_box_left,title_box_top),
        (title_box_right, title_box_bottom)], fill=box_color)
    # info box
    info_box_top = title_box_bottom + vert_padding - 3
    info_box_bottom = height - vert_padding
    info_box_left = main_left
    info_box_right = 830
    draw.rectangle([(info_box_left, info_box_top),
        (info_box_right, info_box_bottom)], fill=box_color)
    # pp box
    pp_box_top = 370
    pp_box_left = info_box_right + horiz_padding
    pp_box_bottom = height - vert_padding
    pp_box_right = main_right - horiz_padding
    # draw.rectangle([(pp_box_left, pp_box_top),
    # (pp_box_right, pp_box_bottom)], fill=box_color)
    # map box
    map_box_top = 0
    map_box_left = main_right
    map_box_bottom = height
    map_box_right = width
    draw.rectangle([(map_box_left, map_box_top),
        (map_box_right, map_box_bottom)], fill=box_color)
    ## write lables
    label_left = 40
    text_padding = label_left - sidebar_width
    label_mid_horiz = 390
    label_right = 620
    label_top = 150
    label_mid_vert = 260
    label_bottom = 370
    label_color = (200, 200, 200, 200)
    label_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 18)
    draw.text((label_left, label_top), 'SCORE', font=label_font, fill=label_color)
    draw.text((label_mid_horiz, label_top), 'ACCURACY', font=label_font, fill=label_color)
    draw.text((label_right, label_top), 'MODS', font=label_font, fill=label_color)
    draw.text((label_left, label_mid_vert), 'COMBO', font=label_font, fill=label_color)
    try_label_offset = 200
    # draw.text((label_left+try_label_offset, label_mid_vert), 'TRY',
    # font=label_font, fill=label_color)
    draw.text((label_left, label_bottom), 'GRAPH', font=label_font, fill=label_color)
    draw.text((pp_box_left+text_padding, label_bottom + 10), 'PERFORMANCE',
        font=label_font, fill=label_color)
    map_label_top = 215
    map_label_left = map_box_left + text_padding
    small_label_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 16)
    small_label_left = map_label_left
    small_label_right = map_box_left + round((width - map_box_left)/2)
    small_label_top = 315
    small_label_bottom = label_bottom + 10
    draw.text((small_label_left, map_label_top), 'DIFFICULTY',
        font=label_font, fill=label_color)
    # draw.text((small_label_right, map_label_top), 'BPM',
    # font=label_font, fill=label_color)
    draw.text((small_label_left, small_label_top), 'AR',
        font=small_label_font, fill=label_color)
    draw.text((small_label_right, small_label_top), 'OD',
        font=small_label_font, fill=label_color)
    draw.text((small_label_left, small_label_bottom), 'HP',
        font=small_label_font, fill=label_color)
    draw.text((small_label_right, small_label_bottom), 'CS',
        font=small_label_font, fill=label_color)
    # get 300, 100, 50, x tag
    tag_canvas = Image.new('RGBA', (width, height))
    # image names parallel the score_hits ordering chosen above per gamemode
    if gamemode == 0:
        score_images = ['score_300', 'score_g', 'score_100', 'score_k_g', 'score_50', 'score_x']
    elif gamemode == 1:
        score_images = ['score_300', 'score_g', 'score_100', 'score_k_g', 'score_x']
    elif gamemode == 2:
        score_images = ['score_ctb_fruit', 'score_x', 'score_ctb_big', None, 'score_ctb_small', None]
    elif gamemode == 3:
        score_images = ['score_300r', 'score_300', 'score_200', 'score_100', 'score_50', 'score_x']
    tag_width = 80
    tag_height = 80
    tag_left = label_mid_horiz - 5
    tag_right = label_right - 15
    tag_top = label_mid_vert - text_padding # - 5
    tag_bottom = 370
    tag_mid = round((tag_top + tag_bottom)/2)
    # lay tags out on a 2-column x 3-row grid: even index = left column,
    # i//2 selects the row
    for i, file in enumerate(score_images):
        if not file:
            continue
        if i % 2 == 0: # first column
            h_coord = tag_left
        else:
            h_coord = tag_right - 5
        if i/2 < 1:
            v_coord = tag_top
        elif i/2 < 2:
            if gamemode == 2:
                v_coord = tag_mid + 5
            else:
                v_coord = tag_mid
        else:
            v_coord = tag_bottom
        tag_filename = os.path.join(osu_folder_root, 'hits/' + file + '.png')
        tag_image = Image.open(tag_filename).convert('RGBA')
        resize_ratio = min(tag_width/tag_image.width,
            tag_height/tag_image.height)
        tag_image = tag_image.resize((round(resize_ratio*tag_image.width),
            round(resize_ratio*tag_image.height)), Image.ANTIALIAS)
        temp_canvas_w_tag_image = Image.new("RGBA", tag_canvas.size)
        temp_canvas_w_tag_image.paste(tag_image, (h_coord, v_coord))
        # tag_canvas.paste(tag_image, (h_coord, v_coord)) # good
        tag_canvas = Image.alpha_composite(tag_canvas, temp_canvas_w_tag_image)
    # get diff image
    diff_left = main_left + text_padding - 1
    diff_top = 75
    diff_dim = 40
    letter_modes = ['s', 't', 'c', 'm']
    diff_filepath = os.path.join(osu_folder_root, 'mode_symbols/' + diff_name + '-'+ letter_modes[gamemode] + '.png')
    diff_image = Image.open(diff_filepath).convert('RGBA')
    diff_image = diff_image.resize((diff_dim, diff_dim), Image.ANTIALIAS)
    diff_canvas = Image.new('RGBA', (width, height))
    diff_canvas.paste(diff_image, (diff_left, diff_top))
    # paste thumbnail image
    max_size = [325, 183]
    thumbnail_left = map_label_left
    thumbnail_top = title_box_top
    thumbnail_width = width - text_padding - thumbnail_left
    # get thumbnail/necessary for colors
    thumbnail_image = Image.open(beatmap_image_file).convert('RGBA') # await get_full_map_image(beatmap_info['beatmapset_id'])
    resize_ratio = thumbnail_width/thumbnail_image.width
    thumbnail_image = thumbnail_image.resize(
        (round(resize_ratio*thumbnail_image.width),
        round(resize_ratio*thumbnail_image.height)), Image.ANTIALIAS)
    # blurred stretched copy behind an aspect-preserving sharp copy
    thumbnail_image_2 = thumbnail_image.copy()
    thumbnail_image = thumbnail_image.resize(max_size)
    thumbnail_image = thumbnail_image.filter(ImageFilter.GaussianBlur(5))
    thumbnail_image_2.thumbnail(max_size, Image.ANTIALIAS)
    thumbnail_left_2 = thumbnail_left + round((max_size[0] - thumbnail_image_2.width)/2)
    thumbnail_canvas = Image.new('RGBA', (width, height))
    thumbnail_canvas.paste(thumbnail_image, (thumbnail_left, thumbnail_top))
    thumbnail_canvas.paste(thumbnail_image_2, (thumbnail_left_2, thumbnail_top))
    # colors
    color_scheme = await auto_color(thumbnail_image)
    # draw sidebar
    sidebar_color = color_scheme[4] # 5 colors in total
    draw.rectangle([(0,0),(sidebar_width, height)], fill=sidebar_color)
    ## write actual text
    # title
    # shrink the font and truncate overly long titles so they fit the box
    if len(full_title) >= 58:
        title_main_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 40)
        title_text_top = title_box_top + 5
        full_title = full_title[0:55] + '...'
    else:
        title_main_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 45)
        title_text_top = title_box_top
    title_text_left = main_left + text_padding
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (title_text_left, title_text_top), full_title, title_main_font)
    # difficulty title
    diff_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 40)
    diff_text_left = diff_left + diff_dim + 5
    diff_text_top = diff_top - 5
    # "<version> played by <username>", each segment offset by the width of
    # the previous one
    text_canvas, version_text_size = draw_text_w_shadow(text_canvas,
        (diff_text_left, diff_text_top), version, diff_font,
        font_color = color_scheme[1])
    text_canvas, played_text_size = draw_text_w_shadow(text_canvas,
        (diff_text_left + version_text_size[0], diff_text_top), ' played by ', diff_font,
        font_color = (100, 100, 100, 200), shadow_color = (100, 100, 100, 50))
    text_canvas, version_text_size = draw_text_w_shadow(text_canvas,
        (diff_text_left + version_text_size[0] + played_text_size[0], diff_text_top), username, diff_font,
        font_color = color_scheme[1])
    # put on profile picture
    # NOTE(review): profile-picture rendering is stubbed out (pfp_image = 0,
    # paste commented) -- confirm intentionally disabled
    pfp_canvas = Image.new('RGBA', (width, height))
    pfp_dim = 20
    pfp_left = 0
    pfp_top = 0
    # get pfp
    pfp_image = 0
    # pfp_canvas.paste(pfp_image, (pfp_left, pfp_top))
    text_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 60)
    # score text
    text_horiz_shift = -3
    score_text_left = label_left + text_horiz_shift
    score_text_top = label_top + 23
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (score_text_left, score_text_top), score, text_font)
    # accuracy text
    acc_text_left = label_mid_horiz + text_horiz_shift
    acc_text_top = score_text_top
    text_canvas, acc_size = draw_text_w_shadow(text_canvas,
        (acc_text_left, acc_text_top), acc, text_font)
    small_acc_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 30)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (acc_text_left + acc_size[0] + 3, acc_text_top + 27), '%', small_acc_font)
    # combo
    combo_text_left = main_left + text_padding
    combo_text_top = label_mid_vert + 25
    text_canvas, combo_text_size = draw_text_w_shadow(text_canvas,
        (combo_text_left, combo_text_top), combo, text_font)
    # put in mods
    if len(mods) > 0:
        all_mod_canvas = Image.new('RGBA', (width, height))
        mod_size = 75 # pixels
        mods_left = label_right - 8
        mods_top = label_top + 23
        mods_right = mods_left + mod_size * (len(mods) + 2)
        # spacing divisor tweak so icons overlap more as the mod count grows
        if len(mods) < 3:
            add_comp = 3
        elif len(mods) == 3:
            add_comp = 3
        else:
            add_comp = 2
        mod_shift = round((mods_right - mods_left)/(len(mods)+add_comp)) # pixels
        for i, mod in enumerate(mods):
            mod_canvas = Image.new('RGBA', (width, height))
            current_shift = i * mod_shift
            mod_filename = os.path.join(osu_folder_root, 'mods/mods_' + mod + '.png')
            mod_image = Image.open(mod_filename).convert('RGBA')
            mod_image = mod_image.resize((mod_size, mod_size), Image.ANTIALIAS)
            mod_canvas.paste(mod_image, (mods_left + current_shift, mods_top))
            all_mod_canvas = Image.alpha_composite(all_mod_canvas, mod_canvas)
    else:
        text_canvas, _ = draw_text_w_shadow(text_canvas,
            (label_right, score_text_top), '-', text_font)
    # hits text
    hits_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 50)
    # same 2x3 grid walk as the tag images above, offset to sit beside them
    for i, file in enumerate(score_images):
        if not file:
            continue
        if i % 2 == 0: # first column
            h_coord = tag_left + tag_width + 10
        else:
            h_coord = tag_right + tag_width
        if i/2 < 1:
            v_coord = tag_top + 13
        elif i/2 < 2:
            v_coord = tag_mid + 14
        else:
            v_coord = tag_bottom + 12
        text_canvas, _ = draw_text_w_shadow(text_canvas,
            (h_coord, v_coord), score_hits[i], hits_font)
    # pp
    pp_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 70)
    pp_text_left = pp_box_left + text_padding
    pp_text_top = label_bottom + 30
    text_canvas, pp_text_size = draw_text_w_shadow(text_canvas,
        (pp_text_left, pp_text_top), performance, pp_font,
        font_color = (255,105,180,255))
    # map infor text
    map_info_vert_offset = -10
    map_info_horiz_offset = 30
    # print(os.path.join(font_folder_root, 'Asimov.ttf'))
    large_map_info_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 55)
    unicode_font = ImageFont.truetype(os.path.join(font_folder_root, 'unicode.ttf'), 50)
    text_canvas, stars_size = draw_text_w_shadow(text_canvas,
        (map_label_left, map_label_top + map_info_vert_offset + 30), map_stars,
        large_map_info_font, font_color = color_scheme[1])
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (map_label_left + stars_size[0], map_label_top + map_info_vert_offset + 38), '★',
        unicode_font, font_color = color_scheme[1])
    text_canvas, bpm_size = draw_text_w_shadow(text_canvas,
        (small_label_right, map_label_top + map_info_vert_offset + 30), bpm,
        large_map_info_font)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_right + bpm_size[0], map_label_top + map_info_vert_offset + 54), ' BPM',
        small_acc_font)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_left + map_info_horiz_offset,
        small_label_top + map_info_vert_offset), ar,
        large_map_info_font)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_right + map_info_horiz_offset,
        small_label_top + map_info_vert_offset), od,
        large_map_info_font)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_left + map_info_horiz_offset,
        small_label_bottom + map_info_vert_offset), hp,
        large_map_info_font)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_right + map_info_horiz_offset,
        small_label_bottom + map_info_vert_offset), cs,
        large_map_info_font)
    ## write small text
    small_padding = 2
    small_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 30)
    # max combo
    max_combo_text_left = combo_text_left + combo_text_size[0] + small_padding
    max_combo_text_top = combo_text_top + 26
    if max_combo:
        text_canvas, _ = draw_text_w_shadow(text_canvas,
            (max_combo_text_left, max_combo_text_top), '/'+str(max_combo), small_font)
    # max pp possible
    max_pp_text_left = pp_text_left + pp_text_size[0] + small_padding
    max_pp_text_top = pp_text_top + 36
    max_pp_text = ''
    # only standard mode shows the "/<SS pp>" suffix
    if gamemode == 0:
        max_pp_text = '/'+str(performance_max)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (max_pp_text_left, max_pp_text_top), max_pp_text+' PP', small_font)
    # write map time
    time_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 20)
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (320, 445), map_time, time_font, shadow_color=color_scheme[1])
    # write play time + server
    play_time_vert_shift = 74
    play_time_font = ImageFont.truetype(os.path.join(font_folder_root, 'Asimov.ttf'), 20)
    text_canvas, play_time_text_size = draw_text_w_shadow(text_canvas,
        (label_mid_horiz, label_bottom + play_time_vert_shift), '@ ', time_font,
        font_color = (100, 100, 100, 200), shadow_color = (100, 100, 100, 50))
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (label_mid_horiz + play_time_text_size[0], label_bottom + play_time_vert_shift),
        play_date + ' UTC', time_font,
        font_color = color_scheme[1])
    """
    time_text_shift = 100
    server_horizontal_shift = label_mid_horiz + play_time_text_size[0] + time_text_shift
    text_canvas, play_time_text_size = draw_text_w_shadow(text_canvas,
    (label_mid_horiz + play_time_text_size[0] + time_text_shift,
    label_bottom + play_time_vert_shift), 'on ', time_font,
    font_color = (100, 100, 100, 200), shadow_color = (100, 100, 100, 50))
    text_canvas, _ = draw_text_w_shadow(text_canvas,
    (label_mid_horiz + play_time_text_size[0], label_bottom + play_time_vert_shift),
    play_date + ' UTC', time_font,
    font_color = color_scheme[1])"""
    # write mapper name
    mapper_name_vert_shift = 65
    text_canvas, by_text_size = draw_text_w_shadow(text_canvas,
        (small_label_left, small_label_bottom + mapper_name_vert_shift), 'By ', time_font,
        font_color = (100, 100, 100, 200), shadow_color = (100, 100, 100, 50))
    text_canvas, _ = draw_text_w_shadow(text_canvas,
        (small_label_left + by_text_size[0], small_label_bottom + mapper_name_vert_shift),
        map_creator, time_font,
        font_color = color_scheme[1])
    # get player graph
    graph_left = label_left - 13
    graph_top = 390
    graph_image = await get_draw_score_graph_image(bmp_chunks,
        beatmap_info, userrecent['enabled_mods'], color=color_scheme[1])
    graph_canvas = Image.new('RGBA', (width, height))
    graph_canvas.paste(graph_image, (graph_left, graph_top))
    # paste
    # layer order matters: boxes first, text always on top
    canvas = Image.alpha_composite(canvas, process)
    canvas = Image.alpha_composite(canvas, rank_canvas)
    canvas = Image.alpha_composite(canvas, thumbnail_canvas)
    canvas = Image.alpha_composite(canvas, tag_canvas)
    canvas = Image.alpha_composite(canvas, diff_canvas)
    canvas = Image.alpha_composite(canvas, graph_canvas)
    if len(mods) > 0:
        canvas = Image.alpha_composite(canvas, all_mod_canvas)
    canvas = Image.alpha_composite(canvas, text_canvas)
    canvas.save(filename,'PNG', quality=100)
    file = discord.File(filename)
    await ctx.send(file=file)
def draw_text_w_shadow(image, position, text, font,
        font_color=(255,255,255,255), radius=5, shadow_color=(0,0,0,255)):
    """Draw *text* with a blurred drop shadow composited over *image*.

    A transparent layer gets the text in ``shadow_color``, is Gaussian-blurred
    by ``radius``, then receives the crisp text in ``font_color`` on top.

    :return: (composited image, (text_width, text_height))
    """
    label = str(text)
    shadow_layer = Image.new('RGBA', (image.width, image.height))
    pen = ImageDraw.Draw(shadow_layer)
    # pass 1: shadow-colored text, softened into a halo
    pen.text((position[0], position[1]), label, font=font, fill=shadow_color)
    shadow_layer = shadow_layer.filter(ImageFilter.GaussianBlur(radius=radius))
    # pass 2: sharp foreground text over its own blurred shadow
    pen = ImageDraw.Draw(shadow_layer)
    pen.text((position[0], position[1]), label, font=font, fill=font_color)
    size = pen.textsize(label, font=font)
    return Image.alpha_composite(image, shadow_layer), size
# uses k-means algorithm to find color from bg, rank is abundance of color, descending
async def auto_color(im):
    """Extract a 5-color scheme from an image via k-means clustering.

    The image is shrunk to 10x10, its pixels clustered, clusters ranked by
    abundance, then the picked colors are sorted brightest-first by luminance.
    Any failure falls back to a fixed default palette.

    :param im: PIL image to sample
    :return: list of 5 (r, g, b[, a]) tuples, brightest first
    """
    default_colors = [
        (100, 100, 100),
        (255, 102, 170),
        (255, 165, 0),
        (100, 100, 100),
        (255, 102, 170)
    ]
    try:
        # 100 pixels is plenty for a dominant-color estimate
        # Image.LANCZOS: ANTIALIAS was an alias for LANCZOS and was removed
        # in Pillow 10, which made this always fall through to the defaults
        im = im.resize((10, 10), Image.LANCZOS)
        clusters = 5
        ranks = range(clusters)
        ar = np.asarray(im)
        shape = ar.shape
        # flatten to (pixels, channels); np.prod replaces scipy.product,
        # which was removed from SciPy
        ar = ar.reshape(np.prod(shape[:2]), shape[2])
        codes, dist = cluster.vq.kmeans(ar.astype(float), clusters)
        vecs, dist = cluster.vq.vq(ar, codes)  # assign codes
        # np.histogram replaces the removed scipy.histogram
        counts, bins = np.histogram(vecs, len(codes))  # count occurrences
        # sort cluster indices by occurrence count, most frequent first
        freq_index = []
        index = 0
        for count in counts:
            freq_index.append((index, count))
            index += 1
        sorted_list = sorted(freq_index, key=operator.itemgetter(1), reverse=True)
        colors = []
        luminances = []
        for rank in ranks:
            # clamp with len(codes) - 1: kmeans may return fewer than
            # `clusters` centroids, and min(rank, len(codes)) could index
            # one past the end
            color_index = min(rank, len(codes) - 1)
            peak = codes[sorted_list[color_index][0]]  # gets the original index
            peak = peak.astype(int)
            colors.append(tuple(peak))
            luminances.append(luminance(tuple(peak)))
        # sort by luminance, highest luminance first
        colors = [x for _, x in sorted(zip(luminances, colors), reverse=True)]
        return colors  # returns array
    except Exception:
        # best-effort: any failure (bad image, SciPy issues) uses the defaults
        return default_colors
def luminance(color):
    """Return the Rec. 709 relative luminance of an (r, g, b[, a]) color."""
    red, green, blue = color[0], color[1], color[2]
    # standard greyscale weights: green dominates perceived brightness
    return float((0.2126 * red) + (0.7152 * green) + (0.0722 * blue))
def hex_to_rgb(self, hex):
    """Convert a hex color string like '#rrggbb' to an (r, g, b) tuple.

    ``self`` is unused but kept for compatibility with existing callers.

    :param hex: hex color, with or without a leading '#'
    :return: tuple of ints, one per channel
    """
    hex = hex.lstrip('#')
    hlen = len(hex)
    # hlen // 3: plain '/' yields a float in Python 3 and raised TypeError
    # when used as a slice bound / range step
    span = hlen // 3
    return tuple(int(hex[i:i + span], 16) for i in range(0, hlen, span))
def _determine_emote_name(beatmap):
diff = float(beatmap["difficulty_rating"])
if diff <= 1.99:
name = "easy"
elif 1.99 < diff <= 2.69:
name = "normal"
elif 2.69 < diff <= 3.99:
name = "hard"
elif 3.99 < diff <= 5.29:
name = "insane"
elif 5.29 < diff <= 6.49:
name = "expert"
else:
name = "expertplus"
return name
async def get_draw_score_graph_image(bmp_chunks, beatmap_info, mods,
        color=None):
    """Plot the map's star rating over time as a small transparent PNG.

    If the play did not complete the map, the completed portion is filled at
    full opacity and the remainder at 20% opacity.

    :param bmp_chunks: iterable of dicts with 'time' and 'stars' keys
    :param beatmap_info: beatmap dict; reads extra_info.map_completion (%)
    :param mods: mods bitmask (currently unused here -- chunks are precomputed)
    :param color: (r, g, b[, a]) 0-255 base color for the plot
    :return: PIL Image of the rendered graph
    """
    star_list, speed_list, aim_list, time_list = [], [], [], []
    # results = chunks(file_path, mods=int(mods))
    results = bmp_chunks
    for chunk in results:
        time_list.append(chunk['time'])
        star_list.append(chunk['stars'])
    # figsize is in inches; combined with dpi=1000 at savefig this yields a
    # small raster that is pasted 1:1 onto the score card
    fig = plt.figure(figsize=(.350, .080), dpi=100, frameon=False)
    plt.rcParams['text.antialiased'] = True
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    plt.style.use('ggplot')
    # print('DRAW GRAPH COMPLETION', completion)
    if 'extra_info' in beatmap_info and \
        'map_completion' in beatmap_info['extra_info'] and \
        beatmap_info['extra_info']['map_completion']:
        # print('GRAPH MAP COMPLETION', beatmap_info['extra_info']['map_completion'])
        p_comp = beatmap_info['extra_info']['map_completion']/100
        # matplotlib wants 0-1 RGBA; alpha .2 for the unplayed portion
        color_incomp = [color[0]/255, color[1]/255, color[2]/255, .2]
        color_comp = [color[0]/255, color[1]/255, color[2]/255, 1]
        ax.plot(time_list, star_list,
            color=color_incomp, linewidth=.1, antialiased=True)
        ax.fill_between(time_list, 0, star_list,
            facecolor=color_incomp)
        max_fill_idx = round(len(time_list)*p_comp)
        ax.fill_between(time_list[0:max_fill_idx], 0, star_list[0:max_fill_idx],
            facecolor=color_comp)
    else:
        color = [color[0]/255, color[1]/255, color[2]/255, 1]
        ax.plot(time_list, star_list, color=color, linewidth=.1, antialiased=True)
        ax.fill_between(time_list, 0, star_list, facecolor=color)
    # fig.gca().xaxis.set_major_formatter(ticker.FuncFormatter(plot_time_format))
    # fig.gca().xaxis.grid(True)
    # fig.gca().yaxis.grid(False)
    # plt.ylabel('Stars')
    fig.tight_layout()
    ax.xaxis.label.set_color(color)
    ax.set_yticks([])
    ax.set_yticklabels([])
    # ax.get_yaxis().set_visible(False)
    # ax.yaxis.label.set_color(color)
    ax.tick_params(axis='both', colors=color, labelcolor = color)
    # ax.grid(color='w', linestyle='-', linewidth=1)
    img_id = random.randint(0, 50)
    # NOTE(review): this writes under '../owo_v3.5/' while the rest of the
    # file uses the relative 'cogs/osu/temp/' path -- looks like a leftover
    # from another checkout; confirm which location is intended
    filepath = "../owo_v3.5/cogs/osu/temp/map_{}.png".format(img_id)
    fig.savefig(filepath, transparent=True, dpi=1000)
    plt.close()
    im = Image.open(filepath)
    return im
11506592 | from collections import OrderedDict
import ruamel.yaml
def clone_class(klass):
    """Return a fresh empty subclass of *klass* named ``Cloned<name>``.

    Lets callers attach attributes (e.g. YAML representers) without
    mutating the original class.
    """
    cloned_name = 'Cloned' + klass.__name__
    bases = (klass, object)
    return type(cloned_name, bases, {})
def get_yaml_instance(version=(1, 2), indent=None, **kwargs):
    """Build a configured ``ruamel.yaml.YAML`` instance.

    The Constructor/Representer classes are cloned so that representers
    registered here do not leak onto ruamel's shared base classes.
    ``OrderedDict`` is emitted as a plain YAML mapping.

    :param version: YAML spec version tuple
    :param indent: dict of mapping/sequence/offset indents (sensible default)
    :param kwargs: forwarded to ``ruamel.yaml.YAML``
    """
    indent = indent if indent is not None else {'mapping': 2, 'sequence': 4, 'offset': 2}
    yaml = ruamel.yaml.YAML(**kwargs)
    # isolate per-instance customizations from the shared ruamel classes
    yaml.Constructor = clone_class(yaml.Constructor)
    yaml.Representer = clone_class(yaml.Representer)
    yaml.version = version
    yaml.indent(**indent)
    yaml.Representer.add_representer(
        OrderedDict,
        lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data),
    )
    return yaml
|
11506623 | from flask import Flask
import os
# Single module-level Flask application instance.
app = Flask(__name__)
# Secret key from the environment; the 'dev' fallback is only for local development.
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'dev')
# Imported after `app` exists -- presumably so the views module can import it
# back (the classic Flask circular-import pattern); confirm against app/views.
from app import views
|
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary strings and return their sum as a binary string.

        Parses both operands in base 2, adds them as ints, and renders the
        result back in binary (no '0b' prefix).
        """
        total = int(a, 2) + int(b, 2)
        return format(total, 'b')
# Quick manual smoke test (comment translated from Chinese: "test")
sol = Solution()
print(sol.addBinary('11', '1101'))
11506671 | from .coingecko import *
class Price():
    """Caches spot/OHLC/history price data fetched through a CoinGecko client."""

    def __init__(self, whichcoin="bitcoin", fiat="eur", days_ago=1):
        """Create the client for *whichcoin* with an empty local cache."""
        self.coingecko = CoinGecko(whichcoin=whichcoin, days_ago=days_ago)
        self.fiat = fiat
        self.ohlc = {}
        self.price = {}
        self.timeseriesstack = []

    def refresh(self):
        """Re-fetch spot prices, OHLC candles and the price history."""
        logging.info("Getting Data")
        gecko = self.coingecko
        self.price = {}
        self.price["usd"] = gecko.getCurrentPrice("usd")
        self.price["gold"] = gecko.getCurrentPrice("xau")
        # satoshis per unit of currency (1 BTC = 1e8 sats)
        self.price["sat_usd"] = 1e8 / self.price["usd"]
        self.price["fiat"] = gecko.getCurrentPrice(self.fiat)
        self.price["sat_fiat"] = 1e8 / self.price["fiat"]
        self.ohlc = gecko.getOHLC(self.fiat)
        self.timeseriesstack = gecko.getHistoryPrice(self.fiat)

    def setDaysAgo(self, days_ago):
        """Forward the history window length to the CoinGecko client."""
        self.coingecko.days_ago = days_ago

    @property
    def days_ago(self):
        """History window length, read back from the CoinGecko client."""
        return self.coingecko.days_ago

    def getPriceChange(self):
        """Whole-percent change over the stored history, e.g. '+9%' ('' if empty)."""
        if len(self.timeseriesstack) == 0:
            return ""
        first = self.timeseriesstack[0]
        last = self.timeseriesstack[-1]
        return str("%+d" % round((last - first) / last * 100, 2)) + "%"

    def getPriceNow(self):
        """Latest price formatted for display ('' when no history is loaded)."""
        if len(self.timeseriesstack) == 0:
            return ""
        latest = self.timeseriesstack[-1]
        if latest > 1000:
            # thousands separators for large prices
            return format(int(latest), ",")
        # 5 significant digits for small prices
        return str(float('%.5g' % latest))
11506692 | from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
n_classes = 6
def l1_normalize(vector):
    """Scale *vector* so its entries sum to 1; all-zero vectors pass through."""
    total = np.sum(vector)
    # avoid division by zero for empty/zero distributions
    return vector if total == 0 else vector / total
def read_emo_lemma(aline):
    """Parse a lexicon line into (lemma, emotion, flag).

    flag is 1 if lemma *l* has emotion *e* according to the lexicon,
    0 otherwise.
    """
    fields = aline.split()
    return fields[0], fields[1], int(fields[2])
def build_fuzzy_lexicon(lexicon_path):
    """
    Based on the emotion lexicon, create a mapping from an emotion word to its
    label probability distribution (length ``n_classes``, L1-normalized).

    Relies on the lexicon listing each lemma's emotion rows contiguously and
    in a fixed order, with 'anger' first.
    """
    lexicon = dict()
    with open(lexicon_path, 'r') as f:
        emo_idx = 0  # anger: 0, anticipation: 1, disgust: 2, fear: 3, joy: 4, sadness: 5, surprise: 6, trust: 7
        # NOTE(review): the comment above lists 8 emotions but n_classes is 6;
        # if the file actually contains 8 emotion rows per lemma (NRC format),
        # the index resets mid-lemma and the rows after the reset overwrite
        # already-normalized slots -- confirm the file has exactly n_classes
        # emotion rows (plus positive/negative) per lemma
        for l in f:
            lemma, emotion, has_emotion = read_emo_lemma(l)
            if emotion == 'anger':  # i.e. if lemma not in lexicon.keys()
                # 'anger' is the first row for each new lemma -> allocate slot
                lexicon[lemma] = np.empty(shape=(n_classes,))
            if emotion == 'positive' or emotion == 'negative':
                # sentiment rows are not emotion classes; skip without
                # advancing the class index
                continue
            lexicon[lemma][emo_idx] = has_emotion
            if emo_idx < n_classes - 1:
                emo_idx += 1
            else:
                # normalize: emotion-label probabilities for a lemma should sum up to 1
                lexicon[lemma] = l1_normalize(lexicon[lemma])
                # reset index - next line contains a new lemma
                emo_idx = 0
    return lexicon
def classify(corpus_path, lexicon_path):
    """
    Return a list of probability distributions, one per corpus text: each is
    the L1-normalized sum of the lexicon distributions of the text's tokens.
    """
    # Create mapping: emotion word -> label probability distribution
    prob_lexicon = build_fuzzy_lexicon(lexicon_path)
    print('Read and tokenize corpus.')
    texts = []
    with open(corpus_path, 'r') as f:
        for line in f:
            # NOTE(review): the first 20 characters are skipped -- presumably a
            # fixed-width id column, and '\t:: ' separates text from labels;
            # confirm against the corpus file format
            line_split = line[20:].split(sep='\t:: ')
            texts.append(line_split[0].strip())
    print('Found %s texts.' % len(texts))
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)  # one sequence of tokens per text input
    max_seq_len = np.max([len(s) for s in sequences])
    # right-pad with zeros so all sequences share max_seq_len
    sequences = pad_sequences(sequences, max_seq_len, padding='post')
    # Dictionary mapping an index to the word it represents in the corpus (invert word->index mapping as it is bijective)
    index_to_word = {i: w for w, i in tokenizer.word_index.items()}
    # Label probability distribution for sequences, shape=(len(sequences), n_classes)
    fuzzy_labels = []
    print('Label the texts.')
    for seq in sequences:
        seq_labels = np.zeros(shape=(max_seq_len, n_classes))
        j = 0  # index of token in a sequence (different from token_id)
        for token_id in seq:
            if token_id == 0:  # we reached the padding zeros
                break
            token = index_to_word[token_id]
            if token in prob_lexicon.keys():
                seq_labels[j] += prob_lexicon[token]
            j += 1
        # collapse per-token rows into one distribution and re-normalize
        labels = l1_normalize(np.sum(seq_labels, 0))
        fuzzy_labels.append(labels)
    return fuzzy_labels
|
11506710 | import csv
import datetime
import logging
from share.harvest import BaseHarvester
logger = logging.getLogger(__name__)
class SCHarvester(BaseHarvester):
    """Harvester for the social science registry's CSV trial export."""
    VERSION = 1

    def _do_fetch(self, start, end, **kwargs):
        """Harvest trials updated between *start* and *end* (datetimes; only the dates are used)."""
        end_date = end.date()
        start_date = start.date()
        logger.info('Harvesting the social science registry %s - %s', start_date, end_date)
        return self.fetch_records(start_date, end_date)

    def fetch_records(self, start_date, end_date):
        """Yield ``(id, {'record': row})`` for each trial updated on/after *start_date*.

        Rows that are empty or do not match the header's column count are
        skipped and counted as ignored.

        :raises requests.HTTPError: if the CSV download fails
        """
        csv_response = self.requests.get(self.config.base_url + '/trials/search.csv')
        csv_response.raise_for_status()
        record_list = list(csv.reader(csv_response.text.split('\n')))
        record_list = record_list[1:]  # drop the header row
        total_records = len(record_list)
        # use the module logger (was logging.info on the root logger) so the
        # messages carry this module's name and config, like the rest of the class
        logger.info('Found total %d results from the social science registry', total_records)
        if not record_list:
            # empty export: avoid IndexError on record_list[0] below
            logger.info('Total records harvested 0 for date range %s - %s', start_date, end_date)
            return
        # the first data row defines the expected column count
        standard_size = len(record_list[0])
        records_ignored = 0
        records_harvested = 0
        for i, record in enumerate(record_list):
            logger.info('On trial %d of %d (%d%%)', i, total_records, (i / total_records) * 100)
            if len(record) != standard_size or len(record) == 0:
                records_ignored += 1
                continue
            # column 2 holds the last-updated date, e.g. 'January 02, 2020'
            last_updated = datetime.datetime.strptime(record[2], '%B %d, %Y').date()
            if last_updated < start_date:
                # lazy %-style args for consistency with the other log calls
                logger.info('Trial %d: Trial date %s is less than start date %s.', i, last_updated, start_date)
            else:
                yield (
                    record[5],  # column 5 holds the trial identifier
                    {'record': record}
                )
                records_harvested += 1
        logger.info('Total records harvested %d for date range %s - %s', records_harvested, start_date, end_date)
        logger.info('Total records ignored %d for incorrect csv formatting', records_ignored)
|
11506726 | import math
import random
from tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.instruccion import *
from tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.expresion import *
import tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.Expresiones.Trigonometrica as Trigonometrica
import tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.Expresiones.Expresion as Expresion
import tytus.parser.fase2.team21.Analisis_Ascendente.Instrucciones.Expresiones.IdAsId as IdAsId
import hashlib
class Math_(Instruccion):
    """AST node for SQL math functions (PI, RANDOM, ABS, SQRT, DIV, ...).

    ``E1``/``E2`` hold the argument expression nodes: both are None for the
    zero-argument PI/RANDOM, and ``E2`` is None for one-argument functions.
    The methods below are written without ``self`` and are always invoked
    statically (``Math_.Resolver(node, ...)``), dispatching on the runtime
    type of their first argument.
    """
    def __init__(self, nombre, E1, E2,fila,columna):
        # nombre: upper-case function name; fila/columna: source position.
        self.nombre = nombre
        self.E1 = E1
        self.E2 = E2
        self.fila = fila
        self.columna = columna
    def Resolver(mathe,ts, Consola,exceptions):
        """Evaluate ``mathe`` and return its value (interpreter mode)."""
        #Consola.append('E1 -- ' + mathe.nombre + '\n')
        #Consola.append('E2 -- ' + type(mathe.E2).__name__ + '\n')
        if isinstance(mathe,Math_):
            if mathe.E1 == None and mathe.E2 == None:
                # Zero-argument functions.
                if mathe.nombre == 'PI':
                    return math.pi
                elif mathe.nombre == 'RANDOM':
                    return random.random()
            else:
                if mathe.E2 == None: # one-argument functions
                    num1 = Math_.Resolver(mathe.E1,ts,Consola,exceptions)
                    if mathe.nombre == 'LN' or mathe.nombre == 'LOG':
                        return math.log(float(num1))
                    elif mathe.nombre == 'ABS':
                        return abs(num1)
                    elif mathe.nombre == 'CBRT':
                        return num1 ** (1/3)
                    elif mathe.nombre == 'CEIL' or mathe.nombre == 'CEILING':
                        return math.ceil(num1)
                    elif mathe.nombre == 'DEGREES':
                        return math.degrees(num1)
                    elif mathe.nombre == 'EXP':
                        return math.exp(num1)
                    elif mathe.nombre == 'FACTORIAL':
                        return math.factorial(int(num1))
                    elif mathe.nombre == 'FLOOR':
                        return math.floor(num1)
                    elif mathe.nombre == 'LOG10':
                        return math.log10(num1)
                    elif mathe.nombre == 'RADIANS':
                        return math.radians(num1)
                    elif mathe.nombre == 'ROUND':
                        return round(num1)
                    elif mathe.nombre == 'SIGN':
                        if num1 >= 0:
                            return 1
                        else:
                            return -1
                    elif mathe.nombre == 'SQRT':
                        return math.sqrt(num1)
                    elif mathe.nombre == 'TRUNC':
                        return math.trunc(num1)
                    elif mathe.nombre == 'SUM':
                        return math.fsum(num1)
                    elif mathe.nombre == 'MD5':
                        return hashlib.md5(str(num1).encode("utf-8")).hexdigest()
                    elif mathe.nombre == 'WIDTH_BUCKET':
                        # WIDTH_BUCKET(value, min, max, count): here E1 is the
                        # raw argument-node list, not a resolved value.
                        print("whidth bucket")
                        print(mathe.E1)
                        elementos = mathe.E1
                        valor = elementos[0].valor
                        # NOTE(review): `min`, `max` shadow the builtins here.
                        min = elementos[1].valor
                        max = elementos[2].valor
                        count = elementos[3].valor
                        # Width of each bucket.
                        temp = (max - min) / count
                        contador = float(min)
                        cubo = 0
                        if float(valor) == contador:
                            return 1
                        # Walk bucket boundaries until the value falls below one.
                        while contador < float(max):
                            if float(valor) < contador:
                                return cubo
                            contador += temp
                            cubo += 1
                        # Value above the last boundary -> overflow bucket.
                        return cubo + 1
                else:
                    # Two-argument functions.
                    num1 = Math_.Resolver(mathe.E1,ts,Consola,exceptions)
                    num2 = Math_.Resolver(mathe.E2,ts,Consola,exceptions)
                    if mathe.nombre == 'DIV':
                        # NOTE(review): true division; SQL DIV is usually
                        # integer division -- confirm intended semantics.
                        return num1/num2
                    elif mathe.nombre == 'GCD':
                        # NOTE(review): math.gcd requires ints; float
                        # arguments would raise TypeError -- confirm inputs.
                        return math.gcd(num1,num2)
                    elif mathe.nombre == 'MOD':
                        return num1 % num2
                    elif mathe.nombre == 'POWER':
                        return num1 ** num2
        elif isinstance(mathe, Trigonometrica.Trigonometrica):
            return Trigonometrica.Trigonometrica.Resolver(mathe,ts,Consola,exceptions)
        elif isinstance(mathe,Math_):
            # NOTE(review): unreachable -- the first branch already handles Math_.
            return Math_.Resolver(mathe,ts,Consola,exceptions)
        elif isinstance(mathe, Primitivo):
            return mathe.valor
        elif isinstance(mathe, Expresion.Expresion):
            return Expresion.Expresion.Resolver(mathe,ts,Consola,exceptions)
        elif isinstance(mathe, Unario):
            return Expresion.Expresion.Resolver(mathe,ts,Consola,exceptions)
    def obtenerCadenaEntrada(mathe,condicion):
        """Rebuild the SQL source text of ``mathe`` (pretty-printer)."""
        if isinstance(mathe,Math_):
            if mathe.E1 == None and mathe.E2 == None:
                if mathe.nombre == 'PI':
                    return 'PI() '
                elif mathe.nombre == 'RANDOM':
                    return 'RANDOM() '
            else:
                if mathe.E2 == None: # one-argument functions
                    if isinstance(mathe.E1,Unario):
                        num1 = Expresion.Expresion.ObtenerCadenaEntrada(mathe.E1,condicion)
                        return mathe.nombre + '(' + str(num1) + ') '
                    else:
                        num1 = Math_.obtenerCadenaEntrada(mathe.E1, condicion)
                        return mathe.nombre + '(' + str(num1) + ') '
                else:
                    # Two-argument functions: each child may be a Unario leaf
                    # or a nested function call.
                    if isinstance(mathe.E1, Unario) and isinstance(mathe.E2, Unario):
                        num1 = Expresion.Expresion.ObtenerCadenaEntrada(mathe.E1,condicion)
                        num2 = Expresion.Expresion.ObtenerCadenaEntrada(mathe.E2,condicion)
                        return mathe.nombre + '(' + str(num1) + ',' + str(num2) + ') '
                    elif isinstance(mathe.E1, Unario):
                        num1 = Expresion.Expresion.ObtenerCadenaEntrada(mathe.E1,condicion)
                        num2 = Math_.obtenerCadenaEntrada(mathe.E2, condicion)
                        return mathe.nombre + '(' + str(num1) + ',' + str(num2) + ') '
                    elif isinstance(mathe.E2, Unario):
                        num1 = Math_.obtenerCadenaEntrada(mathe.E1, condicion)
                        num2 = Expresion.Expresion.ObtenerCadenaEntrada(mathe.E2,condicion)
                        return mathe.nombre + '(' + str(num1) + ',' + str(num2) + ') '
                    else:
                        num1 = Math_.obtenerCadenaEntrada(mathe.E1,condicion)
                        num2 = Math_.obtenerCadenaEntrada(mathe.E2,condicion)
                        return mathe.nombre+'('+ str(num1)+','+str(num2)+') '
        elif isinstance(mathe, Trigonometrica.Trigonometrica):
            return str(Trigonometrica.Trigonometrica.obtenerCadenaEntrada(mathe,condicion))
        elif isinstance(mathe,Math_):
            # NOTE(review): unreachable -- the first branch already handles Math_.
            return str(Math_.obtenerCadenaEntrada(mathe,condicion))
        elif isinstance(mathe, Primitivo):
            return str(Primitivo.ObtenerCadenaEntrada(mathe))
        elif isinstance(mathe, Expresion.Expresion):
            expresion = str(Expresion.Expresion.ObtenerCadenaEntrada(mathe,condicion))
            return expresion
        elif isinstance(mathe,list):
            # Argument list (e.g. WIDTH_BUCKET): join the primitives with commas.
            valores=''
            cont=0
            for val in mathe:
                if isinstance(val,Primitivo):
                    valores+=Primitivo.ObtenerCadenaEntrada(val)+' '
                    cont += 1
                    if cont < len(mathe):
                        valores += ', '
                    else:
                        valores += ' '
            return valores
        elif isinstance(mathe,Id):
            return str(mathe.id)
        elif isinstance(mathe,IdId):
            return str(IdId.ObtenerCadenaEntrada(mathe))
        elif isinstance(mathe, IdAsId.IdAsId):
            return str(IdAsId.IdAsId.ObtenerCadenaEntrada(mathe))
    def traducir(mathe, ts, consola, exception, tv, regla, antes, optimizado, ID):
        """Emit 3-address code for ``mathe`` into ``consola``.

        Returns the name of the temporary that holds the result (or, in a
        few branches, a literal value -- see the review notes below).
        """
        if mathe.E1 == None and mathe.E2 == None:
            if mathe.nombre == 'PI':
                temp = tv.Temp()
                consola.append(f'\t{temp} = math.pi\n')
                return temp
            elif mathe.nombre == 'RANDOM':
                temp = tv.Temp()
                consola.append(f'\t{temp} = random.random()\n')
                return temp
        else:
            if mathe.E2 == None: # one-argument functions
                num1 = Expresion.Expresion.traducir(mathe.E1, ts, consola, exception, tv, regla, antes, optimizado, ID)
                if mathe.nombre == 'LN' or mathe.nombre == 'LOG':
                    temp1 = tv.Temp()
                    consola.append(f'\t{temp1} = float({num1})\n')
                    temp2 = tv.Temp()
                    consola.append(f'\t{temp2} = math.log({temp1})\n')
                    return temp2
                elif mathe.nombre == 'ABS':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = abs({num1})\n')
                    return temp
                elif mathe.nombre == 'CBRT': #========================
                    # NOTE(review): num1 is a temp *name* (str) here, so this
                    # computes str ** float instead of emitting code -- confirm.
                    return num1 ** (1 / 3)
                elif mathe.nombre == 'CEIL' or mathe.nombre == 'CEILING':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.ceil({num1})\n')
                    return temp
                elif mathe.nombre == 'DEGREES':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.degrees({num1})\n')
                    return temp
                elif mathe.nombre == 'EXP':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.exp({num1})\n')
                    return temp
                elif mathe.nombre == 'FACTORIAL':
                    temp1 = tv.Temp()
                    consola.append(f'\t{temp1} = int({num1})\n')
                    temp2 = tv.Temp()
                    consola.append(f'\t{temp2} = math.factorial({temp1})\n')
                    return temp2
                elif mathe.nombre == 'FLOOR':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.floor({num1})\n')
                    return temp
                elif mathe.nombre == 'LOG10':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.log10({num1})\n')
                    return temp
                elif mathe.nombre == 'RADIANS':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.radians({num1})\n')
                    return temp
                elif mathe.nombre == 'ROUND':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = round({num1})\n')
                    return temp
                elif mathe.nombre == 'SIGN':
                    # NOTE(review): num1 is a temp *name* (str); comparing it
                    # with 0 at translation time looks wrong -- confirm.
                    if num1 >= 0:
                        return 1
                    else:
                        return -1
                elif mathe.nombre == 'SQRT':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.sqrt({num1})\n')
                    return temp
                elif mathe.nombre == 'TRUNC':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.trunc({num1})\n')
                    return temp
                elif mathe.nombre == 'SUM':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.fsum({num1})\n')
                    return temp
                elif mathe.nombre == 'MD5':
                    temp1 = tv.Temp()
                    consola.append(f'\t{temp1} = str({num1})\n')
                    temp2 = tv.Temp()
                    consola.append(f'\t{temp2} = {temp1}.encode("utf-8")\n')
                    temp3 = tv.Temp()
                    consola.append(f'\t{temp3} = hashlib.md5({temp2})\n')
                    temp4 = tv.Temp()
                    consola.append(f'\t{temp4} = {temp3}.hexdigest()\n')
                    return temp4
                elif mathe.nombre == 'WIDTH_BUCKET':
                    print("whidth bucket")
                    consola.append('\t#whidth bucket\n')
                    print(mathe.E1)
                    # E1 is the raw argument-node list (value, min, max, count).
                    elementos = mathe.E1
                    valor = elementos[0].valor
                    min = elementos[1].valor
                    max = elementos[2].valor
                    count = elementos[3].valor
                    #---------
                    # Load the four arguments into temporaries.
                    tvalor = tv.Temp()
                    consola.append(f'\t{tvalor} = {valor}\n')
                    tmin = tv.Temp()
                    consola.append(f'\t{tmin} = {min}\n')
                    tmax = tv.Temp()
                    consola.append(f'\t{tmax} = {max}\n')
                    tcount = tv.Temp()
                    consola.append(f'\t{tcount} = {count}\n')
                    temp = (max - min) / count
                    #-----------
                    # Emit bucket-width computation: (max - min) / count.
                    resta = tv.Temp()
                    consola.append(f'\t{resta} = {tmax} - {tmin}\n')
                    division = tv.Temp()
                    consola.append(f'\t{division} = {resta} / {tcount}\n')
                    contador = float(min)
                    cubo = 0
                    #-----------
                    tcontador = tv.Temp()
                    consola.append(f'\t{tcontador} = float({tmin})\n')
                    tcubo = tv.Temp()
                    consola.append(f'\t{tcubo} = 0\n')
                    #-------------- first if
                    casteo = tv.Temp()
                    consola.append(f'\t{casteo} = float({tvalor})\n')
                    condicion = tv.Temp()
                    consola.append(f'\t{condicion} = {casteo} == {tcontador}\n')
                    tresultado = tv.Temp()
                    consola.append(f'\t{tresultado} = 1\n')
                    verdadero = tv.Et()
                    falso = tv.Et()
                    consola.append('\tif ' + condicion + ':\n\t\t goto .' + verdadero + '\n')
                    consola.append(f'\telse:\n\t\tgoto .{falso}\n')
                    consola.append(f'\tlabel .{verdadero}\n')
                    consola.append(f'\t{tresultado} = 1\n')
                    # skip over the while loop
                    inicial = tv.Et()
                    verdadero2 = tv.Et()
                    falso2 = tv.Et()
                    consola.append(f'\tgoto .{falso2}\n')
                    consola.append(f'\tlabel .{falso}\n')
                    #if float(valor) == contador:
                    #    return 1
                    #-----------------------while
                    casteo2 = tv.Temp()
                    consola.append(f'\t{casteo2} = float({tmax})\n')
                    condicion2 = tv.Temp()
                    consola.append(f'\tlabel .{inicial}\n')
                    consola.append(f'\t{condicion2} = {tcontador} < {casteo2}\n')
                    consola.append('\tif ' + condicion2 + ':\n\t\t goto .' + verdadero2 + '\n')
                    consola.append(f'\telse:\n\t\tgoto .{falso2}\n')
                    consola.append(f'\tlabel .{verdadero2}\n')
                    # another if
                    condicion3 = tv.Temp()
                    consola.append(f'\t{condicion3} = {casteo} < {tcontador}\n')
                    verdadero3 = tv.Et()
                    falso3 = tv.Et()
                    consola.append('\tif ' + condicion3 + ':\n\t\t goto .' + verdadero3 + '\n')
                    consola.append(f'\telse:\n\t\tgoto .{falso3}\n')
                    consola.append(f'\tlabel .{verdadero3}\n')
                    consola.append(f'\t{tresultado} = {tcubo}\n')
                    # exit the while loop
                    consola.append(f'\tgoto .{falso2}\n')
                    consola.append(f'\tlabel .{falso3}\n')
                    consola.append(f'\t{tcontador} = {tcontador} + {division}\n')
                    consola.append(f'\t{tcubo} = {tcubo} + 1\n')
                    consola.append(f'\tgoto .{inicial}\n')
                    consola.append(f'\tlabel .{falso2}\n')
                    #while contador < float(max):
                    #    if float(valor) < contador:
                    #        return cubo
                    #    contador += temp
                    #    cubo += 1
                    #return cubo + 1 # what is this for?
                    return tresultado
            else:
                # Two-argument functions.
                num1 = Expresion.Expresion.traducir(mathe.E1, ts, consola, exception, tv, regla, antes, optimizado, ID)
                num2 = Expresion.Expresion.traducir(mathe.E2, ts, consola, exception, tv, regla, antes, optimizado, ID)
                if mathe.nombre == 'DIV':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = {num1} / {num2}\n')
                    return temp
                elif mathe.nombre == 'GCD':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = math.gcd({num1}, {num2})\n')
                    return temp
                elif mathe.nombre == 'MOD':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = {num1} % {num2}\n')
                    return temp
                elif mathe.nombre == 'POWER':
                    temp = tv.Temp()
                    consola.append(f'\t{temp} = {num1} ** {num2}\n')
                    return temp
|
11506742 | from pyFHE.tlwe import tlweSymEncrypt, tlweSymDecrypt
from pyFHE.key import SecretKey, CloudKey
from pyFHE.gatebootstrapping import GateBootstrappingTLWE2TLWElvl2FFT
import numpy as np
from concurrent import futures
from os import cpu_count
def test():
    """One randomized round-trip of lvl2 gate bootstrapping.

    Encrypts a random bit, bootstraps it, decrypts the phase by hand and
    aborts the whole process if the error exceeds mu/2.
    """
    # NOTE(review): positional SecretKey arguments are TFHE-style noise and
    # decomposition parameters -- confirm order against pyFHE.key.SecretKey.
    sk = SecretKey(500,2.44e-5,1024,2,10,3.73e-9,8,2,2.44e-5,2048,4,9,2**-44,10,3,2**-31)
    ck = CloudKey(sk)
    # Random plaintext bit, encoded as +-1/8 on the torus.
    p = np.random.binomial(1, 0.5)
    x = tlweSymEncrypt((p * 2 + -1) * (2 ** -3), sk.params.alpha, sk.key.tlwe)
    mu = 1/sk.params.Bgbar
    y = GateBootstrappingTLWE2TLWElvl2FFT(x, ck, mu)
    # Manual phase computation: b - <a, s> over the lvl2 key.
    z = np.int64(y[-1] - np.dot(y[:-1], sk.key.lvl2))
    np.set_printoptions(threshold=2000)
    # Fail hard (exit the interpreter) if the recovered phase is off by > mu/2.
    if abs(p*mu - z*2**-64)>mu/2:
        print(p*mu)
        print(z)
        exit()
# Run the bootstrapping round-trip ten times; test() exits the process on
# the first failure, so reaching the final print means all runs passed.
# (Removed the unused `future_list` -- it was never referenced; the
# concurrent.futures imports above are left untouched.)
for _ in range(10):
    test()
print("completed.")
|
11506744 | import pytest
import re
class TestMismatchedBrackets:
    """Error reporting for mismatched, hanging and unclosed brackets.

    Each test feeds deliberately broken source through
    ``pytest.helpers.assert_conversion`` and checks the exact SyntaxError
    message (including the reported line/column positions).
    """
    def test_it_knows_about_mismatched_square_from_parenthesis(self):
        """']' closing a '(' is reported with both positions."""
        original = """
        def wat(self:
            pass
        things = "]"
        def things]self:
            pass
        """
        expected = re.escape(
            "Trying to close the wrong type of bracket. Found ']' (line 6, column 10) instead of closing a '(' (line 1, column 7)"
        )
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_mismatched_square_from_curly(self):
        """']' closing a '{' is reported with both positions."""
        original = """
        def wat{self:
            pass
        def things]self:
            pass
        """
        expected = re.escape(
            "Trying to close the wrong type of bracket. Found ']' (line 4, column 10) instead of closing a '{' (line 1, column 7)"
        )
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_mismatched_parenthesis_from_square(self):
        """')' closing a '[' is reported with both positions."""
        original = """
        def wat[self:
            pass
        def things)self:
            pass
        """
        expected = re.escape(
            "Trying to close the wrong type of bracket. Found ')' (line 4, column 10) instead of closing a '[' (line 1, column 7)"
        )
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_hanging_square(self):
        """A ']' with no opener is reported as hanging."""
        original = """
        def wat(self):
            pass
        def things]self:
            pass
        """
        expected = re.escape("Found a hanging ']' on line 4, column 10")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_hanging_parenthesis(self):
        """A ')' with no opener is reported as hanging."""
        original = """
        def wat(self)):
            pass
        """
        expected = re.escape("Found a hanging ')' on line 1, column 13")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_hanging_curly(self):
        """A '}' with no opener is reported as hanging."""
        original = """
        class Wat:
            def __init__(self):
                self.d = {1: 2}}
        """
        expected = re.escape("Found a hanging '}' on line 3, column 23")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_unclosed_parenthesis(self):
        """A '(' that never closes is reported with its position."""
        original = """
        def thing(self):
            pass
        def wat(self:
            pass
        """
        expected = re.escape("Found an open '(' (line 4, column 7) that wasn't closed")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_unclosed_square(self):
        """A '[' that never closes is reported with its position."""
        original = """
        def thing(self):
            pass
        things = [1, 2
        """
        expected = re.escape("Found an open '[' (line 4, column 9) that wasn't closed")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
    def test_it_knows_about_unclosed_curly(self):
        """A '{' that never closes is reported with its position."""
        original = """
        def thing(self):
            pass
        things = [1, 2]
        stuff = {1: 2
        """
        expected = re.escape("Found an open '{' (line 6, column 8) that wasn't closed")
        with pytest.raises(SyntaxError, match=expected):
            pytest.helpers.assert_conversion(original, "")
|
11506755 | import pytest
import numpy as np
import pandas as pd
from pyshgp.validation import check_1d, check_2d, check_column_types, check_num_columns
def test_check_1d_on_1d():
    """A clean 1D list passes through check_1d unchanged."""
    result = check_1d([1, 2, 3])
    assert result == [1, 2, 3]
def test_check_1d_on_dirty_1d():
    """check_1d normalizes scalar ndarrays mixed into a 1D list."""
    result = check_1d([1, np.array(2), 3])
    assert result == [1, 2, 3]
def test_check_1d_on_2d():
    """check_1d rejects 2D input."""
    matrix = np.arange(10).reshape(-1, 2)
    with pytest.raises(ValueError):
        check_1d(matrix)
def test_check_2d_on_1d():
    """check_2d rejects 1D input."""
    vector = np.arange(3)
    with pytest.raises(ValueError):
        check_2d(vector)
def test_check_2d_on_2d():
    """check_2d passes DataFrames and 2D arrays through unchanged."""
    frame = pd.DataFrame({
        "x1": [-2, -1, 0, 1, 2],
        "x2": [-1, 2, -3, 4, -5],
        "y": [2, -2, 0, 4, -10],
    })
    pd.testing.assert_frame_equal(check_2d(frame), frame)
    matrix = np.arange(10).reshape(-1, 2)
    assert np.array_equal(check_2d(matrix), matrix)
def test_check_2d_on_3d():
    """check_2d rejects 3D input."""
    cube = np.arange(12).reshape(-1, 2, 2)
    with pytest.raises(ValueError):
        check_2d(cube)
def test_check_column_types():
    """check_column_types infers per-column types and rejects mixed columns."""
    # Integer ndarray: the concrete int width is platform dependent.
    arr_col_types = check_column_types(np.arange(30).reshape(-1, 3))
    assert arr_col_types in ([np.int64] * 3, [np.int32] * 3, [np.int16] * 3)
    # Homogeneous list-of-rows dataset.
    clean_rows = [[1, "a"], [2, "b"], [3, "c"]]
    assert check_column_types(clean_rows, 1.0) == [int, str]
    # A column mixing str and bool must be rejected.
    mixed_rows = [[1, "a"], [2, False], [3, "c"]]
    with pytest.raises(ValueError):
        check_column_types(mixed_rows, 1.0)
    # DataFrame: int width again platform dependent; strings are object dtype.
    frame = pd.DataFrame({
        "i": [1, 2, 3, 4, 5],
        "s": ["a", "b", "c", "d", "e"],
    })
    frame_col_types = check_column_types(frame, 1.0)
    assert frame_col_types in (
        [np.int64, np.object_],
        [np.int32, np.object_],
        [np.int16, np.object_],
    )
def test_check_num_columns_a():
    """check_num_columns counts columns of a list-of-rows dataset."""
    rows = [[1, "a"], [2, "b"], [3, "c"]]
    assert check_num_columns(rows) == 2
def test_check_num_columns_b():
    """check_num_columns counts columns of a 2D ndarray."""
    matrix = np.arange(9).reshape(-1, 3)
    assert check_num_columns(matrix) == 3
|
11506786 | from lib.external.PluginManager import PluginInterface, Manager
from prettytable import PrettyTable
from aayudh import utils
import sys
import os
# Make the repository root (one level above this plugin) importable.
current_dir = os.path.abspath(os.path.dirname(__file__))
root_dir = os.path.normpath(os.path.join(current_dir, ".."))
# NOTE(review): this insert happens after the `lib.external` import above has
# already succeeded -- presumably the plugin manager sets sys.path first; confirm.
sys.path.insert(0, root_dir)
class summary(PluginInterface):
    """Plugin that prints a per-file summary table of metadata, hashes and
    embedded subfiles.

    Fixes vs. the original: `print` calls are parenthesized (valid in both
    Python 2 and 3), and the nested tables reuse the same border/header/
    padding flags as the outer table instead of repeating literals.
    """
    name = "summary"
    enabled = True

    def __init__(self):
        # Plugin metadata consumed by the PluginManager.
        self.details = utils.objdict({})
        self.details.name = self.name
        self.details.description = "Display a summary for each file"
        self.details.mimetypes = None
        self.details.author = "@7h3rAm"
        self.details.version = "0.01"
        self.details.date = "15/OCT/2015"
        self.details.path = ("" if __file__ is None else os.path.abspath(__file__))

    def run(self, report):
        """Render `report.meta` as nested PrettyTables and print the result."""
        if hasattr(report, "meta"):
            borderflag = False
            headerflag = False
            padwidth = 1
            # show file metainfo
            summarytab = PrettyTable(["Attribute", "Value"])
            summarytab.border = borderflag
            summarytab.header = headerflag
            summarytab.padding_width = padwidth
            summarytab.align["Attribute"] = "l"
            summarytab.align["Value"] = "l"
            summarytab.add_row(["File", report.meta.filebasename])
            summarytab.add_row(["Location", report.meta.filedirname])
            summarytab.add_row(["MIMEType", report.meta.filemimetype])
            summarytab.add_row(["Magic", report.meta.filemagic])
            summarytab.add_row(["Size", report.meta.filesize])
            summarytab.add_row(["Minsize", "%s (%s%%)" % (report.meta.fileminsize, report.meta.filecompressionratio) if report.meta.fileminsize and report.meta.filecompressionratio else "None"])
            summarytab.add_row(["Entropy", "%s (%s)" % (report.meta.fileentropy, report.meta.fileentropycategory) if report.meta.fileentropy and report.meta.fileentropycategory else "None"])
            # Nested table of file hashes (sha512 is omitted for brevity).
            hashtab = PrettyTable(["Hash", "Value"])
            hashtab.border = borderflag
            hashtab.header = headerflag
            hashtab.padding_width = padwidth
            hashtab.align["Hash"] = "l"
            hashtab.align["Value"] = "l"
            for hashfunc in report.meta.hashes:
                if hashfunc != "sha512":
                    hashtab.add_row([hashfunc.upper(), report.meta.hashes[hashfunc]])
            summarytab.add_row(["Hashes", hashtab.get_string()])
            # Nested table of embedded subfiles, when any were carved out.
            if report.meta.subfiles and len(report.meta.subfiles):
                subfilestab = PrettyTable(["Attribute", "Value"])
                subfilestab.border = borderflag
                subfilestab.header = headerflag
                subfilestab.padding_width = padwidth
                subfilestab.align["Attribute"] = "l"
                subfilestab.align["Value"] = "l"
                for subfile in report.meta.subfiles:
                    # Only report subfiles with a real (positive) offset and size.
                    if subfile["offset"] and subfile["offset"] > 0 and subfile["size"] and subfile["size"] > 0:
                        subfilestab.add_row(["Description", subfile["description"]])
                        subfilestab.add_row(["SHA256", subfile["hashes"]["sha256"]])
                        subfilestab.add_row(["MIMEType", subfile["mimetype"]])
                        subfilestab.add_row(["Offset:Size", "%d:%d" % (subfile["offset"], subfile["size"])])
                        subfilestab.add_row(["", ""])
                summarytab.add_row(["Subfiles", subfilestab.get_string()])
            result = summarytab.get_string()
            if result != "":
                print("\nMeta Information:\n%s" % result)
Manager().register_plugin(summary)
|
11506814 | from setuptools import setup, find_namespace_packages
import os
# Resolve the README relative to this setup.py so builds work from any CWD.
this_directory = os.path.abspath(os.path.dirname(__file__))
# Read as bytes and decode explicitly to avoid locale-dependent defaults.
with open(os.path.join(this_directory, 'README.rst'), 'rb') as f:
    long_description = f.read().decode('utf-8')
# NOTE(review): no `version=` argument here -- presumably supplied via
# setup.cfg or a namespace-package convention; confirm before release.
setup(name='fluent.syntax',
      description='Localization library for expressive translations.',
      long_description=long_description,
      long_description_content_type='text/x-rst',
      author='Mozilla',
      author_email='<EMAIL>',
      license='APL 2',
      url='https://github.com/projectfluent/python-fluent',
      keywords=['fluent', 'localization', 'l10n'],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3 :: Only',
      ],
      # Only pick up the fluent.* namespace packages.
      packages=find_namespace_packages(include=['fluent.*']),
      test_suite='tests.syntax'
      )
|
11506944 | import sys
import json
import click
import json
from prettytable import PrettyTable
from calm.dsl.api import get_api_client, get_resource_api
from calm.dsl.constants import PROVIDER_ACCOUNT_TYPE_MAP
from calm.dsl.log import get_logging_handle
from .utils import highlight_text
LOG = get_logging_handle(__name__)
def get_brownfield_ahv_vm_list(entity_rows):
    """Print a PrettyTable of brownfield AHV VMs."""
    table = PrettyTable()
    table.field_names = [
        "NAME",
        "CLUSTER",
        "SUBNET",
        "ADDRESS",
        "MEMORY",
        "SOCKETS",
        "VCPU",
        "ID",
    ]
    for entity in entity_rows:
        resources = entity["status"]["resources"]
        cells = [
            resources["instance_name"],
            resources["cluster_name"],
            resources["subnet_list"],
            ",".join(resources["address_list"]),
            resources["memory_size_mib"] // 1024,
            resources["num_sockets"],
            resources["num_vcpus_per_socket"],
            resources["instance_id"],
        ]
        table.add_row([highlight_text(cell) for cell in cells])
    click.echo(table)
def get_brownfield_aws_vm_list(entity_rows):
    """Print a PrettyTable of brownfield AWS VMs."""
    table = PrettyTable()
    table.field_names = [
        "NAME",
        "PUBLIC IP ADDRESS",
        "PRIVATE DNS",
        "PUBLIC DNS",
        "REGION",
        "POWER STATE",
        "ID",
    ]
    for entity in entity_rows:
        resources = entity["status"]["resources"]
        cells = [
            resources["instance_name"],
            ",".join(resources["public_ip_address"]),
            resources["private_dns_name"],
            ",".join(resources["public_dns_name"]),
            ",".join(resources["region"]),
            resources["power_state"],
            resources["instance_id"],
        ]
        table.add_row([highlight_text(cell) for cell in cells])
    click.echo(table)
def get_brownfield_azure_vm_list(entity_rows):
    """Print a PrettyTable of brownfield Azure VMs."""
    table = PrettyTable()
    table.field_names = [
        "NAME",
        "RESOURCE GROUP",
        "LOCATION",
        "PUBLIC IP",
        "PRIVATE IP",
        "HARDWARE PROFILE",
        "ID",
    ]
    for entity in entity_rows:
        resources = entity["status"]["resources"]
        # vmSize lives under properties.hardwareProfile and may be absent.
        vm_size = resources["properties"].get("hardwareProfile", {}).get("vmSize", "")
        cells = [
            resources["instance_name"],
            resources["resource_group"],
            resources["location"],
            resources["public_ip_address"],
            resources["private_ip_address"],
            vm_size,
            resources["instance_id"],
        ]
        table.add_row([highlight_text(cell) for cell in cells])
    click.echo(table)
def get_brownfield_gcp_vm_list(entity_rows):
    """Print a PrettyTable of brownfield GCP VMs."""
    table = PrettyTable()
    table.field_names = [
        "NAME",
        "ZONE",
        "SUBNETS",
        "NETWORK",
        "NAT IP",
        "NETWORK NAME",
        "ID",
    ]
    for entity in entity_rows:
        resources = entity["status"]["resources"]
        cells = [
            resources["instance_name"],
            resources["zone"],
            resources["subnetwork"],
            resources["network"],
            ",".join(resources["natIP"]),
            ",".join(resources["network_name"]),
            resources["id"],
        ]
        table.add_row([highlight_text(cell) for cell in cells])
    click.echo(table)
def get_brownfield_vmware_vm_list(entity_rows):
    """Print a PrettyTable of brownfield VMware VMs."""
    table = PrettyTable()
    table.field_names = [
        "NAME",
        "HOSTNAME",
        "IP ADDRESS",
        "VCPU",
        "CORES PER VCPU",
        "MEMORY (GIB)",
        "GUEST FAMILY",
        "TEMPLATE",
        "ID",
    ]
    for entity in entity_rows:
        resources = entity["status"]["resources"]
        cells = [
            resources["instance_name"],
            resources["guest.hostName"],
            ",".join(resources["guest.ipAddress"]),
            resources["config.hardware.numCPU"],
            resources["config.hardware.numCoresPerSocket"],
            int(resources["config.hardware.memoryMB"]) // 1024,
            resources.get("guest.guestFamily", ""),
            resources.get("config.template", False),
            resources["instance_id"],
        ]
        table.add_row([highlight_text(cell) for cell in cells])
    click.echo(table)
def get_brownfield_account_details(project_name, provider_type, account_name):
    """Resolve the project uuid and the matching provider account uuid.

    Looks up non-deleted accounts of the provider's type (optionally filtered
    by name), then picks the first such account whitelisted in the project.
    Exits the process (-1) when the project or account cannot be found.
    For AHV, the returned account uuid is actually the first cluster uuid of
    the account.
    """
    client = get_api_client()
    # Getting the account uuid map
    account_type = PROVIDER_ACCOUNT_TYPE_MAP[provider_type]
    params = {"length": 250, "filter": "state!=DELETED;type=={}".format(account_type)}
    if account_name:
        params["filter"] += ";name=={}".format(account_name)
    account_uuid_name_map = client.account.get_uuid_name_map(params)
    provider_account_uuids = list(account_uuid_name_map.keys())
    LOG.info("Fetching project '{}' details".format(project_name))
    params = {"length": 250, "filter": "name=={}".format(project_name)}
    res, err = client.project.list(params)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    res = res.json()
    if res["metadata"]["total_matches"] == 0:
        LOG.error("Project {} not found".format(project_name))
        sys.exit(-1)
    pj_data = res["entities"][0]
    # Accounts whitelisted for use within this project.
    whitelisted_accounts = [
        account["uuid"]
        for account in pj_data["status"]["resources"].get("account_reference_list", [])
    ]
    project_uuid = pj_data["metadata"]["uuid"]
    account_uuid = ""
    # First whitelisted account that is also of the requested provider type.
    for _account_uuid in whitelisted_accounts:
        if _account_uuid in provider_account_uuids:
            account_uuid = _account_uuid
            break
    if not account_uuid:
        LOG.error("No account with given details found in project")
        sys.exit(-1)
    account_name = account_uuid_name_map[account_uuid]
    LOG.info("Using account '{}' for listing brownfield vms".format(account_name))
    if provider_type == "AHV_VM":
        LOG.info("Fetching account '{}' details".format(account_name))
        res, err = client.account.read(account_uuid)
        if err:
            raise Exception("[{}] - {}".format(err["code"], err["error"]))
        res = res.json()
        clusters = res["status"]["resources"]["data"].get(
            "cluster_account_reference_list", []
        )
        if not clusters:
            LOG.error(
                "No cluster found in ahv account (uuid='{}')".format(account_uuid)
            )
            sys.exit(-1)
        # Use cluster uuid for AHV account
        account_uuid = clusters[0]["uuid"]
    return {
        "project": {"name": project_name, "uuid": project_uuid},
        "account": {"name": account_name, "uuid": account_uuid},
    }
def get_brownfield_vms(
    limit, offset, quiet, out, project_name, provider_type, account_name
):
    """Fetch and display brownfield VMs of one provider for a project.

    Resolves project/account uuids, queries the brownfield-import endpoint,
    then renders the result as raw JSON (`out == "json"`), as a bare list of
    names (`quiet`), or as a provider-specific PrettyTable.
    """
    client = get_api_client()
    account_detail = get_brownfield_account_details(
        project_name=project_name,
        provider_type=provider_type,
        account_name=account_name,
    )
    project_uuid = account_detail["project"]["uuid"]
    account_name = account_detail["account"]["name"]
    account_uuid = account_detail["account"]["uuid"]
    LOG.info("Fetching brownfield vms")
    Obj = get_resource_api("blueprints/brownfield_import/vms", client.connection)
    # Scope the listing to the resolved project and account.
    filter_query = "project_uuid=={};account_uuid=={}".format(
        project_uuid, account_uuid
    )
    params = {"length": limit, "offset": offset, "filter": filter_query}
    res, err = Obj.list(params=params)
    if err:
        LOG.error(err)
        sys.exit(-1)
    if out == "json":
        click.echo(json.dumps(res.json(), indent=4, separators=(",", ": ")))
        return
    json_rows = res.json()["entities"]
    if not json_rows:
        click.echo(
            highlight_text(
                "No brownfield {} found on account '{}' !!!\n".format(
                    provider_type, account_name
                )
            )
        )
        return
    if quiet:
        # Names only, one per line.
        for _row in json_rows:
            row = _row["status"]
            click.echo(highlight_text(row["name"]))
        return
    # Dispatch to the provider-specific table renderer.
    if provider_type == "AHV_VM":
        get_brownfield_ahv_vm_list(json_rows)
    elif provider_type == "AWS_VM":
        get_brownfield_aws_vm_list(json_rows)
    elif provider_type == "AZURE_VM":
        get_brownfield_azure_vm_list(json_rows)
    elif provider_type == "GCP_VM":
        get_brownfield_gcp_vm_list(json_rows)
    elif provider_type == "VMWARE_VM":
        get_brownfield_vmware_vm_list(json_rows)
|
11506950 | import os
import glob
import numpy as np
import pandas as pd
"""
Module for postprocessing AD results. AD results are
stored in CSV files and then assembled in DataFrame tables
for further querying.
"""
def load_ad_table(path, index_column='oid'):
    """Load one CSV file with AD results.

    Parameters
    ----------
    path: Path to the CSV file (no header expected).
    index_column: Name for the first column; defaults to 'oid'. The second
        column is named after the file's basename (extension stripped).

    Returns
    -------
    DataFrame with two columns: the index column and the algorithm scores.
    """
    basename = os.path.splitext(os.path.basename(path))[0]
    return pd.read_csv(path, names=[index_column, basename], index_col=None)
def load_ad_tables_by_patterns(patterns, sorting='hits'):
    """Load and merge every AD-results file matching any of the glob patterns.

    Parameters
    ----------
    patterns: Iterable of glob patterns.
    sorting: 'hits' (default) orders rows by how many algorithms flagged the
        object (fewest NaNs first, stable); None keeps the merge order.

    Returns
    -------
    Merged (and optionally sorted) DataFrame.
    """
    merged_per_pattern = []
    for pattern in patterns:
        tables = [load_ad_table(filename) for filename in glob.glob(pattern)]
        merged_per_pattern.append(merge_ad_tables(tables))
    table = merge_ad_tables(merged_per_pattern)
    if sorting is None:
        return table
    if sorting != 'hits':
        raise ValueError('unknown sorting algorithm: {}'.format(sorting))
    # Fewer NaNs == more algorithms agree; mergesort keeps ties stable.
    order = table.isna().sum(axis=1).sort_values(kind='mergesort').index
    return table.loc[order].reset_index(drop=True)
def merge_ad_tables(tables, index_column='oid'):
    """Outer-merge a list of AD-results tables on the index column.

    Parameters
    ----------
    tables: List of DataFrames to combine.
    index_column: Join column; defaults to 'oid'.

    Returns
    -------
    Single merged DataFrame containing every column of every input.
    """
    merged = pd.DataFrame({index_column: []})
    for table in tables:
        merged = merged.merge(table, on=index_column, how='outer')
    return merged
def extract_ad_subtable(table, value_columns):
    """Extract and rank a subtable of the AD results for expert analysis.

    Rows with fewer than two non-NaN cells are dropped; the remainder are
    sorted by the value columns and then, stably, by how many of those
    columns are NaN (fully-scored objects first).

    Parameters
    ----------
    table: Source table to query from.
    value_columns: Columns to keep alongside 'oid'.

    Returns
    -------
    Extracted and re-indexed subtable.
    """
    selected = table.loc[:, ['oid'] + value_columns]
    selected = selected.dropna(thresh=2).sort_values(by=value_columns).reset_index(drop=True)
    nan_counts = selected.iloc[:, 1:].isna().sum(axis=1).values
    order = nan_counts.argsort(kind='stable')
    return selected.loc[order].reset_index(drop=True)
def save_anomaly_list(filename, names, scores):
    """Save a list of anomalies as a headerless two-column CSV.

    Parameters
    ----------
    filename: Destination file name.
    names: Array of anomaly names.
    scores: Array of anomaly scores; lesser score == more anomalous.

    Returns
    -------
    None
    """
    columns = (pd.Series(names), pd.Series(scores))
    frame = pd.concat(columns, axis=1)
    frame.to_csv(filename, header=False, index=False)
def load_expert_table(filename):
    """Load a table with expert analysis of the anomalies.

    Parameters
    ----------
    filename: CSV file with a header row; the first column is the index.

    Returns
    -------
    Loaded DataFrame.
    """
    table = pd.read_csv(filename, header=0, index_col=0)
    return table
def extract_anomaly_features(anomalies, oids, features):
    """
    Extract features of anomalies.

    Fix: the previous np.vectorize-based lookup raised on an empty
    `anomalies` array (np.vectorize cannot infer an output type from zero
    calls); a plain dict lookup folded through np.fromiter handles the
    empty case and avoids per-call vectorize overhead.

    Parameters
    ----------
    anomalies: 1D array of anomaly oids to extract features for.
    oids: Dataset oids, 1D array (assumed unique).
    features: Dataset features, 2D array aligned with `oids`.

    Returns
    -------
    Numpy array of feature rows for the anomalies, in `anomalies` order.
    """
    positions = {oid: i for i, oid in enumerate(oids)}
    index = np.fromiter(
        (positions[oid] for oid in anomalies), dtype=np.intp, count=len(anomalies)
    )
    return features[index]
|
11506972 | from __future__ import annotations
from typing import Optional, Type
from bunq import ApiEnvironmentType
from bunq.sdk.context.bunq_context import BunqContext
from bunq.sdk.exception.bunq_exception import BunqException
from bunq.sdk.http.anonymous_api_client import AnonymousApiClient
from bunq.sdk.http.bunq_response import BunqResponse
from bunq.sdk.http.bunq_response_raw import BunqResponseRaw
from bunq.sdk.http.http_util import HttpUtil
from bunq.sdk.json import converter
from bunq.sdk.model.core.bunq_model import BunqModel
from bunq.sdk.model.core.oauth_grant_type import OauthGrantType
from bunq.sdk.model.generated.endpoint import OauthClient
from bunq.sdk.util.type_alias import T
class OauthAccessToken(BunqModel):
    """An OAuth access token for the bunq API.

    Provides `create` to exchange an authorization code for a token at the
    bunq OAuth token endpoint (sandbox or production, depending on the
    current API context).
    """

    # Field constants.
    FIELD_GRANT_TYPE = "grant_type"
    FIELD_CODE = "code"
    FIELD_REDIRECT_URI = "redirect_uri"
    FIELD_CLIENT_ID = "client_id"
    FIELD_CLIENT_SECRET = "client_secret"

    # Token constants. The %s placeholder receives the url-encoded query
    # string built in create_token_uri().
    TOKEN_URI_FORMAT_SANDBOX = "https://api-oauth.sandbox.bunq.com/v1/token?%s"
    TOKEN_URI_FORMAT_PRODUCTION = "https://api.oauth.bunq.com/v1/token?%s"

    # Error constants.
    ERROR_ENVIRONMENT_TYPE_NOT_SUPPORTED = "You are trying to use an unsupported environment type."

    def __init__(self, token: str, token_type: str, state: Optional[str] = None) -> None:
        self._token = token
        self._token_type = token_type
        self._state = state

    @property
    def token(self) -> str:
        return self._token

    @property
    def token_type(self) -> str:
        return self._token_type

    @property
    def state(self) -> Optional[str]:
        return self._state

    @classmethod
    def create(cls,
               grant_type: OauthGrantType,
               oauth_code: str,
               redirect_uri: str,
               client: OauthClient) -> OauthAccessToken:
        """Exchange an authorization code for an access token via a POST to
        the token endpoint (parameters travel in the query string)."""
        api_client = AnonymousApiClient(BunqContext.api_context())
        response_raw = api_client.post(
            cls.create_token_uri(grant_type.value, oauth_code, redirect_uri, client),
            bytearray(),
            {}
        )

        return cls.from_json(OauthAccessToken, response_raw).value

    @classmethod
    def create_token_uri(cls, grant_type: str, auth_code: str, redirect_uri: str, client: OauthClient) -> str:
        """Build the full token-endpoint URI including the query string.

        Bug fix: the URI templates use a ``%s`` placeholder, so they must be
        filled with %-interpolation. The previous ``str.format()`` call
        matched no ``{}`` placeholder and silently returned the template
        unchanged, dropping the whole query string.
        """
        all_token_parameter = {
            cls.FIELD_GRANT_TYPE: grant_type,
            cls.FIELD_CODE: auth_code,
            cls.FIELD_REDIRECT_URI: redirect_uri,
            cls.FIELD_CLIENT_ID: client.id_,
            cls.FIELD_CLIENT_SECRET: client.secret,
        }

        return cls.determine_auth_uri_format() % HttpUtil.create_query_string(all_token_parameter)

    def is_all_field_none(self) -> bool:
        """Return True only when token, token_type and state are all None."""
        if self._token is not None:
            return False
        elif self._token_type is not None:
            return False
        elif self._state is not None:
            return False

        return True

    @classmethod
    def from_json(cls, class_of_object: Type[T], response_raw: BunqResponseRaw):
        """Deserialize a raw HTTP response into a BunqResponse wrapping an
        instance of `class_of_object`."""
        response_item_object = converter.deserialize(class_of_object, response_raw)
        response_value = converter.json_to_class(class_of_object, response_item_object)

        return BunqResponse(response_value, response_raw.headers)

    @classmethod
    def determine_auth_uri_format(cls) -> str:
        """Pick the token URI template for the active environment.

        Raises BunqException for any environment other than PRODUCTION or
        SANDBOX.
        """
        environment_type = BunqContext.api_context().environment_type

        if ApiEnvironmentType.PRODUCTION == environment_type:
            return cls.TOKEN_URI_FORMAT_PRODUCTION

        if ApiEnvironmentType.SANDBOX == environment_type:
            return cls.TOKEN_URI_FORMAT_SANDBOX

        raise BunqException(cls.ERROR_ENVIRONMENT_TYPE_NOT_SUPPORTED)
|
11506998 | import os
import os.path as osp
from os.path import dirname, abspath
# Random seeds: DEFAULT_SEED for general RNG use, DS_SEED for dataset splits.
DEFAULT_SEED = 42
DS_SEED = 123  # uses this seed when splitting datasets
# -------------- Paths
# All paths are derived from this config file's location, so the project tree
# stays relocatable.
CONFIG_PATH = abspath(__file__)
SRC_ROOT = dirname(CONFIG_PATH)
PROJECT_ROOT = dirname(SRC_ROOT)
CACHE_ROOT = osp.join(SRC_ROOT, 'cache')
DATASET_ROOT = osp.join(PROJECT_ROOT, 'data')
DEBUG_ROOT = osp.join(PROJECT_ROOT, 'debug')
MODEL_DIR = osp.join(PROJECT_ROOT, 'models')
# -------------- URLs
# Remote base URL for downloadable model-zoo assets.
ZOO_URL = 'http://datasets.d2.mpi-inf.mpg.de/blackboxchallenge'
# -------------- Dataset Stuff
# Standard ImageNet channel-wise normalization statistics (RGB order).
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
DEFAULT_BATCH_SIZE = 64 |
11507006 | from datetime import datetime, timedelta
import mock
from rdr_service.offline.service_accounts import ServiceAccount, ServiceAccountKey, ServiceAccountKeyManager
from rdr_service.services.gcp_config import RdrEnvironment
from tests.helpers.unittest_base import BaseTestCase
class ServiceAccountManagerTest(BaseTestCase):
    """Tests for ServiceAccountKeyManager key-expiration logic.

    The Google discovery client is fully mocked; no network calls and no
    database are used.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # These tests never touch the database.
        self.uses_database = False

    def setUp(self, *args, **kwargs) -> None:
        super(ServiceAccountManagerTest, self).setUp(*args, **kwargs)

        # Mock google discover
        patcher = mock.patch('rdr_service.offline.service_accounts.discovery')
        self.mock_discovery = patcher.start()
        self.addCleanup(patcher.stop)

        # Handle to the mocked projects().serviceAccounts() resource chain,
        # plus shortcuts to its list endpoints used in the assertions below.
        self.mock_project_sa_call = (self.mock_discovery.build.return_value.projects.
                                     return_value.serviceAccounts.return_value)
        self.mock_account_list = self.mock_project_sa_call.list
        self.mock_key_list = self.mock_project_sa_call.keys.return_value.list

        self.service_account_manager = ServiceAccountKeyManager()

    def test_listing_accounts(self):
        """Accounts returned by the API are parsed into ServiceAccount objects."""
        project_name = 'test_project'
        self._mock_service_accounts([
            {'email': '<EMAIL>'},
            {'email': '<EMAIL>'}
        ])
        self.assertEqual([
            ServiceAccount(email='<EMAIL>'),
            ServiceAccount(email='<EMAIL>')
        ], self.service_account_manager._get_service_accounts_for_project(project_name))
        self.mock_account_list.assert_called_with(name=f'projects/{project_name}')

    def test_listing_keys(self):
        """Key listings are parsed, including the ISO-8601 validAfterTime."""
        project_name = 'test_project'
        service_account_name = 'test_account'
        self._mock_keys([
            {'name': 'one', 'validAfterTime': '2021-05-01T15:09:32Z'},
            {'name': 'two', 'validAfterTime': '2020-09-17T09:10:11Z'}
        ])
        self.assertEqual([
            ServiceAccountKey(name='one', start_date=datetime(2021, 5, 1, 15, 9, 32)),
            ServiceAccountKey(name='two', start_date=datetime(2020, 9, 17, 9, 10, 11))
        ], self.service_account_manager._get_keys_for_account(project_name, service_account_name))
        self.mock_key_list.assert_called_with(
            name=f'projects/{project_name}/serviceAccounts/{service_account_name}',
            keyTypes='USER_MANAGED'
        )

    def test_delete_old_keys(self):
        """Keys past the age threshold are deleted."""
        with mock.patch.object(self.service_account_manager, '_get_service_accounts_for_project') as mock_get_accounts,\
                mock.patch.object(self.service_account_manager, '_get_keys_for_account') as mock_get_keys:
            mock_get_accounts.return_value = [ServiceAccount(email='test')]
            mock_get_keys.return_value = [
                ServiceAccountKey(name='delete_this', start_date=datetime.now() - timedelta(days=4))
            ]
            self.service_account_manager.expire_old_keys()
            self.assertKeyDeleted(key_name='delete_this')

    def test_keep_newer_keys(self):
        """Keys newer than the threshold are left in place."""
        with mock.patch.object(self.service_account_manager, '_get_service_accounts_for_project') as mock_get_accounts,\
                mock.patch.object(self.service_account_manager, '_get_keys_for_account') as mock_get_keys:
            mock_get_accounts.return_value = [ServiceAccount(email='test')]
            mock_get_keys.return_value = [
                ServiceAccountKey(name='do_not_delete', start_date=datetime.now() - timedelta(days=1))
            ]
            self.service_account_manager.expire_old_keys()
            self.assertNoKeysDeleted()

    def test_ignore_long_lived_accounts(self):
        """Accounts on the long-lived-keys allow-list are never expired."""
        with mock.patch.object(self.service_account_manager, '_get_service_accounts_for_project') as mock_get_accounts,\
                mock.patch.object(self.service_account_manager, '_get_keys_for_account') as mock_get_keys:
            long_lived_account_email = '<PASSWORD>'
            mock_get_accounts.return_value = [ServiceAccount(email=long_lived_account_email)]
            mock_get_keys.return_value = [
                ServiceAccountKey(name='do_not_delete', start_date=datetime.now() - timedelta(days=100))
            ]
            self.service_account_manager._service_accounts_with_long_lived_keys = [long_lived_account_email]
            self.service_account_manager.expire_old_keys()
            self.assertNoKeysDeleted()

    def test_expire_keys_for_ops_project(self):
        """Check that Prod data-ops accounts are managed when they appear in the list of managed accounts"""
        self.service_account_manager._app_id = RdrEnvironment.PROD.value
        with mock.patch.object(self.service_account_manager, '_get_service_accounts_for_project') as get_accounts_mock,\
                mock.patch.object(self.service_account_manager, '_get_keys_for_account') as mock_get_keys:
            managed_account = '<EMAIL>'
            self._mock_accounts_for_project(get_accounts_mock, 'all-of-us-ops-data-api-prod', [
                ServiceAccount(email=managed_account)
            ])
            mock_get_keys.return_value = [
                ServiceAccountKey(name='delete_this', start_date=datetime.now() - timedelta(days=100))
            ]
            self.service_account_manager._managed_data_ops_accounts = [managed_account]
            self.service_account_manager.expire_old_keys()
            self.assertKeyDeleted(key_name='delete_this')

    def test_expire_only_managed_ops_accounts(self):
        """Make sure that only keys for accounts in the managed account list get expired for the data ops project"""
        self.service_account_manager._app_id = RdrEnvironment.PROD.value
        with mock.patch.object(self.service_account_manager, '_get_service_accounts_for_project') as get_accounts_mock,\
                mock.patch.object(self.service_account_manager, '_get_keys_for_account') as mock_get_keys:
            self._mock_accounts_for_project(get_accounts_mock, 'all-of-us-ops-data-api-prod', [
                ServiceAccount(email='<EMAIL>')
            ])
            mock_get_keys.return_value = [
                ServiceAccountKey(name='do_not_delete', start_date=datetime.now() - timedelta(days=100))
            ]
            self.service_account_manager.expire_old_keys()
            self.assertNoKeysDeleted()

    def _mock_accounts_for_project(self, get_accounts_mock, project_name, accounts):
        """Make the accounts mock return `accounts` only for `project_name`."""
        def get_accounts_for_project(project_name_requested):
            return accounts if project_name_requested == project_name else []
        get_accounts_mock.side_effect = get_accounts_for_project

    def _mock_service_accounts(self, service_accounts):
        """Stub the service-account list API response."""
        self.mock_account_list.return_value.execute.return_value = {
            'accounts': service_accounts
        }

    def _mock_keys(self, keys):
        """Stub the key list API response."""
        self.mock_key_list.return_value.execute.return_value = {
            'keys': keys
        }

    def assertKeyDeleted(self, key_name):
        """Assert that a delete request was built and executed for `key_name`."""
        mock_delete_request_builder = self.mock_project_sa_call.keys.return_value.delete
        mock_delete_request_builder.assert_any_call(name=key_name)
        mock_delete_request_builder.return_value.execute.assert_called()

    def assertNoKeysDeleted(self):
        """Assert that no delete request was ever built."""
        mock_delete_request_builder = self.mock_project_sa_call.keys.return_value.delete
        mock_delete_request_builder.assert_not_called()
|
11507080 | import os
import tempfile
import warnings
import pytest
from hypothesis import Verbosity, settings
import mygrad as mg
import mygrad._utils.graph_tracking as track
import mygrad._utils.lock_management as lock
from tests.utils.stateful import clear_all_mem_locking_state
# Hypothesis test profiles; select one with the HYPOTHESIS_PROFILE env var.
settings.register_profile("ci", deadline=1000)
settings.register_profile("intense", deadline=None, max_examples=1000)
settings.register_profile("dev", max_examples=10)
settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))

# NOTE(review): bool() of a non-empty string is always True, so even
# MYGRAD_COVERAGE_MODE="0" enables coverage mode — confirm this is intended.
COVERAGE_MODE = bool(os.getenv("MYGRAD_COVERAGE_MODE", False))
@pytest.fixture(autouse=True)
def seal_memguard() -> bool:
    """Ensure test cannot mutate MEM_GUARD value"""
    initial_value = lock.MEM_GUARD
    yield initial_value
    if lock.MEM_GUARD is not initial_value:
        warnings.warn("test toggled MEM_GUARD value")
        # Restore the flag *before* failing so subsequent tests are unaffected.
        lock.MEM_GUARD = initial_value
        assert False
    lock.MEM_GUARD = initial_value
@pytest.fixture()
def no_autodiff():
    """Run the test body with mygrad's autodiff disabled."""
    with mg.no_autodiff:
        yield None
@pytest.fixture(autouse=True)
def seal_graph_tracking() -> bool:
    """Ensure test cannot mutate TRACK_GRAPH value"""
    initial_value = track.TRACK_GRAPH
    yield initial_value
    if track.TRACK_GRAPH is not initial_value:
        warnings.warn("test toggled TRACK_GRAPH value")
        # Restore the flag *before* failing so subsequent tests are unaffected.
        track.TRACK_GRAPH = initial_value
        assert False
    track.TRACK_GRAPH = initial_value
@pytest.fixture(autouse=True)
def raise_on_mem_locking_state_leakage() -> bool:
    """Ensure mem-locking state is isolated to each test, and raise if
    a test leaks state"""
    clear_all_mem_locking_state()
    yield None
    if any([lock._views_waiting_for_unlock, lock._array_tracker, lock._array_counter]):
        warnings.warn(
            f"leak\nviews waiting:{lock._views_waiting_for_unlock}"
            f"\narr-tracker:{lock._array_tracker}"
            f"\narr-counter{lock._array_counter}"
        )
        # coverage mode seems to mess with mem-guard synchronization
        # NOTE(review): `assert True` is a no-op, so leaks only warn and never
        # fail — presumably softened deliberately because of coverage mode;
        # confirm whether this should depend on COVERAGE_MODE instead.
        assert True
    clear_all_mem_locking_state()
@pytest.fixture()
def cleandir() -> str:
    """This fixture will use the stdlib `tempfile` module to
    move the current working directory to a tmp-dir for the
    duration of the test.

    Afterwards, the session returns to its previous working
    directory, and the temporary directory and its contents
    are removed.

    Yields
    ------
    str
        The name of the temporary directory."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        old_dir = os.getcwd()
        os.chdir(tmpdirname)
        try:
            yield tmpdirname
        finally:
            # Fix: always restore the original cwd, even when the test body
            # raises. Without the try/finally, an exception thrown into the
            # generator skipped the chdir, leaving TemporaryDirectory cleanup
            # to run with the cwd still inside the directory being removed
            # (which fails on Windows) and later tests in the wrong directory.
            os.chdir(old_dir)
|
11507099 | from .datasets.pentathlon_dataset import PentathlonDataset
# Registry mapping dataset names to their dataset classes.
# All three benchmarks currently share the PentathlonDataset implementation.
dataset_factory = {
    'MSRVTT': PentathlonDataset,
    'DiDeMo': PentathlonDataset,
    'YouCook2': PentathlonDataset,
}
def get_dataset(name):
    """Return the dataset class registered under ``name``.

    Raises
    ------
    KeyError
        If ``name`` is not a registered dataset; the message lists the
        available names (the previous bare ``KeyError`` gave no hint).
    """
    try:
        return dataset_factory[name]
    except KeyError:
        raise KeyError(
            f"unknown dataset {name!r}; available: {sorted(dataset_factory)}"
        ) from None
|
11507115 | from .model import Model
import pandas as pd
import sklearn
import pickle
import zlib
from algoneer.dataset.pandas import PandasDataset
from algoneer.dataset import Dataset
from algoneer.algorithm import Algorithm
from typing import Dict, Any, Union, Optional
class SklearnModel(Model):
    """A Model backed by a fitted scikit-learn estimator.

    Wraps the estimator so predictions on a `Dataset` come back as a
    `PandasDataset`, while raw (non-Dataset) inputs pass straight through
    to the estimator.
    """

    def __init__(
        self,
        algorithm: Algorithm,
        dataset: Optional[Dataset],
        estimator: sklearn.base.BaseEstimator,
    ):
        super().__init__(algorithm=algorithm, dataset=dataset)
        self._estimator = estimator

    def _predict_raw(self, dataset: Any) -> Any:
        """
        Directly calls the `predict` method of the underlying estimator with an
        arbitrary argument and returns the result without wrapping it into a
        dataset. This is useful for interoperability with code that works on
        raw arrays rather than `Dataset` objects.
        """
        return self._estimator.predict(dataset)

    @property
    def data(self) -> Dict[str, Any]:
        # Serialized form of the model: a zlib-compressed pickle of the
        # underlying estimator.
        return {"pickle": zlib.compress(pickle.dumps(self._estimator))}

    def predict(self, dataset: Union[Dataset, Any]) -> Dataset:
        """Predict targets for `dataset`.

        Non-Dataset inputs are forwarded to the raw estimator; Dataset inputs
        are converted to pandas, predicted on the "x"-role attributes, and the
        result is wrapped in a new PandasDataset with the "y"-role schema.
        """
        if not isinstance(dataset, Dataset):
            # if we don't get a dataset we return the raw value
            return self._predict_raw(dataset)
        pd_dataset = PandasDataset.from_dataset(dataset)
        # we get the attributes that have the "x" role assigned to them
        x = pd_dataset.roles.x
        columns = list(dataset.roles.y.schema.attributes.keys())
        # we predict the value using the sklearn estimator
        yr = self._estimator.predict(x.df)
        # if the estimator returns a 1D array we reshape it to a column vector
        if len(yr.shape) == 1:
            yr = yr.reshape((yr.shape[0], 1))
        # wrap the predictions in a DataFrame with the "y" attribute names
        y = pd.DataFrame(yr, columns=columns)
        # build and return a new dataset over the predictions
        return PandasDataset(dataset.project, dataset.roles.y.schema, y)
|
11507126 | from __future__ import annotations
import os
import time
from .core import *
from .sublime import *
from .event import Handle, Event, EventReturning
from . import platform
from .error import Error
from .json import json_encode, json_decode
from .log import *
_current_package = __package__.split('.', 1)[0]
_current_package_path = os.path.join(sublime.packages_path(), _current_package)
def current_package() -> str:
    # NOTE(review): despite the name, this returns the package *path*
    # (<packages_path>/<package>), not the package name; the name itself
    # comes from current_package_name().
    return _current_package_path
def current_package_name() -> str:
    """Return the top-level package name (first component of __package__)."""
    return _current_package
class stopwatch:
    """Measures wall-clock time since construction, reported in milliseconds.

    Call the instance (or `print`) to log the elapsed time with an optional
    suffix; use `elapsed` to read the value without printing.
    """
    def __init__(self, prefix: str = '') -> None:
        self.ts = time.time()
        self.prefix = prefix

    def __call__(self, postfix='') -> None:
        # Shorthand for print().
        self.print(postfix)

    def print(self, postfix=''):
        print('%s: %2.2f ms %s' % (self.prefix.rjust(8), self.elapsed(), postfix))

    def elapsed(self):
        # Milliseconds since this stopwatch was created.
        return (time.time() - self.ts) * 1000
class timer:
    """Invokes `callback` after `interval` seconds, optionally repeating.

    Scheduling starts immediately on construction via core.call_later; call
    `dispose` to cancel a pending invocation.
    """
    def __init__(self, callback: Callable[[], None], interval: float, repeat: bool) -> None:
        self.interval = interval
        self.callback = callback
        # Arm the first invocation right away.
        self.cancelable = core.call_later(interval, self.on_complete)
        self.repeat = repeat

    def schedule(self) -> None:
        # Re-arm the timer for another interval.
        self.cancelable = core.call_later(self.interval, self.on_complete)

    def on_complete(self) -> None:
        self.callback()
        if self.repeat:
            self.schedule()

    def dispose(self) -> None:
        # Cancel the pending invocation.
        self.cancelable.cancel()
|
11507130 | import logging
from pytryfi.fiDevice import FiDevice
from pytryfi.common import query
from pytryfi.exceptions import *
from pytryfi.const import PET_ACTIVITY_ONGOINGWALK
import datetime
from sentry_sdk import capture_exception
LOGGER = logging.getLogger(__name__)
class FiPet(object):
    """A pet tracked by a TryFi collar.

    Holds the pet's profile, current location, activity stats and its
    FiDevice, all populated from TryFi GraphQL JSON responses. Fixes in this
    revision: removed a duplicated `currPlaceAddress` property definition,
    removed a copy-pasted (LED-related) error log from setLostDogMode, and
    made updateStats return False on failure like its sibling update methods.
    """

    def __init__(self, petId):
        self._petId = petId

    def setPetDetailsJSON(self, petJSON):
        """Populate profile fields (and the collar device) from a pet JSON object."""
        try:
            self._name = petJSON['name']
            self._homeCityState = petJSON['homeCityState']
            self._yearOfBirth = int(petJSON['yearOfBirth'])
            self._monthOfBirth = int(petJSON['monthOfBirth'])
            self._dayOfBirth = int(petJSON['dayOfBirth'])
            self._gender = petJSON['gender']
            #weight is in kg
            self._weight = float(petJSON['weight'])
            self._breed = petJSON['breed']['name']
            #track last updated
            self._lastUpdated = datetime.datetime.now()
        except TryFiError as e:
            LOGGER.error(f"Unable to set values for Pet.\nException: {e}\nwhile parsing {petJSON}")
            capture_exception(e)
            raise TryFiError("Unable to set Pet Details")
        except Exception as e:
            capture_exception(e)
        try:
            self._photoLink = petJSON['photos']['first']['image']['fullSize']
        except Exception as e:
            # Missing photo is non-fatal; fall back to an empty string.
            LOGGER.warning("Cannot find photo of your pet. Defaulting to empty string.")
            self._photoLink = ""
        try:
            self._device = FiDevice(petJSON['device']['id'])
            self._device.setDeviceDetailsJSON(petJSON['device'])
        except Exception as e:
            capture_exception(e)

    def __str__(self):
        return f"Last Updated - {self.lastUpdated} - Pet ID: {self.petId} Name: {self.name} Is Lost: {self.isLost} From: {self.homeCityState} ActivityType: {self.activityType} Located: {self.currLatitude},{self.currLongitude} Last Updated: {self.currStartTime}\n \
            using Device/Collar: {self._device}"

    # set the Pet's current location details
    def setCurrentLocation(self, activityJSON):
        """Populate current position/place fields from an activity JSON object."""
        activityType = activityJSON['__typename']
        self._activityType = activityType
        self._areaName = activityJSON['areaName']
        try:
            if activityType == PET_ACTIVITY_ONGOINGWALK:
                # For an ongoing walk, the latest position is the last entry.
                positionSize = len(activityJSON['positions'])
                self._currLongitude = float(activityJSON['positions'][positionSize-1]['position']['longitude'])
                self._currLatitude = float(activityJSON['positions'][positionSize-1]['position']['latitude'])
                self._currStartTime = datetime.datetime.fromisoformat(activityJSON['start'].replace('Z', '+00:00'))
            else:
                self._currLongitude = float(activityJSON['position']['longitude'])
                self._currLatitude = float(activityJSON['position']['latitude'])
                self._currStartTime = datetime.datetime.fromisoformat(activityJSON['start'].replace('Z', '+00:00'))
            try:
                self._currPlaceName = activityJSON['place']['name']
                self._currPlaceAddress = activityJSON['place']['address']
            except Exception as e:
                # Place is optional in the payload; keep going with placeholders.
                LOGGER.warning("Could not set place, defaulting to Unknown")
                self._currPlaceName = "UNKNOWN"
                self._currPlaceAddress = "UNKNOWN"
            self._lastUpdated = datetime.datetime.now()
        except TryFiError as e:
            capture_exception(e)
            LOGGER.error(f"Unable to set values Current Location for Pet {self.name}.\nException: {e}\nwhile parsing {activityJSON}")
            raise TryFiError("Unable to set Pet Location Details")
        except Exception as e:
            capture_exception(e)

    # set the Pet's current steps, goals and distance details for daily, weekly and monthly
    def setStats(self, activityJSONDaily, activityJSONWeekly, activityJSONMonthly):
        """Populate step goals/counts and distances for day, week and month."""
        try:
            #distance is in metres
            self._dailyGoal = int(activityJSONDaily['stepGoal'])
            self._dailySteps = int(activityJSONDaily['totalSteps'])
            self._dailyTotalDistance = float(activityJSONDaily['totalDistance'])
        except TryFiError as e:
            LOGGER.error(f"Unable to set values Daily Stats for Pet {self.name}.\nException: {e}\nwhile parsing {activityJSONDaily}")
            capture_exception(e)
            raise TryFiError("Unable to set Pet Daily Stats")
        except Exception as e:
            capture_exception(e)
        try:
            self._weeklyGoal = int(activityJSONWeekly['stepGoal'])
            self._weeklySteps = int(activityJSONWeekly['totalSteps'])
            self._weeklyTotalDistance = float(activityJSONWeekly['totalDistance'])
        except TryFiError as e:
            LOGGER.error(f"Unable to set values Weekly Stats for Pet {self.name}.\nException: {e}\nwhile parsing {activityJSONWeekly}")
            capture_exception(e)
            raise TryFiError("Unable to set Pet Weekly Stats")
        except Exception as e:
            capture_exception(e)
        try:
            self._monthlyGoal = int(activityJSONMonthly['stepGoal'])
            self._monthlySteps = int(activityJSONMonthly['totalSteps'])
            self._monthlyTotalDistance = float(activityJSONMonthly['totalDistance'])
        except TryFiError as e:
            LOGGER.error(f"Unable to set values Monthly Stats for Pet {self.name}.\nException: {e}\nwhile parsing {activityJSONMonthly}")
            capture_exception(e)
            raise TryFiError("Unable to set Pet Monthly Stats")
        except Exception as e:
            capture_exception(e)
        self._lastUpdated = datetime.datetime.now()

    # Update the Stats of the pet
    def updateStats(self, sessionId):
        """Refresh daily/weekly/monthly stats. Returns True on success, False otherwise."""
        try:
            pStatsJSON = query.getCurrentPetStats(sessionId,self.petId)
            self.setStats(pStatsJSON['dailyStat'],pStatsJSON['weeklyStat'],pStatsJSON['monthlyStat'])
            return True
        except Exception as e:
            LOGGER.error(f"Could not update stats for Pet {self.name}.\n{e}")
            capture_exception(e)
            # Fix: return False on failure, matching the other update* methods.
            return False

    # Update the Pet's GPS location
    def updatePetLocation(self, sessionId):
        """Refresh the pet's location. Returns True on success, False otherwise."""
        try:
            pLocJSON = query.getCurrentPetLocation(sessionId,self.petId)
            self.setCurrentLocation(pLocJSON)
            return True
        except Exception as e:
            LOGGER.error(f"Could not update Pet: {self.name}'s location.\n{e}")
            capture_exception(e)
            return False

    # Update the device/collar details for this pet
    def updateDeviceDetails(self, sessionId):
        """Refresh the collar details. Returns True on success, False otherwise."""
        try:
            deviceJSON = query.getDevicedetails(sessionId, self.petId)
            self.device.setDeviceDetailsJSON(deviceJSON['device'])
            return True
        except Exception as e:
            LOGGER.error(f"Could not update Device/Collar information for Pet: {self.name}\n{e}")
            capture_exception(e)
            return False

    # Update all details regarding this pet
    def updateAllDetails(self, sessionId):
        """Refresh device details, location and stats in one call."""
        self.updateDeviceDetails(sessionId)
        self.updatePetLocation(sessionId)
        self.updateStats(sessionId)

    # set the color code of the led light on the pet collar
    def setLedColorCode(self, sessionId, colorCode):
        """Set the collar LED color. Returns True on success, False otherwise."""
        try:
            moduleId = self.device.moduleId
            ledColorCode = int(colorCode)
            setColorJSON = query.setLedColor(sessionId, moduleId, ledColorCode)
            try:
                self.device.setDeviceDetailsJSON(setColorJSON['setDeviceLed'])
            except Exception as e:
                LOGGER.warning(f"Updated LED Color but could not get current status for Pet: {self.name}\nException: {e}")
                capture_exception(e)
            return True
        except Exception as e:
            LOGGER.error(f"Could not complete Led Color request:\n{e}")
            capture_exception(e)
            return False

    # turn on or off the led light. action = True will enable the light, false turns off the light
    def turnOnOffLed(self, sessionId, action):
        """Toggle the collar LED. Returns True on success, False otherwise."""
        try:
            moduleId = self.device.moduleId
            onOffResponse = query.turnOnOffLed(sessionId, moduleId, action)
            try:
                self.device.setDeviceDetailsJSON(onOffResponse['updateDeviceOperationParams'])
            except Exception as e:
                LOGGER.warning(f"Action: {action} was successful however unable to get current status for Pet: {self.name}")
                capture_exception(e)
            return True
        except Exception as e:
            LOGGER.error(f"Could not complete LED request:\n{e}")
            capture_exception(e)
            return False

    # set the lost dog mode to Normal or Lost Dog. Action is true for lost dog and false for normal (not lost)
    def setLostDogMode(self, sessionId, action):
        """Toggle lost-dog mode. Returns True on success, False otherwise."""
        try:
            moduleId = self.device.moduleId
            petModeResponse = query.setLostDogMode(sessionId, moduleId, action)
            try:
                self.device.setDeviceDetailsJSON(petModeResponse['updateDeviceOperationParams'])
            except Exception as e:
                LOGGER.warning(f"Action: {action} was successful however unable to get current status for Pet: {self.name}")
                capture_exception(e)
            return True
        except Exception as e:
            # Fix: dropped a second, copy-pasted error log that wrongly referred
            # to the LED on/off request.
            LOGGER.error(f"Could not complete Lost Dog Mode request:\n{e}")
            capture_exception(e)
            return False

    @property
    def device(self):
        return self._device

    @property
    def petId(self):
        return self._petId

    @property
    def name(self):
        return self._name

    @property
    def homeCityState(self):
        return self._homeCityState

    @property
    def yearOfBirth(self):
        return self._yearOfBirth

    @property
    def monthOfBirth(self):
        return self._monthOfBirth

    @property
    def dayOfBirth(self):
        return self._dayOfBirth

    @property
    def gender(self):
        return self._gender

    @property
    def weight(self):
        return self._weight

    @property
    def breed(self):
        return self._breed

    @property
    def photoLink(self):
        return self._photoLink

    @property
    def currLongitude(self):
        return self._currLongitude

    @property
    def currLatitude(self):
        return self._currLatitude

    @property
    def currStartTime(self):
        return self._currStartTime

    @property
    def currPlaceName(self):
        return self._currPlaceName

    # Fix: this property was defined twice verbatim; the duplicate was removed.
    @property
    def currPlaceAddress(self):
        return self._currPlaceAddress

    @property
    def dailyGoal(self):
        return self._dailyGoal

    @property
    def dailySteps(self):
        return self._dailySteps

    @property
    def dailyTotalDistance(self):
        return self._dailyTotalDistance

    @property
    def weeklyGoal(self):
        return self._weeklyGoal

    @property
    def weeklySteps(self):
        return self._weeklySteps

    @property
    def weeklyTotalDistance(self):
        return self._weeklyTotalDistance

    @property
    def monthlyGoal(self):
        return self._monthlyGoal

    @property
    def monthlySteps(self):
        return self._monthlySteps

    @property
    def monthlyTotalDistance(self):
        return self._monthlyTotalDistance

    @property
    def lastUpdated(self):
        return self._lastUpdated

    @property
    def isLost(self):
        return self.device.isLost

    @property
    def activityType(self):
        return self._activityType

    @property
    def areaName(self):
        return self._areaName

    def getBirthDate(self):
        return datetime.datetime(self.yearOfBirth, self.monthOfBirth, self.dayOfBirth)

    def getDailySteps(self):
        return self.dailySteps

    def getDailyGoal(self):
        return self.dailyGoal

    def getDailyDistance(self):
        return self.dailyTotalDistance

    def getWeeklySteps(self):
        return self.weeklySteps

    def getWeeklyGoal(self):
        return self.weeklyGoal

    def getWeeklyDistance(self):
        return self.weeklyTotalDistance

    def getMonthlySteps(self):
        return self.monthlySteps

    def getMonthlyGoal(self):
        return self.monthlyGoal

    def getMonthlyDistance(self):
        return self.monthlyTotalDistance
|
11507157 | import jax
from jax.config import config
import jax.numpy as np
import numpy as onp
# https://github.com/matthias-k/cyipopt
from ipopt import minimize_ipopt
import fenics as fn
import fenics_adjoint as fa
import ufl
from jaxfenics_adjoint import build_jax_fem_eval
from jaxfenics_adjoint import from_numpy
import matplotlib.pyplot as plt
config.update("jax_enable_x64", True)
fn.set_log_level(fn.LogLevel.ERROR)
tr, sym, grad, Identity = ufl.tr, ufl.sym, ufl.grad, ufl.Identity
inner, dot, dx = ufl.inner, ufl.dot, ufl.dx
# Geometry and elasticity
t, h, L = 2.0, 1.0, 5.0 # Thickness, height and length
E, nu = 210e3, 0.3 # Young Modulus
G = E / (2.0 * (1.0 + nu)) # Shear Modulus
lmbda = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)) # Lambda
def simp(x):
    # SIMP material interpolation: maps density x to stiffness scale
    # eps + (1 - eps) * x**p, using module-level eps and penalization exponent p.
    return eps + (1 - eps) * x ** p
max_volume = 0.4 * L * h # Volume constraint
p = 4 # Exponent
eps = fa.Constant(1.0e-6) # Epsilon for SIMP
# Mesh, Control and Solution Spaces
nelx = 192
nely = 64
mesh = fa.RectangleMesh.create(
[fn.Point(0.0, 0.0), fn.Point(L, h)], [nelx, nely], fn.CellType.Type.triangle
)
V = fn.VectorFunctionSpace(mesh, "CG", 1) # Displacements
C = fn.FunctionSpace(mesh, "CG", 1) # Control
# Volumetric Load
q = -10.0 / t
b = fa.Constant((0.0, q))
def Left_boundary(x, on_boundary):
    # True on the clamped left edge of the beam (x[0] == 0 within DOLFIN_EPS).
    return on_boundary and abs(x[0]) < fn.DOLFIN_EPS
u_L = fa.Constant((0.0, 0.0))
bcs = [fa.DirichletBC(V, u_L, Left_boundary)]
@build_jax_fem_eval((fa.Function(C),))
def forward(x):
    """Solve the linear-elasticity problem for density field x; return displacement."""
    u = fn.TrialFunction(V)
    w = fn.TestFunction(V)
    # Isotropic linear-elastic stress tensor.
    sigma = lmbda * tr(sym(grad(u))) * Identity(2) + 2 * G * sym(grad(u))
    residual = simp(x) * inner(sigma, grad(w)) * dx - dot(b, w) * dx
    lhs_form, rhs_form = ufl.lhs(residual), ufl.rhs(residual)
    u = fa.Function(V)
    fa.solve(lhs_form == rhs_form, u, bcs)
    return u
@build_jax_fem_eval((fa.Function(V), fa.Function(C)))
def eval_cost(u, x):
    # Compliance (work of load b on displacement u) plus a small Tikhonov
    # regularization on the density gradient.
    J_form = dot(b, u) * dx + fa.Constant(1.0e-8) * dot(grad(x), grad(x)) * dx
    J = fa.assemble(J_form)
    return J
@build_jax_fem_eval((fa.Function(C),))
def eval_volume(rho):
    # Total material volume: integral of the density field over the domain.
    J_form = rho * ufl.dx
    J = fa.assemble(J_form)
    return J
def obj_function(x):
    """Total cost for design x: solve the PDE, then evaluate the objective."""
    return eval_cost(forward(x), x)
def min_f(x):
    """Objective wrapper for ipopt: return (value, gradient) as NumPy arrays."""
    value, gradient = jax.value_and_grad(obj_function)(x)
    return onp.array(value), onp.array(gradient)
def volume_inequality_fun(rho):
    """Enforce the volume constraint g(rho) = V - rho*dx >= 0."""
    # Non-negative exactly when the design uses at most max_volume material.
    return max_volume - eval_volume(rho)
constraints = [
{
"type": "ineq",
"fun": volume_inequality_fun,
"jac": lambda x: jax.grad(volume_inequality_fun)(x),
}
]
x0 = np.ones(C.dim()) * max_volume / (L * h)
res = minimize_ipopt(
min_f,
x0,
jac=True,
bounds=((0.0, 1.0),) * C.dim(),
constraints=constraints,
options={"print_level": 5, "max_iter": 100},
)
rho_opt_final = from_numpy(res.x, fa.Function(C))
c = fn.plot(rho_opt_final)
plt.colorbar(c)
plt.show()
# Save optimal solution for visualizing with ParaView
# with XDMFFile("1_dist_load/control_solution_1.xdmf") as f:
# f.write(rho_opt)
|
11507165 | import six
import json
import torch
import numpy as np
from functools import partial
from torchtext.data import RawField, Pipeline
from torchtext.data.utils import get_tokenizer
from onmt.inputters.datareader_base import DataReaderBase
class GrhDataReader(DataReaderBase):
    """Reader for graph (edge-list) data: yields one example dict per line."""
    def read(self, sequences, side, _dir=None):
        """Read edges data from disk.
        Args:
            sequences (str or Iterable[str]):
                path to edge file or iterable of the actual edge data.
            side (str): Prefix used in return dict. Usually
                ``"src"`` , ``"tgt" or "grh``.
            _dir (NoneType): Leave as ``None``. This parameter exists to
                conform with the :func:`DataReaderBase.read()` signature.
        Yields:
            dictionaries whose keys are the names of fields and whose
            values are more or less the result of tokenizing with those
            fields.
        """
        assert _dir is None or _dir == "", \
            "Cannot use _dir with GrhDataReader."
        # assert _dir is not None or _dir != "", \
        #     "Must use _dir with GrhDataReader (provide edges vocab)."
        if isinstance(sequences, str):
            # A string argument is treated as a path: stream lines from the file.
            sequences = DataReaderBase._read_file(sequences)
        # vocab = json.load(_dir)
        for i, seq in enumerate(sequences):
            if isinstance(seq, six.binary_type):
                seq = seq.decode("utf-8")
            yield {side: seq, "indices": i}
class GraphField(RawField):
    """ custom field.
    Because the grh data doesn't need numericalization (which the default field
    always does in process()) and padding (sequential=False would avoid padding
    but also disable tokenize), we need this custom field that performs exactly
    the operations we want.
    Notice that here we don't implement multi-shards.
    """
    def __init__(self, sequential=True, use_vocab=True, preprocessing=None,
                 # equivalent to tok = lambda s: s.split(); tokenize=tok —
                 # i.e. just the default whitespace tokenizer.
                 postprocessing=None, lower=False, tokenize=(lambda s: s.split()),
                 dtype=torch.long
                 ):
        super(GraphField, self).__init__()
        self.sequential = sequential
        self.use_vocab = use_vocab
        self.preprocessing = preprocessing
        self.postprocessing = postprocessing
        self.lower = lower
        self.tokenize = get_tokenizer(tokenize)
        self.dtype = dtype

    def preprocess(self, x):
        """Load a single example using this field, tokenizing if necessary.
        If the input is a Python 2 `str`, it will be converted to Unicode
        first. If `sequential=True`, it will be tokenized. Then the input
        will be optionally lowercased and passed to the user-provided
        `preprocessing` Pipeline."""
        if (six.PY2 and isinstance(x, six.string_types) and
                not isinstance(x, six.text_type)):
            x = Pipeline(lambda s: six.text_type(s, encoding='utf-8'))(x)
        if isinstance(x, six.text_type):
            x = self.tokenize(x.rstrip('\n'))
        if self.lower:
            x = Pipeline(six.text_type.lower)(x)
        # The Pipeline that will be applied to examples using this field after
        # tokenizing but before numericalizing. Many Datasets replace this
        # attribute with a custom preprocessor. Default: None.
        if self.preprocessing is not None:
            return self.preprocessing(x)
        else:
            return x

    def process(self, batch, device=None):
        """ Process a list of examples to create a torch.Tensor.
        Graph information is in the form of an adjacency list.
        We convert this to an adjacency matrix in NumPy format.
        The matrix contains label ids.
        IMPORTANT: we add one to the label id stored in the matrix.
        This is because 0 is a valid vocab id but we want to use 0's
        to represent lack of edges instead. This means that the GCN code
        should account for this.
        But it is better to be defined in postprocessing
        Args:
            batch (list(object)): A list of object from a batch of examples.
        """
        def get_pad_len(l):
            # The pad length comes from the largest node index of the *last*
            # edge tuple — assumes edge lists end with the highest-indexed
            # self-connection.
            last_tuple = l[-1]
            last_node, edge_type = last_tuple[0], last_tuple[-1]
            #assert edge_type == 3, "must be the self-connection"
            # assert last_node == last_tuple[1]
            return last_node
        # global_index_list = []
        # sorted(final_grh, key=lambda x: int(x[0]))
        pad_len = max(list(map(get_pad_len, batch)))+1
        new_grh = np.array([np.zeros((pad_len, pad_len)) for _ in range(len(batch))])
        # NOTE(review): `self_id` is only assigned when a graph contains an
        # (i, i) edge; if the first graph in a batch had none, the loop below
        # would hit an UnboundLocalError — presumably every graph is guaranteed
        # to contain self-loops upstream. Confirm.
        for i, grh in enumerate(batch):
            # global_index = 0
            for tup in grh:
                new_grh[i][tup[0]][tup[1]] = tup[2] + 1
                if tup[0] == tup[1]:
                    self_id = tup[2]
                # if tup[0] > global_index:
                #     global_index = tup[0]
            # global_index_list.append(global_index)
            for j in range(pad_len):
                new_grh[i][j][j] = self_id + 1# the pad symbols need to have a self loop (or should they just be masked out?)
        # error1: the field is constructed at preprocess time and stored inside
        # the dataset, so __init__ is NOT re-run when the dataset is loaded.
        # error2: torch.Tensor is a legacy constructor — don't use it! It does
        # not accept device as a str; use torch.tensor instead!
        arr = torch.tensor(new_grh,device=device, dtype=self.dtype)
        if self.sequential:
            arr = arr.contiguous()
        return arr
def _edge_tokenizer(string, vocab=None):
assert vocab is not None, "the edges vocab cannot be None"
graph_tokens = string.rstrip().split()
adj_list = [(int(tok[1:-1].split(',')[0]),
int(tok[1:-1].split(',')[1]),
vocab[tok[1:-1].split(',')[2]]) for tok in graph_tokens]
return adj_list
# def _convert_to_adj_matrix():
def grh_fields(**kwargs):
    """Create the graph field.

    Loads the edge-label vocabulary (JSON) from ``kwargs["vocab"]`` and
    builds a non-sequential, no-vocab ``GraphField`` whose tokenizer maps
    edge tokens through that vocabulary.
    """
    vocab_path = kwargs.get("vocab")
    with open(vocab_path, encoding='utf8') as handle:
        edge_vocab = json.load(handle)
    return GraphField(
        sequential=False,
        use_vocab=False,
        tokenize=partial(_edge_tokenizer, vocab=edge_vocab),
    )
11507236 | import pytest
async def _query_human_resolver(*_args, **__kwargs):
return {"name": "Hooman"}
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(resolvers={"Query.human": _query_human_resolver})
@pytest.mark.parametrize(
"query,expected",
[
(
"""
query {
human(id: 1) {
name
}
}
""",
{"data": {"human": {"name": "Hooman"}}},
),
(
"""
query {
human(id: 1) {
name
unknownField
}
dog {
name
}
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField doesn't exist on Human",
"path": ["human", "unknownField"],
"locations": [{"line": 5, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
}
],
},
),
(
"""
query {
human(undefinedArgument: 1, id: 1) {
unknownField
name
}
dog {
name
}
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField doesn't exist on Human",
"path": ["human", "unknownField"],
"locations": [{"line": 4, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on field < Query.human >.",
"path": ["human"],
"locations": [{"line": 3, "column": 21}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
],
},
),
(
"""
query {
human(undefinedArgument: 1, id: 1) {
unknownField
name
}
unknownField {
name
}
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField doesn't exist on Human",
"path": ["human", "unknownField"],
"locations": [{"line": 4, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on field < Query.human >.",
"path": ["human"],
"locations": [{"line": 3, "column": 21}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Field name doesn't exist on Root",
"path": ["unknownField", "name"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField doesn't exist on Query",
"path": ["unknownField"],
"locations": [{"line": 7, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
],
},
),
(
"""
query {
dog {
doesKnowCommand(command: SIT)
}
}
""",
{
"data": None,
"errors": [
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 4, "column": 33}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 4, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
],
},
),
(
"""
fragment DogFields on Dog {
... on Dog {
doesKnowCommand(command: SIT)
}
}
query {
dog {
...DogFields
}
}
""",
{
"data": None,
"errors": [
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 4, "column": 33}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 4, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
],
},
),
(
"""
query {
unknownField1
dog {
doesKnowCommand(command: SIT) {
unknownField2
}
unknownField3
}
unknownField4
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField1 doesn't exist on Query",
"path": ["unknownField1"],
"locations": [{"line": 3, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField2 doesn't exist on Boolean",
"path": ["dog", "doesKnowCommand", "unknownField2"],
"locations": [{"line": 6, "column": 19}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field doesKnowCommand must not have a selection since type Boolean has no subfields.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 5, "column": 17}],
"extensions": {
"rule": "5.3.3",
"tag": "leaf-field-selections",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Leaf-Field-Selections",
"spec": "June 2018",
},
},
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 5, "column": 33}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 5, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
{
"message": "Field unknownField3 doesn't exist on Dog",
"path": ["dog", "unknownField3"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField4 doesn't exist on Query",
"path": ["unknownField4"],
"locations": [{"line": 10, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
],
},
),
(
"""
fragment QueryFields on Query {
unknownField1
}
fragment NestedDogFields on Dog {
... on Dog {
doesKnowCommand(command: SIT) {
unknownField2
}
}
}
fragment DogFields on Dog {
...NestedDogFields
unknownField3
}
query {
...QueryFields
dog {
...DogFields
unknownField4
}
unknownField5
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField1 doesn't exist on Query",
"path": ["unknownField1"],
"locations": [{"line": 3, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField2 doesn't exist on Boolean",
"path": ["doesKnowCommand", "unknownField2"],
"locations": [{"line": 9, "column": 19}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field doesKnowCommand must not have a selection since type Boolean has no subfields.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.3.3",
"tag": "leaf-field-selections",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Leaf-Field-Selections",
"spec": "June 2018",
},
},
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 33}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
{
"message": "Field unknownField3 doesn't exist on Dog",
"path": ["unknownField3"],
"locations": [{"line": 16, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField4 doesn't exist on Dog",
"path": ["dog", "unknownField4"],
"locations": [{"line": 23, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField5 doesn't exist on Query",
"path": ["unknownField5"],
"locations": [{"line": 25, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
],
},
),
(
"""
fragment QueryFields on Query {
unknownField1
}
fragment NestedDogFields on Dog {
... on Dog {
doesKnowCommand(command: SIT) {
unknownField2 {
unknownField21
}
}
}
}
fragment DogFields on Dog {
unknownField3
...NestedDogFields
}
query {
...QueryFields
dog {
...DogFields
unknownField4
}
unknownField5
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField1 doesn't exist on Query",
"path": ["unknownField1"],
"locations": [{"line": 3, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField21 doesn't exist on Root",
"path": [
"doesKnowCommand",
"unknownField2",
"unknownField21",
],
"locations": [{"line": 10, "column": 21}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField2 doesn't exist on Boolean",
"path": ["doesKnowCommand", "unknownField2"],
"locations": [{"line": 9, "column": 19}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field doesKnowCommand must not have a selection since type Boolean has no subfields.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.3.3",
"tag": "leaf-field-selections",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Leaf-Field-Selections",
"spec": "June 2018",
},
},
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 33}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 8, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
{
"message": "Field unknownField3 doesn't exist on Dog",
"path": ["unknownField3"],
"locations": [{"line": 17, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField4 doesn't exist on Dog",
"path": ["dog", "unknownField4"],
"locations": [{"line": 25, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field unknownField5 doesn't exist on Query",
"path": ["unknownField5"],
"locations": [{"line": 27, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
],
},
),
(
"""
fragment QueryFields on Query {
unknownField1
}
fragment NestedDogFields on Dog {
... on Dog {
doesKnowCommandDown: doesKnowCommand(dogCommand: DOWN)
doesKnowCommandSitError: doesKnowCommand(command: SIT) {
unknownField2 @deprecated(undefinedArgument: "undefined") {
unknownField21
}
}
doesKnowCommandHeel: doesKnowCommand(dogCommand: HEEL) @deprecated(undefinedArgument: "undefined")
doesKnowCommandSit: doesKnowCommand(dogCommand: SIT)
}
}
fragment DogFields on Dog {
unknownField3
...NestedDogFields
}
query {
...QueryFields
dog {
doesKnowCommandUndefinedArgument: doesKnowCommand(undefinedArgument: "undefined", dogCommand: SIT)
...DogFields
unknownField4
doesKnowCommandHeel: doesKnowCommand(dogCommand: HEEL) @deprecated(undefinedArgument: "undefined")
}
unknownField5
}
""",
{
"data": None,
"errors": [
{
"message": "Field unknownField1 doesn't exist on Query",
"path": ["unknownField1"],
"locations": [{"line": 3, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on directive < @deprecated >.",
"path": ["doesKnowCommand", "unknownField2"],
"locations": [{"line": 10, "column": 45}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Field unknownField21 doesn't exist on Root",
"path": [
"doesKnowCommand",
"unknownField2",
"unknownField21",
],
"locations": [{"line": 11, "column": 21}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Directive < @deprecated > is not used in a valid location.",
"path": ["doesKnowCommand", "unknownField2"],
"locations": [
{"line": 10, "column": 19},
{"line": 10, "column": 33},
],
"extensions": {
"rule": "5.7.2",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Directives-Are-In-Valid-Locations",
"tag": "directives-are-in-valid-locations",
},
},
{
"message": "Field unknownField2 doesn't exist on Boolean",
"path": ["doesKnowCommand", "unknownField2"],
"locations": [{"line": 10, "column": 19}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Field doesKnowCommand must not have a selection since type Boolean has no subfields.",
"path": ["doesKnowCommand"],
"locations": [{"line": 9, "column": 17}],
"extensions": {
"rule": "5.3.3",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Leaf-Field-Selections",
"tag": "leaf-field-selections",
},
},
{
"message": "Provided Argument < command > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 9, "column": 58}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Missing mandatory argument < dogCommand > in field < Dog.doesKnowCommand >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 9, "column": 17}],
"extensions": {
"rule": "5.4.2.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Required-Arguments",
"tag": "required-arguments",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on directive < @deprecated >.",
"path": ["doesKnowCommand"],
"locations": [{"line": 14, "column": 84}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Directive < @deprecated > is not used in a valid location.",
"path": ["doesKnowCommand"],
"locations": [
{"line": 14, "column": 17},
{"line": 14, "column": 72},
],
"extensions": {
"rule": "5.7.2",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Directives-Are-In-Valid-Locations",
"tag": "directives-are-in-valid-locations",
},
},
{
"message": "Field unknownField3 doesn't exist on Dog",
"path": ["unknownField3"],
"locations": [{"line": 20, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on field < Dog.doesKnowCommand >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 27, "column": 67}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Field unknownField4 doesn't exist on Dog",
"path": ["dog", "unknownField4"],
"locations": [{"line": 29, "column": 17}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
{
"message": "Provided Argument < undefinedArgument > doesn't exist on directive < @deprecated >.",
"path": ["dog", "doesKnowCommand"],
"locations": [{"line": 30, "column": 84}],
"extensions": {
"rule": "5.4.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Names",
"tag": "argument-names",
},
},
{
"message": "Directive < @deprecated > is not used in a valid location.",
"path": ["dog", "doesKnowCommand"],
"locations": [
{"line": 30, "column": 17},
{"line": 30, "column": 72},
],
"extensions": {
"rule": "5.7.2",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Directives-Are-In-Valid-Locations",
"tag": "directives-are-in-valid-locations",
},
},
{
"message": "Field unknownField5 doesn't exist on Query",
"path": ["unknownField5"],
"locations": [{"line": 32, "column": 15}],
"extensions": {
"rule": "5.3.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Field-Selections-on-Objects-Interfaces-and-Unions-Types",
"tag": "field-selections-on-objects-interfaces-and-unions-types",
},
},
],
},
),
],
)
async def test_issue105(engine, query, expected):
    """Regression test for issue #105: each parametrized query executed
    against the engine must yield exactly the expected payload, including
    every validation error with its spec rule, location and path metadata."""
    assert await engine.execute(query) == expected
|
11507252 | import os
import fire
def opensees_run(index):
    """Run one OpenSees simulation for the given input-variable index.

    Stages ``./InputFiles/import_variables_<index>.tcl`` as
    ``./import_variables.tcl``, runs OpenSees on ``test.tcl`` (via the
    cluster's environment-modules setup), and archives ``node20001.out``
    as ``./OutputFiles/node20001_<index>.out``.

    Args:
        index: identifier of the input-variable file to use (interpolated
            into shell commands).
    """
    # NOTE(review): `index` is interpolated into `os.system` shell strings
    # unescaped; it comes from the CLI, so keep inputs trusted.
    # Stage the input variables for this run.
    os.system("cp ./InputFiles/import_variables_" + str(index) + ".tcl ./import_variables.tcl")
    # Run the simulation.
    os.system("module load opensees && OpenSees test.tcl")
    # Archive the output, creating ./OutputFiles if needed.
    # (os.getcwd() is already absolute; no need to join with os.sep.)
    path_data = os.path.join(os.getcwd(), 'OutputFiles')
    print(path_data)
    os.makedirs(path_data, exist_ok=True)
    os.system("cp ./node20001.out ./OutputFiles/node20001_" + str(index) + ".out ")
if __name__ == '__main__':
    # Expose opensees_run(index) as a CLI: `python <script> <index>`.
    fire.Fire(opensees_run)
|
11507272 | from datetime import timedelta
from django.test import SimpleTestCase
from testil import eq
from corehq.util.metrics import make_buckets_from_timedeltas, DAY_SCALE_TIME_BUCKETS, bucket_value
from corehq.util.metrics.utils import sanitize_url, get_url_group
from corehq.util.test_utils import generate_cases
def test_make_buckets_from_timedeltas():
    """make_buckets_from_timedeltas should return total seconds per delta,
    and DAY_SCALE_TIME_BUCKETS must match the same 1s..24h scale."""
    expected = [1, 10, 60, 10 * 60, 60 * 60, 12 * 60 * 60, 24 * 60 * 60]
    deltas = (
        timedelta(seconds=1),
        timedelta(seconds=10),
        timedelta(minutes=1),
        timedelta(minutes=10),
        timedelta(hours=1),
        timedelta(hours=12),
        timedelta(hours=24),
    )
    eq(make_buckets_from_timedeltas(*deltas), expected)
    eq(DAY_SCALE_TIME_BUCKETS, expected)
class MetricsUtilsTest(SimpleTestCase):
    """Tests metrics utility functions.

    The class body is empty; test methods are attached to it by the
    module-level ``@generate_cases`` decorated functions that pass
    ``MetricsUtilsTest`` as their second argument.
    """
# Module-level parametrized test attached to MetricsUtilsTest by
# `generate_cases`: each tuple is (value, buckets, unit, expected label).
@generate_cases([
    (0, (1, 2, 5), '', 'lt_001'),
    (1, (1, 2, 5), '', 'lt_002'),
    (6, (1, 2, 5), '', 'over_005'),
    (101, (1, 2, 100), 's', 'over_100s'),
    (4, (1, 2, 5), 's', 'lt_005s'),
    (4, (1, 2, 5, 1000), 's', 'lt_0005s'),
    (6, (1, 2, 5, 1000, 43000), 's', 'lt_01000s'),
    (3000, (1, 2, 5, 1000), 's', 'over_1000s'),
], MetricsUtilsTest)
def test_bucket_value(self, value, buckets, unit, expected):
    # Per the cases above: the label is the smallest bucket bound above
    # `value` ("lt_*") or "over_<max>", zero-padded to the widest bound,
    # with `unit` appended.
    self.assertEqual(bucket_value(value, buckets, unit), expected)
# Attached to MetricsUtilsTest by `generate_cases`: (raw url, sanitized url).
# Per the cases: domain names, UUIDs and numeric path suffixes collapse to "*".
@generate_cases([
    (
        '/a/uth-rhd/api/a26f2e21-5f24-48b6-b283-200a21f79bb6/20150922T034026.MP4',
        '/a/*/api/*/20150922T034026.MP4'
    ),
    ('/a/ben/modules-1/forms-2/uuid:abc123/', '/a/*/modules-*/forms-*/uuid:*/')
], MetricsUtilsTest)
def test_sanitize_url(self, url, sanitized):
    self.assertEqual(sanitize_url(url), sanitized)
# Attached to MetricsUtilsTest by `generate_cases`: (sanitized url, group).
# Per the cases: known segments after "/a/*/" name the group, else "other".
@generate_cases([
    ('/', 'other'),
    ('/a/*/api', 'api'),
    ('/a/domain', 'other'),
    ('/1/2/3/4', 'other'),
    ('/a/*/cloudcare', 'cloudcare'),
], MetricsUtilsTest)
def test_url_group(self, url, group):
    self.assertEqual(get_url_group(url), group)
|
11507283 | from rest_framework import generics
from rest_framework import permissions, exceptions
from rest_framework.response import Response
from django.contrib.auth.models import User
from rodan.models.project import Project
from rodan.serializers.project import ProjectListSerializer, ProjectDetailSerializer
from rodan.permissions import CustomObjectPermissions
class ProjectList(generics.ListCreateAPIView):
    """
    Returns a list of Projects that the user has permissions to view. Accepts a POST
    request with a data body to create a new Project. POST requests will return the
    newly-created Project object.
    """
    permission_classes = (permissions.IsAuthenticated,)
    queryset = Project.objects.all()
    serializer_class = ProjectListSerializer
    # Filter-backend lookups allowed on the list endpoint
    # (field name -> permitted lookup expressions).
    filter_fields = {
        "updated": ["lt", "gt"],
        "uuid": ["exact"],
        "created": ["lt", "gt"],
        "creator": ["exact"],
        "name": ["exact", "icontains"],
        "description": ["exact", "icontains"],
    }

    def perform_create(self, serializer):
        # Stamp the requesting user as the new project's creator.
        serializer.save(creator=self.request.user)
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
    """
    Performs operations on a single Project instance.
    """
    permission_classes = (permissions.IsAuthenticated, CustomObjectPermissions)
    # Skip DRF's model-level permission shortcut; CustomObjectPermissions
    # enforces per-object access instead.
    _ignore_model_permissions = True
    queryset = Project.objects.all()
    serializer_class = ProjectDetailSerializer
class ProjectDetailAdmins(generics.GenericAPIView):
    """
    Retrieve and update project admin user list. Only open to project creator.
    """
    queryset = Project.objects.all()
    permission_classes = (permissions.IsAuthenticated,)

    def get_serializer_class(self):
        # for rest browsable API displaying the PUT/PATCH form
        from rest_framework import serializers

        class DummySerializer(serializers.Serializer):
            pass  # empty class

        return DummySerializer

    def check_object_permissions(self, request, obj):
        # Only the project creator may manage the admin list.
        if self.request.user != obj.creator:
            raise exceptions.PermissionDenied()

    def get(self, request, *args, **kwargs):
        project = self.get_object()
        return Response(project.admin_group.user_set.values_list("username", flat=True))

    def put(self, request, *args, **kwargs):
        """Replace the admin group with the posted list of usernames;
        the creator is always re-added afterwards."""
        project = self.get_object()
        members = []
        for username in request.data:
            try:
                members.append(User.objects.get(username=username))
            except User.DoesNotExist:
                raise exceptions.ValidationError(
                    detail={"detail": "User {0} does not exist.".format(username)}
                )
        # All usernames resolved successfully: now swap the group membership.
        project.admin_group.user_set.clear()
        project.admin_group.user_set.add(*members)
        if project.creator:
            project.admin_group.user_set.add(project.creator)
        return Response(project.admin_group.user_set.values_list("username", flat=True))

    def patch(self, request, *args, **kwargs):
        # PATCH behaves exactly like PUT for this endpoint.
        return self.put(request, *args, **kwargs)
class ProjectDetailWorkers(generics.GenericAPIView):
    """
    Retrieve and update project worker user list. Only open to project creator and admin.
    """
    queryset = Project.objects.all()

    def get_serializer_class(self):
        # for rest browsable API displaying the PUT/PATCH form
        from rest_framework import serializers

        class DummySerializer(serializers.Serializer):
            pass  # empty class

        return DummySerializer

    def check_object_permissions(self, request, obj):
        # Allowed: the project creator, or any member of the admin group.
        is_creator = self.request.user == obj.creator
        is_admin = self.request.user.groups.filter(id=obj.admin_group.id).exists()
        if not (is_creator or is_admin):
            raise exceptions.PermissionDenied()  # not in project admin nor as creator

    def get(self, request, *args, **kwargs):
        project = self.get_object()
        return Response(project.worker_group.user_set.values_list("username", flat=True))

    def put(self, request, *args, **kwargs):
        """Replace the worker group with the posted list of usernames."""
        project = self.get_object()
        members = []
        for username in request.data:
            try:
                members.append(User.objects.get(username=username))
            except User.DoesNotExist:
                raise exceptions.ValidationError(
                    detail={"detail": "User {0} does not exist.".format(username)}
                )
        # All usernames resolved successfully: now swap the group membership.
        project.worker_group.user_set.clear()
        project.worker_group.user_set.add(*members)
        return Response(project.worker_group.user_set.values_list("username", flat=True))

    def patch(self, request, *args, **kwargs):
        # PATCH behaves exactly like PUT for this endpoint.
        return self.put(request, *args, **kwargs)
|
11507293 | import wget
import os
import sys
# Self-updating downloader for the Civ6EGRM project, for users without git.
updater_file = "nonGitUpdater.py"
base_url = "https://raw.githubusercontent.com/SamuelH91/Civ6EGRM/master/"
# Repo-relative paths of every project file this updater refreshes.
files = [
    "endGameReplay.bat",
    "endGameReplay.py",
    "folderWatcher/__init__.py",
    "folderWatcher/autoRunEndGameReplayHere.bat",
    "folderWatcher/fileCopier.py",
    "saveFileHandler/__init__.py",
    "saveFileHandler/civColors.py",
    "saveFileHandler/civLocalization.py",
    "saveFileHandler/features.py",
    "saveFileHandler/filehandler.py",
    "saveFileHandler/gameDataHandler.py",
    "utils/__init__.py",
    "utils/binaryconvert.py",
    "utils/hexagon.py",
    "utils/hexgrid.py",
]
# Pass 1 (no extra CLI args): refresh this updater script itself, then
# re-exec it with a marker argument so pass 2 runs the updated version.
if len(sys.argv) < 2:
    print(f"This file should be run from the root of the Civ6EGRM folder!!!")
    print(f"Updating common files without git: Use git for to be sure that all necessary files are up-to-date!")
    print(f"Updating this file first")
    if os.path.exists(updater_file): # remove old first
        os.remove(updater_file)
    wget.download(base_url + updater_file, updater_file)
    new_args = sys.argv[1:]
    new_args.append("UpdateOtherFiles")
    os.execl(sys.executable, 'python', __file__, *new_args)  # Restart this script with additional arg
else:
    # Pass 2 (marker arg present): download every listed file, recreating
    # directories and removing stale copies first.
    print(f"\nUpdating other files next:")
    for file in files:
        print(f"\nDownloading file '{file}'")
        path = os.path.dirname(file)
        if path:
            os.makedirs(path, exist_ok=True)
        if os.path.exists(file): # remove old first
            os.remove(file)
        wget.download(base_url + file, file)
    print(f"\nUpdate 'runFileWatcher.py' (contains your path) manually if needed!")
    input("Press any key to close...")
11507316 | import maya.OpenMayaUI as omUI
try:
from PySide2 import QtCore, QtGui, QtWidgets
from shiboken2 import wrapInstance
except ImportError:
from PySide import QtCore, QtGui
from PySide import QtGui as QtWidgets
from shiboken import wrapInstance
def get_mayamainwindow():
    """Return Maya's main window wrapped as a QMainWindow.

    Uses ``int()`` for the pointer conversion: the builtin ``long`` does not
    exist on Python 3 (the PySide2/shiboken2 branch of the imports above),
    so the original ``long(pointer)`` raised NameError there. Python ints
    are arbitrary precision, so ``int`` is safe for 64-bit pointers on
    Python 2 as well.
    """
    pointer = omUI.MQtUtil.mainWindow()
    return wrapInstance(int(pointer), QtWidgets.QMainWindow)
class export_controls(QtWidgets.QWidget):
    def __init__(self, parent=None):
        """Build the export-controls widget and create its child controls."""
        super(export_controls, self).__init__(parent)
        # NOTE(review): this shadows QWidget.parent() with an attribute;
        # kept as-is since other code may read self.parent directly.
        self.parent = parent
        self.popup = None # reference for popup widget
        self.create_controls()
def create_controls(self):
# create controls
# materials
self.list_materials = QtWidgets.QListWidget()
self.btn_mat_create = QtWidgets.QPushButton('Create ...', self)
self.btn_mat_edit = QtWidgets.QPushButton('Edit', self)
self.btn_mat_delete = QtWidgets.QPushButton('Delete', self)
self.btn_mat_refresh = QtWidgets.QPushButton('Refresh', self)
# animations
self.list_animations = QtWidgets.QListWidget()
self.list_animations.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.btn_anim_create = QtWidgets.QPushButton('Create ...', self)
self.btn_anim_edit = QtWidgets.QPushButton('Edit', self)
self.btn_anim_delete = QtWidgets.QPushButton('Delete', self)
self.btn_anim_refresh = QtWidgets.QPushButton('Refresh', self)
# TODO: re-enable these once supported
self.btn_anim_create.setDisabled(True)
self.btn_anim_edit.setDisabled(True)
self.btn_anim_delete.setDisabled(True)
self.btn_anim_refresh.setDisabled(True)
# create layouts
main_layout = QtWidgets.QHBoxLayout()
main_layout.setSpacing(5)
main_layout.setContentsMargins(5, 5, 5, 5)
left_layout = QtWidgets.QVBoxLayout()
left_layout.setSpacing(5)
grp_mats = QtWidgets.QGroupBox('Materials 000')
grp_mats_layout = QtWidgets.QVBoxLayout()
grp_mats_layout.setContentsMargins(4, 4, 4, 4)
grp_mats_button_layout = QtWidgets.QHBoxLayout()
grp_anims = QtWidgets.QGroupBox('Animations')
grp_anims_layout = QtWidgets.QVBoxLayout()
grp_anims_layout.setContentsMargins(4, 4, 4, 4)
grp_anims_button_layout = QtWidgets.QHBoxLayout()
right_layout = QtWidgets.QVBoxLayout()
right_layout.setSpacing(5)
grp_scene = QtWidgets.QGroupBox('Scene setup')
grp_scene_layout = QtWidgets.QGridLayout()
grp_scene_layout.setColumnStretch(1, 1)
grp_scene_layout.setColumnStretch(2, 2)
grp_scene_layout.setContentsMargins(4, 4, 4, 4)
grp_scene_layout.setVerticalSpacing(5)
# add controls
self.setLayout(main_layout)
main_layout.addLayout(left_layout)
main_layout.addLayout(right_layout)
left_layout.addWidget(grp_mats)
grp_mats.setLayout(grp_mats_layout)
grp_mats_layout.addWidget(self.list_materials)
grp_mats_layout.addLayout(grp_mats_button_layout)
grp_mats_button_layout.addWidget(self.btn_mat_create)
grp_mats_button_layout.addWidget(self.btn_mat_edit)
grp_mats_button_layout.addWidget(self.btn_mat_delete)
grp_mats_button_layout.addWidget(self.btn_mat_refresh)
grp_mats_button_layout.setSpacing(3)
left_layout.addWidget(grp_anims)
grp_anims.setLayout(grp_anims_layout)
grp_anims_layout.addWidget(self.list_animations)
grp_anims_layout.addLayout(grp_anims_button_layout)
grp_anims_button_layout.addWidget(self.btn_anim_create)
grp_anims_button_layout.addWidget(self.btn_anim_edit)
grp_anims_button_layout.addWidget(self.btn_anim_delete)
grp_anims_button_layout.addWidget(self.btn_anim_refresh)
grp_anims_button_layout.setSpacing(3)
right_layout.addWidget(grp_scene)
grp_scene.setLayout(grp_scene_layout)
class RSEmaya_ui(QtWidgets.QDialog):
    """
    Main tool window.

    Parented to the Maya main window so Qt keeps it alive and it stays on
    top of the Maya UI.
    """

    def __init__(self, parent=None):
        # parent to the Maya main window.
        if not parent:
            parent = get_mayamainwindow()
        super(RSEmaya_ui, self).__init__(parent)
        self.popup = None  # reference for popup widget
        self.create_ui()
        self.setStyleSheet(
            'QGroupBox {'
            'border: 1px solid;'
            'border-color: rgba(0, 0, 0, 64);'
            'border-radius: 4px;'
            'margin-top: 8px;'
            'padding: 5px 2px 2px 2px;'
            'background-color: rgb(78, 80, 82);'
            '}'
            'QGroupBox::title {'
            'subcontrol-origin: margin;'
            'subcontrol-position: top left;'
            'left: 10px;'
            '}'
        )

    def create_ui(self):
        """Set window properties, position near the parent, then populate."""
        # window properties
        self.setWindowTitle('RSE Maya Tools')
        self.setWindowFlags(QtCore.Qt.Window)
        if self.parent():
            parent_x = self.parent().x()
            parent_y = self.parent().y()
            self.setGeometry(parent_x + 60, parent_y + 220, self.width(), self.height())
        # populate window
        self.create_controls()
        self.refresh_gui()

    def create_controls(self):
        """Create the main layout and embed the export_controls widget."""
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(main_layout)
        # create all of the main controls
        self.export_ctrls = export_controls(parent=self)
        # create menubar and add widgets to main layout
        main_layout.addWidget(self.export_ctrls)

    def refresh_gui(self):
        # call any gui refresh functions here
        pass

    @QtCore.Slot()
    def do_import_mesh(self):
        """Prompt for a .mesh file and open the import popup for it.

        Remembers the chosen path for next time; shows a warning box when a
        non-.mesh file is selected.
        """
        import os  # bug fix: `os` was used here but never imported in this module
        # NOTE(review): IO_PDX_SETTINGS, import_popup and IO_PDX_LOG are also
        # not defined in this module -- they presumably come from the
        # io_pdx_mesh package; confirm the missing imports upstream.
        last_dir = IO_PDX_SETTINGS.last_import_mesh or ''
        filepath, filefilter = QtWidgets.QFileDialog.getOpenFileName(
            self, caption='Select .mesh file', dir=last_dir, filter='PDX Mesh files (*.mesh)'
        )
        if filepath != '':
            filepath = os.path.abspath(filepath)
            if os.path.splitext(filepath)[1] == '.mesh':
                if self.popup:
                    self.popup.close()
                self.popup = import_popup(filepath, parent=self)
                self.popup.show()
                IO_PDX_SETTINGS.last_import_mesh = filepath
            else:
                reply = QtWidgets.QMessageBox.warning(
                    self, 'READ ERROR',
                    'Unable to read selected file. The filepath ... '
                    '\n\n\t{0}'
                    '\n ... is not a .mesh file!'.format(filepath),
                    QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok
                )
                if reply == QtWidgets.QMessageBox.Ok:
                    IO_PDX_LOG.info("Nothing to import.")
def main():
    """Instantiate the RSE Maya Tools dialog and display it."""
    tools_window = RSEmaya_ui()
    tools_window.show()
if __name__ == '__main__':
main() |
# Mapping of token names to the placeholder strings they are replaced with.
SPECIAL_TOKENS = {
    "URL_TOKEN": "<URL>",
    "POS_EM_TOKEN": "<POS_EM>",
    "NEG_EM_TOKEN": "<NEG_EM>",
    "HEART_TOKEN": "<HEART>",
    "USER_TOKEN": "<USER>",
}
|
11507340 | import unittest
from unittest import TestCase
from numpy.testing import assert_equal
from fusedwind.plant_flow.comp import *
from fusedwind.fused_helper import *
from fusedwind.plant_flow.vt import GenericWindTurbineVT, GenericWindTurbinePowerCurveVT
from fusedwind.plant_flow.generate_fake_vt import generate_random_GenericWindTurbinePowerCurveVT, \
generate_random_wt_positions, generate_random_GenericWindRoseVT, generate_random_wt_layout
import numpy as np
from random import random
from numpy import array, vstack, linspace
# Reference inputs for the WeibullWindRose tests below.
# Each wind_rose_array row is (direction [deg], frequency, A, k); the first
# column matches wind_directions.  NOTE(review): A/k are presumably the
# Weibull scale/shape parameters -- confirm against the component docs.
wr_inputs = {
    'wind_directions': [0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.],
    'wind_speeds': [4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24.,
                    25.],
    'wind_rose_array': array(
        [[0.00000000e+00, 3.59550200e-02, 9.22421500e+00, 2.38867200e+00],
         [3.00000000e+01, 3.94968400e-02, 9.88496400e+00, 2.44726600e+00],
         [6.00000000e+01, 5.19472500e-02, 9.67463200e+00, 2.43164100e+00],
         [9.00000000e+01, 7.01142500e-02, 1.00520300e+01, 2.60351600e+00],
         [1.20000000e+02, 8.36171100e-02, 1.01233300e+01, 2.75585900e+00],
         [1.50000000e+02, 6.43188100e-02, 9.64359200e+00, 2.59179700e+00],
         [1.80000000e+02, 8.63938000e-02, 9.63384700e+00, 2.58007800e+00],
         [2.10000000e+02, 1.17646400e-01, 1.05676900e+01, 2.54492200e+00],
         [2.40000000e+02, 1.51493800e-01, 1.14521200e+01, 2.46679700e+00],
         [2.70000000e+02, 1.47303000e-01, 1.17420100e+01, 2.60351600e+00],
         [3.00000000e+02, 1.00075900e-01, 1.16922200e+01, 2.62304700e+00],
         [3.30000000e+02, 5.16379700e-02, 1.01387300e+01, 2.32226600e+00]])}
# Expected outputs for test_WeibullWindRose.test_run: frequency_array is
# indexed [direction, speed], one row per entry of wind_directions and one
# column per entry of wind_speeds.
wr_result = {
    'wind_directions': [0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.],
    'wind_speeds': [4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24.,
                    25.],
    'frequency_array': array(
        [[1.44588988e-03, 3.35727582e-03, 3.81029809e-03, 4.02470558e-03, 3.98990151e-03, 3.73046644e-03,
          3.29906324e-03, 2.76423767e-03, 2.19642754e-03, 1.65575871e-03, 1.18426976e-03, 8.03545915e-04,
          5.17057409e-04, 3.15388270e-04, 1.82265836e-04, 9.97392412e-05, 5.16484433e-05, 2.52926787e-05,
          1.17053815e-05, 5.11599317e-06, 2.11020415e-06, 5.08752361e-07],
         [1.30490339e-03, 3.09884391e-03, 3.62718954e-03, 3.96056671e-03, 4.06990314e-03, 3.95614496e-03,
          3.64866956e-03, 3.19840948e-03, 2.66737868e-03, 2.11724064e-03, 1.59962624e-03, 1.15013583e-03,
          7.86692880e-04, 5.11652765e-04, 3.16234066e-04, 1.85620497e-04, 1.03401615e-04, 5.46258501e-05,
          2.73472412e-05, 1.29640581e-05, 5.81491675e-06, 1.50264971e-06],
         [1.77962417e-03, 4.19952053e-03, 4.87241564e-03, 5.26922670e-03, 5.35765892e-03, 5.14772140e-03,
          4.68765926e-03, 4.05270640e-03, 3.32955013e-03, 2.60048377e-03, 1.93096518e-03, 1.36289927e-03,
          9.14038683e-04, 5.82191700e-04, 3.51980669e-04, 2.01858052e-04, 1.09736471e-04, 5.65096359e-05,
          2.75447988e-05, 1.26991432e-05, 5.53343327e-06, 1.39586432e-06],
         [2.03574440e-03, 4.98929943e-03, 6.05198952e-03, 6.80997401e-03, 7.17340152e-03, 7.10897476e-03,
          6.64589168e-03, 5.86836208e-03, 4.89625745e-03, 3.85928948e-03, 2.87201235e-03, 2.01614762e-03,
          1.33368507e-03, 8.30343766e-04, 4.85929034e-04, 2.66934444e-04, 1.37448038e-04, 6.62437807e-05,
          2.98390332e-05, 1.25432885e-05, 4.91332268e-06, 1.12525886e-06],
         [2.15186806e-03, 5.43110528e-03, 6.80657106e-03, 7.86793420e-03, 8.46630869e-03, 8.52083209e-03,
          8.03827862e-03, 7.11211238e-03, 5.89954420e-03, 4.58318141e-03, 3.32962029e-03, 2.25796289e-03,
          1.42643914e-03, 8.37636776e-04, 4.56170072e-04, 2.29844137e-04, 1.06885088e-04, 4.57615873e-05,
          1.79926754e-05, 6.48041565e-06, 2.13264517e-06, 4.17265933e-07],
         [2.24784777e-03, 5.46088720e-03, 6.53473087e-03, 7.23314626e-03, 7.47069754e-03, 7.23396208e-03,
          6.58303613e-03, 5.63593453e-03, 4.54016275e-03, 3.44009748e-03, 2.44976880e-03, 1.63787627e-03,
          1.02684668e-03, 6.02850400e-04, 3.30951610e-04, 1.69634591e-04, 8.10558227e-05, 3.60483982e-05,
          1.48978462e-05, 5.71209909e-06, 2.02861392e-06, 4.27007926e-07],
         [2.84919811e-03, 6.90561292e-03, 8.24189952e-03, 9.10295837e-03, 9.38562803e-03, 9.07665485e-03,
          8.25355050e-03, 7.06455491e-03, 5.69322871e-03, 4.31834242e-03, 3.08069972e-03, 2.06505246e-03,
          1.29914820e-03, 7.66078219e-04, 4.22840120e-04, 2.18143544e-04, 1.05033331e-04, 4.71273715e-05,
          1.96750146e-05, 7.63101473e-06, 2.74537834e-06, 5.84660929e-07],
         [3.15103170e-03, 7.69357254e-03, 9.33336956e-03, 1.05666829e-02, 1.12688451e-02, 1.13813576e-02,
          1.09204112e-02, 9.97189992e-03, 8.67355879e-03, 7.18843635e-03, 5.67618809e-03, 4.26887875e-03,
          3.05612657e-03, 2.08130177e-03, 1.34730899e-03, 8.28320491e-04, 4.83210345e-04, 2.67221613e-04,
          1.39953406e-04, 6.93493727e-05, 3.24798328e-05, 8.68186021e-06],
         [3.38534246e-03, 8.22612132e-03, 9.98196140e-03, 1.13899884e-02, 1.23378363e-02, 1.27597240e-02,
          1.26435937e-02, 1.20308435e-02, 1.10083382e-02, 9.69411732e-03, 8.21957641e-03, 6.71153300e-03,
          5.27736822e-03, 3.99546450e-03, 2.91176859e-03, 2.04191583e-03, 1.37732536e-03, 8.93223652e-04,
          5.56674970e-04, 3.33229170e-04, 1.91495304e-04, 6.08835103e-05],
         [2.87303824e-03, 7.17906769e-03, 8.99864707e-03, 1.05685790e-02, 1.17476848e-02, 1.24323246e-02,
          1.25706437e-02, 1.21697691e-02, 1.12938057e-02, 1.00525910e-02, 8.58338998e-03, 7.02938634e-03,
          5.51945197e-03, 4.15307210e-03, 2.99270142e-03, 2.06379390e-03, 1.36094123e-03, 8.57478070e-04,
          5.15753711e-04, 2.95876874e-04, 1.61746247e-04, 4.91779082e-05],
         [2.02163959e-03, 5.06769635e-03, 6.37296122e-03, 7.50260428e-03, 8.35231542e-03, 8.84484216e-03,
          8.94099994e-03, 8.64540798e-03, 8.00521430e-03, 7.10171159e-03, 6.03649545e-03, 4.91515685e-03,
          3.83200463e-03, 2.85882995e-03, 2.03944379e-03, 1.39010175e-03, 9.04514324e-04, 5.61332477e-04,
          3.31930481e-04, 1.86840603e-04, 1.00013602e-04, 2.98627202e-05],
         [1.76963956e-03, 4.12232356e-03, 4.73613444e-03, 5.11273708e-03, 5.23107828e-03, 5.09941651e-03,
          4.75207060e-03, 4.24236873e-03, 3.63329069e-03, 2.98774782e-03, 2.36035193e-03, 1.79197095e-03,
          1.30756674e-03, 9.17018472e-04, 6.18067439e-04, 4.00285013e-04, 2.49050823e-04, 1.48827861e-04,
          8.53960207e-05, 4.70346524e-05, 2.48592270e-05, 7.39661364e-06]])}
class TestWindFarm(GenericWindFarm):
    """Stub wind farm that fills its outputs with random values for testing."""

    def execute(self):
        """Populate per-turbine power and thrust randomly, then total them."""
        powers = []
        for turbine in self.wt_layout.wt_list:
            powers.append(random() * turbine.power_rating)
        thrusts = [p / (random() * self.wind_speed) for p in powers]
        self.wt_power = powers
        self.wt_thrust = thrusts
        self.power = sum(powers)
        self.thrust = sum(thrusts)
class test_GenericWindFarm(unittest.TestCase):
    """Smoke tests for the GenericWindFarm base component."""

    def fill_up(self, gwf):
        """Assign random wind conditions and a random layout to *gwf*."""
        gwf.wind_speed = 25.0 * random()
        gwf.wind_direction = 360.0 * random()
        gwf.wt_layout = generate_random_wt_layout()

    def test_init(self):
        """A GenericWindFarm accepts randomly generated inputs."""
        farm = GenericWindFarm()
        self.fill_up(farm)

    def test_execution(self):
        """A concrete wind farm runs end to end on random inputs."""
        farm = TestWindFarm()
        self.fill_up(farm)
        farm.run()
class test_WeibullWindRose(unittest.TestCase):
    """Tests for the WeibullWindRose component against reference data."""

    def test_init(self):
        """The component can be constructed and initialised from wr_inputs."""
        c = WeibullWindRose()
        init_container(c, **wr_inputs)

    def test_run(self):
        """Running the component reproduces the reference wind rose."""
        c = WeibullWindRose()
        init_container(c, **wr_inputs)
        c.run()
        # Testing that the output wind_rose is equal to the wr_result dictionary.
        # Bug fix: dict.iteritems() exists only on Python 2; .items() works on
        # both Python 2 and Python 3.
        for k, v in wr_result.items():
            np.testing.assert_array_almost_equal(getattr(c.wind_rose, k), v)

    def test_random(self):
        """A random GenericWindRoseVT can be generated without error."""
        wind_rose = generate_random_GenericWindRoseVT()
class test_GenericWSPosition(unittest.TestCase):
    """Instantiation smoke test for GenericWSPosition."""
    def test_init(self):
        c = GenericWSPosition()


class test_HubCenterWSPosition(unittest.TestCase):
    """Instantiation smoke test for HubCenterWSPosition."""
    def test_init(self):
        c = HubCenterWSPosition()


class test_GenericWakeSum(unittest.TestCase):
    """Instantiation smoke test for GenericWakeSum."""
    def test_init(self):
        c = GenericWakeSum()


class test_GenericHubWindSpeed(unittest.TestCase):
    """Instantiation smoke test for GenericHubWindSpeed."""
    def test_init(self):
        c = GenericHubWindSpeed()


class test_GenericFlowModel(unittest.TestCase):
    """Instantiation smoke test for GenericFlowModel."""
    def test_init(self):
        c = GenericFlowModel()


class test_GenericWakeModel(unittest.TestCase):
    """Instantiation smoke test for GenericWakeModel."""
    def test_init(self):
        c = GenericWakeModel()


class test_GenericInflowGenerator(unittest.TestCase):
    """Instantiation smoke test for GenericInflowGenerator."""
    def test_init(self):
        c = GenericInflowGenerator()


class test_GenericWindTurbine(unittest.TestCase):
    """Instantiation smoke test for GenericWindTurbine."""
    def test_init(self):
        c = GenericWindTurbine()


class test_WindTurbinePowerCurve(unittest.TestCase):
    """Checks the power-curve evaluation of WindTurbinePowerCurve."""
    def test_init(self):
        c = WindTurbinePowerCurve()
        c.wt_desc = generate_random_GenericWindTurbinePowerCurveVT()
        # Evaluating the component at each tabulated wind speed must
        # reproduce the tabulated power.
        for ws, power in c.wt_desc.power_curve:
            np.testing.assert_almost_equal(c(hub_wind_speed=ws).power, power)


class test_GenericWindRoseCaseGenerator(unittest.TestCase):
    """Instantiation smoke test for GenericWindRoseCaseGenerator."""
    def test_init(self):
        c = GenericWindRoseCaseGenerator()


class TestBaseAEPAggregator(TestCase):
    """Placeholder: no behaviour tested yet."""
    pass


class TestWeibullWindRose(TestCase):
    """Placeholder for additional WeibullWindRose execution tests."""
    def test_execute(self):
        pass
class TestMultipleWindRosesCaseGenerator(unittest.TestCase):
    """Checks that the case generator fans a layout out over every
    (wind direction, wind speed) combination."""

    def test_execute(self):
        # Preparing inputs
        cG = MultipleWindRosesCaseGenerator()
        cG.wind_speeds = np.linspace(4., 25., 22).tolist()
        cG.wind_directions = np.linspace(0., 360., 36)[:-1].tolist()
        nwt = 5
        cG.wt_layout = generate_random_wt_layout(nwt=nwt)
        cG.run()
        # One generated case per (direction, speed) pair ...
        nwd, nws = len(cG.wind_directions), len(cG.wind_speeds)
        self.assertEqual(len(cG.all_wind_speeds), nws * nwd)
        self.assertEqual(len(cG.all_wind_directions), nws * nwd)
        self.assertEqual(len(cG.all_frequencies), nws * nwd)
        # ... and one frequency entry per turbine within each case.
        self.assertEqual(len(cG.all_frequencies[0]), nwt)
class TestPostProcessMultipleWindRoses(unittest.TestCase):
    """Checks that the post-processor aggregates per-case powers into an
    AEP array shaped (n_directions, n_speeds)."""

    def test_execute(self):
        # Generate a full set of cases to post-process.
        cG = MultipleWindRosesCaseGenerator()
        cG.wind_speeds = np.linspace(4., 25., 22).tolist()
        cG.wind_directions = np.linspace(0., 360., 36)[:-1].tolist()
        nwt = 5
        cG.wt_layout = generate_random_wt_layout(nwt=nwt)
        cG.run()
        # Feed the generated frequencies plus random per-turbine powers in.
        cP = PostProcessMultipleWindRoses()
        cP.wind_directions = cG.wind_directions
        cP.wind_speeds = cG.wind_speeds
        cP.frequencies = cG.all_frequencies
        cP.powers = [[random()*2.E6 for iwt in range(nwt)] for i in cG.all_wind_speeds]
        cP.run()
        assert_equal(cP.array_aep.shape, [len(cP.wind_directions), len(cP.wind_speeds)])
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
11507392 | from .LossLayer import *
class ContrastiveLoss(LossLayer):
    """Siamese contrastive loss (Hadsell et al., 2006).

    Inputs (via ``self.X``): X1 and X2 are embedding batches of identical
    shape; sim is a per-pair label where 1 marks a similar pair.

    With d = ||x1 - x2||_2, the loss averaged over the n pairs is
        0.5 * d^2                    for similar pairs,
        0.5 * max(0, margin - d)^2   for dissimilar pairs.
    """

    def __init__(self, models, *args, **kwargs):
        # models = [X1, X2, sim]
        LossLayer.__init__(self, models, *args, **kwargs)
        self.margin = kwargs.get("margin", 1.0)

    def forward(self):
        # Boolean mask of similar pairs.
        self.sim = (self.X[2] == 1).ravel()
        n = self.sim.shape[0]
        self.diff = self.X[0] - self.X[1]
        dist_sq = np.sum(np.square(self.diff.reshape((n, -1))), 1)
        self.dist = np.sqrt(dist_sq).reshape((n, 1))
        # Hinge term: only dissimilar pairs closer than the margin contribute.
        df = (self.margin - self.dist).ravel()
        self.bdf = ((~self.sim) & (df > 0))
        self.Y = (np.sum(dist_sq[self.sim]) + np.sum(np.square(df[self.bdf]))) / (2.0 * n)

    def backward(self):
        n = self.sim.shape[0]
        dX = np.zeros(self.X[0].shape)
        # dL/dX1 for similar pairs: (x1 - x2) / n.
        dX[self.sim] = self.diff[self.sim] / n
        # dL/dX1 for active dissimilar pairs: (1 - margin/d) * (x1 - x2) / n.
        # NOTE(review): d == 0 for a coincident dissimilar pair would divide
        # by zero here -- confirm such pairs cannot occur upstream.
        dX[self.bdf] = (1.0 / n - self.margin / n / self.dist[self.bdf]) * self.diff[self.bdf]
        # Bug fix: since diff = X1 - X2, the gradient w.r.t. X2 is the
        # negation of the gradient w.r.t. X1.  Previously both inputs
        # received the same (unnegated) gradient.
        self.dX[0] = dX * self.dY
        self.dX[1] = -dX * self.dY
|
11507410 | from .mean_average_precision_2d import MeanAveragePrecision2d
from .multiprocessing import MetricMultiprocessing
from .adapter import AdapterDefault
# Registry mapping metric-type names to their implementing classes.
metrics_dict = {
    'map_2d': MeanAveragePrecision2d
}
class MetricBuilder:
    """Factory for constructing evaluation-metric instances by name."""

    @staticmethod
    def get_metrics_list():
        """Return the names of all registered evaluation metrics."""
        return list(metrics_dict.keys())

    @staticmethod
    def build_evaluation_metric(metric_type, async_mode=False, adapter_type=AdapterDefault, *args, **kwargs):
        """Instantiate the metric registered under *metric_type*.

        Arguments:
            metric_type (str): registry key of the metric to build.
            async_mode (bool): wrap the metric in the multiprocessing runner.
            adapter_type (AdapterBase): adapter class applied to the metric.

        Returns:
            metric_fn (MetricBase): the metric instance wrapped by *adapter_type*.
        """
        assert metric_type in metrics_dict, "Unknown metric_type"
        metric_cls = metrics_dict[metric_type]
        if async_mode:
            metric = MetricMultiprocessing(metric_cls, *args, **kwargs)
        else:
            metric = metric_cls(*args, **kwargs)
        return adapter_type(metric)
|
11507425 | import sys
sys.path.append('../')
import numpy as np
from pathlib import Path
from py_diff_stokes_flow.core.py_diff_stokes_flow_core import Cell2d
from py_diff_stokes_flow.common.common import ndarray, print_error, print_info
from py_diff_stokes_flow.common.grad_check import check_gradients
def test_cell_2d(verbose):
    """Sanity-check Cell2d against an analytic straight-line interface.

    Builds a cell cut by a line through (0.5, 0.5) with a random slope in
    [1/3, 1], verifies the sampled solid areas and boundary lengths against
    closed-form values, then finite-difference checks all exposed gradients.

    :param verbose: print progress/errors while testing.
    :return: True when all checks pass, False on the first failure.
    """
    np.random.seed(42)
    cell = Cell2d()
    # Material and discretization parameters for Initialize().
    E = 1e5
    nu = 0.45
    threshold = 1e-3
    edge_sample_num = 3
    # Consider a line that passes (0.5, 0.5) with a slope between 1/3 and 1.
    p = ndarray([0.5, 0.5])
    k = np.random.uniform(low=1 / 3, high=1)
    # Line equation: (y - p[1]) / (x - p[0]) = k.
    # y - p[1] = kx - kp[0].
    # kx - y + p[1] - kp[0].
    line_eq = ndarray([k, -1, p[1] - k * p[0]])
    # Solid area: line_eq >= 0.
    # So, the lower part is the solid area.
    # This means corner distance from [0, 0] and [1, 0] are positive.
    # Signed distance of each unit-square corner to the line.
    sdf_at_corners = []
    for c in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        sdf_at_corners.append((line_eq[0] * c[0] + line_eq[1] * c[1] + line_eq[2]) / np.linalg.norm(line_eq[:2]))
    cell.Initialize(E, nu, threshold, edge_sample_num, sdf_at_corners)
    # Check if all areas are correct.
    # The cell is sampled on a 3x3 grid of sub-squares of side dx; area_ij is
    # the solid area of sub-square (row i, column j) from geometry.
    dx = 1 / 3
    x_intercept = (-line_eq[1] * dx - line_eq[2]) / line_eq[0]
    area_00 = x_intercept * x_intercept * k * 0.5
    area_01 = dx ** 2 - (dx - x_intercept) ** 2 * k * 0.5
    area_02 = dx ** 2
    area_10 = 0
    area_11 = dx ** 2 * 0.5
    area_12 = dx ** 2
    area_20 = 0
    area_21 = dx ** 2 - area_01
    area_22 = dx ** 2 - area_00
    area = ndarray([area_00, area_01, area_02, area_10, area_11, area_12, area_20, area_21, area_22])
    area_from_cell = ndarray(cell.sample_areas())
    if not np.allclose(area, area_from_cell):
        if verbose:
            print_error('area is inconsistent.')
        return False
    # Check if all line segments are correct.
    # line_ij is the length of the interface segment inside sub-square (i, j).
    line_00 = np.sqrt(1 + k ** 2) * x_intercept
    line_01 = np.sqrt(1 + k ** 2) * (dx - x_intercept)
    line_02 = 0
    line_10 = 0
    line_11 = np.sqrt(1 + k ** 2) * dx
    line_12 = 0
    line_20 = 0
    line_21 = line_01
    line_22 = line_00
    line = ndarray([line_00, line_01, line_02, line_10, line_11, line_12, line_20, line_21, line_22])
    line_from_cell = ndarray(cell.sample_boundary_areas())
    if not np.allclose(line, line_from_cell):
        if verbose:
            print_error('boundary area is inconsistent.')
        return False
    # Test the gradients.
    # Each (value, gradient) pair is checked by projecting onto a random
    # weight vector and finite-differencing w.r.t. the corner SDF values.
    for loss_func, grad_func, name in [
        (cell.py_normal, cell.py_normal_gradient, 'normal'),
        (cell.offset, cell.py_offset_gradient, 'offset'),
        (cell.sample_areas, cell.py_sample_areas_gradient, 'sample_areas'),
        (cell.sample_boundary_areas, cell.py_sample_boundary_areas_gradient, 'sample_boundary_areas'),
        (cell.area, cell.py_area_gradient, 'area'),
        (cell.py_energy_matrix, cell.py_energy_matrix_gradient, 'energy_matrix'),
        (cell.py_dirichlet_vector, cell.py_dirichlet_vector_gradient, 'dirichlet_vector')
    ]:
        if verbose:
            print_info('Checking loss and gradient:', name)
        dim = ndarray(loss_func()).size
        weight = np.random.normal(size=dim)

        def loss_and_grad(x):
            # Re-initialize the cell at corner SDFs x, then return the
            # weighted scalar loss and its gradient w.r.t. the 4 corners.
            cell.Initialize(E, nu, threshold, edge_sample_num, x)
            loss = ndarray(loss_func()).ravel().dot(weight)
            grad = np.zeros(4)
            for i in range(4):
                grad[i] = ndarray(grad_func(i)).ravel().dot(weight)
            return loss, grad
        if not check_gradients(loss_and_grad, ndarray(sdf_at_corners), verbose=verbose):
            if verbose:
                print_error('Gradient check failed.')
            return False
    return True
# Manual entry point: run the Cell2d sanity checks with full logging.
if __name__ == '__main__':
    verbose = True
    test_cell_2d(verbose)
11507497 | from .email_form_page import EmailFormPage
from .form_page import FormPage
from .email_form_field import EmailFormField
from .form_field import FormField
|
11507504 | import praw
import sys # Used only for exiting script
import time
import sqlite3
from blackjack import Game
try:
import config
except ImportError:
# Handle error if no config.py file found
pass
if __name__ == '__main__':
    # Connect to or create database
    print('Opening SQL Database')
    sql = sqlite3.connect('sql.db')
    cur = sql.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS users(user_id INTEGER PRIMARY KEY, reddit_name TEXT, reddit_fullname TEXT, created_date TEXT)')
    cur.execute('CREATE TABLE IF NOT EXISTS hands(hand_id INTEGER PRIMARY KEY, dealer_hand TEXT, player_hand TEXT, created_date TEXT, completed_date TEXT)')
    # Connect to reddit using credentials from config.py.
    reddit = praw.Reddit(client_id=config.CLIENT_ID,
                         client_secret=config.CLIENT_SECRET,
                         user_agent=config.USER_AGENT,
                         username=config.USERNAME,
                         # Bug fix: this read `<PASSWORD>.PASSWORD`, which is
                         # not valid Python (anonymization residue); restore
                         # the config lookup like the other credentials.
                         password=config.PASSWORD)
    # TODO: Verify that reddit class inst correctly
    # Main polling loop: scan configured subreddits for summon strings and
    # reply with a fresh blackjack deal.
    loops = 0
    while True:
        loops += 1
        print("Loop {}".format(loops))
        try:
            for subreddit in config.SUBREDDITS:
                sub = reddit.subreddit(subreddit)
                for post in sub.new():
                    # TODO: Check if the post itself contains a summon
                    for comment in post.comments:
                        for summon in config.SUMMON_STRINGS:
                            if summon in comment.body.lower():
                                # TODO: Check if bot has already responded to this comment
                                print("Summoned by {} in thread {}. Comment ID: {}".format(comment.author, post.title,
                                                                                           comment.id))
                                d = Game()
                                d.deal()
                                reply = d.get_reddit_reply()
                                comment.reply(reply)
        except KeyboardInterrupt:
            sys.exit()
        except praw.exceptions.APIException as e:
            # TODO: Investigate if this catches only rate limit exceptions, or more
            print(e)
            # Bug fix: the message claimed "10 minutes" but the code sleeps
            # 60 seconds; the message now matches the actual sleep duration.
            print("Rate limit exceeded. Sleeping for 1 minute.")
            time.sleep(60)
        except Exception as e:
            # Best-effort: log unexpected errors and keep polling.
            print("EXCEPTION: {}".format(e))
|
11507610 | import contextlib
from operator import attrgetter
import django
from django import forms
from django.db import models, transaction, router, DEFAULT_DB_ALIAS
from django.db.models.fields import Field
from django.db.models.fields.files import ImageFileDescriptor, ImageFieldFile
from django.db.models.fields.related import ManyToManyRel, ManyToManyField
from django.utils.functional import cached_property
from django.utils import six
from django.contrib.contenttypes.models import ContentType
from generic_plus.fields import GenericForeignFileField
from generic_plus.forms import (
generic_fk_file_formset_factory, generic_fk_file_formfield_factory,
generic_fk_file_widget_factory)
import cropduster.settings
from .forms import CropDusterInlineFormSet, CropDusterWidget, CropDusterThumbFormField
from .utils import json
from .resizing import Box, Crop
try:
from django.db.models.fields.related import (
create_foreign_related_manager)
except ImportError:
from django.db.models.fields.related_descriptors import (
create_reverse_many_to_one_manager)
class ReverseForeignRelatedObjectsRel(object):
def __init__(self, field, related_model):
self.field = field
self.related_model = related_model
def create_foreign_related_manager(superclass, rel_field, rel_model):
return create_reverse_many_to_one_manager(
superclass, ReverseForeignRelatedObjectsRel(rel_field, rel_model))
def compat_rel(f):
    """Return the relation descriptor of field *f* across Django versions.

    Django 1.9 renamed ``Field.rel`` to ``Field.remote_field``.
    (Converted from lambda assignment per PEP 8 E731; call sites unchanged.)
    """
    return getattr(f, 'remote_field' if django.VERSION >= (1, 9) else 'rel')


def compat_rel_to(f):
    """Return the model targeted by *f*'s relation across Django versions.

    Django 1.9 renamed the relation attribute ``to`` to ``model``.
    """
    return getattr(compat_rel(f), 'model' if django.VERSION >= (1, 9) else 'to')
class CropDusterImageFieldFile(ImageFieldFile):
    """ImageFieldFile that knows about its related cropduster Image/Thumb
    rows and can (re)generate thumbnails for its configured sizes."""

    @property
    def sizes(self):
        # The field's `sizes` may be a callable taking (instance, related=...)
        # or a static collection; normalize to the resolved value.
        if six.callable(self.field.db_field.sizes):
            return self.field.db_field.sizes(self.instance, related=self.related_object)
        else:
            return self.field.db_field.sizes

    def _get_new_crop_thumb(self, size):
        """Build (without saving) a Thumb holding the best-fit crop of the
        full image for *size*."""
        # "Imports"
        Image = compat_rel_to(self.field.db_field)
        Thumb = compat_rel_to(Image._meta.get_field("thumbs"))
        # Start from the whole image and let the size pick its best crop.
        box = Box(0, 0, self.width, self.height)
        crop_box = Crop(box, self.name)
        best_fit = size.fit_to_crop(crop_box, original_image=self.name)
        fit_box = best_fit.box
        crop_thumb = Thumb(**{
            "name": size.name,
            "width": fit_box.w,
            "height": fit_box.h,
            "crop_x": fit_box.x1,
            "crop_y": fit_box.y1,
            "crop_w": fit_box.w,
            "crop_h": fit_box.h,
        })
        return crop_thumb

    def generate_thumbs(self, permissive=False, skip_existing=False):
        """Create (or re-create) thumbnails for every configured size.

        Creates the related Image row first when this file has none yet.
        Alias sizes are skipped; an existing crop thumb is reused when one
        with the size's name is already stored.
        """
        # "Imports"
        Image = compat_rel_to(self.field.db_field)
        Thumb = compat_rel_to(Image._meta.get_field("thumbs"))
        has_existing_image = self.related_object is not None
        if not has_existing_image:
            obj_ct = ContentType.objects.get_for_model(
                self.instance, for_concrete_model=False)
            image = Image(**{
                'content_type': obj_ct,
                'object_id': self.instance.pk,
                'field_identifier': self.field.generic_field.field_identifier,
                'width': self.width,
                'height': self.height,
                'image': self.name,
            })
            image.save()
            self.related_object = image
        for size in self.sizes:
            if getattr(size, 'is_alias', False):
                continue
            try:
                crop_thumb = self.related_object.thumbs.get(name=size.name)
            except Thumb.DoesNotExist:
                crop_thumb = self._get_new_crop_thumb(size)
            thumbs = self.related_object.save_size(
                size, thumb=crop_thumb, permissive=permissive, skip_existing=skip_existing)
            # Attach every produced thumb back to the Image row.
            for slug, thumb in six.iteritems(thumbs):
                thumb.image = self.related_object
                thumb.save()
class CropDusterImageField(models.ImageField):
    """ImageField whose file attribute is a CropDusterImageFieldFile."""

    attr_class = CropDusterImageFieldFile

    def formfield(self, *args, **kwargs):
        """Build the form field, discarding the cropduster-only 'sizes' kwarg."""
        if 'sizes' in kwargs:
            del kwargs['sizes']
        parent = super(CropDusterImageField, self)
        return parent.formfield(*args, **kwargs)
class CropDusterImageFileDescriptor(ImageFileDescriptor):
    """
    The same as ImageFileDescriptor, except only updates image dimensions if
    the file has changed
    """
    def __set__(self, instance, value):
        previous_file = instance.__dict__.get(self.field.name)
        # Deliberately skip ImageFileDescriptor.__set__ by calling super on
        # ImageFileDescriptor itself (i.e. FileDescriptor.__set__), so that
        # dimensions are NOT recalculated on every assignment.
        super(ImageFileDescriptor, self).__set__(instance, value)
        # Only recompute dimensions when the field already had a value and
        # the new value actually differs.
        if previous_file is not None:
            if previous_file != value:
                self.field.update_dimension_fields(instance, force=True)
class CropDusterSimpleImageField(models.ImageField):
    """
    Used for the field 'image' on cropduster.models.Image. Just overrides the
    descriptor_class to prevent unnecessary IO lookups on form submissions.
    """
    # Descriptor that only recalculates dimensions when the value changes.
    descriptor_class = CropDusterImageFileDescriptor
class CropDusterField(GenericForeignFileField):
    """Generic foreign-key file field storing a cropduster Image, with the
    widget/formset plumbing for the Django admin."""

    file_field_cls = CropDusterImageField
    file_descriptor_cls = CropDusterImageFileDescriptor
    rel_file_field_name = 'image'
    field_identifier_field_name = 'field_identifier'

    def __init__(self, verbose_name=None, **kwargs):
        sizes = kwargs.pop('sizes', None)
        # Round-trip plain dicts through JSON to normalize them.
        # NOTE(review): the exact result depends on the custom encoder in
        # .utils.json -- presumably it deserializes into Size objects; confirm.
        if isinstance(sizes, (list, tuple)) and all([isinstance(s, dict) for s in sizes]):
            sizes = json.loads(json.dumps(sizes))
        self.sizes = sizes
        # Default target model is the cropduster app's Image.
        to = kwargs.pop('to', '%s.Image' % cropduster.settings.CROPDUSTER_APP_LABEL)
        kwargs.update({
            'upload_to': kwargs.pop('upload_to', None) or '',
        })
        self.require_alt_text = kwargs.pop('require_alt_text', False)
        super(CropDusterField, self).__init__(to, verbose_name=verbose_name, **kwargs)

    def formfield(self, **kwargs):
        """Return a form field wired up with the cropduster widget."""
        factory_kwargs = {
            'sizes': kwargs.pop('sizes', None) or self.sizes,
            'related': compat_rel(self),
        }
        widget = generic_fk_file_widget_factory(CropDusterWidget, **factory_kwargs)
        formfield = generic_fk_file_formfield_factory(widget=widget, **factory_kwargs)
        kwargs.update({
            'widget': widget,
            'form_class': formfield,
        })
        return super(CropDusterField, self).formfield(**kwargs)

    def get_inline_admin_formset(self, *args, **kwargs):
        """Return the admin inline class used to edit this field's Image."""
        for_concrete_model = self.for_concrete_model

        def get_formset(self, request, obj=None, **kwargs):
            # NOTE: inside this closure `self` is the inline admin instance,
            # not the field; the field is reached via self.field.
            formset_attrs = {'sizes': self.field.sizes, 'max_num': 1,
                             'require_alt_text': self.field.require_alt_text}
            formset_attrs.update(kwargs)
            return generic_fk_file_formset_factory(
                formset=CropDusterInlineFormSet,
                field=self.field,
                formset_attrs=formset_attrs,
                prefix=self.default_prefix,
                form_attrs={
                    "caption": forms.CharField(required=False),
                    "alt_text": forms.CharField(required=False),
                },
                for_concrete_model=for_concrete_model)

        return super(CropDusterField, self).get_inline_admin_formset(
            formset_cls=CropDusterInlineFormSet,
            attrs={
                'sizes': self.sizes,
                'get_formset': get_formset,
                'field': self,
            }
        )
class CropDusterThumbField(ManyToManyField):
    """Marker ManyToManyField subclass for cropduster thumb relations."""
    pass
def create_reverse_foreign_related_manager(
        superclass, rel_field, rel_model, limit_choices_to):
    """Build a related-manager class for a reverse FK relation whose
    queryset is additionally filtered by *limit_choices_to*."""
    attname = compat_rel(rel_field).get_related_field().attname
    new_superclass = create_foreign_related_manager(superclass, rel_field, rel_model)

    class RelatedManager(new_superclass):
        def __init__(self, instance):
            super(RelatedManager, self).__init__(instance)
            # Restrict all queries to rows pointing at this instance.
            self.core_filters = {
                "%s__%s" % (rel_field.name, attname): getattr(instance, attname),
            }

        def __call__(self, **kwargs):
            # Support obj.rel(manager='name') to route through a different
            # manager on the model, as Django's related managers do.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_reverse_foreign_related_manager(
                manager.__class__, rel_field, rel_model, limit_choices_to)
            return manager_class(self.instance)

        def get_queryset(self):
            # Serve from the prefetch cache when available.
            try:
                return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
            except (AttributeError, KeyError):
                qset = super(RelatedManager, self).get_queryset()
                return qset.complex_filter(limit_choices_to)

        def set(self, objs, **kwargs):
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                super(RelatedManager, self).set(objs, **kwargs)
                # Persist the FK assignment made by set() on each object.
                for obj in objs:
                    obj.save()
        set.alters_data = True

        def get_prefetch_queryset(self, instances, queryset=None):
            # Accept file wrappers as instances by unwrapping to the Image row.
            if isinstance(instances[0], CropDusterImageFieldFile):
                instances = [i.related_object for i in instances]
            if queryset is None:
                queryset = super(new_superclass, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            rel_obj_attr = attrgetter(rel_field.get_attname())
            instance_attr = attrgetter(attname)
            instances_dict = {instance_attr(inst): inst for inst in instances}
            query = {
                '%s__%s__in' % (rel_field.name, attname): set(map(instance_attr, instances)),
            }
            queryset = queryset.complex_filter(limit_choices_to).filter(**query)
            # Attach each fetched row back to its owning instance.
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_field.name, instance)
            cache_name = rel_field.related_query_name()
            # Django 2.0+ expects an extra trailing flag in the prefetch tuple.
            return (
                queryset, rel_obj_attr, instance_attr, False, cache_name,
            ) + (() if django.VERSION < (2, 0) else (False,))

    return RelatedManager
class ReverseForeignRelatedObjectsDescriptor(object):
    """Descriptor exposing a reverse-FK relation as a related manager."""

    def __init__(self, field):
        self.field = field

    def __get__(self, instance, instance_type=None):
        # Class access returns the descriptor itself, like Django's own
        # related descriptors.
        if instance is None:
            return self
        return self.related_manager_cls(instance)

    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        rel_field = compat_rel_to(self.field)._meta.get_field(self.field.field_name)
        if rel_field.null:
            manager.clear()
        manager.add(*value)

    @cached_property
    def related_manager_cls(self):
        # Built lazily (and cached per descriptor) because it derives from
        # the related model's default manager class.
        rel_model = compat_rel_to(self.field)
        rel_field = rel_model._meta.get_field(self.field.field_name)
        superclass = rel_model._default_manager.__class__
        limit_choices_to = compat_rel(self.field).limit_choices_to
        return create_reverse_foreign_related_manager(
            superclass, rel_field, rel_model, limit_choices_to)
class FalseThrough(object):
    """
    Django 1.7+ expects rel.through._meta.auto_created to not throw an
    AttributeError on fields that extend ManyToManyField. So we create a
    falsey object that has rel.through._meta.auto_created = False
    """
    def __nonzero__(cls):
        # Falsey under Python 2 truth testing.
        return False
    # Falsey under Python 3 truth testing as well.
    __bool__ = __nonzero__
    # Minimal stand-in for a model's _meta options object.
    _meta = type('Options', (object,), {
        'auto_created': False,
        'managed': False,
        'local_fields': [],
    })
@contextlib.contextmanager
def rel_through_none(instance):
    """
    Temporarily set instance.rel.through to None, instead of our FalseThrough
    object, restoring the original state on exit.
    """
    through, compat_rel(instance).through = compat_rel(instance).through, None
    instance.many_to_many = False
    try:
        yield
    finally:
        # Bug fix: restoration must run even if the with-body raises;
        # previously an exception left through=None and many_to_many=False
        # on the field permanently.
        instance.many_to_many = True
        compat_rel(instance).through = through
class ReverseForeignRelation(ManyToManyField):
    """Provides an accessor to reverse foreign key related objects"""

    # Field flags for Django's introspection API. Although this subclasses
    # ManyToManyField (to reuse its machinery), it actually models the
    # one-to-many reverse side of a foreign key.
    auto_created = False
    many_to_many = True
    many_to_one = False
    one_to_many = True
    one_to_one = False

    # No join table of its own, and never swappable.
    db_table = None
    swappable = False
    has_null_arg = False

    def __init__(self, to, field_name, **kwargs):
        # ``field_name`` is the name of the foreign key on ``to`` that
        # points back at the model this field is declared on.
        is_migration = kwargs.pop('is_migration', False)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        m2m_rel_kwargs = {
            'related_name': None,
            'symmetrical': True,
            'limit_choices_to': kwargs.pop('limit_choices_to', None),
            'through': None,
        }
        if is_migration:
            # Migration state: no fake through model, and behave as a
            # plain (non-m2m) field for Django's checks.
            m2m_rel_kwargs['through'] = None
            self.many_to_many = False
        else:
            # See FalseThrough: a falsey through object keeps Django 1.7+
            # from raising AttributeError on rel.through._meta.auto_created.
            m2m_rel_kwargs['through'] = FalseThrough()
        kwargs['rel'] = ManyToManyRel(self, to, **m2m_rel_kwargs)
        self.field_name = field_name
        kwargs['blank'] = True
        kwargs['editable'] = True
        # Never serialized: the data lives on the other model's FK.
        kwargs['serialize'] = False
        # Skip ManyToManyField.__init__ on purpose; its setup assumes a
        # real m2m relation.
        Field.__init__(self, **kwargs)

    def is_hidden(self):
        # Hide from reverse-relation traversal.
        return True

    def m2m_db_table(self):
        # "Join table" is just the related model's own table.
        return compat_rel_to(self)._meta.db_table

    def m2m_column_name(self):
        # Column of the FK on the related model that points back here.
        return compat_rel_to(self)._meta.get_field(self.field_name).attname

    def m2m_reverse_name(self):
        return compat_rel_to(self)._meta.pk.column

    def m2m_target_field_name(self):
        return self.model._meta.pk.name

    def m2m_reverse_target_field_name(self):
        return compat_rel_to(self)._meta.pk.name

    def get_attname_column(self):
        # No concrete column on this model for the field itself.
        attname, column = super(ReverseForeignRelation, self).get_attname_column()
        return attname, None

    def contribute_to_class(self, cls, name, **kwargs):
        if django.VERSION < (1, 10):
            # Pre-1.10 spelling for "no database column".
            kwargs['virtual_only'] = True
        self.model = cls
        # Bypass ManyToManyField.contribute_to_class (would set up m2m
        # descriptors/accessors that do not apply here).
        super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
        # Add the descriptor for the reverse fk relation
        setattr(cls, self.name, ReverseForeignRelatedObjectsDescriptor(self))

    def contribute_to_related_class(self, cls, related):
        # Intentionally a no-op: no reverse accessor on the related model.
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def formfield(self, **kwargs):
        kwargs.update({
            'form_class': CropDusterThumbFormField,
            # Start with an empty queryset; choices are driven elsewhere.
            'queryset': compat_rel_to(self)._default_manager.none(),
        })
        return super(ManyToManyField, self).formfield(**kwargs)

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``ReverseForeignRelation``.
        """
        rel_field_attname = compat_rel_to(self)._meta.get_field(self.field_name).attname
        return (
            compat_rel_to(self)._base_manager.db_manager(using)
            .complex_filter(compat_rel(self).limit_choices_to)
            .filter(**{'%s__in' % rel_field_attname: [obj.pk for obj in objs]}))

    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name followed by '+', which prevents its actual use.
        return '%s+' % self.opts.object_name.lower()

    def _check_relationship_model(self, from_model=None, **kwargs):
        # Override error in Django 1.7 (fields.E331: "Field specifies a
        # many-to-many relation through model 'None', which has not been
        # installed"), which is spurious for a reverse foreign key field.
        with rel_through_none(self):
            errors = super(ReverseForeignRelation, self)._check_relationship_model(from_model, **kwargs)
        return [e for e in errors if e.id != 'fields.E331']

    def deconstruct(self):
        # Deconstruct with through=None so migrations don't try to
        # serialize the FalseThrough sentinel.
        with rel_through_none(self):
            name, path, args, kwargs = super(ReverseForeignRelation, self).deconstruct()
        kwargs['field_name'] = self.field_name
        # Reconstructed (migration-state) copies skip the FalseThrough setup.
        kwargs['is_migration'] = True
        return name, path, args, kwargs

    def clone(self):
        new_field = super(ReverseForeignRelation, self).clone()
        # Clones are used in migration state, where this must not look
        # like a real m2m field.
        new_field.many_to_many = False
        return new_field
|
11507621 | import os
import sys
import pymysql
# Database and table name
mysql_db_name = "checklist"
mysql_db_table = "items"
# Whether to require TLS when connecting to the server ("yes"/"no").
use_ssl = "yes"
# Format a string to be included in a SQL query as value
def escape_quotes(this_value):
return str(this_value).replace("'", "\\'")
# Get database credentials from environment variables.
mysql_server_fqdn = os.environ.get("MYSQL_FQDN")
if mysql_server_fqdn is None:
    print("Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server")
    sys.exit(1)
mysql_server_name = mysql_server_fqdn.split('.')[0]
mysql_server_username = os.environ.get("MYSQL_USER")
if mysql_server_username is None:
    print("Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username")
    sys.exit(1)
# Azure Database for MySQL expects logins of the form user@servername.
if '@' not in mysql_server_username:
    mysql_server_username += '@' + mysql_server_name
mysql_server_password = os.environ.get("MYSQL_PASSWORD")
if mysql_server_password is None:
    print("Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password")
    sys.exit(1)

# Create connection to MySQL server and count the records in the table.
print("Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username))
if use_ssl == 'yes':
    db = pymysql.connect(host=mysql_server_fqdn, user=mysql_server_username, database=mysql_db_name, passwd=mysql_server_password, ssl={'ssl': {'ca': 'BaltimoreCyberTrustRoot.crt.pem'}})
else:
    db = pymysql.connect(host=mysql_server_fqdn, user=mysql_server_username, database=mysql_db_name, passwd=mysql_server_password)
# Table name comes from a constant above, not user input, so formatting it
# into the query is safe here.
sql_query = "SELECT COUNT(*) FROM {0};".format(mysql_db_table)
cursor = db.cursor()
cursor.execute(sql_query)
rows = cursor.fetchall()
# COUNT(*) always yields one row, but guard against an empty result set.
row_count = rows[0][0] if rows else 0
print("Table {0} in database {1} contains {2} records".format(mysql_db_table, mysql_db_name, str(row_count)))
# Bye
db.close()
11507641 | from django.views.generic import TemplateView
from haystack.generic_views import SearchView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from person.models import Person
class PersonsView(TemplateView):
    """List every Person."""
    template_name = 'person/persons.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['persons'] = Person.objects.all()
        return ctx
class PersonView(TemplateView):
    """Detail page for a single Person, looked up by slug."""
    template_name = 'person/person.html'

    def get_context_data(self, slug, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['person'] = Person.objects.get(slug=slug)
        return ctx
class TwitterPersonsView(TemplateView):
    """List persons that have a Twitter username, ordered by surname."""
    template_name = 'person/twitter_users.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['persons'] = (Person.objects
                          .exclude(twitter_username='')
                          .order_by('surname'))
        return ctx
class PersonsCheckView(TemplateView):
    """Data-quality report: shared surnames and surname-only records."""
    template_name = 'person/check/persons_check.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['persons_same_surname'] = Person.same_surname()
        ctx['persons_surname_only'] = Person.objects.filter(
            forename='', initials='')
        return ctx
class PersonSlugCheckView(TemplateView):
    """Report persons whose slug collides with another person's slug."""
    template_name = 'person/check/persons_slug_check.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Count all slugs in a single query instead of issuing one
        # ``filter(slug=...)`` query per person (the original did O(n)
        # database round-trips for n persons).
        slug_counts = Counter(Person.objects.values_list('slug', flat=True))
        duplicate_slugs = [slug for slug, count in slug_counts.items() if count > 1]
        # Same result set as the original id-collection approach: every
        # person whose slug occurs more than once, ordered by slug.
        context['persons_same_slug'] = (
            Person.objects.filter(slug__in=duplicate_slugs).order_by('slug'))
        return context
class PersonTKIDCheckView(TemplateView):
    """Report persons sharing a tk_id with another person."""
    template_name = 'person/check/persons_tk_id_check.html'

    @staticmethod
    def get_duplicate_person_tk_ids():
        """Return tk_ids used by more than one person (falsy tk_ids ignored)."""
        # One query for all tk_ids instead of one COUNT query per person
        # (the original did O(n) database round-trips for n persons).
        tk_counts = Counter(
            tk_id for tk_id in Person.objects.values_list('tk_id', flat=True)
            if tk_id)
        return [tk_id for tk_id, count in tk_counts.items() if count > 1]

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        duplicate_tk_ids = self.get_duplicate_person_tk_ids()
        # NOTE(review): despite the key name, this is a queryset of Person
        # rows having a duplicated tk_id (matches the original behavior).
        context['duplicate_tk_ids'] = Person.objects.filter(
            tk_id__in=duplicate_tk_ids).order_by('tk_id')
        return context
class PersonSearchView(SearchView):
    # Haystack-backed full-text search limited to Person records.
    template_name='search/searchperson.html'
    # Don't prefetch full model objects for every hit.
    load_all=False
    form_class=SearchForm
    queryset=SearchQuerySet().models(Person)
    # def extra_context(self):
    #     return {
    #         'yourValue': 112,
    #     }
|
11507672 | class Tree:
def __init_(self):
self.root = None
class Node:
    """A binary-tree node holding ``key`` plus parent/left/right links."""

    def __init__(self, key):
        self.key = key
        # A freshly created node is detached: no parent and no children.
        self.parent = None
        self.left = None
        self.right = None
def is_leaf(node):
    """Return True if ``node`` has no children."""
    # ``is None`` rather than ``== None``: identity is the correct (and
    # cheaper) way to test for None.
    return node.left is None and node.right is None
def insert(key, tree):
    """Insert ``key`` into ``tree`` at the standard BST position."""
    child = Node(key)
    parent = None
    cursor = tree.root
    # Walk down the tree, remembering the last node visited; when the
    # walk falls off the tree, ``parent`` is the attachment point.
    while cursor is not None:
        parent = cursor
        cursor = cursor.left if key < cursor.key else cursor.right
    child.parent = parent
    if parent is None:
        # Tree was empty: the new node becomes the root.
        tree.root = child
    elif key < parent.key:
        parent.left = child
    else:
        # Equal keys go to the right subtree, matching the walk above.
        parent.right = child
def search(key, node):
    """Return the node holding ``key`` in the subtree rooted at ``node``,
    or None if the key is absent.
    """
    # BUG FIX: test ``node is None`` *before* touching ``node.key`` —
    # the original evaluated ``node.key == key`` first and raised
    # AttributeError whenever the search fell off the tree.
    if node is None or node.key == key:
        return node
    if key > node.key:
        return search(key, node.right)
    return search(key, node.left)


def tree_search(key, tree):
    """Search the whole ``tree``; return the matching node or None."""
    # BUG FIX: the original called search() but dropped the result,
    # always returning None.
    return search(key, tree.root)
|
11507679 | import os
import time
import logging
class Timer:
    """A simple tic/toc stopwatch that tracks totals and averages."""

    def __init__(self):
        # All state lives in clear(); starting fresh is the same as resetting.
        self.clear()

    def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the interval started by tic(); return the elapsed time.

        With ``average=True`` (the default) the running average over all
        calls is returned instead of the last interval.
        """
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        self.duration = self.average_time if average else self.diff
        return self.duration

    def clear(self):
        """Reset every counter to its initial state."""
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
        self.duration = 0.
def mkdir_if_missing(dir_name):
    """Create ``dir_name`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original check-then-create
    pair, which had a race: another process could create the directory
    between the ``os.path.exists`` check and ``os.makedirs``, making
    ``makedirs`` raise FileExistsError.
    """
    os.makedirs(dir_name, exist_ok=True)
def write_results(filename, results, data_type):
    """Write tracking results to ``filename``.

    :param filename: output path (overwritten).
    :param results: iterable of ``(frame_id, tlwhs, track_ids)`` tuples,
        one per frame; boxes are in top-left/width/height form.
    :param data_type: 'mot', 'kitti' or 'custom'; selects the line format.
    :raises ValueError: on an unknown ``data_type``.
    """
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    elif data_type == 'custom':
        # NOTE(review): this template has no trailing newline, and its
        # ``{class}`` placeholder is never supplied by the ``format`` call
        # below, so the 'custom' branch raises KeyError at runtime —
        # confirm the intended fields before using it.
        save_format = '{frame},{id},{class}'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                # KITTI frame numbering starts at 0, ours at 1.
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    # Negative ids mark invalid/unassigned tracks.
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
                f.write(line)
    logging.info('save results to {}'.format(filename))
|
11507722 | from functools import partial
from typing import List, Union, Dict
import click
def status(state: str, title: str) -> None:
    """Echo a colored one-line status, e.g. ``Updated: <title>``.

    ``state`` must be one of 'update', 'skip', 'create' or 'failed';
    any other value raises KeyError.
    """
    s = partial(click.style, bold=True)
    # fmt: off
    styles = {
        "update": s("Updated:", fg="blue"),
        "skip"  : s("Skipped:", fg="yellow"),
        "create": s("Created:", fg="green"),
        "failed": s("Failed:" , fg="red"),
    }
    # fmt: on
    # BUG FIX: the return annotation said ``-> List`` but the function
    # only echoes to the terminal and returns None.
    click.echo(" ".join([styles[state], click.style(title, underline=True)]))
def summarize(db: str, bibtex: Union[List, Dict], notion: Union[List, Dict]) -> None:
    """Echo a magenta summary comparing BibTeX and Notion entry counts."""
    magenta = partial(click.style, fg="magenta")
    bold = partial(click.style, bold=True)
    words = [
        "Found",
        bold(len(bibtex)),
        f"{db} in BibTeX and",
        bold(len(notion)),
        "on Notion.",
    ]
    click.echo(" ".join(map(magenta, words)))
|
11507724 | from enum import Enum
import numpy as np
import numba as nb
from .rect import area, get_center
# Sentinel distance for entries that must never be matched (e.g. the
# skipped triangle in pdist's symmetric output).
INF_DIST = 1e5


class Metric(Enum):
    """Distance metrics supported by cdist/pdist."""
    EUCLIDEAN = 0
    COSINE = 1
@nb.njit(parallel=True, fastmath=True, cache=True)
def cdist(XA, XB, metric, empty_mask=None, fill_val=None):
    """Numba implementation of Scipy's cdist.

    Returns the (len(XA), len(XB)) matrix of pairwise distances between
    the rows of XA and XB. Where ``empty_mask[i, j]`` is True the entry
    is not computed and is filled with ``fill_val`` (default 1.).
    """
    assert XA.ndim == XB.ndim == 2
    assert XA.shape[1] == XB.shape[1]
    if empty_mask is not None:
        assert empty_mask.ndim == 2
        assert empty_mask.shape[0] == XA.shape[0]
        assert empty_mask.shape[1] == XB.shape[0]
    # Normalize the optional fill value once, outside the kernels.
    filler = 1. if fill_val is None else fill_val
    if metric == Metric.EUCLIDEAN:
        return euclidean(XA, XB, empty_mask, filler)
    elif metric == Metric.COSINE:
        return cosine(XA, XB, empty_mask, filler)
    else:
        raise ValueError('Unsupported distance metric')
@nb.njit(parallel=True, fastmath=True, cache=True)
def pdist(X, metric):
    """Numba implementation of Scipy's pdist.

    Unlike Scipy's condensed output, this returns a full square matrix:
    only the strict upper triangle holds distances; the diagonal and
    lower triangle are set to INF_DIST by the kernels' symmetric mode.
    """
    assert X.ndim == 2
    if metric == Metric.EUCLIDEAN:
        return euclidean(X, X, symmetric=True)
    elif metric == Metric.COSINE:
        return cosine(X, X, symmetric=True)
    else:
        raise ValueError('Unsupported distance metric')
@nb.njit(parallel=True, fastmath=True, cache=True, inline='always')
def euclidean(XA, XB, empty_mask=None, filler=1., symmetric=False):
    """Numba implementation of Scipy's euclidean.

    Returns the (len(XA), len(XB)) matrix of pairwise Euclidean
    distances. With ``symmetric=True`` only the strict upper triangle is
    computed (the rest gets INF_DIST); masked entries get ``filler``.
    """
    Y = np.empty((XA.shape[0], XB.shape[0]))
    for i in nb.prange(XA.shape[0]):
        for j in range(XB.shape[0]):
            if symmetric and i >= j:
                # Diagonal and lower triangle skipped for pdist use.
                Y[i, j] = INF_DIST
            elif empty_mask is not None and empty_mask[i, j]:
                Y[i, j] = filler
            else:
                norm = 0.
                for k in range(XA.shape[1]):
                    norm += (XA[i, k] - XB[j, k])**2
                Y[i, j] = np.sqrt(norm)
    return Y
@nb.njit(parallel=True, fastmath=True, cache=True, inline='always')
def cosine(XA, XB, empty_mask=None, filler=1., symmetric=False):
    """Numba implementation of Scipy's cosine.

    Computes 1 - cosine_similarity for each row pair. With
    ``symmetric=True`` only the strict upper triangle is computed (the
    rest gets INF_DIST); masked entries get ``filler``.
    """
    Y = np.empty((XA.shape[0], XB.shape[0]))
    for i in nb.prange(XA.shape[0]):
        for j in range(XB.shape[0]):
            if symmetric and i >= j:
                # Diagonal and lower triangle skipped for pdist use.
                Y[i, j] = INF_DIST
            elif empty_mask is not None and empty_mask[i, j]:
                Y[i, j] = filler
            else:
                # Accumulate dot product and both squared norms in one pass.
                dot = 0.
                a_norm = 0.
                b_norm = 0.
                for k in range(XA.shape[1]):
                    dot += XA[i, k] * XB[j, k]
                    a_norm += XA[i, k] * XA[i, k]
                    b_norm += XB[j, k] * XB[j, k]
                a_norm = np.sqrt(a_norm)
                b_norm = np.sqrt(b_norm)
                # NOTE(review): a zero-norm row divides by zero here —
                # presumably inputs are nonzero feature vectors; confirm.
                Y[i, j] = 1. - dot / (a_norm * b_norm)
    return Y
@nb.njit(parallel=False, fastmath=True, cache=True)
def iou_dist(tlbrs1, tlbrs2):
    """Computes pairwise IoU distance.

    Boxes are in (min, min, max, max) corner form; the result is
    1 - IoU, so identical boxes yield 0 and disjoint boxes yield 1.
    """
    assert tlbrs1.ndim == tlbrs2.ndim == 2
    assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4
    Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0]))
    for i in nb.prange(tlbrs1.shape[0]):
        # Hoist the row's area out of the inner loop.
        area1 = area(tlbrs1[i, :])
        for j in range(tlbrs2.shape[0]):
            # The +1 terms treat coordinates as inclusive pixel indices.
            iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            if iw > 0 and ih > 0:
                area_inter = iw * ih
                area_union = area1 + area(tlbrs2[j, :]) - area_inter
                Y[i, j] = 1. - area_inter / area_union
            else:
                # No overlap: maximum distance.
                Y[i, j] = 1.
    return Y
@nb.njit(parallel=False, fastmath=True, cache=True)
def giou_dist(tlbrs1, tlbrs2):
    """Computes pairwise GIoU distance.

    GIoU is in [-1, 1]; the result (1 - GIoU) * 0.5 rescales the
    distance to [0, 1].
    """
    assert tlbrs1.ndim == tlbrs2.ndim == 2
    assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4
    Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0]))
    for i in nb.prange(tlbrs1.shape[0]):
        area1 = area(tlbrs1[i, :])
        for j in range(tlbrs2.shape[0]):
            iou = 0.
            area_union = area1 + area(tlbrs2[j, :])
            # Intersection extents; +1 treats coordinates as inclusive.
            iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            if iw > 0 and ih > 0:
                area_inter = iw * ih
                area_union -= area_inter
                iou = area_inter / area_union
            # Smallest enclosing box of the pair.
            ew = max(tlbrs1[i, 2], tlbrs2[j, 2]) - min(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            eh = max(tlbrs1[i, 3], tlbrs2[j, 3]) - min(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            area_encls = ew * eh
            giou = iou - (area_encls - area_union) / area_encls
            Y[i, j] = (1. - giou) * 0.5
    return Y
@nb.njit(parallel=True, fastmath=True, cache=True)
def diou_dist(tlbrs1, tlbrs2):
    """Computes pairwise DIoU distance.

    DIoU penalizes IoU by the normalized squared distance between box
    centers; the result (1 - DIoU) * 0.5 rescales to [0, 1].
    """
    assert tlbrs1.ndim == tlbrs2.ndim == 2
    assert tlbrs1.shape[1] == tlbrs2.shape[1] == 4
    Y = np.empty((tlbrs1.shape[0], tlbrs2.shape[0]))
    for i in nb.prange(tlbrs1.shape[0]):
        area1 = area(tlbrs1[i, :])
        x1, y1 = get_center(tlbrs1[i, :])
        for j in range(tlbrs2.shape[0]):
            iou = 0.
            # Intersection extents; +1 treats coordinates as inclusive.
            iw = min(tlbrs1[i, 2], tlbrs2[j, 2]) - max(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            ih = min(tlbrs1[i, 3], tlbrs2[j, 3]) - max(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            if iw > 0 and ih > 0:
                area_inter = iw * ih
                area_union = area1 + area(tlbrs2[j, :]) - area_inter
                iou = area_inter / area_union
            # Squared diagonal of the smallest enclosing box ...
            ew = max(tlbrs1[i, 2], tlbrs2[j, 2]) - min(tlbrs1[i, 0], tlbrs2[j, 0]) + 1
            eh = max(tlbrs1[i, 3], tlbrs2[j, 3]) - min(tlbrs1[i, 1], tlbrs2[j, 1]) + 1
            c = ew**2 + eh**2
            # ... and squared distance between the two box centers.
            x2, y2 = get_center(tlbrs2[j, :])
            d = (x2 - x1)**2 + (y2 - y1)**2
            # NOTE(review): standard DIoU uses (d / c) to the first power;
            # the 0.6 exponent softens the center-distance penalty —
            # presumably deliberate tuning, confirm before changing.
            diou = iou - (d / c)**0.6
            Y[i, j] = (1. - diou) * 0.5
    return Y
|
11507742 | from __future__ import print_function, division, absolute_import, unicode_literals
from os.path import join
from tempfile import mkdtemp
from shutil import rmtree
import h5py
from dagian.tools.dagian_runner import dagian_run_with_configs
def test_generate_lifetime_features():
    """End-to-end test: run dagian over a full bundle configuration and
    verify the structure and shape of the resulting HDF5 data bundle.
    """
    # All intermediate and output files go into a throwaway directory,
    # removed at the end on success.
    test_output_dir = mkdtemp(prefix="dagian_test_output_")
    h5py_hdf_dir = join(test_output_dir, "h5py")
    pandas_hdf_dir = join(test_output_dir, "pandas")
    pickle_dir = join(test_output_dir, "pickle")
    data_bundles_dir = join(test_output_dir, "data_bundles")
    global_config = {
        'generator_class': 'dagian.tests.lifetime_feature_generator'
                           '.LifetimeFeatureGenerator',
        'data_bundles_dir': data_bundles_dir,
        'generator_kwargs': {
            'h5py_hdf_dir': h5py_hdf_dir,
            'pandas_hdf_dir': pandas_hdf_dir,
            'pickle_dir': pickle_dir,
        },
    }
    bundle_config = {
        'name': 'default',
        # 'structure' names every dataset the generator must produce,
        # exercising plain keys, keyed features with args, loops, nested
        # (recursive) definitions and sequential pipelines.
        'structure': {
            'label': 'lifetime',
            'test_filters': [
                'is_in_test_set',
            ],
            'test_dict': {
                'comparison': [
                    'weight',
                    'height',
                    'mem_raw_data',
                    'pd_weight',
                    'pd_height',
                    'pd_raw_data',
                    'pd_raw_data_append',
                ],
                'others': {
                    'light_weight': 'light_weight',
                    'height_divide_weight': {
                        'key': 'division',
                        'args': {'dividend': 'weight'},
                    },
                },
            },
            'features': [
                'weight',
                'height',
                'mem_raw_data',
                'man_raw_data',
                'man_sparse_raw_data',
                'pd_weight',
                'pd_height',
                'pd_raw_data',
                'BMI',
                {'key': 'division',
                 'args': {'dividend': 'weight'},
                 'loop': [
                     {},  # default divisor: height
                     {'divisor': 'income'},
                 ]},
                {'key': 'division_2_divisor',
                 'args': {'dividend': 'weight',
                          'divisor1': 'height',
                          'divisor2': 'height'}},
                {'key': 'division_pd_2_divisor',
                 'args': {'dividend': 'weight',
                          'divisor1': 'height',
                          'divisor2': 'height'}},
                {'key': 'recursive_division',
                 'args': {'dividend': {'key': 'recursive_division',
                                       'args': {'dividend': 'weight',
                                                'divisor': 'height'}},
                          'divisor': 'height'}},
                {'key': 'sequential_division',
                 'args': {'sequence': [{'key': 'weight'}, 'height', {'key': 'height'}]}},
                'nan',
            ],
        },
        'structure_config': {
            'features': {
                # Concatenate all features into one matrix.
                'concat': True,
            }
        }
    }
    dagian_run_with_configs(global_config, bundle_config)
    data_bundle_hdf_path = join(data_bundles_dir, bundle_config['name'] + '.h5')
    # Verify the bundle layout and that concatenation produced the
    # expected 6 samples x 20 feature columns.
    with h5py.File(data_bundle_hdf_path, "r") as data_bundle_h5f:
        assert set(data_bundle_h5f) == {'features', 'test_filters', 'label', 'test_dict'}
        assert set(data_bundle_h5f['test_filters']) == {'is_in_test_set'}
        assert set(data_bundle_h5f['test_dict']) == {'comparison', 'others'}
        assert (set(data_bundle_h5f['test_dict/comparison'])
                == set(bundle_config['structure']['test_dict']['comparison']))
        assert data_bundle_h5f['features'].shape == (6, 20)
    rmtree(test_output_dir)
|
11507743 | from kutana import Plugin
def test_deprecated():
    """Deprecated registration helpers should still return handlers."""
    plugin = Plugin('')
    for deprecated_helper in (
        plugin.on_any_message,
        plugin.on_any_unprocessed_message,
        plugin.on_any_update,
        plugin.on_any_unprocessed_update,
        plugin.vk.on_payload,
    ):
        assert deprecated_helper()
|
11507767 | import requests, vk_api, json
# Read VK credentials from keys.json (expects 'login' and 'password' keys).
with open('keys.json', 'r') as file:
    s=json.loads(file.read())
vks=vk_api.VkApi(login=s['login'], password=s['password'])
vks.auth()
# Download the image to a local file.
# NOTE(review): ``img`` and ``i`` are never defined in this script — it
# looks like a fragment lifted out of a loop (e.g. ``for i in
# range(len(img))``); confirm the missing context before running.
with open('re.jpg', 'wb') as file:
    file.write(requests.get(img[i]).content)
# Upload the image to the VK group album and record its attachment id.
photo=vk_api.VkUpload(vks).photo('re.jpg', group_id=151412216, album_id=247265476)[0]
img[i]='photo{}_{}'.format(photo['owner_id'], photo['id'])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.