code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import pytest
from snuba.clickhouse.columns import ColumnSet, String, UInt
from snuba.query import SelectedExpression
from snuba.query.conditions import binary_condition
from snuba.query.expressions import Column, FunctionCall, Literal
from snuba.query.logical import Query
from snuba.query.processors.custom_function import (
CustomFunction,
InvalidCustomFunctionCall,
partial_function,
simple_function,
)
from snuba.query.validation.signature import Column as ColType
from snuba.request.request_settings import HTTPRequestSettings
TEST_CASES = [
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression("column1", Column("column1", None, "column1")),
],
groupby=[Column("column1", None, "column1")],
condition=binary_condition(
None,
"equals",
FunctionCall(
"group_id", "f", (Column("something", None, "something"),)
),
Literal(None, 1),
),
),
Query(
{},
None,
selected_columns=[
SelectedExpression("column1", Column("column1", None, "column1")),
],
groupby=[Column("column1", None, "column1")],
condition=binary_condition(
None,
"equals",
FunctionCall(
"group_id", "f", (Column("something", None, "something"),)
),
Literal(None, 1),
),
),
id="Function not present",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(Literal(None, "literal1"), Column("param2", None, "param2"),),
),
),
],
),
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call_impl",
(
Literal(None, "literal1"),
FunctionCall(
None, "inner_call", (Column("param2", None, "param2"),)
),
Literal(None, 420),
),
),
),
],
),
id="Expand simple function",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(
Column("param1", None, "param1"),
FunctionCall(
None,
"assumeNotNull",
(Column("param2", None, "param2"),),
),
),
),
),
],
),
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call_impl",
(
Column("param1", None, "param1"),
FunctionCall(
None,
"inner_call",
(
FunctionCall(
None,
"assumeNotNull",
(Column("param2", None, "param2"),),
),
),
),
Literal(None, 420),
),
),
),
],
),
id="Expand simple function",
),
]
@pytest.mark.parametrize("query, expected_query", TEST_CASES)
def test_format_expressions(query: Query, expected_query: Query) -> None:
processor = CustomFunction(
ColumnSet([("param1", String()), ("param2", UInt(8)), ("other_col", String())]),
"f_call",
[("param1", ColType({String})), ("param2", ColType({UInt}))],
partial_function(
"f_call_impl(param1, inner_call(param2), my_const)", [("my_const", 420)],
),
)
# We cannot just run == on the query objects. The content of the two
# objects is different, being one the AST and the ont the AST + raw body
processor.process_query(query, HTTPRequestSettings())
assert (
query.get_selected_columns_from_ast()
== expected_query.get_selected_columns_from_ast()
)
assert query.get_groupby_from_ast() == expected_query.get_groupby_from_ast()
assert query.get_condition_from_ast() == expected_query.get_condition_from_ast()
assert query.get_arrayjoin_from_ast() == expected_query.get_arrayjoin_from_ast()
assert query.get_having_from_ast() == expected_query.get_having_from_ast()
assert query.get_orderby_from_ast() == expected_query.get_orderby_from_ast()
INVALID_QUERIES = [
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func", "f_call", (Column("param2", None, "param2"),),
),
),
],
),
id="Invalid number of parameters",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(
Column("param2", None, "param2"),
Column("param1", None, "param1"),
),
),
),
],
),
id="Inverting parameter types",
),
]
@pytest.mark.parametrize("query", INVALID_QUERIES)
def test_invalid_call(query: Query) -> None:
processor = CustomFunction(
ColumnSet([("param1", String()), ("param2", UInt(8)), ("other_col", String())]),
"f_call",
[("param1", ColType({String})), ("param2", ColType({UInt}))],
simple_function("f_call_impl(param1, inner_call(param2))"),
)
with pytest.raises(InvalidCustomFunctionCall):
processor.process_query(query, HTTPRequestSettings()) | tests/query/processors/test_custom_function.py | import pytest
from snuba.clickhouse.columns import ColumnSet, String, UInt
from snuba.query import SelectedExpression
from snuba.query.conditions import binary_condition
from snuba.query.expressions import Column, FunctionCall, Literal
from snuba.query.logical import Query
from snuba.query.processors.custom_function import (
CustomFunction,
InvalidCustomFunctionCall,
partial_function,
simple_function,
)
from snuba.query.validation.signature import Column as ColType
from snuba.request.request_settings import HTTPRequestSettings
TEST_CASES = [
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression("column1", Column("column1", None, "column1")),
],
groupby=[Column("column1", None, "column1")],
condition=binary_condition(
None,
"equals",
FunctionCall(
"group_id", "f", (Column("something", None, "something"),)
),
Literal(None, 1),
),
),
Query(
{},
None,
selected_columns=[
SelectedExpression("column1", Column("column1", None, "column1")),
],
groupby=[Column("column1", None, "column1")],
condition=binary_condition(
None,
"equals",
FunctionCall(
"group_id", "f", (Column("something", None, "something"),)
),
Literal(None, 1),
),
),
id="Function not present",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(Literal(None, "literal1"), Column("param2", None, "param2"),),
),
),
],
),
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call_impl",
(
Literal(None, "literal1"),
FunctionCall(
None, "inner_call", (Column("param2", None, "param2"),)
),
Literal(None, 420),
),
),
),
],
),
id="Expand simple function",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(
Column("param1", None, "param1"),
FunctionCall(
None,
"assumeNotNull",
(Column("param2", None, "param2"),),
),
),
),
),
],
),
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call_impl",
(
Column("param1", None, "param1"),
FunctionCall(
None,
"inner_call",
(
FunctionCall(
None,
"assumeNotNull",
(Column("param2", None, "param2"),),
),
),
),
Literal(None, 420),
),
),
),
],
),
id="Expand simple function",
),
]
@pytest.mark.parametrize("query, expected_query", TEST_CASES)
def test_format_expressions(query: Query, expected_query: Query) -> None:
processor = CustomFunction(
ColumnSet([("param1", String()), ("param2", UInt(8)), ("other_col", String())]),
"f_call",
[("param1", ColType({String})), ("param2", ColType({UInt}))],
partial_function(
"f_call_impl(param1, inner_call(param2), my_const)", [("my_const", 420)],
),
)
# We cannot just run == on the query objects. The content of the two
# objects is different, being one the AST and the ont the AST + raw body
processor.process_query(query, HTTPRequestSettings())
assert (
query.get_selected_columns_from_ast()
== expected_query.get_selected_columns_from_ast()
)
assert query.get_groupby_from_ast() == expected_query.get_groupby_from_ast()
assert query.get_condition_from_ast() == expected_query.get_condition_from_ast()
assert query.get_arrayjoin_from_ast() == expected_query.get_arrayjoin_from_ast()
assert query.get_having_from_ast() == expected_query.get_having_from_ast()
assert query.get_orderby_from_ast() == expected_query.get_orderby_from_ast()
INVALID_QUERIES = [
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func", "f_call", (Column("param2", None, "param2"),),
),
),
],
),
id="Invalid number of parameters",
),
pytest.param(
Query(
{},
None,
selected_columns=[
SelectedExpression(
"my_func",
FunctionCall(
"my_func",
"f_call",
(
Column("param2", None, "param2"),
Column("param1", None, "param1"),
),
),
),
],
),
id="Inverting parameter types",
),
]
@pytest.mark.parametrize("query", INVALID_QUERIES)
def test_invalid_call(query: Query) -> None:
processor = CustomFunction(
ColumnSet([("param1", String()), ("param2", UInt(8)), ("other_col", String())]),
"f_call",
[("param1", ColType({String})), ("param2", ColType({UInt}))],
simple_function("f_call_impl(param1, inner_call(param2))"),
)
with pytest.raises(InvalidCustomFunctionCall):
processor.process_query(query, HTTPRequestSettings()) | 0.612194 | 0.465691 |
from sample_players import DataPlayer
import math, random
class CustomPlayer_AB(DataPlayer):
def minimax(self, state, depth):
'''
Min-max algorithm
:param state: Game state
:param depth: Depth of tree
:return: the state with highest score
'''
def min_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1))
return value
def max_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1))
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1))
def alphabeta(self, state, depth):
'''
Return the move along a branch of the game tree that
has the best possible value. A move is a pair of coordinates
in (column, row) order corresponding to a legal move for
the searching player.
'''
def min_value(state, depth, alpha, beta):
'''
Return the value for a win (+1) if the game is over,
otherwise return the minimum value over all legal child
nodes
'''
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1, alpha, beta))
beta = min(beta, value)
if beta <= alpha: break
return value
def max_value(state, depth, alpha, beta):
'''
Return the value for a loss (-1) if the game is over,
otherwise return the maximum value over all legal child
nodes.
'''
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1, alpha, beta))
alpha = max(alpha, value)
if beta <= alpha: break
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1, float("-inf"), float("inf")))
def score(self, state):
'''
Scoring function
'''
own_loc = state.locs[self.player_id]
opp_loc = state.locs[1 - self.player_id]
own_liberties = state.liberties(own_loc)
opp_liberties = state.liberties(opp_loc)
return len(own_liberties) - len(opp_liberties)
def get_action(self, state):
import random
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
depth = 3
# Iterative Deepening
for i in range(1, depth + 1):
self.queue.put(self.alphabeta(state, depth=i))
class MCTS():
def __init__(self, state):
self.root_node = self.TreeNode(state)
def select(self, node):
while not node.state.terminal_test():
if not node.explored():
expand_node = self.expand(node)
return expand_node
else:
node = self.best_child(node)
return node
def best_child(self, node):
'''
Select a unexplored child, or best child
'''
best_child_nodes = []
best_score = float('-inf')
C = math.sqrt(2)
for child in node.childrens:
exploit = child.q_value / child.visited
explore = C * math.sqrt(math.log(node.visited) / child.visited)
child_score = exploit + explore
if child_score == best_score:
best_child_nodes.append(child)
elif child_score > best_score:
best_child_nodes = []
best_child_nodes.append(child)
best_score = child_score
if len(best_child_nodes) == 0:
return None
return random.choice(best_child_nodes)
def expand(self, node):
'''
Pick an action, execute and get next child
'''
possible_actions = node.actions_available()
if len(possible_actions) > 0:
action = possible_actions[0]
child_state = node.state.result(action)
child_node = MCTS.TreeNode(child_state, node, action)
node.childrens.append(child_node)
node.actioned.append(action)
return node.childrens[-1]
else:
return None
def simulate(self, state):
'''
Simulate to the end of the game, get reward 1 in case of winning of -1 otherwise
'''
player_id = state.player()
while not state.terminal_test():
state = state.result(random.choice(state.actions()))
return -1 if state._has_liberties(player_id) else 1
def backpropagation(self, node, reward):
'''
Update all nodes with reward, from leaf node all the way back to the root
'''
while node is not None:
node.update_qvalue(reward)
node = node.parent
reward = -reward
def best_action(self, node):
return self.best_child(node).parent_action
def run(self):
num_iter = 60
try:
if self.root_node.state.terminal_test():
return random.choice(self.root_node.state.actions())
for i in range(num_iter):
node = self.select(self.root_node)
if node is None:
continue
reward = self.simulate(node.state)
self.backpropagation(node, reward)
except Exception as ex:
print('Exception: {0}'.format(str(ex)))
action = self.best_action(self.root_node)
return action
class TreeNode():
'''
Represents a game state, with available actions to explore future game states further down the tree
'''
def __init__(self, state, parent=None, parent_action=None):
self.state = state
self.parent = parent
self.parent_action = parent_action
self.actions = state.actions()
self.actioned = []
self.childrens = []
self.q_value = 0
self.visited = 1
def explored(self):
return len(self.actions) == len(self.actioned)
def actions_available(self):
actions_left = list(set(self.actions) - set(self.actioned))
return actions_left
def update_qvalue(self, reward):
self.q_value += reward
self.visited += 1
class CustomPlayer_MCTS(DataPlayer):
def get_action(self, state):
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
mcts = MCTS(state)
best_move = mcts.run()
self.queue.put(best_move)
CustomPlayer = CustomPlayer_MCTS | Projects/3_Adversarial Search/my_custom_player.py | from sample_players import DataPlayer
import math, random
class CustomPlayer_AB(DataPlayer):
def minimax(self, state, depth):
'''
Min-max algorithm
:param state: Game state
:param depth: Depth of tree
:return: the state with highest score
'''
def min_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1))
return value
def max_value(state, depth):
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1))
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1))
def alphabeta(self, state, depth):
'''
Return the move along a branch of the game tree that
has the best possible value. A move is a pair of coordinates
in (column, row) order corresponding to a legal move for
the searching player.
'''
def min_value(state, depth, alpha, beta):
'''
Return the value for a win (+1) if the game is over,
otherwise return the minimum value over all legal child
nodes
'''
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1, alpha, beta))
beta = min(beta, value)
if beta <= alpha: break
return value
def max_value(state, depth, alpha, beta):
'''
Return the value for a loss (-1) if the game is over,
otherwise return the maximum value over all legal child
nodes.
'''
if state.terminal_test(): return state.utility(self.player_id)
if depth <= 0: return self.score(state)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1, alpha, beta))
alpha = max(alpha, value)
if beta <= alpha: break
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1, float("-inf"), float("inf")))
def score(self, state):
'''
Scoring function
'''
own_loc = state.locs[self.player_id]
opp_loc = state.locs[1 - self.player_id]
own_liberties = state.liberties(own_loc)
opp_liberties = state.liberties(opp_loc)
return len(own_liberties) - len(opp_liberties)
def get_action(self, state):
import random
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
depth = 3
# Iterative Deepening
for i in range(1, depth + 1):
self.queue.put(self.alphabeta(state, depth=i))
class MCTS():
def __init__(self, state):
self.root_node = self.TreeNode(state)
def select(self, node):
while not node.state.terminal_test():
if not node.explored():
expand_node = self.expand(node)
return expand_node
else:
node = self.best_child(node)
return node
def best_child(self, node):
'''
Select a unexplored child, or best child
'''
best_child_nodes = []
best_score = float('-inf')
C = math.sqrt(2)
for child in node.childrens:
exploit = child.q_value / child.visited
explore = C * math.sqrt(math.log(node.visited) / child.visited)
child_score = exploit + explore
if child_score == best_score:
best_child_nodes.append(child)
elif child_score > best_score:
best_child_nodes = []
best_child_nodes.append(child)
best_score = child_score
if len(best_child_nodes) == 0:
return None
return random.choice(best_child_nodes)
def expand(self, node):
'''
Pick an action, execute and get next child
'''
possible_actions = node.actions_available()
if len(possible_actions) > 0:
action = possible_actions[0]
child_state = node.state.result(action)
child_node = MCTS.TreeNode(child_state, node, action)
node.childrens.append(child_node)
node.actioned.append(action)
return node.childrens[-1]
else:
return None
def simulate(self, state):
'''
Simulate to the end of the game, get reward 1 in case of winning of -1 otherwise
'''
player_id = state.player()
while not state.terminal_test():
state = state.result(random.choice(state.actions()))
return -1 if state._has_liberties(player_id) else 1
def backpropagation(self, node, reward):
'''
Update all nodes with reward, from leaf node all the way back to the root
'''
while node is not None:
node.update_qvalue(reward)
node = node.parent
reward = -reward
def best_action(self, node):
return self.best_child(node).parent_action
def run(self):
num_iter = 60
try:
if self.root_node.state.terminal_test():
return random.choice(self.root_node.state.actions())
for i in range(num_iter):
node = self.select(self.root_node)
if node is None:
continue
reward = self.simulate(node.state)
self.backpropagation(node, reward)
except Exception as ex:
print('Exception: {0}'.format(str(ex)))
action = self.best_action(self.root_node)
return action
class TreeNode():
'''
Represents a game state, with available actions to explore future game states further down the tree
'''
def __init__(self, state, parent=None, parent_action=None):
self.state = state
self.parent = parent
self.parent_action = parent_action
self.actions = state.actions()
self.actioned = []
self.childrens = []
self.q_value = 0
self.visited = 1
def explored(self):
return len(self.actions) == len(self.actioned)
def actions_available(self):
actions_left = list(set(self.actions) - set(self.actioned))
return actions_left
def update_qvalue(self, reward):
self.q_value += reward
self.visited += 1
class CustomPlayer_MCTS(DataPlayer):
def get_action(self, state):
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
mcts = MCTS(state)
best_move = mcts.run()
self.queue.put(best_move)
CustomPlayer = CustomPlayer_MCTS | 0.753104 | 0.612107 |
from django.views.generic.edit import (
CreateView,
UpdateView,
)
from django_addanother.views import CreatePopupMixin, UpdatePopupMixin
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.auth.views import redirect_to_login
from gbe_utils.mixins import (
GbeFormMixin,
ProfileRequiredMixin,
)
from gbe.models import (
Profile,
Troupe,
UserMessage,
)
from gbe.forms import TroupeForm
from gbetext import (
default_edit_troupe_msg,
no_persona_msg,
troupe_header_text,
)
class TroupeCreate(CreatePopupMixin,
GbeFormMixin,
ProfileRequiredMixin,
CreateView):
model = Troupe
form_class = TroupeForm
template_name = 'gbe/modal_performer_form.tmpl'
success_url = reverse_lazy('home', urlconf="gbe.urls")
page_title = 'Manage Troupe'
view_title = 'Tell Us About Your Troupe'
intro_text = troupe_header_text
mode = "troupe"
valid_message = default_edit_troupe_msg
def get_initial(self):
initial = super().get_initial()
initial['contact'] = self.request.user.profile
return initial
def get_form(self, form_class=None):
form = super().get_form(form_class)
form.fields['contact'].queryset = Profile.objects.filter(
resourceitem_id=self.request.user.profile.resourceitem_id)
return form
def get(self, request, *args, **kwargs):
if self.request.user.profile.personae.all().count() == 0:
msg = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PERSONA_REQUIRED",
defaults={
'summary': "Troupe requires Persona",
'description': no_persona_msg})
messages.warning(self.request, msg[0].description)
return redirect_to_login(
self.request.path,
reverse('persona-add', urlconf="gbe.urls", args=[1]),
self.get_redirect_field_name())
else:
return super().get(request, *args, **kwargs)
class TroupeUpdate(UpdatePopupMixin,
GbeFormMixin,
ProfileRequiredMixin,
UpdateView):
model = Troupe
form_class = TroupeForm
template_name = 'gbe/modal_performer_form.tmpl'
success_url = reverse_lazy('home', urlconf="gbe.urls")
page_title = 'Manage Troupe'
view_title = 'Tell Us About Your Troupe'
mode = "update"
intro_text = troupe_header_text
valid_message = default_edit_troupe_msg
def get_form(self, form_class=None):
form = super().get_form(form_class)
form.fields['contact'].queryset = Profile.objects.filter(
resourceitem_id=self.request.user.profile.resourceitem_id)
return form
def get_queryset(self):
return self.model.objects.filter(
contact__user_object=self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['delete_url'] = reverse("performer-delete",
urlconf="gbe.urls",
args=[self.get_object().pk])
return context | gbe/views/make_troupe_view.py | from django.views.generic.edit import (
CreateView,
UpdateView,
)
from django_addanother.views import CreatePopupMixin, UpdatePopupMixin
from django.urls import reverse, reverse_lazy
from django.contrib import messages
from django.contrib.auth.views import redirect_to_login
from gbe_utils.mixins import (
GbeFormMixin,
ProfileRequiredMixin,
)
from gbe.models import (
Profile,
Troupe,
UserMessage,
)
from gbe.forms import TroupeForm
from gbetext import (
default_edit_troupe_msg,
no_persona_msg,
troupe_header_text,
)
class TroupeCreate(CreatePopupMixin,
GbeFormMixin,
ProfileRequiredMixin,
CreateView):
model = Troupe
form_class = TroupeForm
template_name = 'gbe/modal_performer_form.tmpl'
success_url = reverse_lazy('home', urlconf="gbe.urls")
page_title = 'Manage Troupe'
view_title = 'Tell Us About Your Troupe'
intro_text = troupe_header_text
mode = "troupe"
valid_message = default_edit_troupe_msg
def get_initial(self):
initial = super().get_initial()
initial['contact'] = self.request.user.profile
return initial
def get_form(self, form_class=None):
form = super().get_form(form_class)
form.fields['contact'].queryset = Profile.objects.filter(
resourceitem_id=self.request.user.profile.resourceitem_id)
return form
def get(self, request, *args, **kwargs):
if self.request.user.profile.personae.all().count() == 0:
msg = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PERSONA_REQUIRED",
defaults={
'summary': "Troupe requires Persona",
'description': no_persona_msg})
messages.warning(self.request, msg[0].description)
return redirect_to_login(
self.request.path,
reverse('persona-add', urlconf="gbe.urls", args=[1]),
self.get_redirect_field_name())
else:
return super().get(request, *args, **kwargs)
class TroupeUpdate(UpdatePopupMixin,
GbeFormMixin,
ProfileRequiredMixin,
UpdateView):
model = Troupe
form_class = TroupeForm
template_name = 'gbe/modal_performer_form.tmpl'
success_url = reverse_lazy('home', urlconf="gbe.urls")
page_title = 'Manage Troupe'
view_title = 'Tell Us About Your Troupe'
mode = "update"
intro_text = troupe_header_text
valid_message = default_edit_troupe_msg
def get_form(self, form_class=None):
form = super().get_form(form_class)
form.fields['contact'].queryset = Profile.objects.filter(
resourceitem_id=self.request.user.profile.resourceitem_id)
return form
def get_queryset(self):
return self.model.objects.filter(
contact__user_object=self.request.user)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['delete_url'] = reverse("performer-delete",
urlconf="gbe.urls",
args=[self.get_object().pk])
return context | 0.461988 | 0.059237 |
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
import os
# whether cuda is available
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# load, resize, totensor
imsize_h = 512
imsize_w = 512
# imsize = 512 if use_cuda else 128
loader = transforms.Compose([
transforms.Scale((imsize_w, imsize_h)),
transforms.ToTensor()
])
def image_loader(image_name):
"""convert an image to a variable tensor"""
image = Image.open(image_name)
image = Variable(loader(image))
image = image.unsqueeze(0)
return image
# load image to variable tensor
output_img_path = 'images/output/'
style_img_path = 'images/style/'
content_img_path = 'images/content/'
style_img_name = 'the_shipwreck_of_the_minotaur.jpg'
content_img_name = 'Dipping-Sun.jpg'
style_img = image_loader(style_img_path + style_img_name).type(dtype)
content_img = image_loader(content_img_path + content_img_name).type(dtype)
assert style_img.size() == content_img.size(), \
"we need to import style and content images of the same size, but style size is ({}), " \
"content size is *({})".format(style_img.size(), content_img.size())
# convert a tensor to a PILImage
unloader = transforms.ToPILImage()
def imshow(tensor, title=None):
"""show image"""
image = tensor.clone().cpu()
image = image.view(3, imsize_w, imsize_h)
image = unloader(image)
plt.imshow(image)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
class ContentLoss(nn.Module):
"""Content Loss"""
def __init__(self, target, weight):
super(ContentLoss, self).__init__()
self.target = target.detach() * weight
self.weight = weight
self.criterion = nn.MSELoss()
def forward(self, input):
self.loss = self.criterion(input * self.weight, self.target)
self.output = input
return self.output
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
class GramMatrix(nn.Module):
def forward(self, input):
a, b, c, d = input.size()
features = input.view(a * b, c * d)
G = torch.mm(features, features.t())
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
def __init__(self, target, weight):
super(StyleLoss, self).__init__()
self.target = target.detach() * weight
self.weight = weight
self.gram = GramMatrix()
self.criterion = nn.MSELoss()
def forward(self, input):
self.output = input.clone()
self.G = self.gram(input)
self.G.mul_(self.weight)
self.loss = self.criterion(self.G, self.target)
return self.output
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
# load model
cnn = models.vgg19(pretrained=True).features
if use_cuda:
cnn = cnn.cuda()
content_layers_default = ['conv_4']
style_layers_dafault = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, style_img, content_img,
style_weight=1000, content_weight=1,
content_layers=content_layers_default,
style_layers=style_layers_dafault):
cnn = copy.deepcopy(cnn)
content_losses = []
style_losses = []
model = nn.Sequential()
gram = GramMatrix()
if use_cuda:
model = model.cuda()
gram = gram.cuda()
i = 1
for layer in list(cnn):
if isinstance(layer, nn.Conv2d):
name = 'conv_' + str(i)
model.add_module(name, layer)
if name in content_layers:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module('content_loss' + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
target_feature = model(style_img).clone()
target_feature_gram = gram(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module('style_loss' + str(i), style_loss)
style_losses.append(style_loss)
if isinstance(layer, nn.ReLU):
name = 'relu_' + str(i)
model.add_module(name, layer)
if name in content_layers:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module('content_loss' + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
target_feature = model(style_img).clone()
target_feature_gram = gram(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module('style_loss' + str(i), style_loss)
style_losses.append(style_loss)
i += 1
if isinstance(layer, nn.MaxPool2d):
name = 'pool_' + str(i)
model.add_module(name, layer)
return model, style_losses, content_losses
# input_img = content_img.clone()
input_img = Variable(torch.randn(content_img.data.size())).type(dtype)
def get_input_param_optimizer(input_img):
input_param = nn.Parameter(input_img.data)
# optimizer = optim.LBFGS([input_param])
optimizer = optim.Adam([input_param])
return input_param, optimizer
def run_style_transfer(cnn, content_img, style_img, input_img, num_steps=500,
style_weight=1000, content_weight=1):
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
style_img, content_img, style_weight,
content_weight)
input_param, optimizer = get_input_param_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
input_param.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_param)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.backward()
for cl in content_losses:
content_score += cl.backward()
run[0] += 1
if run[0] % 50 == 0:
print('run {}:'.format(run))
print('Style Loss : {:4f} Content Loss : {:4f}'.format(
style_score.data[0], content_score.data[0]
))
print()
return style_score + content_score
optimizer.step(closure)
input_param.data.clamp_(0, 1)
return input_param.data
output = run_style_transfer(cnn, content_img, style_img, input_img)
def savefig(img_tensor, path='./images/output/', name=os.path.splitext(style_img_name)[0] + content_img_name):
img = img_tensor.cpu()
img = img.view(3, imsize_h, imsize_w)
img = unloader(img)
img.save(path + name)
savefig(output)
# model, style_losses, content_losses = get_style_model_and_losses(cnn, style_img, content_img)
# print(str(model)) | main.py | from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
import os
# whether cuda is available
use_cuda = torch.cuda.is_available()
# legacy tensor-type switch (pre-0.4 PyTorch device handling)
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# load, resize, totensor
# all images are resized to a fixed 512x512 so style/content shapes match
imsize_h = 512
imsize_w = 512
# imsize = 512 if use_cuda else 128
# NOTE(review): transforms.Scale is deprecated in newer torchvision
# (renamed to Resize) -- confirm the installed version still provides it.
loader = transforms.Compose([
    transforms.Scale((imsize_w, imsize_h)),
    transforms.ToTensor()
])
def image_loader(image_name):
    """Load an image file and return it as a (1, C, H, W) Variable tensor.

    Fix: force the image to RGB so grayscale/RGBA/palette files also yield
    the 3-channel tensor the size assert and VGG pipeline below expect.
    """
    image = Image.open(image_name).convert('RGB')
    image = Variable(loader(image))
    # fake batch dimension required to fit the network's input shape
    image = image.unsqueeze(0)
    return image
# load image to variable tensor
output_img_path = 'images/output/'
style_img_path = 'images/style/'
content_img_path = 'images/content/'
style_img_name = 'the_shipwreck_of_the_minotaur.jpg'
content_img_name = 'Dipping-Sun.jpg'
style_img = image_loader(style_img_path + style_img_name).type(dtype)
content_img = image_loader(content_img_path + content_img_name).type(dtype)
assert style_img.size() == content_img.size(), \
"we need to import style and content images of the same size, but style size is ({}), " \
"content size is *({})".format(style_img.size(), content_img.size())
# convert a tensor to a PILImage
unloader = transforms.ToPILImage()
def imshow(tensor, title=None):
    """Display a (1, 3, H, W) image tensor with matplotlib.

    The tensor is copied to CPU, reshaped to (3, W, H) and converted back to
    a PIL image before plotting; an optional title is drawn above it.
    """
    pil_img = unloader(tensor.clone().cpu().view(3, imsize_w, imsize_h))
    plt.imshow(pil_img)
    if title is not None:
        plt.title(title)
    # brief pause so interactive backends refresh the figure
    plt.pause(0.001)
class ContentLoss(nn.Module):
    """Transparent layer that records the content loss of its input.

    The layer returns its input unchanged so it can sit inside a Sequential
    model; forward() stores the weighted MSE against the fixed target
    feature map, and backward() propagates that stored loss.
    """
    def __init__(self, target, weight):
        super(ContentLoss, self).__init__()
        # detach: the target feature map is a constant, not part of the graph
        self.target = target.detach() * weight
        self.weight = weight
        self.criterion = nn.MSELoss()

    def forward(self, input):
        self.output = input
        self.loss = self.criterion(input * self.weight, self.target)
        return self.output

    def backward(self, retain_graph=True):
        self.loss.backward(retain_graph=retain_graph)
        return self.loss
class GramMatrix(nn.Module):
    """Compute the normalized Gram matrix of a (B, C, H, W) feature map.

    Channels are flattened to rows; the result is the (B*C, B*C) matrix of
    inner products between them, divided by the total number of elements.
    """
    def forward(self, input):
        batch, channels, height, width = input.size()
        flat = input.view(batch * channels, height * width)
        gram = torch.mm(flat, flat.t())
        return gram.div(batch * channels * height * width)
class StyleLoss(nn.Module):
    """Transparent layer that records the style loss of its input.

    Returns a clone of its input unchanged; forward() compares the weighted
    Gram matrix of the input against a fixed target Gram matrix via MSE, and
    backward() propagates that stored loss.
    """
    def __init__(self, target, weight):
        super(StyleLoss, self).__init__()
        # the target Gram matrix is a constant, hence detach()
        self.target = target.detach() * weight
        self.weight = weight
        self.gram = GramMatrix()
        self.criterion = nn.MSELoss()

    def forward(self, input):
        self.output = input.clone()
        weighted_gram = self.gram(input)
        weighted_gram.mul_(self.weight)
        self.G = weighted_gram
        self.loss = self.criterion(self.G, self.target)
        return self.output

    def backward(self, retain_graph=True):
        self.loss.backward(retain_graph=retain_graph)
        return self.loss
# load model
# Only the convolutional feature extractor of VGG19 is needed; pretrained
# weights are downloaded on first use.
cnn = models.vgg19(pretrained=True).features
if use_cuda:
    cnn = cnn.cuda()
# Layers whose activations define "content" and "style".
content_layers_default = ['conv_4']
# (name keeps the original 'dafault' typo; it is referenced as a default below)
style_layers_dafault = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, style_img, content_img,
                               style_weight=1000, content_weight=1,
                               content_layers=content_layers_default,
                               style_layers=style_layers_dafault):
    """Copy VGG layers into a new Sequential model, inserting ContentLoss
    and StyleLoss layers right after the selected conv/relu layers.

    Returns (model, style_losses, content_losses); the loss lists hold
    references to the inserted loss layers for later accumulation.
    """
    cnn = copy.deepcopy(cnn)
    content_losses = []
    style_losses = []
    model = nn.Sequential()
    gram = GramMatrix()
    if use_cuda:
        model = model.cuda()
        gram = gram.cuda()
    # i counts conv/relu pairs; it is only incremented in the ReLU branch,
    # so conv_i and the following relu_i share the same index.
    i = 1
    for layer in list(cnn):
        if isinstance(layer, nn.Conv2d):
            name = 'conv_' + str(i)
            model.add_module(name, layer)
            if name in content_layers:
                # target is the content image's activation at this depth
                target = model(content_img).clone()
                content_loss = ContentLoss(target, content_weight)
                model.add_module('content_loss' + str(i), content_loss)
                content_losses.append(content_loss)
            if name in style_layers:
                # target is the Gram matrix of the style image's activation
                target_feature = model(style_img).clone()
                target_feature_gram = gram(target_feature)
                style_loss = StyleLoss(target_feature_gram, style_weight)
                model.add_module('style_loss' + str(i), style_loss)
                style_losses.append(style_loss)
        if isinstance(layer, nn.ReLU):
            name = 'relu_' + str(i)
            model.add_module(name, layer)
            if name in content_layers:
                target = model(content_img).clone()
                content_loss = ContentLoss(target, content_weight)
                model.add_module('content_loss' + str(i), content_loss)
                content_losses.append(content_loss)
            if name in style_layers:
                target_feature = model(style_img).clone()
                target_feature_gram = gram(target_feature)
                style_loss = StyleLoss(target_feature_gram, style_weight)
                model.add_module('style_loss' + str(i), style_loss)
                style_losses.append(style_loss)
            i += 1
        if isinstance(layer, nn.MaxPool2d):
            # pooling layers are copied through unchanged
            name = 'pool_' + str(i)
            model.add_module(name, layer)
    return model, style_losses, content_losses
# input_img = content_img.clone()
# Start the optimization from Gaussian noise shaped like the content image;
# the commented line above would start from the content image instead.
input_img = Variable(torch.randn(content_img.data.size())).type(dtype)
def get_input_param_optimizer(input_img):
    """Wrap the image tensor in a trainable Parameter and build its optimizer.

    Returns a (parameter, optimizer) pair. Adam is used here; LBFGS (left
    commented in the original) also works but requires a closure-style step.
    """
    param = nn.Parameter(input_img.data)
    # optimizer = optim.LBFGS([param])
    return param, optim.Adam([param])
def run_style_transfer(cnn, content_img, style_img, input_img, num_steps=500,
                       style_weight=1000, content_weight=1):
    """Optimize input_img so its VGG features match content_img's content
    and style_img's style; returns the optimized tensor clamped to [0, 1].

    NOTE(review): uses the legacy pattern of loss modules with .backward()
    methods and `tensor.data[0]` indexing (pre-0.4 PyTorch) -- confirm the
    installed torch version supports it.
    """
    print('Building the style transfer model..')
    model, style_losses, content_losses = get_style_model_and_losses(cnn,
        style_img, content_img, style_weight,
        content_weight)
    input_param, optimizer = get_input_param_optimizer(input_img)
    print('Optimizing..')
    # One-element list so the nested closure can mutate the counter
    # (Python 2 has no `nonlocal`).
    run = [0]
    while run[0] <= num_steps:
        def closure():
            # keep pixel values displayable before each gradient step
            input_param.data.clamp_(0, 1)
            optimizer.zero_grad()
            model(input_param)
            style_score = 0
            content_score = 0
            # each loss module's backward() accumulates gradients into
            # input_param and returns its own loss value
            for sl in style_losses:
                style_score += sl.backward()
            for cl in content_losses:
                content_score += cl.backward()
            run[0] += 1
            if run[0] % 50 == 0:
                print('run {}:'.format(run))
                print('Style Loss : {:4f} Content Loss : {:4f}'.format(
                    style_score.data[0], content_score.data[0]
                ))
                print()
            return style_score + content_score
        optimizer.step(closure)
    # final clamp so the returned image is valid for saving/display
    input_param.data.clamp_(0, 1)
    return input_param.data
# Run the optimization with default weights/steps.
output = run_style_transfer(cnn, content_img, style_img, input_img)
def savefig(img_tensor, path='./images/output/', name=os.path.splitext(style_img_name)[0] + content_img_name):
    """Save a (1, 3, H, W) result tensor as an image file under `path`.

    Args:
        img_tensor: output of run_style_transfer, values in [0, 1].
        path: destination directory; created if missing.
        name: file name; the default combines the style and content image
              names and is evaluated once at import time.
    """
    # Fix: previously saving failed with an IOError on a fresh checkout
    # because the output directory did not exist.
    os.makedirs(path, exist_ok=True)
    img = img_tensor.cpu()
    img = img.view(3, imsize_h, imsize_w)
    img = unloader(img)
    img.save(path + name)
# Write the stylized result to ./images/output/ using the default name.
savefig(output)
# model, style_losses, content_losses = get_style_model_and_losses(cnn, style_img, content_img)
# print(str(model))
from lenstronomywrapper.LensSystem.LensComponents.macromodel_base import ComponentBase
class NFWperturber(ComponentBase):
    """Single NFW_MC lens-model component at a fixed redshift.

    Parameters are logM, concentration, center_x, center_y; the optimization
    bounds default to broad ranges when not supplied.
    """

    def __init__(self, redshift, kwargs_init=None, param_min=None, param_max=None, prior=None, convention_index=False):
        """
        :param redshift: lens redshift for the single NFW_MC profile
        :param kwargs_init: list with one dict of initial parameter values
            (must contain 'center_x' and 'center_y')
        :param param_min: list with one dict of lower parameter bounds
        :param param_max: list with one dict of upper parameter bounds
        :param prior: list of prior specifications (parameter name first)
        :param convention_index: passed through to ComponentBase
        """
        # Fix: `prior=[]` was a shared mutable default argument; use None
        # as the sentinel instead (behavior for all callers is unchanged).
        self._prior = [] if prior is None else prior
        self._kwargs_init = kwargs_init
        self._redshift = redshift
        self.reoptimize = False
        self.x_center, self.y_center = kwargs_init[0]['center_x'], kwargs_init[0]['center_y']
        if param_min is None:
            self._logM_min, self._cmin, self._xmin, self._ymin = 1, 2, -10, -10
            param_min = [{'logM': self._logM_min, 'concentration': self._cmin, 'center_x': self._xmin, 'center_y': self._ymin}]
        if param_max is None:
            self._logM_max, self._cmax, self._xmax, self._ymax = 12, 20, 10, 10
            param_max = [{'logM': self._logM_max, 'concentration': self._cmax, 'center_x': self._xmax, 'center_y': self._ymax}]
        self._param_min, self._param_max = param_min, param_max
        super(NFWperturber, self).__init__(self.lens_model_list, [redshift]*self.n_models, self._kwargs_init,
                                           convention_index, fixed=False)

    @classmethod
    def from_Mc(cls, redshift, logM, concentration, center_x, center_y, param_min=None, param_max=None, prior=None, convention_index=False):
        """Alternate constructor from explicit mass/concentration/position."""
        kwargs_init = [{'logM': logM, 'concentration': concentration, 'center_x': center_x, 'center_y': center_y}]
        return cls(redshift, kwargs_init, param_min, param_max, prior, convention_index)

    @property
    def priors(self):
        """Return (model-index list, prior list) for the stored priors.

        NOTE(review): the 'gamma1'/'gamma2' special case looks copied from a
        shear component; this class has a single model, so index 1 would be
        out of range -- confirm intended behavior.
        """
        indexes = []
        priors = []
        for prior in self._prior:
            pname = prior[0]
            if pname == 'gamma1' or pname == 'gamma2':
                idx = 1
            else:
                idx = 0
            indexes.append(idx)
            priors.append(prior)
        return indexes, priors

    @property
    def n_models(self):
        # exactly one lens-model profile
        return 1

    @property
    def param_init(self):
        return self._kwargs_init

    @property
    def param_sigma(self):
        # proposal widths used by the optimizer
        return [{'logM': 2, 'concentration': 3, 'center_x': 0.5, 'center_y': 0.5}]

    @property
    def param_lower(self):
        return self._param_min

    @property
    def param_upper(self):
        return self._param_max

    @property
    def lens_model_list(self):
        return ['NFW_MC']

    @property
    def redshift_list(self):
        return [self._redshift]
class NFWperturber(ComponentBase):
    """Single NFW_MC lens-model component at a fixed redshift.

    Parameters are logM, concentration, center_x, center_y; the optimization
    bounds default to broad ranges when not supplied.
    """

    def __init__(self, redshift, kwargs_init=None, param_min=None, param_max=None, prior=None, convention_index=False):
        """
        :param redshift: lens redshift for the single NFW_MC profile
        :param kwargs_init: list with one dict of initial parameter values
            (must contain 'center_x' and 'center_y')
        :param param_min: list with one dict of lower parameter bounds
        :param param_max: list with one dict of upper parameter bounds
        :param prior: list of prior specifications (parameter name first)
        :param convention_index: passed through to ComponentBase
        """
        # Fix: `prior=[]` was a shared mutable default argument; use None
        # as the sentinel instead (behavior for all callers is unchanged).
        self._prior = [] if prior is None else prior
        self._kwargs_init = kwargs_init
        self._redshift = redshift
        self.reoptimize = False
        self.x_center, self.y_center = kwargs_init[0]['center_x'], kwargs_init[0]['center_y']
        if param_min is None:
            self._logM_min, self._cmin, self._xmin, self._ymin = 1, 2, -10, -10
            param_min = [{'logM': self._logM_min, 'concentration': self._cmin, 'center_x': self._xmin, 'center_y': self._ymin}]
        if param_max is None:
            self._logM_max, self._cmax, self._xmax, self._ymax = 12, 20, 10, 10
            param_max = [{'logM': self._logM_max, 'concentration': self._cmax, 'center_x': self._xmax, 'center_y': self._ymax}]
        self._param_min, self._param_max = param_min, param_max
        super(NFWperturber, self).__init__(self.lens_model_list, [redshift]*self.n_models, self._kwargs_init,
                                           convention_index, fixed=False)

    @classmethod
    def from_Mc(cls, redshift, logM, concentration, center_x, center_y, param_min=None, param_max=None, prior=None, convention_index=False):
        """Alternate constructor from explicit mass/concentration/position."""
        kwargs_init = [{'logM': logM, 'concentration': concentration, 'center_x': center_x, 'center_y': center_y}]
        return cls(redshift, kwargs_init, param_min, param_max, prior, convention_index)

    @property
    def priors(self):
        """Return (model-index list, prior list) for the stored priors.

        NOTE(review): the 'gamma1'/'gamma2' special case looks copied from a
        shear component; this class has a single model, so index 1 would be
        out of range -- confirm intended behavior.
        """
        indexes = []
        priors = []
        for prior in self._prior:
            pname = prior[0]
            if pname == 'gamma1' or pname == 'gamma2':
                idx = 1
            else:
                idx = 0
            indexes.append(idx)
            priors.append(prior)
        return indexes, priors

    @property
    def n_models(self):
        # exactly one lens-model profile
        return 1

    @property
    def param_init(self):
        return self._kwargs_init

    @property
    def param_sigma(self):
        # proposal widths used by the optimizer
        return [{'logM': 2, 'concentration': 3, 'center_x': 0.5, 'center_y': 0.5}]

    @property
    def param_lower(self):
        return self._param_min

    @property
    def param_upper(self):
        return self._param_max

    @property
    def lens_model_list(self):
        return ['NFW_MC']

    @property
    def redshift_list(self):
        return [self._redshift]
# 11x5 one-bit sprite bitmaps for a matrix-style face display.
# Each face is a list of 5 row strings of 11 characters; '1' marks a lit
# pixel and '0' an unlit one.
face_normal_happy = ['00001001000',
                     '00001001000',
                     '00000000000',
                     '00010000100',
                     '00001111000']
face_normal_tongue = ['00001001000',
                      '00001001000',
                      '00000000000',
                      '00001111000',
                      '00000011000']
face_normal_wink = ['00000001000',
                    '00001001000',
                    '00000000000',
                    '00010000100',
                    '00001111000']
face_normal_sad = ['00001001000',
                   '00001001000',
                   '00000000000',
                   '00001111000',
                   '00010000100']
face_normal_meh = ['00001001000',
                   '00001001000',
                   '00000000000',
                   '00001111000',
                   '00000000000']
face_normal_shock = ['00001001000',
                     '00001001000',
                     '00000000000',
                     '00000110000',
                     '00000110000']
face_chibbi_happy = ['00100001000',
                     '01010010100',
                     '00000000000',
                     '00100001000',
                     '00011110000']
face_chibbi_sad = ['00100001000',
                   '01010010100',
                   '00000000000',
                   '00011110000',
                   '00100001000']
face_chibbi_shock = ['01000000100',
                     '00100001000',
                     '01000000100',
                     '00001100000',
                     '00001100000']
face_chibbi_meh = ['01000000100',
                   '00100001000',
                   '01000000100',
                   '00000000000',
                   '00011110000']
face_chibbi_dead = ['10100001010',
                    '01000000100',
                    '10100001010',
                    '00000000000',
                    '00011110000']
face_chibbi_ugh = ['01010010100',
                   '01010010100',
                   '00100001000',
                   '00000000000',
                   '00011110000']
# Name -> bitmap lookup used to select a sprite at runtime.
spriteMap = { 'face_normal_happy' : face_normal_happy,
              'face_normal_tongue' : face_normal_tongue,
              'face_normal_wink' : face_normal_wink,
              'face_normal_sad' : face_normal_sad,
              'face_normal_meh' : face_normal_meh,
              'face_normal_shock' : face_normal_shock,
              'face_chibbi_happy' : face_chibbi_happy,
              'face_chibbi_sad' : face_chibbi_sad,
              'face_chibbi_shock' : face_chibbi_shock,
              'face_chibbi_meh' : face_chibbi_meh,
              'face_chibbi_dead' : face_chibbi_dead,
              'face_chibbi_ugh' : face_chibbi_ugh
            }
face_normal_happy = ['00001001000',
'00001001000',
'00000000000',
'00010000100',
'00001111000']
# 11x5 one-bit sprite bitmaps (rows of 11 chars, '1' = lit pixel).
face_normal_tongue = ['00001001000',
                      '00001001000',
                      '00000000000',
                      '00001111000',
                      '00000011000']
face_normal_wink = ['00000001000',
                    '00001001000',
                    '00000000000',
                    '00010000100',
                    '00001111000']
face_normal_sad = ['00001001000',
                   '00001001000',
                   '00000000000',
                   '00001111000',
                   '00010000100']
face_normal_meh = ['00001001000',
                   '00001001000',
                   '00000000000',
                   '00001111000',
                   '00000000000']
face_normal_shock = ['00001001000',
                     '00001001000',
                     '00000000000',
                     '00000110000',
                     '00000110000']
face_chibbi_happy = ['00100001000',
                     '01010010100',
                     '00000000000',
                     '00100001000',
                     '00011110000']
face_chibbi_sad = ['00100001000',
                   '01010010100',
                   '00000000000',
                   '00011110000',
                   '00100001000']
face_chibbi_shock = ['01000000100',
                     '00100001000',
                     '01000000100',
                     '00001100000',
                     '00001100000']
face_chibbi_meh = ['01000000100',
                   '00100001000',
                   '01000000100',
                   '00000000000',
                   '00011110000']
face_chibbi_dead = ['10100001010',
                    '01000000100',
                    '10100001010',
                    '00000000000',
                    '00011110000']
face_chibbi_ugh = ['01010010100',
                   '01010010100',
                   '00100001000',
                   '00000000000',
                   '00011110000']
# Name -> bitmap lookup used to select a sprite at runtime.
spriteMap = { 'face_normal_happy' : face_normal_happy,
              'face_normal_tongue' : face_normal_tongue,
              'face_normal_wink' : face_normal_wink,
              'face_normal_sad' : face_normal_sad,
              'face_normal_meh' : face_normal_meh,
              'face_normal_shock' : face_normal_shock,
              'face_chibbi_happy' : face_chibbi_happy,
              'face_chibbi_sad' : face_chibbi_sad,
              'face_chibbi_shock' : face_chibbi_shock,
              'face_chibbi_meh' : face_chibbi_meh,
              'face_chibbi_dead' : face_chibbi_dead,
              'face_chibbi_ugh' : face_chibbi_ugh
            }
import math
import random
import sys
import numpy as np
import tensorflow as tf
from multiprocessing import Pool
class paramAdapter(object):
    """Adaptive tuner for the HMC step size and number of leapfrog steps.

    Implements the scheme from "Adaptive Hamiltonian and Riemann Manifold
    Monte Carlo Samplers" (Wang, Mohamed, de Freitas): Bayesian optimization
    of the squared jumping distance (SJD) over a grid of (step size,
    leapfrog) values with a uniform prior between specified bounds. The
    probability of proposing a new state decays over time so the values
    converge.

    Three additions keep it from settling too early or in a stale optimum:
    purely random proposals for the first `randomSteps` averaging cycles; a
    full reset (with halved step-size bounds) after `strikes` consecutive
    zero-SJD iterations, which can happen once the BNN has converged and the
    feasible step size is much smaller; and rescaling of both parameters to
    [-1, 1] so the zero-mean Gaussian kernel fits the data better.

    Grid evaluation is parallelized across `cores` worker processes.
    """

    def __init__(self, e1, L1, el, eu, eNumber, Ll, Lu, lStep, m, k, a=4,
                 delta=0.1, cores=4, strikes=10, randomSteps=10):
        """ Creates a paramAdapter object.

        Arguments:
            * e1: starting step size
            * L1: starting number of leapfrog steps
            * el: lower step size bound
            * eu: upper step size bound
            * eNumber: number of step sizes in grid
            * Ll: lower leapfrog bound
            * Lu: upper leapfrog bound
            * lStep: leapfrog step size in grid
            * m: number of averaging steps
            * k: iterations before proposal probability starts decreasing
            * a: constant, 4 in paper
            * delta: constant, 0.1 in paper
            * cores: number of cores to use in processing
            * strikes: iterations with no movement before reseting adapter
            * randomSteps: averaging cycles at beginning with random values
        """
        self.currentE = e1
        self.currentL = L1
        self.el = el
        self.eu = eu
        self.Ll = Ll
        self.Lu = Lu
        self.eNumber = eNumber
        self.eGrid = np.linspace(el, eu, num=eNumber)
        self.lGrid = np.array(range(Ll, Lu + 1, int(lStep)))
        self.delta = delta
        # Kernel width for the squared-exponential covariance over states
        # rescaled to [-1, 1].
        kappa = 0.2
        self.sigma = np.diag(
            [1 / ((kappa * (2))**2), 1 / ((kappa * (2))**2)])
        self.previousGamma = []
        self.allSD = []
        self.k = k
        self.K = None
        self.m = m
        self.currentData = []
        self.allData = []
        self.maxR = 1e-8
        self.a = a
        self.i = -2
        self.previous_state = None
        self.current_state = None
        # fixed seed keeps the random-proposal phase reproducible
        np.random.seed(10)
        self.cores = cores
        self.strikes = 0
        self.maxStrikes = strikes
        self.randomSteps = randomSteps

    def calck(self, gammaI, gammaJ):
        """ Calculates the covariance k between two states.

        Arguments:
            * gammaI: state 1
            * gammaJ: state 2
        Returns:
            * k: covariance between gammaI and gammaJ
        """
        # rescale both (step size, leapfrog) states to [-1, 1] before
        # evaluating the Gaussian kernel
        gamma1 = (-1+2*(gammaI[0]-self.el)/(self.eu-self.el),
                  -1+2*(gammaI[1]-self.Ll)/(self.Lu-self.Ll))
        gamma2 = (-1+2*(gammaJ[0]-self.el)/(self.eu-self.el),
                  -1+2*(gammaJ[1]-self.Ll)/(self.Lu-self.Ll))
        k = np.exp(-0.5 * (np.matmul(np.transpose(gamma1),
                                     np.matmul(self.sigma, gamma2))))
        return(k)

    def calcUCB(self, testGamma):
        """ Calculates a variant of the upper confidence bound for a test
        state. Relies on self.inverse, self.inverseR, self.s, self.p and
        self.rootbeta being set by update() beforehand.

        Arguments:
            * testGamma: the test state
        Returns:
            * ucb: upper confidence bound
            * mean: posterior mean at testGamma
            * variance: posterior variance at testGamma
        """
        k = [None] * self.inverse.shape[0]
        for gamma, index in zip(self.previousGamma,
                                range(len(self.previousGamma))):
            k[index] = self.calck(gamma, testGamma)
        mean = np.matmul(np.transpose(k), self.inverseR) * self.s
        variance = np.matmul(self.inverse, k)
        variance = np.matmul(np.transpose(k), variance)
        variance = self.calck(testGamma, testGamma) - variance
        ucb = mean + variance * self.p * self.rootbeta
        return(ucb, mean, variance)

    def reset(self):
        """Resets the adapter to its initial, empty state (the step-size
        grid itself is rebuilt by the caller)."""
        self.previousGamma = []
        self.allSD = []
        self.K = None
        self.currentData = []
        self.allData = []
        self.maxR = 1e-8
        self.i = -2
        self.previous_state = None
        self.current_state = None
        self.strikes = 0

    def processChunk(self, eList, lList):
        """Processes a chunk of the e, L combinations.

        Arguments:
            * eList: list of step sizes to check
            * lList: list of leapfrog steps to check
        Returns:
            * best: tuple ((best e, best L), ucb, mean, variance) for the
              combination with the highest upper confidence bound
        """
        best = ((eList[0], lList[0]), -1000)
        for e in eList:
            for L in lList:
                ucb, mean, variance = self.calcUCB([e, L])
                if(ucb > best[1]):
                    best = ((e, L), ucb, mean, variance)
        return(best)

    def update(self, state):
        """ Steps the adapter forward by one step.

        Arguments:
            * state: the newest state proposed by the HMC algorithm
        Returns:
            * currentE: the new step size
            * currentL: the new number of leapfrog steps
        """
        # Too many consecutive zero-SJD iterations: halve the step-size
        # bounds and start over.
        if(self.strikes == self.maxStrikes):
            self.el = self.el/2
            self.eu = self.eu/2
            self.eGrid = np.linspace(self.el, self.eu, num=self.eNumber)
            self.reset()
            self.strikes = 0
        self.previous_state, self.current_state = self.current_state, state
        # Calculate the square jumping distance scaled by L^(-0.5)
        if(self.previous_state is not None):
            val = 0
            for old, new in zip(self.previous_state, self.current_state):
                val += tf.math.reduce_sum(np.square(tf.reshape(new, [-1]) - tf.reshape(old, [-1]))) / (self.currentL)**(0.5)
            print("SJD:", val.numpy())
            self.currentData.append(val)
            if(val < 1e-8 and self.i // self.m > self.randomSteps):
                self.strikes += 1
            else:
                self.strikes = 0
        # Update E and L if this is not just an averaging step
        if(self.i % self.m == 0 and self.i > 0):
            u = np.random.uniform(low=0, high=1)
            self.p = max(self.i / self.m - self.k + 1, 1)**(-0.5)
            if(u < self.p):  # Over time the probability of updating will decay
                mean = np.mean(self.currentData)
                sd = np.std(self.currentData)
                self.currentData = []
                self.allData.append(mean)
                self.allSD.append(sd)
                self.maxR = max(self.allData)
                # Update the covariance matrix
                self.previousGamma.append((self.currentE, self.currentL))
                size = len(self.previousGamma)
                newK = np.ones([size, size])
                # Fix: self.K is still None for the very first sample, so
                # only copy the previous matrix from the second sample on
                # (the original `size > 0` check relied on the empty-slice
                # assignment of None being a no-op).
                if(size > 1):
                    newK[:size - 1, :size - 1] = self.K
                for gamma, index in zip(self.previousGamma, range(
                        len(self.previousGamma))):
                    k = self.calck(gamma, self.previousGamma[-1])
                    newK[-1, index] = k
                    newK[index, -1] = k
                self.K = newK
                self.s = self.a / self.maxR  # update scaling constant
                sigmaNu = np.mean(self.allSD)  # noise standard deviation
                # calculate inverse and other values only once
                try:  # In case the covariance matrix is singular
                    self.inverse = np.linalg.inv(
                        self.K + (sigmaNu**2) * np.eye(self.K.shape[0]))
                except BaseException:
                    # regularize and retry when plain inversion fails
                    self.inverse = np.linalg.inv(
                        self.K + (sigmaNu**2) * np.eye(self.K.shape[0]) +
                        0.1 * np.eye(self.K.shape[0]))
                self.inverseR = np.matmul(self.inverse, self.allData)
                self.rootbeta = (self.i / self.m + 1)**(3) * math.pi**2
                self.rootbeta /= (3 * self.delta)
                self.rootbeta = np.log(self.rootbeta)*2
                self.rootbeta = self.rootbeta**(0.5)
                # Start parallel searches, take best result found
                if(self.i//self.m >= self.randomSteps):
                    # Evenly split up search space between cores
                    increment = len(self.lGrid) // self.cores
                    eList = []
                    lList = []
                    for x in range(self.cores - 1):
                        temp = self.lGrid[x * increment:(x + 1) * increment]
                        eList.append(self.eGrid)
                        lList.append(temp)
                    temp = self.lGrid[(self.cores - 1) * increment:]
                    eList.append(self.eGrid)
                    lList.append(temp)
                    best = ((self.eGrid[0], self.lGrid[0]), -1000)
                    with Pool(processes=self.cores) as pool:
                        for i in pool.starmap(
                                self.processChunk, zip(
                                    eList, lList)):
                            if(i[1] > best[1]):
                                best = (i[0], i[1])
                    # Pick the state with the highest upper confidence bound
                    self.currentE = np.float32(best[0][0])
                    self.currentL = np.int64(best[0][1])
                else:
                    # burn-in: propose uniformly at random from the grid
                    self.currentE = random.choice(self.eGrid)
                    self.currentL = random.choice(self.lGrid)
                # keep at most 50 observations: drop the oldest one
                if(size == 50):
                    self.K = self.K[1:, 1:]
                    self.previousGamma = self.previousGamma[1:]
                    self.allData = self.allData[1:]
                    self.allSD = self.allSD[1:]
        self.i += 1
        return(self.currentE, self.currentL)
import random
import sys
import numpy as np
import tensorflow as tf
from multiprocessing import Pool
class paramAdapter(object):
    """Adaptive tuner for the HMC step size and number of leapfrog steps.

    Implements the scheme from "Adaptive Hamiltonian and Riemann Manifold
    Monte Carlo Samplers" (Wang, Mohamed, de Freitas): Bayesian optimization
    of the squared jumping distance (SJD) over a grid of (step size,
    leapfrog) values with a uniform prior between specified bounds. The
    probability of proposing a new state decays over time so the values
    converge.

    Three additions keep it from settling too early or in a stale optimum:
    purely random proposals for the first `randomSteps` averaging cycles; a
    full reset (with halved step-size bounds) after `strikes` consecutive
    zero-SJD iterations, which can happen once the BNN has converged and the
    feasible step size is much smaller; and rescaling of both parameters to
    [-1, 1] so the zero-mean Gaussian kernel fits the data better.

    Grid evaluation is parallelized across `cores` worker processes.
    """

    def __init__(self, e1, L1, el, eu, eNumber, Ll, Lu, lStep, m, k, a=4,
                 delta=0.1, cores=4, strikes=10, randomSteps=10):
        """ Creates a paramAdapter object.

        Arguments:
            * e1: starting step size
            * L1: starting number of leapfrog steps
            * el: lower step size bound
            * eu: upper step size bound
            * eNumber: number of step sizes in grid
            * Ll: lower leapfrog bound
            * Lu: upper leapfrog bound
            * lStep: leapfrog step size in grid
            * m: number of averaging steps
            * k: iterations before proposal probability starts decreasing
            * a: constant, 4 in paper
            * delta: constant, 0.1 in paper
            * cores: number of cores to use in processing
            * strikes: iterations with no movement before reseting adapter
            * randomSteps: averaging cycles at beginning with random values
        """
        self.currentE = e1
        self.currentL = L1
        self.el = el
        self.eu = eu
        self.Ll = Ll
        self.Lu = Lu
        self.eNumber = eNumber
        self.eGrid = np.linspace(el, eu, num=eNumber)
        self.lGrid = np.array(range(Ll, Lu + 1, int(lStep)))
        self.delta = delta
        # Kernel width for the squared-exponential covariance over states
        # rescaled to [-1, 1].
        kappa = 0.2
        self.sigma = np.diag(
            [1 / ((kappa * (2))**2), 1 / ((kappa * (2))**2)])
        self.previousGamma = []
        self.allSD = []
        self.k = k
        self.K = None
        self.m = m
        self.currentData = []
        self.allData = []
        self.maxR = 1e-8
        self.a = a
        self.i = -2
        self.previous_state = None
        self.current_state = None
        # fixed seed keeps the random-proposal phase reproducible
        np.random.seed(10)
        self.cores = cores
        self.strikes = 0
        self.maxStrikes = strikes
        self.randomSteps = randomSteps

    def calck(self, gammaI, gammaJ):
        """ Calculates the covariance k between two states.

        Arguments:
            * gammaI: state 1
            * gammaJ: state 2
        Returns:
            * k: covariance between gammaI and gammaJ
        """
        # rescale both (step size, leapfrog) states to [-1, 1] before
        # evaluating the Gaussian kernel
        gamma1 = (-1+2*(gammaI[0]-self.el)/(self.eu-self.el),
                  -1+2*(gammaI[1]-self.Ll)/(self.Lu-self.Ll))
        gamma2 = (-1+2*(gammaJ[0]-self.el)/(self.eu-self.el),
                  -1+2*(gammaJ[1]-self.Ll)/(self.Lu-self.Ll))
        k = np.exp(-0.5 * (np.matmul(np.transpose(gamma1),
                                     np.matmul(self.sigma, gamma2))))
        return(k)

    def calcUCB(self, testGamma):
        """ Calculates a variant of the upper confidence bound for a test
        state. Relies on self.inverse, self.inverseR, self.s, self.p and
        self.rootbeta being set by update() beforehand.

        Arguments:
            * testGamma: the test state
        Returns:
            * ucb: upper confidence bound
            * mean: posterior mean at testGamma
            * variance: posterior variance at testGamma
        """
        k = [None] * self.inverse.shape[0]
        for gamma, index in zip(self.previousGamma,
                                range(len(self.previousGamma))):
            k[index] = self.calck(gamma, testGamma)
        mean = np.matmul(np.transpose(k), self.inverseR) * self.s
        variance = np.matmul(self.inverse, k)
        variance = np.matmul(np.transpose(k), variance)
        variance = self.calck(testGamma, testGamma) - variance
        ucb = mean + variance * self.p * self.rootbeta
        return(ucb, mean, variance)

    def reset(self):
        """Resets the adapter to its initial, empty state (the step-size
        grid itself is rebuilt by the caller)."""
        self.previousGamma = []
        self.allSD = []
        self.K = None
        self.currentData = []
        self.allData = []
        self.maxR = 1e-8
        self.i = -2
        self.previous_state = None
        self.current_state = None
        self.strikes = 0

    def processChunk(self, eList, lList):
        """Processes a chunk of the e, L combinations.

        Arguments:
            * eList: list of step sizes to check
            * lList: list of leapfrog steps to check
        Returns:
            * best: tuple ((best e, best L), ucb, mean, variance) for the
              combination with the highest upper confidence bound
        """
        best = ((eList[0], lList[0]), -1000)
        for e in eList:
            for L in lList:
                ucb, mean, variance = self.calcUCB([e, L])
                if(ucb > best[1]):
                    best = ((e, L), ucb, mean, variance)
        return(best)

    def update(self, state):
        """ Steps the adapter forward by one step.

        Arguments:
            * state: the newest state proposed by the HMC algorithm
        Returns:
            * currentE: the new step size
            * currentL: the new number of leapfrog steps
        """
        # Too many consecutive zero-SJD iterations: halve the step-size
        # bounds and start over.
        if(self.strikes == self.maxStrikes):
            self.el = self.el/2
            self.eu = self.eu/2
            self.eGrid = np.linspace(self.el, self.eu, num=self.eNumber)
            self.reset()
            self.strikes = 0
        self.previous_state, self.current_state = self.current_state, state
        # Calculate the square jumping distance scaled by L^(-0.5)
        if(self.previous_state is not None):
            val = 0
            for old, new in zip(self.previous_state, self.current_state):
                val += tf.math.reduce_sum(np.square(tf.reshape(new, [-1]) - tf.reshape(old, [-1]))) / (self.currentL)**(0.5)
            print("SJD:", val.numpy())
            self.currentData.append(val)
            if(val < 1e-8 and self.i // self.m > self.randomSteps):
                self.strikes += 1
            else:
                self.strikes = 0
        # Update E and L if this is not just an averaging step
        if(self.i % self.m == 0 and self.i > 0):
            u = np.random.uniform(low=0, high=1)
            self.p = max(self.i / self.m - self.k + 1, 1)**(-0.5)
            if(u < self.p):  # Over time the probability of updating will decay
                mean = np.mean(self.currentData)
                sd = np.std(self.currentData)
                self.currentData = []
                self.allData.append(mean)
                self.allSD.append(sd)
                self.maxR = max(self.allData)
                # Update the covariance matrix
                self.previousGamma.append((self.currentE, self.currentL))
                size = len(self.previousGamma)
                newK = np.ones([size, size])
                # Fix: self.K is still None for the very first sample, so
                # only copy the previous matrix from the second sample on
                # (the original `size > 0` check relied on the empty-slice
                # assignment of None being a no-op).
                if(size > 1):
                    newK[:size - 1, :size - 1] = self.K
                for gamma, index in zip(self.previousGamma, range(
                        len(self.previousGamma))):
                    k = self.calck(gamma, self.previousGamma[-1])
                    newK[-1, index] = k
                    newK[index, -1] = k
                self.K = newK
                self.s = self.a / self.maxR  # update scaling constant
                sigmaNu = np.mean(self.allSD)  # noise standard deviation
                # calculate inverse and other values only once
                try:  # In case the covariance matrix is singular
                    self.inverse = np.linalg.inv(
                        self.K + (sigmaNu**2) * np.eye(self.K.shape[0]))
                except BaseException:
                    # regularize and retry when plain inversion fails
                    self.inverse = np.linalg.inv(
                        self.K + (sigmaNu**2) * np.eye(self.K.shape[0]) +
                        0.1 * np.eye(self.K.shape[0]))
                self.inverseR = np.matmul(self.inverse, self.allData)
                self.rootbeta = (self.i / self.m + 1)**(3) * math.pi**2
                self.rootbeta /= (3 * self.delta)
                self.rootbeta = np.log(self.rootbeta)*2
                self.rootbeta = self.rootbeta**(0.5)
                # Start parallel searches, take best result found
                if(self.i//self.m >= self.randomSteps):
                    # Evenly split up search space between cores
                    increment = len(self.lGrid) // self.cores
                    eList = []
                    lList = []
                    for x in range(self.cores - 1):
                        temp = self.lGrid[x * increment:(x + 1) * increment]
                        eList.append(self.eGrid)
                        lList.append(temp)
                    temp = self.lGrid[(self.cores - 1) * increment:]
                    eList.append(self.eGrid)
                    lList.append(temp)
                    best = ((self.eGrid[0], self.lGrid[0]), -1000)
                    with Pool(processes=self.cores) as pool:
                        for i in pool.starmap(
                                self.processChunk, zip(
                                    eList, lList)):
                            if(i[1] > best[1]):
                                best = (i[0], i[1])
                    # Pick the state with the highest upper confidence bound
                    self.currentE = np.float32(best[0][0])
                    self.currentL = np.int64(best[0][1])
                else:
                    # burn-in: propose uniformly at random from the grid
                    self.currentE = random.choice(self.eGrid)
                    self.currentL = random.choice(self.lGrid)
                # keep at most 50 observations: drop the oldest one
                if(size == 50):
                    self.K = self.K[1:, 1:]
                    self.previousGamma = self.previousGamma[1:]
                    self.allData = self.allData[1:]
                    self.allSD = self.allSD[1:]
        self.i += 1
        return(self.currentE, self.currentL)
import os
import tushare as ts
import pandas as pd
from datetime import datetime
from datetime import timedelta
from log import log
from scipy.stats import norm
import numpy as np
import math
import json
# Directory holding the cached tushare CSV dumps read by the helpers below.
PREFIX = 'data'
def __estimation_formula_bg_dynamic(growth, eps, pe):
    """Benjamin Graham intrinsic-value formula with a dynamic PE term.

    The classic form is (2*growth + 8.5)*eps; here the 8.5 constant is
    replaced by the normal pe (based on Gaussian Distribution).
    """
    value = (2 * growth + pe) * eps
    return value
def __estimation_formula_pb(bvps, pb):
    """Price estimate from book value per share times the normal pb
    (based on Gaussian Distribution)."""
    return pb * bvps
def __pd_read_basics():
    """Load the cached stock-basics table, de-duplicated.

    Stock codes are read as strings so leading zeros survive.
    """
    path = PREFIX + '/' + 'basics.csv'
    frame = pd.read_csv(path, dtype={'code': object})
    return frame.drop_duplicates()
def __pd_read_report(q_stat):
    """Load a cached quarterly report CSV, sorted by code and de-duplicated.

    args: q_stat(e.g. '2015q4.profit' or '2016q4')
    """
    frame = pd.read_csv(PREFIX + '/' + q_stat + '.csv', dtype={'code': object})
    return frame.sort_values(by='code').drop_duplicates()
def __pd_read_today_all():
    """Load the cached snapshot of today's quotes, de-duplicated."""
    frame = pd.read_csv(PREFIX + '/' + 'today_all.csv', dtype={'code': object})
    return frame.drop_duplicates()
def __quarter_to_date(quarter):
    """Map a 'YYYYqN' label to its (start, end) date strings.

    `end` is the last day of quarter N in year YYYY; `start` is the same
    quarter-end one year earlier. Index 0 of the lookup table is a filler
    so quarter numbers 1-4 index the table directly.
    """
    parts = quarter.split('q')
    year, q = parts[0], parts[1]
    quarter_ends = ['-03-31', '-03-31', '-06-30', '-09-30', '-12-31']
    suffix = quarter_ends[int(q)]
    end = year + suffix
    start = str(int(year) - 1) + suffix
    return start, end
def __get_pe_and_eps(code, quarter):
''' get pe of specific quarter
args: code, quarter(e.g. 2015q3)
'''
r = {}
np = 0
y = quarter.split('q')[0]
q = quarter.split('q')[1]
q_str = 'code==' + '\"' + code + '\"'
b = __pd_read_basics()
totals = b.query(q_str).totals.values[0]
log.debug('totals: %.2f', totals)
r[quarter] = __pd_read_report(quarter)
if (q == '4'):
if (len(r[quarter].query(q_str)) > 0):
np = r[quarter].query(q_str).net_profits.values[0]
else:
log.warn('no entry in %s (net_profits)', quarter)
return False, False
else:
last_q4 = str(int(y)-1)+'q4'
last_q = str(int(y)-1)+'q'+q
r[last_q4] = __pd_read_report(last_q4)
r[last_q] = __pd_read_report(last_q)
if ((len(r[quarter].query(q_str)) > 0) & (len(r[last_q4].query(q_str)) > 0) & (len(r[last_q].query(q_str)) > 0)):
np = r[last_q4].query(q_str).net_profits.values[0] - r[last_q].query(q_str).net_profits.values[0] + r[quarter].query(q_str).net_profits.values[0]
else:
if (len(r[quarter].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', quarter)
if (len(r[last_q4].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', last_q4)
if (len(r[last_q].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', last_q)
return False, False
eps = np/totals/10000.0
s, e = __quarter_to_date(quarter)
k = ts.get_k_data(code, ktype='M', start=s, end=e)
if (len(k) == 0):
log.warn('no k data entry in %s', quarter)
return False, False
pps = k.loc[k.last_valid_index()].close
log.debug('%s, price: %.2f', e, pps)
log.debug('np: %.2f', np)
log.debug('eps: %.2f', eps)
pe = round(pps/eps, 2)
log.debug('pe: %.2f', pe)
return pe, eps
def __get_growth(code, years):
g = []
qs = [4, 3, 2, 1]
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in qs:
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.growth.csv')):
rg = __pd_read_report(quarter+'.growth')
q_str = 'code==' + '\"' + code + '\"'
if (len(rg.query(q_str)) > 0):
tmp_g = round(rg.query(q_str).nprg.values[0], 2)
if (math.isnan(tmp_g)):
tmp_g = 0
g.append(tmp_g)
log.debug('growth@%s: %.2f%%', quarter, tmp_g)
break
growth = round(np.mean(g)/100.0, 2)
log.info('growth: %.2f %d~%d %s', growth, datetime.now().year - years, datetime.now().year, str(g))
return growth
def __get_eps(code):
''' Deprecated! This eps is not a full fiscal year data!
'''
b = __pd_read_basics()
q_str = 'code==' + '\"' + code + '\"'
eps = b.query(q_str).esp.values[0]
log.info('eps: %.2f', eps)
return eps
def __get_k_data_of_last_trade_day(code):
d = datetime.now()
k = None
while True:
k = ts.get_k_data(code, ktype='M', start=d.strftime("%Y-%m-%d"), end=d.strftime("%Y-%m-%d"))
if (len(k) > 0):
break
else:
d = d + timedelta(days = -1)
return k, d
def __get_est_price_mode_pe(realtime, code, years):
q_str = 'code==' + '\"' + code + '\"'
pe_obj = {}
eps = 0
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in range(1, 5):
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.csv')):
r = __pd_read_report(quarter)
if (len(r.query(q_str)) > 0):
# save all pe history and latest eps
tmp_pe, tmp_eps = __get_pe_and_eps(code, quarter)
if (isinstance(tmp_pe, float) & isinstance(tmp_eps, float)):
pe_obj[quarter] = tmp_pe
eps = tmp_eps
log.debug('%s pe: %.2f, eps: %.2f', quarter, pe_obj[quarter], eps)
else:
log.warn('skip %s', quarter)
continue
#sorted(pe_obj)
#log.debug(pe_obj)
arr = pe_obj.values()
mu, std = norm.fit(arr)
if (realtime):
d = datetime.now()
today = __pd_read_today_all()
close = round(today.query(q_str).trade.values[0], 2)
else:
k, d = __get_k_data_of_last_trade_day(code)
close = round(k.close.values[0], 2)
log.info('%s price: %.2f @ pe %.2f, eps %.2f', d.strftime("%Y-%m-%d"), close, close/eps, eps)
log.info('mu, std: %.2f, %.2f', mu, std)
growth = __get_growth(code, years)
left = __estimation_formula_bg_dynamic(growth, eps, mu - std)
centrum = __estimation_formula_bg_dynamic(growth, eps, mu)
right = __estimation_formula_bg_dynamic(growth, eps, mu + std)
value = __estimation_formula_bg_dynamic(growth, eps, 8.5)
log.info('est dynamic: %.2f~%.2f~%.2f', left, centrum, right)
log.info('est value: %.2f', value)
log.info('range from left: %.2f%%', (close-left)/left*100.0)
log.info('position: %.2f%%', (close-left)/(right-left)*100.0)
return left, centrum, right, value
def __get_pb_and_bvps(code, quarter):
''' get pb of spbcific quarter
args: code, quarter(e.g. 2015q3)
'''
r = {}
bvps = 0
y = quarter.split('q')[0]
q = quarter.split('q')[1]
q_str = 'code==' + '\"' + code + '\"'
r[quarter] = __pd_read_report(quarter)
if (len(r[quarter].query(q_str)) > 0):
bvps = r[quarter].query(q_str).bvps.values[0]
else:
log.warn('no entry in %s (bvps)', quarter)
return False, False
s, e = __quarter_to_date(quarter)
k = ts.get_k_data(code, ktype='M', start=s, end=e)
if (len(k) == 0):
log.warn('no k data entry in %s', quarter)
return False, False
pps = k.loc[k.last_valid_index()].close
log.debug('%s, price: %.2f', e, pps)
log.debug('bvps: %.2f', bvps)
pb = round(pps/bvps, 2)
log.debug('pb: %.2f', pb)
return pb, bvps
def __get_est_price_mode_pb(realtime, code, years):
q_str = 'code==' + '\"' + code + '\"'
pb_obj = {}
bvps = 0
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in range(1, 5):
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.csv')):
r = __pd_read_report(quarter)
if (len(r.query(q_str)) > 0):
# save all pb history and latest bvps
tmp_pb, tmp_bvps = __get_pb_and_bvps(code, quarter)
if (isinstance(tmp_pb, float) & isinstance(tmp_bvps, float)):
pb_obj[quarter] = tmp_pb
bvps = tmp_bvps
log.debug('%s pb: %.2f, bvps: %.2f', quarter, pb_obj[quarter], bvps)
else:
log.warn('skip %s', quarter)
continue
#sorted(pb_obj)
#log.debug(pb_obj)
arr = pb_obj.values()
mu, std = norm.fit(arr)
if (realtime):
d = datetime.now()
today = __pd_read_today_all()
close = round(today.query(q_str).trade.values[0], 2)
else:
k, d = __get_k_data_of_last_trade_day(code)
close = round(k.close.values[0], 2)
log.info('%s price: %.2f @ pb %.2f, bvps %.2f', d.strftime("%Y-%m-%d"), close, close/bvps, bvps)
log.info('mu, std: %.2f, %.2f', mu, std)
left = __estimation_formula_pb(bvps, mu - std)
centrum = __estimation_formula_pb(bvps, mu)
right = __estimation_formula_pb(bvps, mu + std)
value = __estimation_formula_pb(bvps, 1.0)
log.info('est dynamic: %.2f~%.2f~%.2f', left, centrum, right)
log.info('est value: %.2f', value)
log.info('range from left: %.2f%%', (close-left)/left*100.0)
log.info('position: %.2f%%', (close-left)/(right-left)*100.0)
return left, centrum, right, value
def get_name_by_code(code):
q_str = 'code==' + '\"' + code + '\"'
# FIXME should use the latest report file
df = pd.read_csv('data/2015q4.csv', dtype={'code': object}).sort_values(by='code').drop_duplicates()
df_code = df.query(q_str)
if (len(df_code) > 0):
return df_code.name.values[0]
else:
return None
def get_est_price(realtime, mode, years, code):
''' return left, centrum, right price, to form a range
'''
if (mode == 'pe'):
return __get_est_price_mode_pe(realtime, code, years)
elif (mode == 'pb'):
return __get_est_price_mode_pb(realtime, code, years)
else:
return 0, 0, 0
def get_stock_basics():
''' invoke tushare get_stock_basics() with csv output
args:
returns: csv format data containing the whole martket information
json fomat, df.to_json('basics.json', orient='index')
'''
filename = PREFIX + '/' + 'basics.csv'
df = ts.get_stock_basics()
df.sort_index(inplace=True)
return df.to_csv(filename, encoding='UTF-8')
def save_to_file(filename, df):
''' save df content to file
args: filename, df
returns: df
'''
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None, encoding='UTF-8', index=False)
else:
df.to_csv(filename, encoding='UTF-8', index=False)
df = pd.read_csv(filename, dtype={'code': object}).sort_values(by='code').drop_duplicates()
return df.to_csv(filename, encoding='UTF-8', index=False)
def get_report_data(year, quarter):
''' invoke tushare get_report_data() with csv output
brief: to improve data integrality, we repeatedly do these actions in a row,
call API -> append to file -> drop duplicates
args: year, quarter
returns: csv format data containing the whole martket report in specific year, quarter
json fomat, df.to_json(year+'q'+quarter+'.json', orient='index')
'''
# profit
print "[%s] profit %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.profit.csv'
df = ts.get_profit_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# operation
print "[%s] operation %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.operation.csv'
df = ts.get_operation_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# growth
print "[%s] growth %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.growth.csv'
df = ts.get_growth_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# debtpaying
print "[%s] debtpaying %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.debtpaying.csv'
df = ts.get_debtpaying_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# cashflow
print "[%s] cashflow %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.cashflow.csv'
df = ts.get_cashflow_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# main report
print "[%s] main %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.csv'
df = ts.get_report_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
return save_to_file(filename, df)
def get_today_all():
print "[%s] get_today_all" %(datetime.now().strftime("%H:%M:%S.%f"))
df = ts.get_today_all()
filename = PREFIX + '/' + 'today_all.csv'
os.remove(filename)
return save_to_file(filename, df)
def eval_cashcow(s):
cf_nm_arr = []
cashflowratio_arr = []
years = []
for y in range(datetime.now().year - 1, datetime.now().year - 10, -1):
cf = pd.read_csv(PREFIX + '/' + str(y) + 'q4.cashflow.csv')
q_str = 'code==' + '\"' + s + '\"'
res = cf.query(q_str)
if len(res) is 0:
log.info('till ' + str(datetime.now().year - 1) + ' since ' + str(y))
break
else:
cf_nm_arr.insert(0, res.cf_nm.values[0])
cashflowratio_arr.insert(0, res.cashflowratio.values[0])
years.insert(0, y)
log.info('cf_nm:')
log.info(cf_nm_arr)
log.info('mean: %f', np.mean(cf_nm_arr))
log.info('std: %f', np.std(cf_nm_arr))
z = np.polyfit(years, cf_nm_arr, 1)
p = np.poly1d(z)
# p[1]*x + p[0]
log.info('fit: %s', str(p).split('\n')[1])
log.info('cashflowratio:')
log.info(cashflowratio_arr)
log.info('mean: %f', np.mean(cashflowratio_arr))
log.info('std: %f', np.std(cashflowratio_arr))
z = np.polyfit(years, cashflowratio_arr, 1)
p = np.poly1d(z)
# p[1]*x + p[0]
log.info('fit: %s', str(p).split('\n')[1])
return years, cf_nm_arr, cashflowratio_arr
def find_cashcow():
securities = []
cows = []
cf_nm_arr_of_sec = {}
mean_of_sec = {}
std_of_sec = {}
log.info('start filtering...')
for y in range(datetime.now().year - 1, datetime.now().year - 20, -1):
try:
cf = pd.read_csv(PREFIX + '/' + str(y) + 'q4.cashflow.csv')
except Exception as e:
break
if len(securities) is 0:
#securities = cf.code.head(5).values.tolist()
securities = cf.code.values.tolist()
for s in securities:
#log.info(s)
q_str = 'code==' + str(s)
res = cf.query(q_str)
if len(res) is 0:
continue
else:
try:
cf_nm_arr_of_sec[str(s)].insert(0, res.cf_nm.values[0])
except Exception as e:
cf_nm_arr_of_sec[str(s)] = []
for s in securities:
cow = {}
#log.info('%06d', s)
c = cf_nm_arr_of_sec[str(s)]
#log.info(c)
#log.info('mean: %f', np.mean(c))
#log.info('std: %f', np.std(c))
if len(c) == 0:
continue
mean = np.mean(c)
std = np.std(c)
if mean > 1.2 and std < 0.4 and std > 0:
cow['name'] = get_name_by_code(str(s).zfill(6))
cow['code'] = str(s).zfill(6)
cow['mean'] = round(mean, 3)
cow['std'] = round(std, 3)
cows.append(cow)
cows = json.dumps(cows, ensure_ascii = False, sort_keys = True, indent = 4)
log.info(cows)
return cows
#mean_of_sec[str(s)] = np.mean(cf_nm_arr_of_sec[str(s)])
#std_of_sec[str(s)] = np.std(cf_nm_arr_of_sec[str(s)])
'''
for s in securities:
if mean_of_sec[str(s)] > 1 and std_of_sec[str(s)] < 0.6:
cows.append(s)
log.info(get_name_by_code(s) + '(%08d) %s, mean %f, std %f', s, cf_nm_arr_of_sec[str(s)], mean_of_sec[str(s)], std_of_sec[str(s)])
''' | libs/worker.py | import os
import tushare as ts
import pandas as pd
from datetime import datetime
from datetime import timedelta
from log import log
from scipy.stats import norm
import numpy as np
import math
import json
PREFIX = 'data'
def __estimation_formula_bg_dynamic(growth, eps, pe):
''' BG formula, integrate with the normal pe (based on Gaussian Distribution)
original: (2*growth+8.5)*eps
'''
return (2*growth+pe)*eps
def __estimation_formula_pb(bvps, pb):
''' normal pb (based on Gaussian Distribution)
original: bvps*pb
'''
return bvps*pb
def __pd_read_basics():
''' pd.read_csv, for basics
'''
return pd.read_csv(PREFIX+'/'+'basics.csv', dtype={'code': object}).drop_duplicates()
def __pd_read_report(q_stat):
''' pd.read_csv, for report
args: q_stat(e.g. '2015q4.profit' or '2016q4')
'''
return pd.read_csv(PREFIX+'/'+q_stat+'.csv', dtype={'code': object}).sort_values(by='code').drop_duplicates()
def __pd_read_today_all():
''' pd.read_csv, for today
'''
return pd.read_csv(PREFIX+'/'+'today_all.csv', dtype={'code': object}).drop_duplicates()
def __quarter_to_date(quarter):
y = quarter.split('q')[0]
q = quarter.split('q')[1]
tail = ['-03-31', '-03-31', '-06-30', '-09-30', '-12-31']
end = y + tail[int(q)]
start = str(int(y)-1) + tail[int(q)]
return start, end
def __get_pe_and_eps(code, quarter):
''' get pe of specific quarter
args: code, quarter(e.g. 2015q3)
'''
r = {}
np = 0
y = quarter.split('q')[0]
q = quarter.split('q')[1]
q_str = 'code==' + '\"' + code + '\"'
b = __pd_read_basics()
totals = b.query(q_str).totals.values[0]
log.debug('totals: %.2f', totals)
r[quarter] = __pd_read_report(quarter)
if (q == '4'):
if (len(r[quarter].query(q_str)) > 0):
np = r[quarter].query(q_str).net_profits.values[0]
else:
log.warn('no entry in %s (net_profits)', quarter)
return False, False
else:
last_q4 = str(int(y)-1)+'q4'
last_q = str(int(y)-1)+'q'+q
r[last_q4] = __pd_read_report(last_q4)
r[last_q] = __pd_read_report(last_q)
if ((len(r[quarter].query(q_str)) > 0) & (len(r[last_q4].query(q_str)) > 0) & (len(r[last_q].query(q_str)) > 0)):
np = r[last_q4].query(q_str).net_profits.values[0] - r[last_q].query(q_str).net_profits.values[0] + r[quarter].query(q_str).net_profits.values[0]
else:
if (len(r[quarter].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', quarter)
if (len(r[last_q4].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', last_q4)
if (len(r[last_q].query(q_str)) <= 0):
log.warn('no entry in %s (net_profits)', last_q)
return False, False
eps = np/totals/10000.0
s, e = __quarter_to_date(quarter)
k = ts.get_k_data(code, ktype='M', start=s, end=e)
if (len(k) == 0):
log.warn('no k data entry in %s', quarter)
return False, False
pps = k.loc[k.last_valid_index()].close
log.debug('%s, price: %.2f', e, pps)
log.debug('np: %.2f', np)
log.debug('eps: %.2f', eps)
pe = round(pps/eps, 2)
log.debug('pe: %.2f', pe)
return pe, eps
def __get_growth(code, years):
g = []
qs = [4, 3, 2, 1]
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in qs:
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.growth.csv')):
rg = __pd_read_report(quarter+'.growth')
q_str = 'code==' + '\"' + code + '\"'
if (len(rg.query(q_str)) > 0):
tmp_g = round(rg.query(q_str).nprg.values[0], 2)
if (math.isnan(tmp_g)):
tmp_g = 0
g.append(tmp_g)
log.debug('growth@%s: %.2f%%', quarter, tmp_g)
break
growth = round(np.mean(g)/100.0, 2)
log.info('growth: %.2f %d~%d %s', growth, datetime.now().year - years, datetime.now().year, str(g))
return growth
def __get_eps(code):
''' Deprecated! This eps is not a full fiscal year data!
'''
b = __pd_read_basics()
q_str = 'code==' + '\"' + code + '\"'
eps = b.query(q_str).esp.values[0]
log.info('eps: %.2f', eps)
return eps
def __get_k_data_of_last_trade_day(code):
d = datetime.now()
k = None
while True:
k = ts.get_k_data(code, ktype='M', start=d.strftime("%Y-%m-%d"), end=d.strftime("%Y-%m-%d"))
if (len(k) > 0):
break
else:
d = d + timedelta(days = -1)
return k, d
def __get_est_price_mode_pe(realtime, code, years):
q_str = 'code==' + '\"' + code + '\"'
pe_obj = {}
eps = 0
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in range(1, 5):
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.csv')):
r = __pd_read_report(quarter)
if (len(r.query(q_str)) > 0):
# save all pe history and latest eps
tmp_pe, tmp_eps = __get_pe_and_eps(code, quarter)
if (isinstance(tmp_pe, float) & isinstance(tmp_eps, float)):
pe_obj[quarter] = tmp_pe
eps = tmp_eps
log.debug('%s pe: %.2f, eps: %.2f', quarter, pe_obj[quarter], eps)
else:
log.warn('skip %s', quarter)
continue
#sorted(pe_obj)
#log.debug(pe_obj)
arr = pe_obj.values()
mu, std = norm.fit(arr)
if (realtime):
d = datetime.now()
today = __pd_read_today_all()
close = round(today.query(q_str).trade.values[0], 2)
else:
k, d = __get_k_data_of_last_trade_day(code)
close = round(k.close.values[0], 2)
log.info('%s price: %.2f @ pe %.2f, eps %.2f', d.strftime("%Y-%m-%d"), close, close/eps, eps)
log.info('mu, std: %.2f, %.2f', mu, std)
growth = __get_growth(code, years)
left = __estimation_formula_bg_dynamic(growth, eps, mu - std)
centrum = __estimation_formula_bg_dynamic(growth, eps, mu)
right = __estimation_formula_bg_dynamic(growth, eps, mu + std)
value = __estimation_formula_bg_dynamic(growth, eps, 8.5)
log.info('est dynamic: %.2f~%.2f~%.2f', left, centrum, right)
log.info('est value: %.2f', value)
log.info('range from left: %.2f%%', (close-left)/left*100.0)
log.info('position: %.2f%%', (close-left)/(right-left)*100.0)
return left, centrum, right, value
def __get_pb_and_bvps(code, quarter):
''' get pb of spbcific quarter
args: code, quarter(e.g. 2015q3)
'''
r = {}
bvps = 0
y = quarter.split('q')[0]
q = quarter.split('q')[1]
q_str = 'code==' + '\"' + code + '\"'
r[quarter] = __pd_read_report(quarter)
if (len(r[quarter].query(q_str)) > 0):
bvps = r[quarter].query(q_str).bvps.values[0]
else:
log.warn('no entry in %s (bvps)', quarter)
return False, False
s, e = __quarter_to_date(quarter)
k = ts.get_k_data(code, ktype='M', start=s, end=e)
if (len(k) == 0):
log.warn('no k data entry in %s', quarter)
return False, False
pps = k.loc[k.last_valid_index()].close
log.debug('%s, price: %.2f', e, pps)
log.debug('bvps: %.2f', bvps)
pb = round(pps/bvps, 2)
log.debug('pb: %.2f', pb)
return pb, bvps
def __get_est_price_mode_pb(realtime, code, years):
q_str = 'code==' + '\"' + code + '\"'
pb_obj = {}
bvps = 0
for y in range(datetime.now().year - years, datetime.now().year + 1):
for q in range(1, 5):
quarter = str(y)+'q'+str(q)
if (os.path.exists(PREFIX+'/'+quarter+'.csv')):
r = __pd_read_report(quarter)
if (len(r.query(q_str)) > 0):
# save all pb history and latest bvps
tmp_pb, tmp_bvps = __get_pb_and_bvps(code, quarter)
if (isinstance(tmp_pb, float) & isinstance(tmp_bvps, float)):
pb_obj[quarter] = tmp_pb
bvps = tmp_bvps
log.debug('%s pb: %.2f, bvps: %.2f', quarter, pb_obj[quarter], bvps)
else:
log.warn('skip %s', quarter)
continue
#sorted(pb_obj)
#log.debug(pb_obj)
arr = pb_obj.values()
mu, std = norm.fit(arr)
if (realtime):
d = datetime.now()
today = __pd_read_today_all()
close = round(today.query(q_str).trade.values[0], 2)
else:
k, d = __get_k_data_of_last_trade_day(code)
close = round(k.close.values[0], 2)
log.info('%s price: %.2f @ pb %.2f, bvps %.2f', d.strftime("%Y-%m-%d"), close, close/bvps, bvps)
log.info('mu, std: %.2f, %.2f', mu, std)
left = __estimation_formula_pb(bvps, mu - std)
centrum = __estimation_formula_pb(bvps, mu)
right = __estimation_formula_pb(bvps, mu + std)
value = __estimation_formula_pb(bvps, 1.0)
log.info('est dynamic: %.2f~%.2f~%.2f', left, centrum, right)
log.info('est value: %.2f', value)
log.info('range from left: %.2f%%', (close-left)/left*100.0)
log.info('position: %.2f%%', (close-left)/(right-left)*100.0)
return left, centrum, right, value
def get_name_by_code(code):
q_str = 'code==' + '\"' + code + '\"'
# FIXME should use the latest report file
df = pd.read_csv('data/2015q4.csv', dtype={'code': object}).sort_values(by='code').drop_duplicates()
df_code = df.query(q_str)
if (len(df_code) > 0):
return df_code.name.values[0]
else:
return None
def get_est_price(realtime, mode, years, code):
''' return left, centrum, right price, to form a range
'''
if (mode == 'pe'):
return __get_est_price_mode_pe(realtime, code, years)
elif (mode == 'pb'):
return __get_est_price_mode_pb(realtime, code, years)
else:
return 0, 0, 0
def get_stock_basics():
''' invoke tushare get_stock_basics() with csv output
args:
returns: csv format data containing the whole martket information
json fomat, df.to_json('basics.json', orient='index')
'''
filename = PREFIX + '/' + 'basics.csv'
df = ts.get_stock_basics()
df.sort_index(inplace=True)
return df.to_csv(filename, encoding='UTF-8')
def save_to_file(filename, df):
''' save df content to file
args: filename, df
returns: df
'''
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None, encoding='UTF-8', index=False)
else:
df.to_csv(filename, encoding='UTF-8', index=False)
df = pd.read_csv(filename, dtype={'code': object}).sort_values(by='code').drop_duplicates()
return df.to_csv(filename, encoding='UTF-8', index=False)
def get_report_data(year, quarter):
''' invoke tushare get_report_data() with csv output
brief: to improve data integrality, we repeatedly do these actions in a row,
call API -> append to file -> drop duplicates
args: year, quarter
returns: csv format data containing the whole martket report in specific year, quarter
json fomat, df.to_json(year+'q'+quarter+'.json', orient='index')
'''
# profit
print "[%s] profit %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.profit.csv'
df = ts.get_profit_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# operation
print "[%s] operation %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.operation.csv'
df = ts.get_operation_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# growth
print "[%s] growth %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.growth.csv'
df = ts.get_growth_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# debtpaying
print "[%s] debtpaying %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.debtpaying.csv'
df = ts.get_debtpaying_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# cashflow
print "[%s] cashflow %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.cashflow.csv'
df = ts.get_cashflow_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
save_to_file(filename, df)
# main report
print "[%s] main %sq%s" %(datetime.now().strftime("%H:%M:%S.%f"), year, quarter)
filename = PREFIX + '/' + year + 'q' + quarter + '.csv'
df = ts.get_report_data(int(year), int(quarter)).sort_values(by='code').drop_duplicates()
print "\n"
return save_to_file(filename, df)
def get_today_all():
print "[%s] get_today_all" %(datetime.now().strftime("%H:%M:%S.%f"))
df = ts.get_today_all()
filename = PREFIX + '/' + 'today_all.csv'
os.remove(filename)
return save_to_file(filename, df)
def eval_cashcow(s):
cf_nm_arr = []
cashflowratio_arr = []
years = []
for y in range(datetime.now().year - 1, datetime.now().year - 10, -1):
cf = pd.read_csv(PREFIX + '/' + str(y) + 'q4.cashflow.csv')
q_str = 'code==' + '\"' + s + '\"'
res = cf.query(q_str)
if len(res) is 0:
log.info('till ' + str(datetime.now().year - 1) + ' since ' + str(y))
break
else:
cf_nm_arr.insert(0, res.cf_nm.values[0])
cashflowratio_arr.insert(0, res.cashflowratio.values[0])
years.insert(0, y)
log.info('cf_nm:')
log.info(cf_nm_arr)
log.info('mean: %f', np.mean(cf_nm_arr))
log.info('std: %f', np.std(cf_nm_arr))
z = np.polyfit(years, cf_nm_arr, 1)
p = np.poly1d(z)
# p[1]*x + p[0]
log.info('fit: %s', str(p).split('\n')[1])
log.info('cashflowratio:')
log.info(cashflowratio_arr)
log.info('mean: %f', np.mean(cashflowratio_arr))
log.info('std: %f', np.std(cashflowratio_arr))
z = np.polyfit(years, cashflowratio_arr, 1)
p = np.poly1d(z)
# p[1]*x + p[0]
log.info('fit: %s', str(p).split('\n')[1])
return years, cf_nm_arr, cashflowratio_arr
def find_cashcow():
securities = []
cows = []
cf_nm_arr_of_sec = {}
mean_of_sec = {}
std_of_sec = {}
log.info('start filtering...')
for y in range(datetime.now().year - 1, datetime.now().year - 20, -1):
try:
cf = pd.read_csv(PREFIX + '/' + str(y) + 'q4.cashflow.csv')
except Exception as e:
break
if len(securities) is 0:
#securities = cf.code.head(5).values.tolist()
securities = cf.code.values.tolist()
for s in securities:
#log.info(s)
q_str = 'code==' + str(s)
res = cf.query(q_str)
if len(res) is 0:
continue
else:
try:
cf_nm_arr_of_sec[str(s)].insert(0, res.cf_nm.values[0])
except Exception as e:
cf_nm_arr_of_sec[str(s)] = []
for s in securities:
cow = {}
#log.info('%06d', s)
c = cf_nm_arr_of_sec[str(s)]
#log.info(c)
#log.info('mean: %f', np.mean(c))
#log.info('std: %f', np.std(c))
if len(c) == 0:
continue
mean = np.mean(c)
std = np.std(c)
if mean > 1.2 and std < 0.4 and std > 0:
cow['name'] = get_name_by_code(str(s).zfill(6))
cow['code'] = str(s).zfill(6)
cow['mean'] = round(mean, 3)
cow['std'] = round(std, 3)
cows.append(cow)
cows = json.dumps(cows, ensure_ascii = False, sort_keys = True, indent = 4)
log.info(cows)
return cows
#mean_of_sec[str(s)] = np.mean(cf_nm_arr_of_sec[str(s)])
#std_of_sec[str(s)] = np.std(cf_nm_arr_of_sec[str(s)])
'''
for s in securities:
if mean_of_sec[str(s)] > 1 and std_of_sec[str(s)] < 0.6:
cows.append(s)
log.info(get_name_by_code(s) + '(%08d) %s, mean %f, std %f', s, cf_nm_arr_of_sec[str(s)], mean_of_sec[str(s)], std_of_sec[str(s)])
''' | 0.2227 | 0.195038 |
from xdevs.models import Coupled, Port
from typing import Dict, Tuple, Optional
from mercury.config.core import CoreConfig
from mercury.config.network import NodeConfig
from mercury.config.smart_grid import ConsumerConfig
from mercury.config.edcs import EdgeFederationConfig
from mercury.config.iot_devices import ServiceConfig
from mercury.config.radio import RadioAccessNetworkConfig
from mercury.msg.network import NodeLocation
from mercury.msg.smart_grid import ElectricityOffer
from mercury.msg.network import PhysicalPacket, NetworkPacket
from .amf import AccessAndMobilityManagementFunction
from .cnfs import CoreNetworkFunctions
from .demand_estimator import DemandEstimator
from .efc import EdgeFederationController
from .sdnc import SoftwareDefinedNetworkController, SoftwareDefinedNetworkControllerLite
class Core(Coupled):
def __init__(self, core_config: CoreConfig, edge_fed_config: Optional[EdgeFederationConfig],
aps: Dict[str, Tuple[float, ...]], services: Dict[str, ServiceConfig],
consumers: Dict[str, ConsumerConfig], lite: bool = False):
"""
Core Layer Module for Mercury Simulator
:param core_config: Core Layer Configuration.
:param edge_fed_config: Edge federation configuration parameters.
:param aps: dictionary containing the APs in the scenario and their location.
:param services: list containing all the services defined in the scenario.
:param consumers: Smart Grid consumers configurations.
"""
super().__init__('core')
edcs = edge_fed_config.edcs
edc_locations = {edc_id: edc_config.edc_location for edc_id, edc_config in edcs.items()}
service_ids = {service for service in services}
port_type = PhysicalPacket if not lite else NetworkPacket
self.input_electricity_offer = Port(ElectricityOffer, 'input_electricity_offer')
self.input_data = Port(port_type, 'input_data')
self.output_data = Port(port_type, 'output_data')
self.add_in_port(self.input_electricity_offer)
self.add_in_port(self.input_data)
self.add_out_port(self.output_data)
# Core Network Functions: classifies all the messages etc. and encapsulate them for the network
cnfs = CoreNetworkFunctions(lite)
self.add_component(cnfs)
self.add_coupling(self.input_data, cnfs.input_data)
self.add_coupling(cnfs.output_data, self.output_data)
congestion = edge_fed_config.congestion
slicing = edge_fed_config.edc_slicing
if lite:
sdnc = SoftwareDefinedNetworkControllerLite(core_config, aps, edc_locations,
service_ids, congestion, slicing, consumers)
self.add_component(sdnc)
self.input_remove_node = Port(str, 'input_remove_node')
self.input_node_location = Port(NodeLocation, 'input_node_location')
self.input_create_node = Port(NodeConfig, 'input_create_node')
self.add_in_port(self.input_remove_node)
self.add_in_port(self.input_node_location)
self.add_in_port(self.input_create_node)
self.add_coupling(self.input_remove_node, sdnc.input_remove_node)
self.add_coupling(self.input_node_location, sdnc.input_node_location)
self.add_coupling(self.input_create_node, sdnc.input_create_node)
else:
sdnc = SoftwareDefinedNetworkController(core_config, aps, edc_locations,
service_ids, congestion, slicing, consumers)
self.add_component(sdnc)
self.add_coupling(self.input_electricity_offer, sdnc.input_electricity_offer)
self.add_coupling(cnfs.output_power_consumption, sdnc.input_edc_report)
self.add_coupling(cnfs.output_datacenter_request, sdnc.input_datacenter_request)
self.add_coupling(sdnc.output_datacenter_response, cnfs.input_datacenter_response)
if edge_fed_config is not None:
edcs_controller = EdgeFederationController(edge_fed_config, service_ids)
self.add_component(edcs_controller)
self.add_coupling(self.input_electricity_offer, edcs_controller.input_electricity_offer)
self.add_coupling(cnfs.output_power_consumption, edcs_controller.input_edc_report)
self.add_coupling(edcs_controller.output_edc_slicing, sdnc.input_edc_slicing)
self.add_coupling(edcs_controller.output_dispatching, cnfs.input_dispatching)
self.add_coupling(edcs_controller.output_hot_standby, cnfs.input_hot_standby)
for service, config in services.items():
if config.estimator_name is not None:
estimator = DemandEstimator(service, config.estimator_name, **config.estimator_config)
self.add_component(estimator)
self.add_coupling(estimator.output_demand_estimation, edcs_controller.input_demand_estimation)
if not RadioAccessNetworkConfig.bypass_amf:
amf = AccessAndMobilityManagementFunction()
self.add_component(amf)
self.add_coupling(cnfs.output_amf_request, amf.input_request)
self.add_coupling(amf.output_response, cnfs.input_amf_response) | mercury/fog_model/core/core.py | from xdevs.models import Coupled, Port
from typing import Dict, Tuple, Optional
from mercury.config.core import CoreConfig
from mercury.config.network import NodeConfig
from mercury.config.smart_grid import ConsumerConfig
from mercury.config.edcs import EdgeFederationConfig
from mercury.config.iot_devices import ServiceConfig
from mercury.config.radio import RadioAccessNetworkConfig
from mercury.msg.network import NodeLocation
from mercury.msg.smart_grid import ElectricityOffer
from mercury.msg.network import PhysicalPacket, NetworkPacket
from .amf import AccessAndMobilityManagementFunction
from .cnfs import CoreNetworkFunctions
from .demand_estimator import DemandEstimator
from .efc import EdgeFederationController
from .sdnc import SoftwareDefinedNetworkController, SoftwareDefinedNetworkControllerLite
class Core(Coupled):
def __init__(self, core_config: CoreConfig, edge_fed_config: Optional[EdgeFederationConfig],
aps: Dict[str, Tuple[float, ...]], services: Dict[str, ServiceConfig],
consumers: Dict[str, ConsumerConfig], lite: bool = False):
"""
Core Layer Module for Mercury Simulator
:param core_config: Core Layer Configuration.
:param edge_fed_config: Edge federation configuration parameters.
:param aps: dictionary containing the APs in the scenario and their location.
:param services: list containing all the services defined in the scenario.
:param consumers: Smart Grid consumers configurations.
"""
super().__init__('core')
edcs = edge_fed_config.edcs
edc_locations = {edc_id: edc_config.edc_location for edc_id, edc_config in edcs.items()}
service_ids = {service for service in services}
port_type = PhysicalPacket if not lite else NetworkPacket
self.input_electricity_offer = Port(ElectricityOffer, 'input_electricity_offer')
self.input_data = Port(port_type, 'input_data')
self.output_data = Port(port_type, 'output_data')
self.add_in_port(self.input_electricity_offer)
self.add_in_port(self.input_data)
self.add_out_port(self.output_data)
# Core Network Functions: classifies all the messages etc. and encapsulate them for the network
cnfs = CoreNetworkFunctions(lite)
self.add_component(cnfs)
self.add_coupling(self.input_data, cnfs.input_data)
self.add_coupling(cnfs.output_data, self.output_data)
congestion = edge_fed_config.congestion
slicing = edge_fed_config.edc_slicing
if lite:
sdnc = SoftwareDefinedNetworkControllerLite(core_config, aps, edc_locations,
service_ids, congestion, slicing, consumers)
self.add_component(sdnc)
self.input_remove_node = Port(str, 'input_remove_node')
self.input_node_location = Port(NodeLocation, 'input_node_location')
self.input_create_node = Port(NodeConfig, 'input_create_node')
self.add_in_port(self.input_remove_node)
self.add_in_port(self.input_node_location)
self.add_in_port(self.input_create_node)
self.add_coupling(self.input_remove_node, sdnc.input_remove_node)
self.add_coupling(self.input_node_location, sdnc.input_node_location)
self.add_coupling(self.input_create_node, sdnc.input_create_node)
else:
sdnc = SoftwareDefinedNetworkController(core_config, aps, edc_locations,
service_ids, congestion, slicing, consumers)
self.add_component(sdnc)
self.add_coupling(self.input_electricity_offer, sdnc.input_electricity_offer)
self.add_coupling(cnfs.output_power_consumption, sdnc.input_edc_report)
self.add_coupling(cnfs.output_datacenter_request, sdnc.input_datacenter_request)
self.add_coupling(sdnc.output_datacenter_response, cnfs.input_datacenter_response)
if edge_fed_config is not None:
edcs_controller = EdgeFederationController(edge_fed_config, service_ids)
self.add_component(edcs_controller)
self.add_coupling(self.input_electricity_offer, edcs_controller.input_electricity_offer)
self.add_coupling(cnfs.output_power_consumption, edcs_controller.input_edc_report)
self.add_coupling(edcs_controller.output_edc_slicing, sdnc.input_edc_slicing)
self.add_coupling(edcs_controller.output_dispatching, cnfs.input_dispatching)
self.add_coupling(edcs_controller.output_hot_standby, cnfs.input_hot_standby)
for service, config in services.items():
if config.estimator_name is not None:
estimator = DemandEstimator(service, config.estimator_name, **config.estimator_config)
self.add_component(estimator)
self.add_coupling(estimator.output_demand_estimation, edcs_controller.input_demand_estimation)
if not RadioAccessNetworkConfig.bypass_amf:
amf = AccessAndMobilityManagementFunction()
self.add_component(amf)
self.add_coupling(cnfs.output_amf_request, amf.input_request)
self.add_coupling(amf.output_response, cnfs.input_amf_response) | 0.851027 | 0.133924 |
from typing import overload
import cv2
import pprint
import mediapipe as mp
import time
import traceback
from cv2Decorator import cv2Decorator
class Face:
padding = 10
image_size = 200
def __init__(self):
self.trained_model =cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
self.ksize = (20,20)
def detectFace(self,frame,draw = False):
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
# extracting cordinates (x,y,width,height)
# (x,y,w,h) = face_coordinate[0]
for i in self.face_coordinate:
(x,y,w,h) = i
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
roiFrame = frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x]
# put back on
frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x] = self.actionOnImage(roiFrame,action='blur')
return frame
def actionOnImage(self,roiFrame,action='blur'):
"""make the roiImage - blur or dark"""
try :
if action == 'blur':
# blur the part of the image
roiFrame = cv2.blur(roiFrame, self.ksize, cv2.BORDER_DEFAULT)
else :
# make it black
roiFrame[:,:] = (0,0,0)
except :
pass
finally:
return roiFrame
def detectFaceAndShow(self,frame,draw = False):
fh,fw,fc = frame.shape
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
for index in range(len(self.face_coordinate)) :
(x,y,w,h) = self.face_coordinate[index]
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
self.padding = 60
roiFrame = frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x]
# # put back on
try :
frame[fh-self.image_size: fh,
fw-self.image_size: fw] = cv2.resize(roiFrame,(self.image_size,self.image_size))
except :
pass
return frame
def detectFaceAndCenter(self,frame,draw = False,padding = 100 ):
fh,fw,fc = frame.shape
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
if len(self.face_coordinate) >= 1 :
(x,y,w,h) = self.face_coordinate[0]
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
roiFrame = frame[y-padding:padding+h+y,
x-padding:padding+w+x]
# # put back on
try :
frame[:,:] = cv2.resize(roiFrame,(fw,fh))
except :
pass
return frame | Face/FaceDetecter.py | from typing import overload
import cv2
import pprint
import mediapipe as mp
import time
import traceback
from cv2Decorator import cv2Decorator
class Face:
padding = 10
image_size = 200
def __init__(self):
self.trained_model =cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
self.ksize = (20,20)
def detectFace(self,frame,draw = False):
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
# extracting cordinates (x,y,width,height)
# (x,y,w,h) = face_coordinate[0]
for i in self.face_coordinate:
(x,y,w,h) = i
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
roiFrame = frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x]
# put back on
frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x] = self.actionOnImage(roiFrame,action='blur')
return frame
def actionOnImage(self,roiFrame,action='blur'):
"""make the roiImage - blur or dark"""
try :
if action == 'blur':
# blur the part of the image
roiFrame = cv2.blur(roiFrame, self.ksize, cv2.BORDER_DEFAULT)
else :
# make it black
roiFrame[:,:] = (0,0,0)
except :
pass
finally:
return roiFrame
def detectFaceAndShow(self,frame,draw = False):
fh,fw,fc = frame.shape
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
for index in range(len(self.face_coordinate)) :
(x,y,w,h) = self.face_coordinate[index]
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
self.padding = 60
roiFrame = frame[y-self.padding:self.padding+h+y,
x-self.padding:self.padding+w+x]
# # put back on
try :
frame[fh-self.image_size: fh,
fw-self.image_size: fw] = cv2.resize(roiFrame,(self.image_size,self.image_size))
except :
pass
return frame
def detectFaceAndCenter(self,frame,draw = False,padding = 100 ):
fh,fw,fc = frame.shape
gray_img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
self.face_coordinate = self.trained_model.detectMultiScale(gray_img,1.3,5)
if len(self.face_coordinate) >= 1 :
(x,y,w,h) = self.face_coordinate[0]
# drawing rectangle on the image
if draw :
cv2.rectangle(frame,(x-self.padding,y-self.padding),(x+w+self.padding,y+h+self.padding),(0,255,0),2)
# crop
roiFrame = frame[y-padding:padding+h+y,
x-padding:padding+w+x]
# # put back on
try :
frame[:,:] = cv2.resize(roiFrame,(fw,fh))
except :
pass
return frame | 0.384565 | 0.137561 |
import typing
from decimal import Decimal
import pydantic
import stackprinter
stackprinter.set_excepthook(style="darkbg2")
from crypto_dom.kraken.definitions import ASSETCLASS, ASSET
# ============================================================
# DEPOSIT METHODS
# ============================================================
# doc: https://www.kraken.com/features/api#deposit-methods
URL = "https://api.kraken.com/0/private/DepositMethods"
METHOD = "POST"
# ------------------------------
# Sample Response
# ------------------------------
#[
# {
# 'fee': '0.0000000000',
# 'gen-address': True,
# 'limit': False,
# 'method': 'Ether (Hex)'
# }
# ]
# ------------------------------
# Request Model
# ------------------------------
class Request(pydantic.BaseModel):
"""Request model for endpoint POST https://api.kraken.com/0/private/DepositMethods
Model Fields:
-------------
aclass : str
Default = currency (optional)
asset : str enum
Asset being deposited
nonce : int
Always increasing unsigned 64 bit integer
"""
aclass: typing.Optional[ASSETCLASS]
asset: ASSET
nonce: pydantic.PositiveInt
# ------------------------------
# Response Model
# ------------------------------
class _Method(pydantic.BaseModel):
method: str # TODO should be Literal
limit: typing.Union[Decimal, bool]
fee: Decimal
address_setup_fee: typing.Optional[bool] = pydantic.Field(alias="address-setup-fee")
class _DepositMethodsResponse(pydantic.BaseModel):
# placeholder
data: typing.Tuple[_Method, ...]
# this class is just to be consistent with our API
class Response:
"""Validated Response for endpoint POST https://api.kraken.com/0/private/DepositMethods
Type: list of pydantic models
Model Fields:
-------------
method : str enum
Name of deposit method
limit : Union[Decimal, bool]
Maximum net amount that can be deposited right now, or false if no limit
fee: Decimal
address-setup-fee: bool
whether or not method has an address setup fee (optional)
"""
def __call__(self, response: dict):
_valid = _DepositMethodsResponse(data=response)
return _valid.data | src/crypto_dom/kraken/user_funding/deposit_methods.py | import typing
from decimal import Decimal
import pydantic
import stackprinter
stackprinter.set_excepthook(style="darkbg2")
from crypto_dom.kraken.definitions import ASSETCLASS, ASSET
# ============================================================
# DEPOSIT METHODS
# ============================================================
# doc: https://www.kraken.com/features/api#deposit-methods
URL = "https://api.kraken.com/0/private/DepositMethods"
METHOD = "POST"
# ------------------------------
# Sample Response
# ------------------------------
#[
# {
# 'fee': '0.0000000000',
# 'gen-address': True,
# 'limit': False,
# 'method': 'Ether (Hex)'
# }
# ]
# ------------------------------
# Request Model
# ------------------------------
class Request(pydantic.BaseModel):
"""Request model for endpoint POST https://api.kraken.com/0/private/DepositMethods
Model Fields:
-------------
aclass : str
Default = currency (optional)
asset : str enum
Asset being deposited
nonce : int
Always increasing unsigned 64 bit integer
"""
aclass: typing.Optional[ASSETCLASS]
asset: ASSET
nonce: pydantic.PositiveInt
# ------------------------------
# Response Model
# ------------------------------
class _Method(pydantic.BaseModel):
method: str # TODO should be Literal
limit: typing.Union[Decimal, bool]
fee: Decimal
address_setup_fee: typing.Optional[bool] = pydantic.Field(alias="address-setup-fee")
class _DepositMethodsResponse(pydantic.BaseModel):
# placeholder
data: typing.Tuple[_Method, ...]
# this class is just to be consistent with our API
class Response:
"""Validated Response for endpoint POST https://api.kraken.com/0/private/DepositMethods
Type: list of pydantic models
Model Fields:
-------------
method : str enum
Name of deposit method
limit : Union[Decimal, bool]
Maximum net amount that can be deposited right now, or false if no limit
fee: Decimal
address-setup-fee: bool
whether or not method has an address setup fee (optional)
"""
def __call__(self, response: dict):
_valid = _DepositMethodsResponse(data=response)
return _valid.data | 0.240329 | 0.134293 |
import requests
topics = {
"topics": [
{
"text": [
"Art_Event",
"Celebrities",
"Entertainment",
"Fashion",
"Food_Drink",
"Games",
"Literature",
"Math",
"Movies_TV",
"Music",
"News",
"Other",
"Pets_Animals",
"Phatic",
"Politics",
"Psychology",
"Religion",
"SciTech",
"Sex_Profanity",
"Sports",
"Travel_Geo",
"Weather_Time",
]
}
],
"dialogact_topics": [
[
"Other",
"Interactive",
"Phatic",
"Entertainment_Movies",
"Science_and_Technology",
"Sports",
"Entertainment_Music",
"Entertainment_General",
"Politics",
"Entertainment_Books",
]
],
}
true_requests = {
"movie": [
{"utterances_histories": [["what you know about movie"]], "response": ["not much but i love watching movies"]}
],
"music": [
{
"utterances_histories": [["what you know about music"]],
"response": [
"Well i look up odd facts about google and sometimes look up musicians and concerts",
"i am good i love music do you",
],
}
],
"book": [{"utterances_histories": [["what you know about book"]], "response": ["i read a lot what about you"]}],
"entertainment": [
{
"utterances_histories": [["what you know about entertainment"]],
"response": [
"not much but i love watching movies",
"I do like to watch TV its one of my favorite forms of entertainment",
],
}
],
"fashion": [
{
"utterances_histories": [["what you know about fashion"]],
"response": [
"i dress up , i like fashion shows",
"Yes I know a lot about clothing actually. In fact I'm wearing some right now LOL what about you",
],
}
],
"politics": [
{
"utterances_histories": [["what you know about politics"]],
"response": [
"i am passionate about politics , so i indulge in those topics , wbu ?",
"Sometimes I follow politics. What about you?",
],
}
],
"science_technology": [
{
"utterances_histories": [["what you know about science"]],
"response": ["i do not i study science mostly its fascinating"],
}
],
"sport": [
{
"utterances_histories": [["what you know about sport"]],
"response": ["I am great what about you? do you like football?", "i am great just never liked sports"],
}
],
"animals": [
{
"utterances_histories": [["what you know about animals"]],
"response": [
"my dog is just my pet but I do like learning about interesting facts about them",
"i am literally obsessed with animals",
],
}
],
}
def test_skill():
url = "http://0.0.0.0:8060/respond"
for topic_name, topic_examples in true_requests.items():
for example in topic_examples:
example.update(topics)
example["utterances_histories"] = [["hi", "how are you", "i am fine"] + example["utterances_histories"][0]]
response = requests.post(url, json=example).json()
print(response)
assert response[0][0] in example["response"], f"topic_name: {topic_name}\nexample: {example}"
print("SUCCESS!")
if __name__ == "__main__":
test_skill() | skills/topicalchat_convert_retrieval/test_server.py | import requests
topics = {
"topics": [
{
"text": [
"Art_Event",
"Celebrities",
"Entertainment",
"Fashion",
"Food_Drink",
"Games",
"Literature",
"Math",
"Movies_TV",
"Music",
"News",
"Other",
"Pets_Animals",
"Phatic",
"Politics",
"Psychology",
"Religion",
"SciTech",
"Sex_Profanity",
"Sports",
"Travel_Geo",
"Weather_Time",
]
}
],
"dialogact_topics": [
[
"Other",
"Interactive",
"Phatic",
"Entertainment_Movies",
"Science_and_Technology",
"Sports",
"Entertainment_Music",
"Entertainment_General",
"Politics",
"Entertainment_Books",
]
],
}
true_requests = {
"movie": [
{"utterances_histories": [["what you know about movie"]], "response": ["not much but i love watching movies"]}
],
"music": [
{
"utterances_histories": [["what you know about music"]],
"response": [
"Well i look up odd facts about google and sometimes look up musicians and concerts",
"i am good i love music do you",
],
}
],
"book": [{"utterances_histories": [["what you know about book"]], "response": ["i read a lot what about you"]}],
"entertainment": [
{
"utterances_histories": [["what you know about entertainment"]],
"response": [
"not much but i love watching movies",
"I do like to watch TV its one of my favorite forms of entertainment",
],
}
],
"fashion": [
{
"utterances_histories": [["what you know about fashion"]],
"response": [
"i dress up , i like fashion shows",
"Yes I know a lot about clothing actually. In fact I'm wearing some right now LOL what about you",
],
}
],
"politics": [
{
"utterances_histories": [["what you know about politics"]],
"response": [
"i am passionate about politics , so i indulge in those topics , wbu ?",
"Sometimes I follow politics. What about you?",
],
}
],
"science_technology": [
{
"utterances_histories": [["what you know about science"]],
"response": ["i do not i study science mostly its fascinating"],
}
],
"sport": [
{
"utterances_histories": [["what you know about sport"]],
"response": ["I am great what about you? do you like football?", "i am great just never liked sports"],
}
],
"animals": [
{
"utterances_histories": [["what you know about animals"]],
"response": [
"my dog is just my pet but I do like learning about interesting facts about them",
"i am literally obsessed with animals",
],
}
],
}
def test_skill():
url = "http://0.0.0.0:8060/respond"
for topic_name, topic_examples in true_requests.items():
for example in topic_examples:
example.update(topics)
example["utterances_histories"] = [["hi", "how are you", "i am fine"] + example["utterances_histories"][0]]
response = requests.post(url, json=example).json()
print(response)
assert response[0][0] in example["response"], f"topic_name: {topic_name}\nexample: {example}"
print("SUCCESS!")
if __name__ == "__main__":
test_skill() | 0.22448 | 0.460107 |
from flask import Blueprint, request
import config
from blueprints.private.settings import setting
from modules.registration import Registration
from modules.validations import Validation
PublicForm_bp = Blueprint("PublicForm_bp", __name__)
register = Registration()
validate = Validation()
def responseData(data):
return {"data": data}
@PublicForm_bp.post('/CreateAccount')
def CreateAccount():
fname = config.sanitize_Html(request.form['fname'])
lname = config.sanitize_Html(request.form['lname'])
email = config.sanitize_Html(request.form['email'])
password = config.sanitize_Html(request.form['password'])
referral = config.sanitize_Html(request.form['referral'])
if result := register.registration(fname, lname, email, password, referral):
return result
@PublicForm_bp.post('/Login')
def Login():
email = config.sanitize_Html(request.form['email'])
password = config.sanitize_Html(request.form['password'])
if result := register.login(email, password):
return result
@PublicForm_bp.post('/forgot-password')
def forgot_password():
email = config.sanitize_Html(request.form['email'])
if result := register.forgotPassword(email):
return result
@PublicForm_bp.post('/change-forgot-password')
def change_forgot_password():
email = validate.get_cookie_id("verify_email")
password1 = config.sanitize_Html(request.form['password1'])
password2 = config.sanitize_Html(request.form['password2'])
if data := setting.ChangeForgotPassword(email, password1, password2):
if data == "success":
return responseData(["success", "Password has been changed successfully, please Sign in again"])
elif data == "incorrect-password":
return responseData(["warning", "Password does not match"])
@PublicForm_bp.post('/verify-auth')
def verify_auth():
email = config.sanitize_Html(request.form['email'])
code = config.sanitize_Html(request.form['verification'])
if result := register.TwoAuthentificationVerify(email, code):
return result
@PublicForm_bp.post('/contact-message')
def contact_message():
if request.method == "POST":
name = config.sanitize_Html(request.form['name'])
email = config.sanitize_Html(request.form['email'])
subject = config.sanitize_Html(request.form['subject'])
message = config.sanitize_Html(request.form['message'])
if config.sendMail(config.admin_email, subject, f"My name is {name}\n\n {message}\n\n From - {email}\n"):
return "success" | blueprints/public/form.py | from flask import Blueprint, request
import config
from blueprints.private.settings import setting
from modules.registration import Registration
from modules.validations import Validation
PublicForm_bp = Blueprint("PublicForm_bp", __name__)
register = Registration()
validate = Validation()
def responseData(data):
return {"data": data}
@PublicForm_bp.post('/CreateAccount')
def CreateAccount():
fname = config.sanitize_Html(request.form['fname'])
lname = config.sanitize_Html(request.form['lname'])
email = config.sanitize_Html(request.form['email'])
password = config.sanitize_Html(request.form['password'])
referral = config.sanitize_Html(request.form['referral'])
if result := register.registration(fname, lname, email, password, referral):
return result
@PublicForm_bp.post('/Login')
def Login():
email = config.sanitize_Html(request.form['email'])
password = config.sanitize_Html(request.form['password'])
if result := register.login(email, password):
return result
@PublicForm_bp.post('/forgot-password')
def forgot_password():
email = config.sanitize_Html(request.form['email'])
if result := register.forgotPassword(email):
return result
@PublicForm_bp.post('/change-forgot-password')
def change_forgot_password():
email = validate.get_cookie_id("verify_email")
password1 = config.sanitize_Html(request.form['password1'])
password2 = config.sanitize_Html(request.form['password2'])
if data := setting.ChangeForgotPassword(email, password1, password2):
if data == "success":
return responseData(["success", "Password has been changed successfully, please Sign in again"])
elif data == "incorrect-password":
return responseData(["warning", "Password does not match"])
@PublicForm_bp.post('/verify-auth')
def verify_auth():
email = config.sanitize_Html(request.form['email'])
code = config.sanitize_Html(request.form['verification'])
if result := register.TwoAuthentificationVerify(email, code):
return result
@PublicForm_bp.post('/contact-message')
def contact_message():
if request.method == "POST":
name = config.sanitize_Html(request.form['name'])
email = config.sanitize_Html(request.form['email'])
subject = config.sanitize_Html(request.form['subject'])
message = config.sanitize_Html(request.form['message'])
if config.sendMail(config.admin_email, subject, f"My name is {name}\n\n {message}\n\n From - {email}\n"):
return "success" | 0.320715 | 0.094971 |
import time
import pygame
import pygame.freetype
from component import config
from .game_window import GameWindow
from .system import System
from .configure import Configure
from .heartbeat import Heartbeat
from .keyboard import Keyboard
from .moving import Moving
from .painting import Painting
from .respawn import Respawn
from .rotating import Rotating
from .mob_factory import MobFactory
from .scene_factory import SceneFactory
from logcat import LogCat
class Game(System):
def __init__(self):
super().__init__()
self._pygame_setup()
self._setup()
# self._player = Player(
# int((config.width - 32) / 2), int((config.height - 32) / 2)
#)
def _setup(self):
for system in (
Configure, Heartbeat, Keyboard, Moving, Painting, Rotating,
Respawn, MobFactory, SceneFactory,
):
system()
self.emit("cmd_configure", None)
self.emit("cmd_scene_change", None, scene=config.open_scene)
def _pygame_setup(self):
# 初始化 pyGame 引擎
pygame.init()
# 取得一個計時器物件
self._clock = pygame.time.Clock()
pygame.display.set_caption(config.name)
# 設定 pyGame 遊戲視窗 *大小*
win = pygame.display.set_mode((config.width, config.height))
# 準備遊戲視窗的 (黑色) 背景
background = pygame.Surface((config.width, config.height))
# background = win.copy()
# background.fill((0, 0, 0))
# 建立一個新的 GameWindow 物件
self._window = GameWindow(win, background)
# 設定 pyGame 使用的系統字型
self._font = pygame.freetype.SysFont("bradleyhanditc", 36)
def _loop(self):
game_over = False
# 遊戲主迴圈
while not game_over:
for e in pygame.event.get():
if (e.type == pygame.QUIT):
game_over = True
elif (
(e.type == pygame.KEYDOWN) and
(e.key == pygame.K_ESCAPE)
):
game_over = True
# elif (e.type == pygame.KEYDOWN):
#self.emit(
# "cmd_keyboard", self._player.entity, key=e.key
#)
self.emit(
"cmd_heartbeat",
None,
ticks=self._clock.get_time(),
fps=self._clock.get_fps()
)
self.emit("cmd_update", None)
self.emit("cmd_repaint", None, screen=self._window)
self._dispatcher.dispatch()
# 將遊戲視窗繪製 (貼) 到螢幕上
pygame.display.flip()
# 設定畫面更新率是 60 fps
self._clock.tick(60)
@LogCat.log_func
def start(self):
self._loop()
pygame.quit()
# game.py | system/game.py | import time
import pygame
import pygame.freetype
from component import config
from .game_window import GameWindow
from .system import System
from .configure import Configure
from .heartbeat import Heartbeat
from .keyboard import Keyboard
from .moving import Moving
from .painting import Painting
from .respawn import Respawn
from .rotating import Rotating
from .mob_factory import MobFactory
from .scene_factory import SceneFactory
from logcat import LogCat
class Game(System):
def __init__(self):
super().__init__()
self._pygame_setup()
self._setup()
# self._player = Player(
# int((config.width - 32) / 2), int((config.height - 32) / 2)
#)
def _setup(self):
for system in (
Configure, Heartbeat, Keyboard, Moving, Painting, Rotating,
Respawn, MobFactory, SceneFactory,
):
system()
self.emit("cmd_configure", None)
self.emit("cmd_scene_change", None, scene=config.open_scene)
def _pygame_setup(self):
# 初始化 pyGame 引擎
pygame.init()
# 取得一個計時器物件
self._clock = pygame.time.Clock()
pygame.display.set_caption(config.name)
# 設定 pyGame 遊戲視窗 *大小*
win = pygame.display.set_mode((config.width, config.height))
# 準備遊戲視窗的 (黑色) 背景
background = pygame.Surface((config.width, config.height))
# background = win.copy()
# background.fill((0, 0, 0))
# 建立一個新的 GameWindow 物件
self._window = GameWindow(win, background)
# 設定 pyGame 使用的系統字型
self._font = pygame.freetype.SysFont("bradleyhanditc", 36)
def _loop(self):
game_over = False
# 遊戲主迴圈
while not game_over:
for e in pygame.event.get():
if (e.type == pygame.QUIT):
game_over = True
elif (
(e.type == pygame.KEYDOWN) and
(e.key == pygame.K_ESCAPE)
):
game_over = True
# elif (e.type == pygame.KEYDOWN):
#self.emit(
# "cmd_keyboard", self._player.entity, key=e.key
#)
self.emit(
"cmd_heartbeat",
None,
ticks=self._clock.get_time(),
fps=self._clock.get_fps()
)
self.emit("cmd_update", None)
self.emit("cmd_repaint", None, screen=self._window)
self._dispatcher.dispatch()
# 將遊戲視窗繪製 (貼) 到螢幕上
pygame.display.flip()
# 設定畫面更新率是 60 fps
self._clock.tick(60)
@LogCat.log_func
def start(self):
self._loop()
pygame.quit()
# game.py | 0.192919 | 0.087681 |
from OpenGL.raw.GL.VERSION.GL_1_0 import glClearColor
from libraries import *
import glfw
from OpenGL.GL import GL_VERSION, glGetString, GLError, glEnable, GL_BLEND, glBlendFunc, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
import numpy as np
import sys
import ctypes
import traceback
import pyrr
from tests import *
import time
def main():
if not glfw.init():
return -1
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
window = glfw.create_window(960, 540, "Window", None, None)
if not window:
glfw.terminate()
return -1
glfw.make_context_current(window)
glfw.swap_interval(1)
version = glGetString(GL_VERSION).decode('utf-8')
print(version)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
renderer = Renderer.Renderer()
gui = Gui.Gui(window)
testMenu = Test.TestMenu()
testMenu.RegisterTest("Clear Color", TestClearColor.TestClearColor)
testMenu.RegisterTest("Texture2D", TestTexture2D.TestTexture2D)
currentTest = testMenu
while not glfw.window_should_close(window):
glClearColor(0.0, 0.0, 0.0, 1.0)
renderer.Clear()
gui.NewFrame()
if currentTest:
currentTest.OnUpdate()
currentTest.OnRender()
Gui.begin("Tests")
if not currentTest == testMenu and Gui.button("<-"):
currentTest = testMenu
testMenu.m_CurrentTest = None
currentTest.OnImGuiRender()
if testMenu.m_CurrentTest and not (currentTest == testMenu.m_CurrentTest):
currentTest = testMenu.m_CurrentTest
Gui.end()
Gui.framerate()
gui.EndFrame()
glfw.swap_buffers(window)
glfw.poll_events()
del currentTest
del testMenu
gui.endGui()
glfw.terminate()
return 0
try:
main()
except GLError as Error:
tb = sys.exc_info()[-1]
info = traceback.extract_tb(tb)
print(Error)
print(f"[OpenGL Error] {(Error.err)} occurred at operation : {Error.baseOperation.__name__} at line : {info[1][1]}") | TestApplication.py | from OpenGL.raw.GL.VERSION.GL_1_0 import glClearColor
from libraries import *
import glfw
from OpenGL.GL import GL_VERSION, glGetString, GLError, glEnable, GL_BLEND, glBlendFunc, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
import numpy as np
import sys
import ctypes
import traceback
import pyrr
from tests import *
import time
def main():
if not glfw.init():
return -1
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
window = glfw.create_window(960, 540, "Window", None, None)
if not window:
glfw.terminate()
return -1
glfw.make_context_current(window)
glfw.swap_interval(1)
version = glGetString(GL_VERSION).decode('utf-8')
print(version)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
renderer = Renderer.Renderer()
gui = Gui.Gui(window)
testMenu = Test.TestMenu()
testMenu.RegisterTest("Clear Color", TestClearColor.TestClearColor)
testMenu.RegisterTest("Texture2D", TestTexture2D.TestTexture2D)
currentTest = testMenu
while not glfw.window_should_close(window):
glClearColor(0.0, 0.0, 0.0, 1.0)
renderer.Clear()
gui.NewFrame()
if currentTest:
currentTest.OnUpdate()
currentTest.OnRender()
Gui.begin("Tests")
if not currentTest == testMenu and Gui.button("<-"):
currentTest = testMenu
testMenu.m_CurrentTest = None
currentTest.OnImGuiRender()
if testMenu.m_CurrentTest and not (currentTest == testMenu.m_CurrentTest):
currentTest = testMenu.m_CurrentTest
Gui.end()
Gui.framerate()
gui.EndFrame()
glfw.swap_buffers(window)
glfw.poll_events()
del currentTest
del testMenu
gui.endGui()
glfw.terminate()
return 0
try:
main()
except GLError as Error:
tb = sys.exc_info()[-1]
info = traceback.extract_tb(tb)
print(Error)
print(f"[OpenGL Error] {(Error.err)} occurred at operation : {Error.baseOperation.__name__} at line : {info[1][1]}") | 0.284477 | 0.133105 |
from collections import Mapping, OrderedDict
from xldlib.general import sequence
from xldlib.utils import serialization
from .table import TableModelMixin
__all__ = [
'FrozenTable',
'TableModel'
]
# OBJECTS
# -------
class FrozenTable(OrderedDict):
'''
Nearly immutable OrderedDict with a list per-key, producing a
table-like object with O(1) item lookups.
`__setitem__` has been modified, allowing replacement of columns
with new input data, while other methods (`update`, `clear`, ...)
have been blocked.
'''
def __init__(self, attrs, visible=None):
self.attrs = OrderedDict()
super(FrozenTable, self).__init__()
if visible is None:
visible = self.attrs
self._visible = visible
self.__update(attrs)
self._columns = list(self._visible)
self._list = [self[i] for i in self._visible.values()]
# PROPERTIES
@property
def blocked(self):
raise NotImplementedError
__delitem__ = clear = update = blocked
pop = popitem = setdefault = blocked
@property
def visible(self):
return self._visible
# MAGIC
def __setitem__(self, key, value):
'''
Set value if key in self (column-swap), else, block access
See `__setitem` for full arg specs.
'''
if key in self:
self._setitem(key, value)
else:
self.blocked()
def __getattr__(self, name):
'''
Override __getattr__ to provide convenient a.x access to columns
by column attribute names, otherwise, return class/instance
variable.
Args:
name (str): attribute name for column or class
'''
# do not use hasattr, as it calls `getattr`
if self.__dict__.get('attrs') and name in self.attrs:
key = self.attrs[name]
return self[key]
else:
return super(FrozenTable, self).__getattribute__(name)
# NON-PUBLIC
def __update(self, other):
'''
Non-public `update` method, which bypasses data integrity
checks during initialization.
Args:
other (dict, iterable): pass
'''
if isinstance(other, Mapping):
for key, attr in other.items():
self._add_column(key, attr)
elif isinstance(other, (list, tuple)):
for key, attr in other:
self._add_column(key, attr)
def _setitem(self, key, value, dict_setitem=OrderedDict.__setitem__):
'''
Modified, non-public, OrderedDict `__setitem__` implementation.
Fills column with data from value
Args:
key (str): column name
value (iterable): column data for each row
'''
self[key][:] = value
def _add_column(self, key, name, dict_setitem=OrderedDict.__setitem__):
'''
Non-public method to add column to table.
Args:
key (str): column name
name (iterable): attribute name for inst.name access
'''
dict_setitem(self, key, sequence.ExtendableList())
self.attrs[name] = key
@serialization.register("TableModel")
class TableModel(FrozenTable, TableModelMixin):
'''
FrozenTable with methods and properties for implementation as a table
model for a Qt table view.
Includes methods for row and column access, find/replace queries,
item deletion, and data serialization.
TableModel supports row-wise item setting by index-based or
name-based access.
`TableModel.columns[index][row] = 5`
`TableModel[name][row] = 5`
'''
# MAGIC
@serialization.tojson
def __json__(self):
'''
Serialize data for object reconstruction to JSON
Returns (dict): serialized data
'''
return {
'attrs': [(k, v) for k, v in self.attrs.items()],
'visible': [(k, v) for k, v in self.visible.items()],
'data': dict(self)
}
# CLASS METHODS
@classmethod
def loadjson(cls, data):
'''
Deserialize JSON data into object constructor
Args:
data (dict, mapping): serialized object data
Returns (TableModel): class instance
'''
attrs = data['attrs']
visible = OrderedDict(data['visible'])
inst = cls(attrs, visible)
for key, value in data['data'].items():
cls[key] = value
return inst
# PROPERTIES
@property
def length(self):
return max(len(i) for i in self.values())
@property
def rows(self):
return range(self.length)
@property
def columns(self):
return self._columns
@property
def row_count(self):
return self.length
@property
def column_count(self):
return len(self.columns)
@property
def list(self):
return self._list
# PUBLIC
def iterrows(self, columns=None, use_attrs=False):
'''
Row-wise iterator for table data
Args:
columns (None, iterable): columns to return data from
'''
if columns is None and not use_attrs:
columns = list(self)
elif columns is None and use_attrs:
columns = self.columns
for row in self.rows:
if use_attrs:
values = (getattr(self, k)[row] for k in columns)
else:
values = (self[k][row] for k in columns)
yield OrderedDict(zip(columns, values)) | xldlib/general/mapping/frozen.py | from collections import Mapping, OrderedDict
from xldlib.general import sequence
from xldlib.utils import serialization
from .table import TableModelMixin
__all__ = [
'FrozenTable',
'TableModel'
]
# OBJECTS
# -------
class FrozenTable(OrderedDict):
'''
Nearly immutable OrderedDict with a list per-key, producing a
table-like object with O(1) item lookups.
`__setitem__` has been modified, allowing replacement of columns
with new input data, while other methods (`update`, `clear`, ...)
have been blocked.
'''
def __init__(self, attrs, visible=None):
self.attrs = OrderedDict()
super(FrozenTable, self).__init__()
if visible is None:
visible = self.attrs
self._visible = visible
self.__update(attrs)
self._columns = list(self._visible)
self._list = [self[i] for i in self._visible.values()]
# PROPERTIES
@property
def blocked(self):
raise NotImplementedError
__delitem__ = clear = update = blocked
pop = popitem = setdefault = blocked
@property
def visible(self):
return self._visible
# MAGIC
def __setitem__(self, key, value):
'''
Set value if key in self (column-swap), else, block access
See `__setitem` for full arg specs.
'''
if key in self:
self._setitem(key, value)
else:
self.blocked()
def __getattr__(self, name):
'''
Override __getattr__ to provide convenient a.x access to columns
by column attribute names, otherwise, return class/instance
variable.
Args:
name (str): attribute name for column or class
'''
# do not use hasattr, as it calls `getattr`
if self.__dict__.get('attrs') and name in self.attrs:
key = self.attrs[name]
return self[key]
else:
return super(FrozenTable, self).__getattribute__(name)
# NON-PUBLIC
def __update(self, other):
'''
Non-public `update` method, which bypasses data integrity
checks during initialization.
Args:
other (dict, iterable): pass
'''
if isinstance(other, Mapping):
for key, attr in other.items():
self._add_column(key, attr)
elif isinstance(other, (list, tuple)):
for key, attr in other:
self._add_column(key, attr)
def _setitem(self, key, value, dict_setitem=OrderedDict.__setitem__):
'''
Modified, non-public, OrderedDict `__setitem__` implementation.
Fills column with data from value
Args:
key (str): column name
value (iterable): column data for each row
'''
self[key][:] = value
def _add_column(self, key, name, dict_setitem=OrderedDict.__setitem__):
'''
Non-public method to add column to table.
Args:
key (str): column name
name (iterable): attribute name for inst.name access
'''
dict_setitem(self, key, sequence.ExtendableList())
self.attrs[name] = key
@serialization.register("TableModel")
class TableModel(FrozenTable, TableModelMixin):
'''
FrozenTable with methods and properties for implementation as a table
model for a Qt table view.
Includes methods for row and column access, find/replace queries,
item deletion, and data serialization.
TableModel supports row-wise item setting by index-based or
name-based access.
`TableModel.columns[index][row] = 5`
`TableModel[name][row] = 5`
'''
# MAGIC
@serialization.tojson
def __json__(self):
'''
Serialize data for object reconstruction to JSON
Returns (dict): serialized data
'''
return {
'attrs': [(k, v) for k, v in self.attrs.items()],
'visible': [(k, v) for k, v in self.visible.items()],
'data': dict(self)
}
# CLASS METHODS
@classmethod
def loadjson(cls, data):
'''
Deserialize JSON data into object constructor
Args:
data (dict, mapping): serialized object data
Returns (TableModel): class instance
'''
attrs = data['attrs']
visible = OrderedDict(data['visible'])
inst = cls(attrs, visible)
for key, value in data['data'].items():
cls[key] = value
return inst
# PROPERTIES
@property
def length(self):
return max(len(i) for i in self.values())
@property
def rows(self):
return range(self.length)
@property
def columns(self):
return self._columns
@property
def row_count(self):
return self.length
@property
def column_count(self):
return len(self.columns)
@property
def list(self):
return self._list
# PUBLIC
def iterrows(self, columns=None, use_attrs=False):
'''
Row-wise iterator for table data
Args:
columns (None, iterable): columns to return data from
'''
if columns is None and not use_attrs:
columns = list(self)
elif columns is None and use_attrs:
columns = self.columns
for row in self.rows:
if use_attrs:
values = (getattr(self, k)[row] for k in columns)
else:
values = (self[k][row] for k in columns)
yield OrderedDict(zip(columns, values)) | 0.892202 | 0.243238 |
import random
import string
# Calculate max fitness based on population
def calculate_max_fitness():
i = board_size - 1
max_fitness = 0
while i > 0:
max_fitness += i
i -= 1
return max_fitness
# Generate a random individual based on the board size
def random_individual(board_size):
return [ random.randint(1, board_size) for _ in range(board_size) ]
# Define the fitness of an individual based on the number of pairs of non-attacking queens
def fitness(individual):
horizontal_collisions = sum([individual.count(queen)-1 for queen in individual])/2
diagonal_collisions = 0
n = len(individual)
left_diagonal = [0] * 2*n
right_diagonal = [0] * 2*n
for i in range(n):
left_diagonal[i + individual[i] - 1] += 1
right_diagonal[len(individual) - i + individual[i] - 2] += 1
for i in range(2*n-1):
counter = 0
if left_diagonal[i] > 1:
counter += left_diagonal[i]-1
if right_diagonal[i] > 1:
counter += right_diagonal[i]-1
diagonal_collisions += counter / (n-abs(i-n+1))
return int(max_fitness - (horizontal_collisions + diagonal_collisions))
# Calculate the probability of an individual based on his fitness compared to max_fitness
def probability(individual, fitness):
return fitness(individual) / max_fitness
# Take a random individual based on population by probabilitie
def random_pick(population, probabilities):
populationWithProbabilty = zip(population, probabilities)
total = sum(w for c, w in populationWithProbabilty)
r = random.uniform(0, total)
upto = 0
for c, w in zip(population, probabilities):
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
# Return one new individual, his genes are from two individual x and y with a random with a random separator
def reproduce(x, y):
n = len(x)
#c = 4
c = random.randint(0, n - 1)
return x[0:c] + y[c:n]
# Return an individual with one random gene modified between 1 and n (board_size)
def mutate(x):
n = len(x)
c = random.randint(0, n - 1)
m = random.randint(1, n)
x[c] = m
return x
# Loop of creation of new populations with random individuals
def genetic_queen(population, fitness):
# Probability of mutation
new_population = []
probabilities = [probability(n, fitness) for n in population]
for i in range(len(population)):
x = random_pick(population, probabilities)
y = random_pick(population, probabilities)
child = reproduce(x, y)
if random.random() < mutation_probability:
child = mutate(child)
new_population.append(child)
if fitness(child) == max_fitness: break
return new_population
# Print one individual with his genes, his probability and his fitness score
def print_individual(x):
print("{}, fitness = {}, probability = {:.6f}"
.format(str(x), fitness(x), probability(x, fitness)))
# Asks the user for main variables
def get_settings():
while True:
try:
board_size = int(input("Please choose the board size (default 8): "))
if board_size > 3:
break;
else:
print("Please enter a number > 4.")
except ValueError:
print("Please enter a valid number.")
while True:
try:
max_pop = int(input("Please choose the size of population (default 500): "))
if max_pop > 3:
break;
else:
print("Please enter a number > 1.")
except ValueError:
print("Please enter a valid number.")
while True:
try:
mutation_probability = float(input("Please choose the mutation probability (default 0.05): "))
if mutation_probability > 0 and mutation_probability < 1:
break;
else:
print("Please enter a number > 0 and < 1.")
except ValueError:
print("Please enter a valid number.")
return board_size, max_pop, mutation_probability
# Main loop
if __name__ == "__main__":
board_size, max_pop, mutation_probability = get_settings()
max_fitness = calculate_max_fitness()
population = [random_individual(board_size) for _ in range(max_pop)]
generation = 1
while not max_fitness in [fitness(x) for x in population]:
population = genetic_queen(population, fitness)
print("Generation = " + str(generation) + " | Maximum fitness = {}".format(max([fitness(n) for n in population])))
generation += 1
print("Solved in Generation {}!".format(generation-1))
for x in population:
if fitness(x) == max_fitness:
print_individual(x) | Genetic_8QueensProblem.py |
import random
import string
# Calculate max fitness based on population
def calculate_max_fitness():
i = board_size - 1
max_fitness = 0
while i > 0:
max_fitness += i
i -= 1
return max_fitness
# Generate a random individual based on the board size
def random_individual(board_size):
return [ random.randint(1, board_size) for _ in range(board_size) ]
# Define the fitness of an individual based on the number of pairs of non-attacking queens
def fitness(individual):
horizontal_collisions = sum([individual.count(queen)-1 for queen in individual])/2
diagonal_collisions = 0
n = len(individual)
left_diagonal = [0] * 2*n
right_diagonal = [0] * 2*n
for i in range(n):
left_diagonal[i + individual[i] - 1] += 1
right_diagonal[len(individual) - i + individual[i] - 2] += 1
for i in range(2*n-1):
counter = 0
if left_diagonal[i] > 1:
counter += left_diagonal[i]-1
if right_diagonal[i] > 1:
counter += right_diagonal[i]-1
diagonal_collisions += counter / (n-abs(i-n+1))
return int(max_fitness - (horizontal_collisions + diagonal_collisions))
# Calculate the probability of an individual based on his fitness compared to max_fitness
def probability(individual, fitness):
return fitness(individual) / max_fitness
# Take a random individual based on population by probabilitie
def random_pick(population, probabilities):
populationWithProbabilty = zip(population, probabilities)
total = sum(w for c, w in populationWithProbabilty)
r = random.uniform(0, total)
upto = 0
for c, w in zip(population, probabilities):
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
# Return one new individual, his genes are from two individual x and y with a random with a random separator
def reproduce(x, y):
n = len(x)
#c = 4
c = random.randint(0, n - 1)
return x[0:c] + y[c:n]
# Return an individual with one random gene modified between 1 and n (board_size)
def mutate(x):
n = len(x)
c = random.randint(0, n - 1)
m = random.randint(1, n)
x[c] = m
return x
# Loop of creation of new populations with random individuals
def genetic_queen(population, fitness):
# Probability of mutation
new_population = []
probabilities = [probability(n, fitness) for n in population]
for i in range(len(population)):
x = random_pick(population, probabilities)
y = random_pick(population, probabilities)
child = reproduce(x, y)
if random.random() < mutation_probability:
child = mutate(child)
new_population.append(child)
if fitness(child) == max_fitness: break
return new_population
# Print one individual with his genes, his probability and his fitness score
def print_individual(x):
print("{}, fitness = {}, probability = {:.6f}"
.format(str(x), fitness(x), probability(x, fitness)))
# Asks the user for main variables
def get_settings():
while True:
try:
board_size = int(input("Please choose the board size (default 8): "))
if board_size > 3:
break;
else:
print("Please enter a number > 4.")
except ValueError:
print("Please enter a valid number.")
while True:
try:
max_pop = int(input("Please choose the size of population (default 500): "))
if max_pop > 3:
break;
else:
print("Please enter a number > 1.")
except ValueError:
print("Please enter a valid number.")
while True:
try:
mutation_probability = float(input("Please choose the mutation probability (default 0.05): "))
if mutation_probability > 0 and mutation_probability < 1:
break;
else:
print("Please enter a number > 0 and < 1.")
except ValueError:
print("Please enter a valid number.")
return board_size, max_pop, mutation_probability
# Main loop
if __name__ == "__main__":
board_size, max_pop, mutation_probability = get_settings()
max_fitness = calculate_max_fitness()
population = [random_individual(board_size) for _ in range(max_pop)]
generation = 1
while not max_fitness in [fitness(x) for x in population]:
population = genetic_queen(population, fitness)
print("Generation = " + str(generation) + " | Maximum fitness = {}".format(max([fitness(n) for n in population])))
generation += 1
print("Solved in Generation {}!".format(generation-1))
for x in population:
if fitness(x) == max_fitness:
print_individual(x) | 0.522933 | 0.605099 |
import os
import mock
import pytest
from ddtrace import Tracer
from ddtrace.internal import agent
AGENT_VERSION = os.environ.get("AGENT_VERSION")
class TestTraceAcceptedByAgent:
def test_simple_trace_accepted_by_agent(self):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root"):
for _ in range(999):
with tracer.trace("child"):
pass
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"tags",
[
({"env": "my-env", "tag1": "some_str_1", "tag2": "some_str_2", "tag3": "some_str_3"}),
({"env": "test-env", b"tag1": "some_str_1", b"tag2": "some_str_2", b"tag3": "some_str_3"}),
({"env": "my-test-env", u"😐": "some_str_1", b"tag2": "some_str_2", "unicode": u"😐"}),
],
)
def test_trace_with_meta_accepted_by_agent(self, tags):
"""Meta tags should be text types."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root", service="test_encoding", resource="test_resource") as root:
root.set_tags(tags)
for _ in range(999):
with tracer.trace("child") as child:
child.set_tags(tags)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"metrics",
[
({"num1": 12345, "num2": 53421, "num3": 1, "num4": 10}),
({b"num1": 123.45, b"num2": 543.21, b"num3": 11.0, b"num4": 1.20}),
({u"😐": "123.45", b"num2": "1", "num3": "999.99", "num4": "12345"}),
],
)
def test_trace_with_metrics_accepted_by_agent(self, metrics):
"""Metric tags should be numeric types - i.e. int, float, long (py3), and str numbers."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.set_metrics(metrics)
for _ in range(999):
with tracer.trace("child") as child:
child.set_metrics(metrics)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't return 400 response for bad trace")
class TestTraceRejectedByAgent:
def _assert_bad_trace_refused_by_agent(self, mock_log):
"""Assert that agent refused a bad trace via log call."""
calls = [
mock.call(
"failed to send traces to Datadog Agent at %s: HTTP error status %s, reason %s",
agent.get_trace_url(),
400,
"Bad Request",
)
]
mock_log.error.assert_has_calls(calls)
def test_wrong_span_name_type_refused_by_agent(self):
"""Span names should be a text type."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace(123):
pass
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log)
@pytest.mark.parametrize(
"meta",
[
({"env": "my-env", "tag1": "some_str_1", "tag2": "some_str_2", "tag3": [1, 2, 3]}),
({"env": "test-env", b"tag1": {"wrong_type": True}, b"tag2": "some_str_2", b"tag3": "some_str_3"}),
({"env": "my-test-env", u"😐": "some_str_1", b"tag2": "some_str_2", "unicode": 12345}),
],
)
def test_trace_with_wrong_meta_types_refused_by_agent(self, meta):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.meta = meta
for _ in range(499):
with tracer.trace("child") as child:
child.meta = meta
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log)
@pytest.mark.parametrize(
"metrics",
[
({"num1": 12345, "num2": 53421, "num3": 1, "num4": "not-a-number"}),
({b"num1": 123.45, b"num2": [1, 2, 3], b"num3": 11.0, b"num4": 1.20}),
({u"😐": "123.45", b"num2": "1", "num3": {"is_number": False}, "num4": "12345"}),
],
)
def test_trace_with_wrong_metrics_types_refused_by_agent(self, metrics):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.metrics = metrics
for _ in range(499):
with tracer.trace("child") as child:
child.metrics = metrics
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log) | tests/integration/test_encoding.py | import os
import mock
import pytest
from ddtrace import Tracer
from ddtrace.internal import agent
AGENT_VERSION = os.environ.get("AGENT_VERSION")
class TestTraceAcceptedByAgent:
def test_simple_trace_accepted_by_agent(self):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root"):
for _ in range(999):
with tracer.trace("child"):
pass
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"tags",
[
({"env": "my-env", "tag1": "some_str_1", "tag2": "some_str_2", "tag3": "some_str_3"}),
({"env": "test-env", b"tag1": "some_str_1", b"tag2": "some_str_2", b"tag3": "some_str_3"}),
({"env": "my-test-env", u"😐": "some_str_1", b"tag2": "some_str_2", "unicode": u"😐"}),
],
)
def test_trace_with_meta_accepted_by_agent(self, tags):
"""Meta tags should be text types."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root", service="test_encoding", resource="test_resource") as root:
root.set_tags(tags)
for _ in range(999):
with tracer.trace("child") as child:
child.set_tags(tags)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"metrics",
[
({"num1": 12345, "num2": 53421, "num3": 1, "num4": 10}),
({b"num1": 123.45, b"num2": 543.21, b"num3": 11.0, b"num4": 1.20}),
({u"😐": "123.45", b"num2": "1", "num3": "999.99", "num4": "12345"}),
],
)
def test_trace_with_metrics_accepted_by_agent(self, metrics):
"""Metric tags should be numeric types - i.e. int, float, long (py3), and str numbers."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.set_metrics(metrics)
for _ in range(999):
with tracer.trace("child") as child:
child.set_metrics(metrics)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't return 400 response for bad trace")
class TestTraceRejectedByAgent:
def _assert_bad_trace_refused_by_agent(self, mock_log):
"""Assert that agent refused a bad trace via log call."""
calls = [
mock.call(
"failed to send traces to Datadog Agent at %s: HTTP error status %s, reason %s",
agent.get_trace_url(),
400,
"Bad Request",
)
]
mock_log.error.assert_has_calls(calls)
def test_wrong_span_name_type_refused_by_agent(self):
"""Span names should be a text type."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace(123):
pass
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log)
@pytest.mark.parametrize(
"meta",
[
({"env": "my-env", "tag1": "some_str_1", "tag2": "some_str_2", "tag3": [1, 2, 3]}),
({"env": "test-env", b"tag1": {"wrong_type": True}, b"tag2": "some_str_2", b"tag3": "some_str_3"}),
({"env": "my-test-env", u"😐": "some_str_1", b"tag2": "some_str_2", "unicode": 12345}),
],
)
def test_trace_with_wrong_meta_types_refused_by_agent(self, meta):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.meta = meta
for _ in range(499):
with tracer.trace("child") as child:
child.meta = meta
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log)
@pytest.mark.parametrize(
"metrics",
[
({"num1": 12345, "num2": 53421, "num3": 1, "num4": "not-a-number"}),
({b"num1": 123.45, b"num2": [1, 2, 3], b"num3": 11.0, b"num4": 1.20}),
({u"😐": "123.45", b"num2": "1", "num3": {"is_number": False}, "num4": "12345"}),
],
)
def test_trace_with_wrong_metrics_types_refused_by_agent(self, metrics):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with tracer.trace("root") as root:
root.metrics = metrics
for _ in range(499):
with tracer.trace("child") as child:
child.metrics = metrics
tracer.shutdown()
self._assert_bad_trace_refused_by_agent(log) | 0.618896 | 0.343369 |
from __future__ import absolute_import
from harness.test_base_remote import TestBaseRemote
from harness.decorators import (
cpp_only_test,
ordered_test
)
class TestBreakpointFileLineMultipleRSFiles(TestBaseRemote):
'''Tests the setting of a breakpoint on one of multiple RS files.'''
bundle_target = {
'java': 'MultipleRSFiles',
'jni': 'JNIMultipleRSFiles',
'cpp': 'CppMultipleRSFiles'
}
def _binary_name(self):
return {
'java': 'multiplersfiles',
'jni': 'multiplersfiles',
'cpp': 'CppMultipleRSFi'
}[self.app_type]
@ordered_test(0)
def test_breakpoint_fileline_multiple_files(self):
self.try_command('language renderscript status',
['Runtime Library discovered',
'Runtime Driver discovered'])
self.try_command('breakpoint set --file first.rs --line 28',
['(pending)'])
self.try_command('process continue',
['stopped',
'librs.first.so`first_kernel',
'at first.rs:28',
"name = '%s'" % self._binary_name(),
'stop reason = breakpoint 1'])
self.try_command('breakpoint set --file second.rs --line 23',
['Breakpoint 2',
'librs.second.so`second_kernel',
'second.rs:23'])
self.try_command('breakpoint list',
['first.rs',
'second.rs',
'resolved = 1',
'first.rs:28',
'second.rs:23'])
self.try_command('breakpoint delete 1',
['1 breakpoints deleted'])
self.try_command('process continue',
['stopped',
'librs.second.so`second_kernel',
'at second.rs:23',
"name = '%s'" % self._binary_name(),
'stop reason = breakpoint 2'])
self.try_command('process status',
['stopped',
'stop reason = breakpoint'])
@cpp_only_test()
@ordered_test('last')
def test_cpp_cleanup(self):
self.try_command('breakpoint delete 2', ['1 breakpoints deleted'])
self.try_command('process continue', ['exited with status = 0']) | android/android_9/frameworks/rs/tests/lldb/tests/testcases/test_breakpoint_fileline_multiple_rs_files.py | from __future__ import absolute_import
from harness.test_base_remote import TestBaseRemote
from harness.decorators import (
cpp_only_test,
ordered_test
)
class TestBreakpointFileLineMultipleRSFiles(TestBaseRemote):
'''Tests the setting of a breakpoint on one of multiple RS files.'''
bundle_target = {
'java': 'MultipleRSFiles',
'jni': 'JNIMultipleRSFiles',
'cpp': 'CppMultipleRSFiles'
}
def _binary_name(self):
return {
'java': 'multiplersfiles',
'jni': 'multiplersfiles',
'cpp': 'CppMultipleRSFi'
}[self.app_type]
@ordered_test(0)
def test_breakpoint_fileline_multiple_files(self):
self.try_command('language renderscript status',
['Runtime Library discovered',
'Runtime Driver discovered'])
self.try_command('breakpoint set --file first.rs --line 28',
['(pending)'])
self.try_command('process continue',
['stopped',
'librs.first.so`first_kernel',
'at first.rs:28',
"name = '%s'" % self._binary_name(),
'stop reason = breakpoint 1'])
self.try_command('breakpoint set --file second.rs --line 23',
['Breakpoint 2',
'librs.second.so`second_kernel',
'second.rs:23'])
self.try_command('breakpoint list',
['first.rs',
'second.rs',
'resolved = 1',
'first.rs:28',
'second.rs:23'])
self.try_command('breakpoint delete 1',
['1 breakpoints deleted'])
self.try_command('process continue',
['stopped',
'librs.second.so`second_kernel',
'at second.rs:23',
"name = '%s'" % self._binary_name(),
'stop reason = breakpoint 2'])
self.try_command('process status',
['stopped',
'stop reason = breakpoint'])
@cpp_only_test()
@ordered_test('last')
def test_cpp_cleanup(self):
self.try_command('breakpoint delete 2', ['1 breakpoints deleted'])
self.try_command('process continue', ['exited with status = 0']) | 0.547948 | 0.184437 |
import numpy as np
from sklearn.ensemble.forest import BaseForest
from sklearn.tree.tree import BaseDecisionTree
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.tree._tree import DTYPE
from sklearn.utils import check_X_y
class AdaBoostAbstainingClassifier(AdaBoostClassifier):
    """AdaBoost (SAMME) variant whose base estimators *abstain* on rows
    that contain missing features (NaN).

    Rows of ``X`` with any NaN are treated as "abstaining": they get zero
    initial sample weight, are excluded when fitting each base estimator,
    and count neither as correct nor incorrect during re-weighting.

    NOTE(review): the default ``algorithm='SAMME.R'`` routes boosting to
    ``_boost_real``, which is not implemented here -- callers presumably
    pass ``algorithm='SAMME'``; confirm before relying on the default.
    """

    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 algorithm='SAMME.R',
                 random_state=None):
        # Same signature as sklearn's AdaBoostClassifier; everything is
        # forwarded unchanged.
        super(AdaBoostAbstainingClassifier, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            algorithm=algorithm,
            random_state=random_state)

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).

        Mirrors ``AdaBoostClassifier.fit`` except that validation allows
        non-finite values (NaN marks abstention) and, when no weights are
        supplied, NaN-containing rows are initialized with zero weight.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape = [n_samples]
            The target values (class labels).
        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples`` (and 0 for abstaining rows).

        Returns
        -------
        self : object
            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
            raise ValueError("algorithm %s is not supported" % self.algorithm)
        # Check parameters
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")
        if (self.base_estimator is None or
                isinstance(self.base_estimator, (BaseDecisionTree,
                                                 BaseForest))):
            dtype = DTYPE
            accept_sparse = 'csc'
        else:
            dtype = None
            accept_sparse = ['csr', 'csc']
        # force_all_finite=False: NaN entries are meaningful here (they
        # mark abstaining rows) and must survive validation.
        X, y = check_X_y(X, y, accept_sparse=accept_sparse,
                         force_all_finite=False, dtype=dtype)
        if sample_weight is None:
            abstainrows = np.isnan(X).sum(axis=1) > 0
            # Initialize weights to 1 / n_samples, zero for abstainers.
            # (np.float was a deprecated alias removed in numpy 1.24;
            # the builtin float is identical.)
            sample_weight = np.empty(X.shape[0], dtype=float)
            sample_weight[:] = 1.
            sample_weight[abstainrows] = 0.
        # Normalize existing weights
        sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
        # Check that the sample weights sum is positive
        if sample_weight.sum() <= 0:
            raise ValueError(
                "Attempting to fit with a non-positive "
                "weighted number of samples.")
        # Check parameters
        self._validate_estimator()
        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=float)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=float)
        for iboost in range(self.n_estimators):
            # Boosting step
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost,
                X, y,
                sample_weight)
            # Early termination
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error
            # Stop if error is zero
            if estimator_error == 0:
                break
            sample_weight_sum = np.sum(sample_weight)
            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break
            if iboost < self.n_estimators - 1:
                # Normalize
                sample_weight /= sample_weight_sum
        return self

    def _boost_real(self, iboost, X, y, sample_weight):
        # SAMME.R boosting is intentionally unsupported for the abstaining
        # variant (see class-level review note about the default algorithm).
        raise NotImplementedError

    def _boost_discrete(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME discrete algorithm,
        excluding abstaining (NaN-containing) rows from fitting and from
        the correct/incorrect tallies.

        Returns the (possibly updated) sample weights, the estimator
        weight (alpha), and the weighted estimator error, matching the
        ``AdaBoostClassifier._boost`` contract.
        """
        estimator = self._make_estimator()
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass
        # Rows with any missing feature abstain from this round.
        abstaining = np.isnan(X).sum(axis=1) > 0
        X_trans = X[~abstaining]
        y_trans = y[~abstaining]
        sw_trans = sample_weight[~abstaining]
        estimator.fit(X_trans, y_trans, sample_weight=sw_trans)
        y_predict = estimator.predict(X_trans)
        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)
        # 0/1 indicators over *all* rows; abstaining rows stay 0 in both,
        # so they are neither rewarded nor penalised below.
        incorrect = np.zeros(y.shape)
        incorrect[~abstaining] = y_predict != y_trans
        correct = np.zeros(y.shape)
        correct[~abstaining] = y_predict == y_trans
        n_classes = self.n_classes_
        # Unweighted fractions of incorrect / correct rows.
        Wi = np.sum(incorrect) / X.shape[0]
        Wc = np.sum(correct) / X.shape[0]
        # BUG FIX: the original code used `estimator_error` below without
        # ever defining it, raising NameError on every boost.  Define it
        # as the weighted error, as in sklearn's SAMME implementation
        # (abstaining rows have a zero indicator, so they contribute 0).
        estimator_error = np.average(incorrect, weights=sample_weight)
        # Guard: Wi == 0 makes log(Wc / Wi) undefined below; treat a
        # round with no misclassified rows as perfect and stop early.
        if Wi == 0:
            return sample_weight, 1., 0.
        # Stop if classification is perfect
        if estimator_error <= 0 and abstaining.sum() == 0:
            return sample_weight, 1., 0.
        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None
        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log(Wc / Wi) +
            np.log(n_classes - 1.))
        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))
        return sample_weight, estimator_weight, estimator_error
from sklearn.ensemble.forest import BaseForest
from sklearn.tree.tree import BaseDecisionTree
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.tree._tree import DTYPE
from sklearn.utils import check_X_y
class AdaBoostAbstainingClassifier(AdaBoostClassifier):
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostAbstainingClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
algorithm=algorithm,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse,
force_all_finite = False, dtype=dtype)
if sample_weight is None:
abstainrows = np.isnan(X).sum(axis=1) > 0
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1.
sample_weight[abstainrows] = 0.
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
def _boost_real(self, iboost, X, y, sample_weight):
raise NotImplementedError
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
abstaining = np.isnan(X).sum(axis=1) > 0
X_trans = X[~abstaining]
y_trans = y[~abstaining]
sw_trans = sample_weight[~abstaining]
estimator.fit(X_trans, y_trans, sample_weight=sw_trans)
y_predict = estimator.predict(X_trans)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances classified
incorrect = np.zeros(y.shape)
incorrect[~abstaining] = y_predict != y_trans
correct = np.zeros(y.shape)
correct[~abstaining] = y_predict == y_trans
n_classes = self.n_classes_
# incorrect, correct, abstaining weights
Wi = np.sum(incorrect) / X.shape[0]
Wc = np.sum(correct) / X.shape[0]
Wa = np.sum(abstaining) / X.shape[0]
Z = Wa + 2. * np.sqrt(Wc/Wi)
# Stop if classification is perfect
if estimator_error <= 0 and abstaining.sum() == 0:
return sample_weight, 1., 0.
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log(Wc/Wi) +
np.log(n_classes - 1.))
# Only boost the weights if I will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error | 0.894525 | 0.548129 |
import requests
import re
import webbrowser
# Directions API / Static Maps API Documentation
# https://developers.google.com/maps/documentation/directions/overview
# https://developers.google.com/maps/documentation/maps-static/overview
class Coordinates:
def __init__(self, origin, destination):
'''Setup attributes, including API urls'''
self.origin = origin.replace(' ' ,'+')
self.destination = destination.replace(' ','+')
self.key = "API Key"
self.directions = "https://maps.googleapis.com/maps/api/directions/json?"
self.maps_static = "https://maps.googleapis.com/maps/api/staticmap?"
self.gps_coord_pairs = []
def return_coordinates(self):
'''Returns coordinates for a given route based on 'steps' on google maps
Ex: 'Turn left at this intersection' = gps coordinate'''
# Combine parameters with Directions API URL
coordinates = ('origin={}&destination={}&mode=walking&key={}'.
format(self.origin, self.destination, self.key)
)
request_1 = self.directions + coordinates
response_1 = requests.get(request_1)
directions = response_1.json()
# Get coordinates list (of dictionaries)
self.gps_coord = []
for i in directions['routes'][0]['legs'][0]['steps']:
self.gps_coord.append(i['start_location'])
# Turn into list of lists (Lat/Lng pairs)
self.gps_coord_pairs = []
for dictionary in self.gps_coord:
pair = list(dictionary.values())
self.gps_coord_pairs.append(pair)
return self.gps_coord_pairs
def split_coord(self):
'''Separates GPS pairs into two lists for Lat and Lng'''
lat_list = [i[0] for i in self.gps_coord_pairs]
lng_list = [i[1] for i in self.gps_coord_pairs]
print(lat_list, lng_list, sep='\n')
'''
This method is for future purposes.
The goal here is to create a bounding box for a given route by
calculating gps location in relation to other pairs in the route (N vs S vs W vs E)
def box_seg_1(self):
#Determine if x or y axis segment (N/S or E/W running bounding box)
lat_dif_1 = [lat_list[0] - lat_list[1]]
lng_dif_1 = [lng_list[0] - lng_list[1]]
OFFSET = 0.00006 # OFFSET allows for 13.4 meter wide box along axis (6.7m both sides of road)
if lat_dif_1 > lng_dif_1: # greater differnece in latitude indicate x-axis (north-south)
bb_pt1 = lat_list[0], lng_list[0] + OFFSET
bb_pt2 = lat_list[0], lng_list[0] - OFFSET
bb_pt3 = lat_list[1], lng_list[1] + OFFSET
bb_pt4 = lat_list[1], lng_list[1] - OFFSET
else: # greater difference in longitude indicates y-axis (east-west)
bb_pt1 = lat_list[0] + OFFSET, lng_list[0]
bb_pt2 = lat_list[0] - OFFSET, lng_list[0]
bb_pt3 = lat_list[1] + OFFSET, lng_list[1]
bb_pt4 = lat_list[1] - OFFSET, lng_list[1]
# Create bb_seg1 to set as static map poly-line parameters
bb_seg1 = bb_pt1, bb_pt2, bb_pt3, bb_pt4
print(bb_seg1)
#Need to build in loop function to perform on multiple legs of route
'''
def return_image(self):
'''Returns an image of the route produced from given origin/destination'''
# Clean gps_coord to use as parameter
gps_string = str(self.gps_coord)
gps_clean = gps_string.replace('},', '|')
coordinates_path_list = re.sub(r'[{}\'latng: \[\]]', '', gps_clean)
# Default Maps Static API parameters
size = '500x500'
scale = '2'
image_format = 'jpg'
maptype = 'satellite'
route_style = 'color:0xffffff50|weight:5|'
# Combine parameters with Maps Static API URL
parameters = ('size={}&scale={}&format={}&maptype={}&path={}{}&key={}'.
format(size, scale, image_format, maptype,
route_style, coordinates_path_list, self.key)
)
# Display Image in URL
request_2 = self.maps_static + parameters
webbrowser.open(request_2, new=0, autoraise=True) | google_apis.py | import requests
import re
import webbrowser
# Directions API / Static Maps API Documentation
# https://developers.google.com/maps/documentation/directions/overview
# https://developers.google.com/maps/documentation/maps-static/overview
class Coordinates:
def __init__(self, origin, destination):
'''Setup attributes, including API urls'''
self.origin = origin.replace(' ' ,'+')
self.destination = destination.replace(' ','+')
self.key = "API Key"
self.directions = "https://maps.googleapis.com/maps/api/directions/json?"
self.maps_static = "https://maps.googleapis.com/maps/api/staticmap?"
self.gps_coord_pairs = []
def return_coordinates(self):
'''Returns coordinates for a given route based on 'steps' on google maps
Ex: 'Turn left at this intersection' = gps coordinate'''
# Combine parameters with Directions API URL
coordinates = ('origin={}&destination={}&mode=walking&key={}'.
format(self.origin, self.destination, self.key)
)
request_1 = self.directions + coordinates
response_1 = requests.get(request_1)
directions = response_1.json()
# Get coordinates list (of dictionaries)
self.gps_coord = []
for i in directions['routes'][0]['legs'][0]['steps']:
self.gps_coord.append(i['start_location'])
# Turn into list of lists (Lat/Lng pairs)
self.gps_coord_pairs = []
for dictionary in self.gps_coord:
pair = list(dictionary.values())
self.gps_coord_pairs.append(pair)
return self.gps_coord_pairs
def split_coord(self):
'''Separates GPS pairs into two lists for Lat and Lng'''
lat_list = [i[0] for i in self.gps_coord_pairs]
lng_list = [i[1] for i in self.gps_coord_pairs]
print(lat_list, lng_list, sep='\n')
'''
This method is for future purposes.
The goal here is to create a bounding box for a given route by
calculating gps location in relation to other pairs in the route (N vs S vs W vs E)
def box_seg_1(self):
#Determine if x or y axis segment (N/S or E/W running bounding box)
lat_dif_1 = [lat_list[0] - lat_list[1]]
lng_dif_1 = [lng_list[0] - lng_list[1]]
OFFSET = 0.00006 # OFFSET allows for 13.4 meter wide box along axis (6.7m both sides of road)
if lat_dif_1 > lng_dif_1: # greater differnece in latitude indicate x-axis (north-south)
bb_pt1 = lat_list[0], lng_list[0] + OFFSET
bb_pt2 = lat_list[0], lng_list[0] - OFFSET
bb_pt3 = lat_list[1], lng_list[1] + OFFSET
bb_pt4 = lat_list[1], lng_list[1] - OFFSET
else: # greater difference in longitude indicates y-axis (east-west)
bb_pt1 = lat_list[0] + OFFSET, lng_list[0]
bb_pt2 = lat_list[0] - OFFSET, lng_list[0]
bb_pt3 = lat_list[1] + OFFSET, lng_list[1]
bb_pt4 = lat_list[1] - OFFSET, lng_list[1]
# Create bb_seg1 to set as static map poly-line parameters
bb_seg1 = bb_pt1, bb_pt2, bb_pt3, bb_pt4
print(bb_seg1)
#Need to build in loop function to perform on multiple legs of route
'''
def return_image(self):
'''Returns an image of the route produced from given origin/destination'''
# Clean gps_coord to use as parameter
gps_string = str(self.gps_coord)
gps_clean = gps_string.replace('},', '|')
coordinates_path_list = re.sub(r'[{}\'latng: \[\]]', '', gps_clean)
# Default Maps Static API parameters
size = '500x500'
scale = '2'
image_format = 'jpg'
maptype = 'satellite'
route_style = 'color:0xffffff50|weight:5|'
# Combine parameters with Maps Static API URL
parameters = ('size={}&scale={}&format={}&maptype={}&path={}{}&key={}'.
format(size, scale, image_format, maptype,
route_style, coordinates_path_list, self.key)
)
# Display Image in URL
request_2 = self.maps_static + parameters
webbrowser.open(request_2, new=0, autoraise=True) | 0.599954 | 0.287605 |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
SEED = 42
tf.set_random_seed(SEED)
class GAN():
def sample_Z(self, batch_size, n):
return np.random.uniform(-1., 1., size=(batch_size, n))
def __init__(self, num_features, num_historical_days, generator_input_size=200, is_train=True):
def get_batch_norm_with_global_normalization_vars(size):
v = tf.Variable(tf.ones([size]), dtype=tf.float32)
m = tf.Variable(tf.ones([size]), dtype=tf.float32)
beta = tf.Variable(tf.ones([size]), dtype=tf.float32)
gamma = tf.Variable(tf.ones([size]), dtype=tf.float32)
return v, m, beta, gamma
self.X = tf.placeholder(tf.float32, shape=[None, num_historical_days, num_features])
X = tf.reshape(self.X, [-1, num_historical_days, 1, num_features])
self.Z = tf.placeholder(tf.float32, shape=[None, generator_input_size])
generator_output_size = num_features*num_historical_days
with tf.variable_scope("generator"):
W1 = tf.Variable(tf.truncated_normal([generator_input_size, generator_output_size*10]))
b1 = tf.Variable(tf.truncated_normal([generator_output_size*10]))
h1 = tf.nn.sigmoid(tf.matmul(self.Z, W1) + b1)
# v1, m1, beta1, gamma1 = get_batch_norm_with_global_normalization_vars(generator_output_size*10)
# h1 = tf.nn.batch_norm_with_global_normalization(h1, v1, m1,
# beta1, gamma1, variance_epsilon=0.000001, scale_after_normalization=False)
W2 = tf.Variable(tf.truncated_normal([generator_output_size*10, generator_output_size*5]))
b2 = tf.Variable(tf.truncated_normal([generator_output_size*5]))
h2 = tf.nn.sigmoid(tf.matmul(h1, W2) + b2)
# v2, m2, beta2, gamma2 = get_batch_norm_with_global_normalization_vars(generator_output_size*5)
# h2 = tf.nn.batch_norm_with_global_normalization(h2, v2, m2,
# beta2, gamma2, variance_epsilon=0.000001, scale_after_normalization=False)
W3 = tf.Variable(tf.truncated_normal([generator_output_size*5, generator_output_size]))
b3 = tf.Variable(tf.truncated_normal([generator_output_size]))
g_log_prob = tf.matmul(h2, W3) + b3
g_log_prob = tf.reshape(g_log_prob, [-1, num_historical_days, 1, num_features])
self.gen_data = tf.reshape(g_log_prob, [-1, num_historical_days, num_features])
#g_log_prob = g_log_prob / tf.reshape(tf.reduce_max(g_log_prob, axis=1), [-1, 1, num_features, 1])
#g_prob = tf.nn.sigmoid(g_log_prob)
theta_G = [W1, b1, W2, b2, W3, b3]
with tf.variable_scope("discriminator"):
#[filter_height, filter_width, in_channels, out_channels]
k1 = tf.Variable(tf.truncated_normal([3, 1, num_features, 32],
stddev=0.1,seed=SEED, dtype=tf.float32))
b1 = tf.Variable(tf.zeros([32], dtype=tf.float32))
v1, m1, beta1, gamma1 = get_batch_norm_with_global_normalization_vars(32)
k2 = tf.Variable(tf.truncated_normal([3, 1, 32, 64],
stddev=0.1,seed=SEED, dtype=tf.float32))
b2 = tf.Variable(tf.zeros([64], dtype=tf.float32))
v2, m2, beta2, gamma2 = get_batch_norm_with_global_normalization_vars(64)
k3 = tf.Variable(tf.truncated_normal([3, 1, 64, 128],
stddev=0.1,seed=SEED, dtype=tf.float32))
b3 = tf.Variable(tf.zeros([128], dtype=tf.float32))
v3, m3, beta3, gamma3 = get_batch_norm_with_global_normalization_vars(128)
W1 = tf.Variable(tf.truncated_normal([18*1*128, 128]))
b4 = tf.Variable(tf.truncated_normal([128]))
v4, m4, beta4, gamma4 = get_batch_norm_with_global_normalization_vars(128)
W2 = tf.Variable(tf.truncated_normal([128, 1]))
theta_D = [k1, b1, k2, b2, k3, b3, W1, b4, W2]
def discriminator(X):
conv = tf.nn.conv2d(X,k1,strides=[1, 1, 1, 1],padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, b1))
pool = relu
# pool = tf.nn.avg_pool(relu, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
if is_train:
pool = tf.nn.dropout(pool, keep_prob = 0.8)
# pool = tf.nn.batch_norm_with_global_normalization(pool, v1, m1,
# beta1, gamma1, variance_epsilon=0.000001, scale_after_normalization=False)
print(pool)
conv = tf.nn.conv2d(pool, k2,strides=[1, 1, 1, 1],padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, b2))
pool = relu
#pool = tf.nn.avg_pool(relu, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
if is_train:
pool = tf.nn.dropout(pool, keep_prob = 0.8)
# pool = tf.nn.batch_norm_with_global_normalization(pool, v2, m2,
# beta2, gamma2, variance_epsilon=0.000001, scale_after_normalization=False)
print(pool)
conv = tf.nn.conv2d(pool, k3, strides=[1, 1, 1, 1], padding='VALID')
relu = tf.nn.relu(tf.nn.bias_add(conv, b3))
if is_train:
relu = tf.nn.dropout(relu, keep_prob=0.8)
# relu = tf.nn.batch_norm_with_global_normalization(relu, v3, m3,
# beta3, gamma3, variance_epsilon=0.000001, scale_after_normalization=False)
print(relu)
flattened_convolution_size = int(relu.shape[1]) * int(relu.shape[2]) * int(relu.shape[3])
print(flattened_convolution_size)
flattened_convolution = features = tf.reshape(relu, [-1, flattened_convolution_size])
if is_train:
flattened_convolution = tf.nn.dropout(flattened_convolution, keep_prob=0.8)
h1 = tf.nn.relu(tf.matmul(flattened_convolution, W1) + b4)
# h1 = tf.nn.batch_norm_with_global_normalization(h1, v4, m4,
# beta4, gamma4, variance_epsilon=0.000001, scale_after_normalization=False)
D_logit = tf.matmul(h1, W2)
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit, features
D_real, D_logit_real, self.features = discriminator(X)
D_fake, D_logit_fake, _ = discriminator(g_log_prob)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
self.D_l2_loss = (0.0001 * tf.add_n([tf.nn.l2_loss(t) for t in theta_D]) / len(theta_D))
self.D_loss = D_loss_real + D_loss_fake + self.D_l2_loss
self.G_l2_loss = (0.00001 * tf.add_n([tf.nn.l2_loss(t) for t in theta_G]) / len(theta_G))
self.G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake))) + self.G_l2_loss
self.D_solver = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.D_loss, var_list=theta_D)
self.G_solver = tf.train.AdamOptimizer(learning_rate=0.000055).minimize(self.G_loss, var_list=theta_G) | StockMarketGAN/gan.py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
SEED = 42
tf.set_random_seed(SEED)
class GAN():
def sample_Z(self, batch_size, n):
return np.random.uniform(-1., 1., size=(batch_size, n))
def __init__(self, num_features, num_historical_days, generator_input_size=200, is_train=True):
def get_batch_norm_with_global_normalization_vars(size):
v = tf.Variable(tf.ones([size]), dtype=tf.float32)
m = tf.Variable(tf.ones([size]), dtype=tf.float32)
beta = tf.Variable(tf.ones([size]), dtype=tf.float32)
gamma = tf.Variable(tf.ones([size]), dtype=tf.float32)
return v, m, beta, gamma
self.X = tf.placeholder(tf.float32, shape=[None, num_historical_days, num_features])
X = tf.reshape(self.X, [-1, num_historical_days, 1, num_features])
self.Z = tf.placeholder(tf.float32, shape=[None, generator_input_size])
generator_output_size = num_features*num_historical_days
with tf.variable_scope("generator"):
W1 = tf.Variable(tf.truncated_normal([generator_input_size, generator_output_size*10]))
b1 = tf.Variable(tf.truncated_normal([generator_output_size*10]))
h1 = tf.nn.sigmoid(tf.matmul(self.Z, W1) + b1)
# v1, m1, beta1, gamma1 = get_batch_norm_with_global_normalization_vars(generator_output_size*10)
# h1 = tf.nn.batch_norm_with_global_normalization(h1, v1, m1,
# beta1, gamma1, variance_epsilon=0.000001, scale_after_normalization=False)
W2 = tf.Variable(tf.truncated_normal([generator_output_size*10, generator_output_size*5]))
b2 = tf.Variable(tf.truncated_normal([generator_output_size*5]))
h2 = tf.nn.sigmoid(tf.matmul(h1, W2) + b2)
# v2, m2, beta2, gamma2 = get_batch_norm_with_global_normalization_vars(generator_output_size*5)
# h2 = tf.nn.batch_norm_with_global_normalization(h2, v2, m2,
# beta2, gamma2, variance_epsilon=0.000001, scale_after_normalization=False)
W3 = tf.Variable(tf.truncated_normal([generator_output_size*5, generator_output_size]))
b3 = tf.Variable(tf.truncated_normal([generator_output_size]))
g_log_prob = tf.matmul(h2, W3) + b3
g_log_prob = tf.reshape(g_log_prob, [-1, num_historical_days, 1, num_features])
self.gen_data = tf.reshape(g_log_prob, [-1, num_historical_days, num_features])
#g_log_prob = g_log_prob / tf.reshape(tf.reduce_max(g_log_prob, axis=1), [-1, 1, num_features, 1])
#g_prob = tf.nn.sigmoid(g_log_prob)
theta_G = [W1, b1, W2, b2, W3, b3]
with tf.variable_scope("discriminator"):
#[filter_height, filter_width, in_channels, out_channels]
k1 = tf.Variable(tf.truncated_normal([3, 1, num_features, 32],
stddev=0.1,seed=SEED, dtype=tf.float32))
b1 = tf.Variable(tf.zeros([32], dtype=tf.float32))
v1, m1, beta1, gamma1 = get_batch_norm_with_global_normalization_vars(32)
k2 = tf.Variable(tf.truncated_normal([3, 1, 32, 64],
stddev=0.1,seed=SEED, dtype=tf.float32))
b2 = tf.Variable(tf.zeros([64], dtype=tf.float32))
v2, m2, beta2, gamma2 = get_batch_norm_with_global_normalization_vars(64)
k3 = tf.Variable(tf.truncated_normal([3, 1, 64, 128],
stddev=0.1,seed=SEED, dtype=tf.float32))
b3 = tf.Variable(tf.zeros([128], dtype=tf.float32))
v3, m3, beta3, gamma3 = get_batch_norm_with_global_normalization_vars(128)
W1 = tf.Variable(tf.truncated_normal([18*1*128, 128]))
b4 = tf.Variable(tf.truncated_normal([128]))
v4, m4, beta4, gamma4 = get_batch_norm_with_global_normalization_vars(128)
W2 = tf.Variable(tf.truncated_normal([128, 1]))
theta_D = [k1, b1, k2, b2, k3, b3, W1, b4, W2]
def discriminator(X):
conv = tf.nn.conv2d(X,k1,strides=[1, 1, 1, 1],padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, b1))
pool = relu
# pool = tf.nn.avg_pool(relu, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
if is_train:
pool = tf.nn.dropout(pool, keep_prob = 0.8)
# pool = tf.nn.batch_norm_with_global_normalization(pool, v1, m1,
# beta1, gamma1, variance_epsilon=0.000001, scale_after_normalization=False)
print(pool)
conv = tf.nn.conv2d(pool, k2,strides=[1, 1, 1, 1],padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, b2))
pool = relu
#pool = tf.nn.avg_pool(relu, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
if is_train:
pool = tf.nn.dropout(pool, keep_prob = 0.8)
# pool = tf.nn.batch_norm_with_global_normalization(pool, v2, m2,
# beta2, gamma2, variance_epsilon=0.000001, scale_after_normalization=False)
print(pool)
conv = tf.nn.conv2d(pool, k3, strides=[1, 1, 1, 1], padding='VALID')
relu = tf.nn.relu(tf.nn.bias_add(conv, b3))
if is_train:
relu = tf.nn.dropout(relu, keep_prob=0.8)
# relu = tf.nn.batch_norm_with_global_normalization(relu, v3, m3,
# beta3, gamma3, variance_epsilon=0.000001, scale_after_normalization=False)
print(relu)
flattened_convolution_size = int(relu.shape[1]) * int(relu.shape[2]) * int(relu.shape[3])
print(flattened_convolution_size)
flattened_convolution = features = tf.reshape(relu, [-1, flattened_convolution_size])
if is_train:
flattened_convolution = tf.nn.dropout(flattened_convolution, keep_prob=0.8)
h1 = tf.nn.relu(tf.matmul(flattened_convolution, W1) + b4)
# h1 = tf.nn.batch_norm_with_global_normalization(h1, v4, m4,
# beta4, gamma4, variance_epsilon=0.000001, scale_after_normalization=False)
D_logit = tf.matmul(h1, W2)
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit, features
D_real, D_logit_real, self.features = discriminator(X)
D_fake, D_logit_fake, _ = discriminator(g_log_prob)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
self.D_l2_loss = (0.0001 * tf.add_n([tf.nn.l2_loss(t) for t in theta_D]) / len(theta_D))
self.D_loss = D_loss_real + D_loss_fake + self.D_l2_loss
self.G_l2_loss = (0.00001 * tf.add_n([tf.nn.l2_loss(t) for t in theta_G]) / len(theta_G))
self.G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake))) + self.G_l2_loss
self.D_solver = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.D_loss, var_list=theta_D)
self.G_solver = tf.train.AdamOptimizer(learning_rate=0.000055).minimize(self.G_loss, var_list=theta_G) | 0.68458 | 0.443239 |
import re
import time
import urllib
import random
import urlparse
from collections import namedtuple
from zitkino import log
from zitkino import http
from zitkino import parsers
from zitkino.models import Film
from . import BaseFilmID, BaseFilmService
class CsfdFilmID(BaseFilmID):
url_re = re.compile(r'/film/(\d+)')
FilmOrigin = namedtuple('FilmOrigin', ['year', 'length'])
FilmTitles = namedtuple('FilmTitles', ['main', 'orig', 'others'])
class CsfdSession(http.Session):
"""Deals with various ČSFD's network issues and eventually
tries to perform the same requests again.
"""
max_redirects = 5
default_referer = 'http://www.zitkino.cz'
def wait(self):
seconds = random.randrange(1, 5, 1)
log.debug('HTTP: Waiting for %d seconds.', seconds)
time.sleep(seconds)
def request(self, *args, **kwargs):
# set default referer - without this ČSFD can return an infinite loop
# of HTTP 302 redirects
headers = kwargs.get('headers', {})
headers['Referer'] = self.default_referer
kwargs['headers'] = headers
try:
self.wait()
return super(CsfdSession, self).request(*args, **kwargs)
except http.TooManyRedirects:
log.debug('HTTP: Too many redirects. Retrying.')
return self.request(*args, **kwargs)
except http.HTTPError as e:
if e.response.status_code in (502, 403):
log.debug('HTTP: 502 or 403 status code. Retrying.')
return self.request(*args, **kwargs)
raise
class CsfdFilmService(BaseFilmService):
name = u'ČSFD'
url_attr = 'url_csfd'
session_cls = CsfdSession
year_re = re.compile(r'(\d{4})')
length_re = re.compile(r'(\d+)\s*min')
def search(self, titles, year=None):
year = int(year) if year else None
for title in titles:
resp = self.session.get(
'http://www.csfd.cz/hledat/complete-films/?q='
+ urllib.quote_plus(unicode(title).encode('utf-8'))
)
# direct redirect to the film page
try:
CsfdFilmID.from_url(resp.url)
except ValueError:
pass
else:
return self.lookup(resp.url)
# results page
html = parsers.html(resp.content, base_url=resp.url)
results = self._iterparse_search_results(html, year)
for result in results:
if self._match_names(title, self._parse_matched_title(result)):
return self.lookup(self._parse_film_url(result))
return None # there is no match
def _iterparse_search_results(self, html, year=None):
for result in html.cssselect('#search-films .content li'):
if year:
# check year
year_el = result.cssselect_first('.film-year')
if year_el is not None:
year_text = year_el.text_content()
else:
year_text = result.cssselect_first('p').text_content()
if year != int(self.year_re.search(year_text).group(1)):
continue # skip this result
yield result
def _parse_matched_title(self, result):
title_el = result.cssselect_first('.search-name')
if title_el is not None:
return title_el.text_content().lstrip('(').rstrip(')')
return result.cssselect('.film')[0].text_content()
def _parse_film_url(self, result):
result.make_links_absolute()
return result.cssselect_first('.film').get('href')
def lookup(self, url):
try:
resp = self.session.get(url)
except http.HTTPError as e:
if e.response.status_code == 404:
return None # there is no match
raise
html = parsers.html(resp.content, base_url=resp.url)
titles = self._parse_titles(html)
origin = self._parse_origin(html)
return Film(
url_csfd=resp.url,
url_imdb=self._parse_imdb_url(html),
title_main=titles.main,
title_orig=titles.orig,
titles_search=titles.others,
year=origin.year,
directors=list(self._iterparse_directors(html)),
length=origin.length,
rating_csfd=self._parse_rating(html),
url_posters=[self._parse_poster_url(html)],
)
def _parse_imdb_url(self, html):
imdb_img = html.cssselect_first('.links img.imdb')
if imdb_img is not None:
imdb_a = next(imdb_img.iterancestors(tag='a'))
return imdb_a.get('href')
return None
def _parse_titles(self, html):
info = html.cssselect_first('.content .info')
# main title
h1_el = info.cssselect_first('h1')
main = h1_el.text.strip()
# other titles
title_el = html.cssselect_first('title')
title_text = title_el.text_content()
orig = None
others = []
for title in info.cssselect('.names h3'):
other = title.text.strip()
if re.search(r'/ ' + re.escape(other) + ' [\(\|]', title_text):
orig = other
others.append(other)
return FilmTitles(main, orig, others)
def _parse_origin(self, html):
info = html.cssselect_first('.content .info')
year = length = None
origin_text = info.cssselect_first('.origin').text.strip()
for origin_fragment in origin_text.split(','):
# year
year_match = self.year_re.search(origin_fragment)
if year_match:
year = int(year_match.group(1))
# length
length_match = self.length_re.search(origin_fragment)
if length_match:
length = int(length_match.group(1))
return FilmOrigin(year, length)
def _iterparse_directors(self, html):
info = html.cssselect_first('.content .info')
for creators_h4 in info.cssselect('.creators div h4'):
if u'Režie' in creators_h4.text_content():
wrapper = next(creators_h4.iterancestors(tag='div'))
for link in wrapper.cssselect('a'):
yield link.text_content()
def _parse_rating(self, html):
rating_text = html.cssselect_first('#rating h2').text_content()
rating_text = rating_text.rstrip('%')
if rating_text:
return int(rating_text)
return None
def _parse_poster_url(self, html):
img = html.cssselect_first('#poster img')
if img is None:
return None # no image?!
url = img.get('src')
if url.startswith('//'):
url = 'https:' + url
parts = urlparse.urlparse(url)
if 'assets' in parts.path:
return None # default image
# strip params so we get the largest image
parts = (parts.scheme, parts.netloc, parts.path, None, None, None)
return urlparse.urlunparse(parts) | zitkino/services/csfd.py |
import re
import time
import urllib
import random
import urlparse
from collections import namedtuple
from zitkino import log
from zitkino import http
from zitkino import parsers
from zitkino.models import Film
from . import BaseFilmID, BaseFilmService
class CsfdFilmID(BaseFilmID):
    """Film identifier extracted from ČSFD URLs of the form ``/film/<id>``."""
    # Captures the numeric film ID from the URL path.
    url_re = re.compile(r'/film/(\d+)')
# Lightweight containers for data parsed from a ČSFD film page.
FilmOrigin = namedtuple('FilmOrigin', 'year length')
FilmTitles = namedtuple('FilmTitles', 'main orig others')
class CsfdSession(http.Session):
    """HTTP session that works around ČSFD's network quirks.

    ČSFD occasionally answers with infinite redirect chains or transient
    502/403 responses; such requests are retried after a short pause.
    """

    max_redirects = 5
    default_referer = 'http://www.zitkino.cz'

    def wait(self):
        """Sleep a random 1-4 seconds to throttle requests to ČSFD."""
        delay = random.randrange(1, 5, 1)
        log.debug('HTTP: Waiting for %d seconds.', delay)
        time.sleep(delay)

    def request(self, *args, **kwargs):
        """Issue a throttled request, retrying on known ČSFD failures."""
        # Without a Referer header ČSFD can answer with an endless chain
        # of HTTP 302 redirects, so one is always set.
        kwargs.setdefault('headers', {})
        kwargs['headers']['Referer'] = self.default_referer
        try:
            self.wait()
            return super(CsfdSession, self).request(*args, **kwargs)
        except http.TooManyRedirects:
            log.debug('HTTP: Too many redirects. Retrying.')
            return self.request(*args, **kwargs)
        except http.HTTPError as e:
            # Transient server errors and throttling responses: retry.
            if e.response.status_code in (502, 403):
                log.debug('HTTP: 502 or 403 status code. Retrying.')
                return self.request(*args, **kwargs)
            raise
class CsfdFilmService(BaseFilmService):
    """Film metadata service backed by the Czech film database csfd.cz."""
    name = u'ČSFD'  # human-readable service name
    url_attr = 'url_csfd'  # Film attribute holding this service's URL
    session_cls = CsfdSession  # retrying session tailored to ČSFD
    year_re = re.compile(r'(\d{4})')  # matches a 4-digit production year
    length_re = re.compile(r'(\d+)\s*min')  # matches film length in minutes
def search(self, titles, year=None):
year = int(year) if year else None
for title in titles:
resp = self.session.get(
'http://www.csfd.cz/hledat/complete-films/?q='
+ urllib.quote_plus(unicode(title).encode('utf-8'))
)
# direct redirect to the film page
try:
CsfdFilmID.from_url(resp.url)
except ValueError:
pass
else:
return self.lookup(resp.url)
# results page
html = parsers.html(resp.content, base_url=resp.url)
results = self._iterparse_search_results(html, year)
for result in results:
if self._match_names(title, self._parse_matched_title(result)):
return self.lookup(self._parse_film_url(result))
return None # there is no match
def _iterparse_search_results(self, html, year=None):
for result in html.cssselect('#search-films .content li'):
if year:
# check year
year_el = result.cssselect_first('.film-year')
if year_el is not None:
year_text = year_el.text_content()
else:
year_text = result.cssselect_first('p').text_content()
if year != int(self.year_re.search(year_text).group(1)):
continue # skip this result
yield result
def _parse_matched_title(self, result):
title_el = result.cssselect_first('.search-name')
if title_el is not None:
return title_el.text_content().lstrip('(').rstrip(')')
return result.cssselect('.film')[0].text_content()
def _parse_film_url(self, result):
result.make_links_absolute()
return result.cssselect_first('.film').get('href')
def lookup(self, url):
try:
resp = self.session.get(url)
except http.HTTPError as e:
if e.response.status_code == 404:
return None # there is no match
raise
html = parsers.html(resp.content, base_url=resp.url)
titles = self._parse_titles(html)
origin = self._parse_origin(html)
return Film(
url_csfd=resp.url,
url_imdb=self._parse_imdb_url(html),
title_main=titles.main,
title_orig=titles.orig,
titles_search=titles.others,
year=origin.year,
directors=list(self._iterparse_directors(html)),
length=origin.length,
rating_csfd=self._parse_rating(html),
url_posters=[self._parse_poster_url(html)],
)
def _parse_imdb_url(self, html):
imdb_img = html.cssselect_first('.links img.imdb')
if imdb_img is not None:
imdb_a = next(imdb_img.iterancestors(tag='a'))
return imdb_a.get('href')
return None
def _parse_titles(self, html):
info = html.cssselect_first('.content .info')
# main title
h1_el = info.cssselect_first('h1')
main = h1_el.text.strip()
# other titles
title_el = html.cssselect_first('title')
title_text = title_el.text_content()
orig = None
others = []
for title in info.cssselect('.names h3'):
other = title.text.strip()
if re.search(r'/ ' + re.escape(other) + ' [\(\|]', title_text):
orig = other
others.append(other)
return FilmTitles(main, orig, others)
def _parse_origin(self, html):
info = html.cssselect_first('.content .info')
year = length = None
origin_text = info.cssselect_first('.origin').text.strip()
for origin_fragment in origin_text.split(','):
# year
year_match = self.year_re.search(origin_fragment)
if year_match:
year = int(year_match.group(1))
# length
length_match = self.length_re.search(origin_fragment)
if length_match:
length = int(length_match.group(1))
return FilmOrigin(year, length)
def _iterparse_directors(self, html):
info = html.cssselect_first('.content .info')
for creators_h4 in info.cssselect('.creators div h4'):
if u'Režie' in creators_h4.text_content():
wrapper = next(creators_h4.iterancestors(tag='div'))
for link in wrapper.cssselect('a'):
yield link.text_content()
def _parse_rating(self, html):
rating_text = html.cssselect_first('#rating h2').text_content()
rating_text = rating_text.rstrip('%')
if rating_text:
return int(rating_text)
return None
def _parse_poster_url(self, html):
img = html.cssselect_first('#poster img')
if img is None:
return None # no image?!
url = img.get('src')
if url.startswith('//'):
url = 'https:' + url
parts = urlparse.urlparse(url)
if 'assets' in parts.path:
return None # default image
# strip params so we get the largest image
parts = (parts.scheme, parts.netloc, parts.path, None, None, None)
        return urlparse.urlunparse(parts)
def check_match_format(matches):
    """Validate and normalize a 5-character Wordle match string.

    Each character must be one of 'G' (green), 'Y' (yellow) or 'B'
    (black/gray); lower case is accepted and upper-cased.

    Returns the upper-cased match string.
    Raises ValueError on a wrong length or an unknown character.
    (ValueError subclasses Exception, so existing callers that catch
    Exception keep working.)
    """
    matches = matches.upper()
    if len(matches) != 5:
        raise ValueError("Matches must be 5 characters long!")
    for letter in matches:
        if letter not in ('Y', 'G', 'B'):
            raise ValueError("Unrecognizable match format: Matches can only contain 'B', 'G', 'Y'!")
    return matches
def format_guess(guess_word, matches):
    """Summarize a guess against its match pattern.

    Returns ``(spot_matches, letter_matches)``: the green letters in their
    positions ('_' elsewhere), and every green or yellow letter of the
    guess in positional order.
    """
    matches = check_match_format(matches)
    spots = []
    known_letters = []
    for position in range(5):
        letter = guess_word[position]
        mark = matches[position]
        if mark == 'G':
            spots.append(letter)
            known_letters.append(letter)
        else:
            spots.append('_')
            if mark == 'Y':
                known_letters.append(letter)
    return ''.join(spots), ''.join(known_letters)
def auto_elimination(guess, answer, possible_words):
    """Apply every elimination rule for one guess against a known answer.

    Returns the number of candidate words that survive all filters.
    """
    remaining = tuple(possible_words)
    for position in range(5):
        letter = guess[position]
        if letter == answer[position]:
            # green: letter fixed at this position
            remaining = place_elimination(position, letter, remaining, True)
        elif letter in answer:
            # yellow: letter present, but not at this position
            remaining = letter_elimination(letter, remaining, True)
            remaining = place_elimination(position, letter, remaining, False)
        else:
            # gray: letter absent from the answer
            remaining = letter_elimination(letter, remaining, False)
    remaining = multi_letter_elimination(guess, answer, remaining)
    return len(remaining)
def place_elimination(place, letter, pwords, positive_match):
    """Filter candidates by the letter at one position.

    With positive_match=True keep words that have *letter* at *place*;
    with False keep words that do not.
    """
    if positive_match:
        return [candidate for candidate in pwords if candidate[place] == letter]
    return [candidate for candidate in pwords if candidate[place] != letter]
def letter_elimination(letter, pwords, positive_match):
    """Filter candidates by letter membership.

    With positive_match=True keep words containing *letter*;
    with False keep words that lack it.
    """
    keep = (lambda word: letter in word) if positive_match else (lambda word: letter not in word)
    return [word for word in pwords if keep(word)]
def multi_letter_elimination(guess_word, letter_matches, pwords):
    """Filter candidates using the counts of letters repeated in the guess.

    ``letter_matches`` is the string of confirmed letters (the second value
    of format_guess, or the answer itself when auto-playing).
    """
    # letters that occur more than once in the guess
    multi = ''.join({letter for letter in guess_word if guess_word.count(letter) > 1})
    if not multi:
        return pwords
    for letter in multi:
        gcount = guess_word.count(letter)
        acount = letter_matches.count(letter)
        if acount == 0:
            # letter absent from the confirmed letters - nothing to refine
            continue
        elif gcount > acount:
            # guessed more copies than confirmed: answer has exactly acount
            pwords = [word for word in pwords if word.count(letter) == acount]
        else:
            # answer holds at least as many copies as guessed
            pwords = [word for word in pwords if word.count(letter) >= gcount]
    return pwords
def check_match_format(matches):
    # Normalize to upper case so callers may pass lower-case match strings.
    matches = matches.upper()
    if len(matches) != 5:
        raise Exception("Matches must be 5 characters long!")
    # Only the Wordle colour codes are valid: G(reen), Y(ellow), B(lack).
    for letter in matches:
        if letter not in ['Y', 'G', 'B']:
            raise Exception ("Unrecognizable match format: Matches can only contain 'B', 'G', 'Y'!")
    return matches
def format_guess(guess_word, matches):
    """Summarize a guess against its match pattern.

    Returns ``(spot_matches, letter_matches)``: the green letters in their
    positions ('_' elsewhere), and every green or yellow letter of the
    guess in positional order.
    """
    matches = check_match_format(matches)
    spots = []
    known_letters = []
    for position in range(5):
        letter = guess_word[position]
        mark = matches[position]
        if mark == 'G':
            spots.append(letter)
            known_letters.append(letter)
        else:
            spots.append('_')
            if mark == 'Y':
                known_letters.append(letter)
    return ''.join(spots), ''.join(known_letters)
def auto_elimination(guess, answer, possible_words):
    """Apply every elimination rule for one guess against a known answer.

    Returns the number of candidate words that survive all filters.
    """
    remaining = tuple(possible_words)
    for position in range(5):
        letter = guess[position]
        if letter == answer[position]:
            # green: letter fixed at this position
            remaining = place_elimination(position, letter, remaining, True)
        elif letter in answer:
            # yellow: letter present, but not at this position
            remaining = letter_elimination(letter, remaining, True)
            remaining = place_elimination(position, letter, remaining, False)
        else:
            # gray: letter absent from the answer
            remaining = letter_elimination(letter, remaining, False)
    remaining = multi_letter_elimination(guess, answer, remaining)
    return len(remaining)
def place_elimination(place, letter, pwords, positive_match):
    """Filter candidates by the letter at one position.

    With positive_match=True keep words that have *letter* at *place*;
    with False keep words that do not.
    """
    if positive_match:
        return [candidate for candidate in pwords if candidate[place] == letter]
    return [candidate for candidate in pwords if candidate[place] != letter]
def letter_elimination(letter, pwords, positive_match):
    """Filter candidates by letter membership.

    With positive_match=True keep words containing *letter*;
    with False keep words that lack it.
    """
    keep = (lambda word: letter in word) if positive_match else (lambda word: letter not in word)
    return [word for word in pwords if keep(word)]
def multi_letter_elimination(guess_word, letter_matches, pwords):
    """Filter candidates using the counts of letters repeated in the guess.

    ``letter_matches`` is the string of confirmed letters (the second value
    of format_guess, or the answer itself when auto-playing).  Returns the
    filtered list of candidate words.

    Fix: the final ``return pwords`` line was corrupted with extraction
    residue; it is restored here, so the function no longer implicitly
    returns None.
    """
    # letters that occur more than once in the guess
    multi = ''.join({letter for letter in guess_word if guess_word.count(letter) > 1})
    if not multi:
        return pwords
    for letter in multi:
        gcount = guess_word.count(letter)
        acount = letter_matches.count(letter)
        if acount == 0:
            # letter absent from the confirmed letters - nothing to refine
            continue
        elif gcount > acount:
            # guessed more copies than confirmed: answer has exactly acount
            pwords = [word for word in pwords if word.count(letter) == acount]
        else:
            # answer holds at least as many copies as guessed
            pwords = [word for word in pwords if word.count(letter) >= gcount]
    return pwords
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Input, MaxPool2D
from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
# Build the 10-layer CNN
def cnn(input_shape, classes):
    """Build a 10-layer CNN classifier.

    Four single-conv stages (32/64/128/256 filters) and one double-conv
    stage (512), each followed by 2x2 max-pooling; then a double 1024-filter
    stage, global average pooling, a 256-unit dense layer, the class logits
    and a softmax output.
    """
    inputs = Input(shape=(input_shape[0], input_shape[1], 3))
    net = inputs
    # (filters, number of conv layers) per pooled stage
    for filters, repeats in ((32, 1), (64, 1), (128, 1), (256, 1), (512, 2)):
        for _ in range(repeats):
            net = Conv2D(filters, (3, 3), strides=(1, 1), padding='same',
                         kernel_initializer='he_normal')(net)
            net = BatchNormalization()(net)
            net = Activation('relu')(net)
        net = MaxPool2D(pool_size=(2, 2))(net)
    # final conv stage (no pooling), then global average pooling
    for _ in range(2):
        net = Conv2D(1024, (3, 3), strides=(1, 1), padding='same',
                     kernel_initializer='he_normal')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
    net = GlobalAveragePooling2D()(net)
    # dense head: hidden layer, class logits, softmax
    net = Dense(256, kernel_initializer='he_normal')(net)
    net = Dense(classes, kernel_initializer='he_normal')(net)
    outputs = Activation('softmax')(net)
    return Model(inputs=inputs, outputs=outputs)
# Draw the learning-curve plots and save them to disk
def plot_history(history):
    """Plot accuracy (left) and loss (right) curves and save 'history.jpg'."""
    fig, (ax_left, ax_right) = plt.subplots(ncols=2, figsize=(10, 4))
    panels = (
        (ax_left, 'Accuracy_vs_Epoch', 'accuracy', 'val_accuracy', 'accuracy'),
        (ax_right, "Loss_vs_Epoch", 'loss', 'val_loss', 'loss'),
    )
    for axis, title, train_key, val_key, ylabel in panels:
        axis.plot(history.history[train_key])
        axis.plot(history.history[val_key])
        axis.grid(True)
        axis.set_title(title)
        axis.set_ylabel(ylabel)
        axis.set_xlabel('epoch')
        axis.legend(['train', 'test'], loc='upper left')
    # save the figure as an image
    fig.savefig('history.jpg')
    plt.close()
def main():
    """Train the 10-layer CNN on the image dataset described by train.csv /
    val.csv, saving the best weights and the learning curves."""
    directory = 'img'  # folder containing the image files
    df_train = pd.read_csv('train.csv')  # DataFrame describing the training data
    df_validation = pd.read_csv('val.csv')  # DataFrame describing the validation data
    label_list = ['AMD', 'DR_DM', 'Gla', 'MH', 'Normal', 'RD', 'RP', 'RVO']  # label names
    image_size = (224, 224)  # input image size
    classes = len(label_list)  # number of classes
    batch_size = 32  # batch size
    epochs = 300  # number of epochs
    loss = 'categorical_crossentropy'  # loss function
    optimizer = Adam(lr=0.001, amsgrad=True)  # optimizer
    metrics = 'accuracy'  # evaluation metric
    # data-augmentation parameters for ImageDataGenerator
    aug_params = {'rotation_range': 5,
                  'width_shift_range': 0.05,
                  'height_shift_range': 0.05,
                  'shear_range': 0.1,
                  'zoom_range': 0.05,
                  'horizontal_flip': True,
                  'vertical_flip': True}
    # save the model only when val_loss reaches a new minimum
    mc_cb = ModelCheckpoint('model_weights.h5',
                            monitor='val_loss', verbose=1,
                            save_best_only=True, mode='min')
    # when learning stagnates, multiply the learning rate by 0.2
    rl_cb = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=3,
                              verbose=1, mode='auto',
                              min_delta=0.0001, cooldown=0, min_lr=0)
    # stop training once the loss stops improving
    es_cb = EarlyStopping(monitor='loss', min_delta=0,
                          patience=5, verbose=1, mode='auto')
    # weight the loss per class to compensate for class imbalance
    weight_balanced = {}
    for i, label in enumerate(label_list):
        weight_balanced[i] = (df_train['label'] == label).sum()
    max_count = max(weight_balanced.values())
    for label in weight_balanced:
        weight_balanced[label] = max_count / weight_balanced[label]
    print(weight_balanced)
    # build the data generators
    ## generator for the training data
    datagen = ImageDataGenerator(rescale=1./255, **aug_params)
    train_generator = datagen.flow_from_dataframe(
        dataframe=df_train, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_train = train_generator.n // train_generator.batch_size
    ## generator for the validation data
    datagen = ImageDataGenerator(rescale=1./255)
    validation_generator = datagen.flow_from_dataframe(
        dataframe=df_validation, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_validation = validation_generator.n // validation_generator.batch_size
    # build the network
    model = cnn(image_size, classes)
    model.summary()
    model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
    # train
    # NOTE(review): fit_generator and Adam(lr=...) are deprecated in newer
    # TF2/Keras releases (fit / learning_rate) - confirm the TF version in use.
    history = model.fit_generator(
        train_generator, steps_per_epoch=step_size_train,
        epochs=epochs, verbose=1, callbacks=[mc_cb, rl_cb, es_cb],
        validation_data=validation_generator,
        validation_steps=step_size_validation,
        class_weight=weight_balanced,
        workers=3)
    # save the learning curves
    plot_history(history)
if __name__ == "__main__":
    main()
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Input, MaxPool2D
from tensorflow.keras.layers import Conv2D, Dense, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
# Build the 10-layer CNN
def cnn(input_shape, classes):
    """Build a 10-layer CNN classifier.

    Four single-conv stages (32/64/128/256 filters) and one double-conv
    stage (512), each followed by 2x2 max-pooling; then a double 1024-filter
    stage, global average pooling, a 256-unit dense layer, the class logits
    and a softmax output.
    """
    inputs = Input(shape=(input_shape[0], input_shape[1], 3))
    net = inputs
    # (filters, number of conv layers) per pooled stage
    for filters, repeats in ((32, 1), (64, 1), (128, 1), (256, 1), (512, 2)):
        for _ in range(repeats):
            net = Conv2D(filters, (3, 3), strides=(1, 1), padding='same',
                         kernel_initializer='he_normal')(net)
            net = BatchNormalization()(net)
            net = Activation('relu')(net)
        net = MaxPool2D(pool_size=(2, 2))(net)
    # final conv stage (no pooling), then global average pooling
    for _ in range(2):
        net = Conv2D(1024, (3, 3), strides=(1, 1), padding='same',
                     kernel_initializer='he_normal')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
    net = GlobalAveragePooling2D()(net)
    # dense head: hidden layer, class logits, softmax
    net = Dense(256, kernel_initializer='he_normal')(net)
    net = Dense(classes, kernel_initializer='he_normal')(net)
    outputs = Activation('softmax')(net)
    return Model(inputs=inputs, outputs=outputs)
# Draw the learning-curve plots and save them to disk
def plot_history(history):
    """Plot accuracy (left) and loss (right) curves and save 'history.jpg'."""
    fig, (ax_left, ax_right) = plt.subplots(ncols=2, figsize=(10, 4))
    panels = (
        (ax_left, 'Accuracy_vs_Epoch', 'accuracy', 'val_accuracy', 'accuracy'),
        (ax_right, "Loss_vs_Epoch", 'loss', 'val_loss', 'loss'),
    )
    for axis, title, train_key, val_key, ylabel in panels:
        axis.plot(history.history[train_key])
        axis.plot(history.history[val_key])
        axis.grid(True)
        axis.set_title(title)
        axis.set_ylabel(ylabel)
        axis.set_xlabel('epoch')
        axis.legend(['train', 'test'], loc='upper left')
    # save the figure as an image
    fig.savefig('history.jpg')
    plt.close()
def main():
    """Train the 10-layer CNN on the image dataset described by train.csv /
    val.csv, saving the best weights and the learning curves."""
    directory = 'img'  # folder containing the image files
    df_train = pd.read_csv('train.csv')  # DataFrame describing the training data
    df_validation = pd.read_csv('val.csv')  # DataFrame describing the validation data
    label_list = ['AMD', 'DR_DM', 'Gla', 'MH', 'Normal', 'RD', 'RP', 'RVO']  # label names
    image_size = (224, 224)  # input image size
    classes = len(label_list)  # number of classes
    batch_size = 32  # batch size
    epochs = 300  # number of epochs
    loss = 'categorical_crossentropy'  # loss function
    optimizer = Adam(lr=0.001, amsgrad=True)  # optimizer
    metrics = 'accuracy'  # evaluation metric
    # data-augmentation parameters for ImageDataGenerator
    aug_params = {'rotation_range': 5,
                  'width_shift_range': 0.05,
                  'height_shift_range': 0.05,
                  'shear_range': 0.1,
                  'zoom_range': 0.05,
                  'horizontal_flip': True,
                  'vertical_flip': True}
    # save the model only when val_loss reaches a new minimum
    mc_cb = ModelCheckpoint('model_weights.h5',
                            monitor='val_loss', verbose=1,
                            save_best_only=True, mode='min')
    # when learning stagnates, multiply the learning rate by 0.2
    rl_cb = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=3,
                              verbose=1, mode='auto',
                              min_delta=0.0001, cooldown=0, min_lr=0)
    # stop training once the loss stops improving
    es_cb = EarlyStopping(monitor='loss', min_delta=0,
                          patience=5, verbose=1, mode='auto')
    # weight the loss per class to compensate for class imbalance
    weight_balanced = {}
    for i, label in enumerate(label_list):
        weight_balanced[i] = (df_train['label'] == label).sum()
    max_count = max(weight_balanced.values())
    for label in weight_balanced:
        weight_balanced[label] = max_count / weight_balanced[label]
    print(weight_balanced)
    # build the data generators
    ## generator for the training data
    datagen = ImageDataGenerator(rescale=1./255, **aug_params)
    train_generator = datagen.flow_from_dataframe(
        dataframe=df_train, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_train = train_generator.n // train_generator.batch_size
    ## generator for the validation data
    datagen = ImageDataGenerator(rescale=1./255)
    validation_generator = datagen.flow_from_dataframe(
        dataframe=df_validation, directory=directory,
        x_col='filename', y_col='label',
        target_size=image_size, class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_validation = validation_generator.n // validation_generator.batch_size
    # build the network
    model = cnn(image_size, classes)
    model.summary()
    model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
    # train
    # NOTE(review): fit_generator and Adam(lr=...) are deprecated in newer
    # TF2/Keras releases (fit / learning_rate) - confirm the TF version in use.
    history = model.fit_generator(
        train_generator, steps_per_epoch=step_size_train,
        epochs=epochs, verbose=1, callbacks=[mc_cb, rl_cb, es_cb],
        validation_data=validation_generator,
        validation_steps=step_size_validation,
        class_weight=weight_balanced,
        workers=3)
    # save the learning curves
    plot_history(history)
if __name__ == "__main__":
    main()
import array
import os
from MSTClustering import *
from optparse import OptionParser
usage = 'test_MST.py reconFile [opt] <reconFile>'
parser = OptionParser()
parser.add_option('-o', '--output-file', type = str, dest = 'o',
default = None,
help = 'path to the output file')
parser.add_option('-x', '--max-num-xtals', type = int, dest = 'x',
default = 10000,
help = 'maximum number of xtal for running the MST')
parser.add_option('-m', '--max-num-clusters', type = int, dest = 'm',
default = 5,
help = 'maximum number of clusters in the output tree')
parser.add_option('-w', '--max-edge-weight', type = float, dest = 'w',
default = None,
help = 'threshold length for the MST clustering (in mm)')
parser.add_option('-n', '--num-events', type = int, dest = 'n',
default = 10000000,
help = 'number of events to be processed')
(opts, args) = parser.parse_args()
if len(args) == 0:
print 'Usage: %s' % usage
sys.exit('Please provide a recon input root file.')
elif len(args) > 2:
print 'Usage: %s' % usage
sys.exit('Too many arguments.')
inputFilePath = args[0]
outputFilePath = opts.o or inputFilePath.replace('recon.root', 'MSTClu.root')
if os.path.exists(outputFilePath):
sys.exit('Output file %s exists, remove it first.' % outputFilePath)
MAX_NUM_XTALS = opts.x
WEIGHT_THRESHOLD = opts.w
MAX_NUM_CLUSTERS = opts.m
reader = ReconReader(inputFilePath)
numEvents = min(opts.n, reader.getEntries())
if numEvents < 0:
numEvents = reader.getEntries()
outputFile = ROOT.TFile(outputFilePath, 'RECREATE')
outputTree = ROOT.TTree('MSTTuple', 'MSTTuple')
arrayDict = {}
BRANCH_DICT = {'EvtRun' : ('i', 1),
'EvtEventId' : ('i', 1),
'CalEnergyRaw' : ('f', 1),
'McEnergy' : ('f', 1),
'TkrNumTracks' : ('f', 1),
'CalCsIRLn' : ('f', 1),
'NumXtals' : ('i', 1),
'NumClusters' : ('i', 1),
'UberClusterNumXtals': ('f', 1),
'UberClusterEnergy' : ('f', 1),
'UberClusterMeanW' : ('f', 1),
'UberClusterRmsW' : ('f', 1),
'UberClusterMaxW' : ('f', 1),
'ClusterNumXtals' : ('i', MAX_NUM_CLUSTERS),
'ClusterEnergy' : ('f', MAX_NUM_CLUSTERS),
'ClusterMeanW' : ('f', MAX_NUM_CLUSTERS),
'ClusterRmsW' : ('f', MAX_NUM_CLUSTERS),
'ClusterMaxW' : ('f', MAX_NUM_CLUSTERS)
}
for (branchName, (branchType, branchSize)) in BRANCH_DICT.items():
a = array.array(branchType, [0]*branchSize)
arrayDict[branchName] = a
if branchSize == 1:
branchTitle = '%s/%s' % (branchName, branchType.upper())
else:
branchTitle = '%s[%d]/%s' %\
(branchName, branchSize, branchType.upper())
outputTree.Branch(branchName, a, branchTitle)
for i in xrange(numEvents):
reader.getEntry(i)
print '\nProcessing event %d/%d...' % (i, numEvents)
xtalCol = reader.getCalXtalRecCol()
numXtals = reader.getNumCalXtals()
arrayDict['EvtRun'][0] = reader.getMeritVariable('EvtRun')
arrayDict['EvtEventId'][0] = reader.getMeritVariable('EvtEventId')
arrayDict['CalEnergyRaw'][0] = reader.getMeritVariable('CalEnergyRaw')
arrayDict['McEnergy'][0] = reader.getMeritVariable('McEnergy')
arrayDict['TkrNumTracks'][0] = reader.getMeritVariable('TkrNumTracks')
arrayDict['CalCsIRLn'][0] = reader.getMeritVariable('CalCsIRLn')
arrayDict['NumXtals'][0] = numXtals
if numXtals <= MAX_NUM_XTALS:
clustering = MSTClustering(xtalCol, WEIGHT_THRESHOLD)
numClusters = clustering.getNumClusters()
arrayDict['NumClusters'][0] = numClusters
uberCluster = clustering.getUberCluster()
arrayDict['UberClusterNumXtals'][0] = uberCluster.getNumNodes()
arrayDict['UberClusterEnergy'][0] = uberCluster.EnergySum
arrayDict['UberClusterMeanW'][0] = uberCluster.getMeanEdgeWeight()
arrayDict['UberClusterRmsW'][0] = uberCluster.getRmsEdgeWeight()
arrayDict['UberClusterMaxW'][0] = uberCluster.getMaxEdgeWeight()
for cId in xrange(MAX_NUM_CLUSTERS):
if cId < numClusters:
c = clustering.getCluster(cId)
arrayDict['ClusterNumXtals'][cId] = c.getNumNodes()
arrayDict['ClusterEnergy'][cId] = c.EnergySum
arrayDict['ClusterMeanW'][cId] = c.getMeanEdgeWeight()
arrayDict['ClusterRmsW'][cId] = c.getRmsEdgeWeight()
arrayDict['ClusterMaxW'][cId] = c.getMaxEdgeWeight()
else:
arrayDict['ClusterNumXtals'][cId] = 0
arrayDict['ClusterEnergy'][cId] = 0.0
arrayDict['ClusterMeanW'][cId] = 0.0
arrayDict['ClusterRmsW'][cId] = 0.0
arrayDict['ClusterMaxW'][cId] = 0.0
else:
arrayDict['NumClusters'][0] = 0
arrayDict['UberClusterNumXtals'][0] = 0
arrayDict['UberClusterEnergy'][0] = 0.0
arrayDict['UberClusterMeanW'][0] = 0.0
arrayDict['UberClusterRmsW'][0] = 0.0
arrayDict['UberClusterMaxW'][0] = 0.0
for cId in xrange(MAX_NUM_CLUSTERS):
arrayDict['ClusterNumXtals'][cId] = 0
arrayDict['ClusterEnergy'][cId] = 0.0
arrayDict['ClusterMeanW'][cId] = 0.0
arrayDict['ClusterRmsW'][cId] = 0.0
arrayDict['ClusterMaxW'][cId] = 0.0
outputTree.Fill()
outputFile.Write()
outputFile.Close()
import array
import os
from MSTClustering import *
from optparse import OptionParser
usage = 'test_MST.py reconFile [opt] <reconFile>'
parser = OptionParser()
parser.add_option('-o', '--output-file', type = str, dest = 'o',
default = None,
help = 'path to the output file')
parser.add_option('-x', '--max-num-xtals', type = int, dest = 'x',
default = 10000,
help = 'maximum number of xtal for running the MST')
parser.add_option('-m', '--max-num-clusters', type = int, dest = 'm',
default = 5,
help = 'maximum number of clusters in the output tree')
parser.add_option('-w', '--max-edge-weight', type = float, dest = 'w',
default = None,
help = 'threshold length for the MST clustering (in mm)')
parser.add_option('-n', '--num-events', type = int, dest = 'n',
default = 10000000,
help = 'number of events to be processed')
(opts, args) = parser.parse_args()
if len(args) == 0:
print 'Usage: %s' % usage
sys.exit('Please provide a recon input root file.')
elif len(args) > 2:
print 'Usage: %s' % usage
sys.exit('Too many arguments.')
inputFilePath = args[0]
outputFilePath = opts.o or inputFilePath.replace('recon.root', 'MSTClu.root')
if os.path.exists(outputFilePath):
sys.exit('Output file %s exists, remove it first.' % outputFilePath)
MAX_NUM_XTALS = opts.x
WEIGHT_THRESHOLD = opts.w
MAX_NUM_CLUSTERS = opts.m
reader = ReconReader(inputFilePath)
numEvents = min(opts.n, reader.getEntries())
if numEvents < 0:
numEvents = reader.getEntries()
outputFile = ROOT.TFile(outputFilePath, 'RECREATE')
outputTree = ROOT.TTree('MSTTuple', 'MSTTuple')
arrayDict = {}
BRANCH_DICT = {'EvtRun' : ('i', 1),
'EvtEventId' : ('i', 1),
'CalEnergyRaw' : ('f', 1),
'McEnergy' : ('f', 1),
'TkrNumTracks' : ('f', 1),
'CalCsIRLn' : ('f', 1),
'NumXtals' : ('i', 1),
'NumClusters' : ('i', 1),
'UberClusterNumXtals': ('f', 1),
'UberClusterEnergy' : ('f', 1),
'UberClusterMeanW' : ('f', 1),
'UberClusterRmsW' : ('f', 1),
'UberClusterMaxW' : ('f', 1),
'ClusterNumXtals' : ('i', MAX_NUM_CLUSTERS),
'ClusterEnergy' : ('f', MAX_NUM_CLUSTERS),
'ClusterMeanW' : ('f', MAX_NUM_CLUSTERS),
'ClusterRmsW' : ('f', MAX_NUM_CLUSTERS),
'ClusterMaxW' : ('f', MAX_NUM_CLUSTERS)
}
for (branchName, (branchType, branchSize)) in BRANCH_DICT.items():
a = array.array(branchType, [0]*branchSize)
arrayDict[branchName] = a
if branchSize == 1:
branchTitle = '%s/%s' % (branchName, branchType.upper())
else:
branchTitle = '%s[%d]/%s' %\
(branchName, branchSize, branchType.upper())
outputTree.Branch(branchName, a, branchTitle)
for i in xrange(numEvents):
reader.getEntry(i)
print '\nProcessing event %d/%d...' % (i, numEvents)
xtalCol = reader.getCalXtalRecCol()
numXtals = reader.getNumCalXtals()
arrayDict['EvtRun'][0] = reader.getMeritVariable('EvtRun')
arrayDict['EvtEventId'][0] = reader.getMeritVariable('EvtEventId')
arrayDict['CalEnergyRaw'][0] = reader.getMeritVariable('CalEnergyRaw')
arrayDict['McEnergy'][0] = reader.getMeritVariable('McEnergy')
arrayDict['TkrNumTracks'][0] = reader.getMeritVariable('TkrNumTracks')
arrayDict['CalCsIRLn'][0] = reader.getMeritVariable('CalCsIRLn')
arrayDict['NumXtals'][0] = numXtals
if numXtals <= MAX_NUM_XTALS:
clustering = MSTClustering(xtalCol, WEIGHT_THRESHOLD)
numClusters = clustering.getNumClusters()
arrayDict['NumClusters'][0] = numClusters
uberCluster = clustering.getUberCluster()
arrayDict['UberClusterNumXtals'][0] = uberCluster.getNumNodes()
arrayDict['UberClusterEnergy'][0] = uberCluster.EnergySum
arrayDict['UberClusterMeanW'][0] = uberCluster.getMeanEdgeWeight()
arrayDict['UberClusterRmsW'][0] = uberCluster.getRmsEdgeWeight()
arrayDict['UberClusterMaxW'][0] = uberCluster.getMaxEdgeWeight()
for cId in xrange(MAX_NUM_CLUSTERS):
if cId < numClusters:
c = clustering.getCluster(cId)
arrayDict['ClusterNumXtals'][cId] = c.getNumNodes()
arrayDict['ClusterEnergy'][cId] = c.EnergySum
arrayDict['ClusterMeanW'][cId] = c.getMeanEdgeWeight()
arrayDict['ClusterRmsW'][cId] = c.getRmsEdgeWeight()
arrayDict['ClusterMaxW'][cId] = c.getMaxEdgeWeight()
else:
arrayDict['ClusterNumXtals'][cId] = 0
arrayDict['ClusterEnergy'][cId] = 0.0
arrayDict['ClusterMeanW'][cId] = 0.0
arrayDict['ClusterRmsW'][cId] = 0.0
arrayDict['ClusterMaxW'][cId] = 0.0
else:
arrayDict['NumClusters'][0] = 0
arrayDict['UberClusterNumXtals'][0] = 0
arrayDict['UberClusterEnergy'][0] = 0.0
arrayDict['UberClusterMeanW'][0] = 0.0
arrayDict['UberClusterRmsW'][0] = 0.0
arrayDict['UberClusterMaxW'][0] = 0.0
for cId in xrange(MAX_NUM_CLUSTERS):
arrayDict['ClusterNumXtals'][cId] = 0
arrayDict['ClusterEnergy'][cId] = 0.0
arrayDict['ClusterMeanW'][cId] = 0.0
arrayDict['ClusterRmsW'][cId] = 0.0
arrayDict['ClusterMaxW'][cId] = 0.0
outputTree.Fill()
outputFile.Write()
outputFile.Close()
from .common import Common, DefaultAnchor, DefaultDimension, DefaultBorder, BaseChart
from .termui.alignment import TopLeftAnchor, Dimension
from .termui.control import Border
from .context import ContextList
from .region import RegionList
from .ssh import SSHList
from .info import InfoDisplay
from .aws import AWS
from .commander import Commander, Filterer
from .resources import *
from .meta import CommanderOptionsLister
import os
import sys
import datetime
def awscheck():
    """Return True when both an AWS context and a region are selected."""
    has_context = bool(Common.Session.context)
    has_region = bool(Common.Session.region)
    return has_context and has_region
def open_context_lister():
    """Open the context list screen.

    Returns the lister control together with its hotkey display so both can
    be installed as the session's current frame.
    """
    lister = ContextList(
        Common.Session.ui.top_block,
        DefaultAnchor,
        DefaultDimension,
        weight=0,
        border=DefaultBorder('context_list', 'Contexts'),
    )
    return [lister, lister.hotkey_display]
def open_region_lister():
    """Open the region list screen.

    Returns the lister control together with its hotkey display so both can
    be installed as the session's current frame.
    """
    regl = RegionList(
        Common.Session.ui.top_block,
        DefaultAnchor,
        DefaultDimension,
        border=DefaultBorder('region_list', 'Regions'),
        weight=0,  # normalized from `weight = 0` for consistency with the sibling openers
    )
    return [regl, regl.hotkey_display]
def open_ssh_lister():
    """Open the SSH key list screen (delegates to SSHList's standard opener)."""
    return SSHList.opener()
def open_filterer():
    """Open (or resume) the filter bar below the info display.

    Returns a new Filterer control when none is active; when one already
    exists it is resumed in place.
    """
    if Common.Session.filterer is None:
        return Filterer(
            Common.Session.ui.top_block,
            TopLeftAnchor(0, 8),
            Dimension('100%', '3'),
            Common.Session,
            color=Common.color('search_bar_color'),
            symbol_color=Common.color('search_bar_symbol_color'),
            autocomplete_color=Common.color('search_bar_autocomplete_color'),
            inactive_color=Common.color('search_bar_inactive_color'),
            weight=-200,
            border=Border(Common.border('search_bar'), Common.color('search_bar_border')),
        )
    else:
        # NOTE(review): this branch returns None (the resume() result is not
        # returned) - confirm callers handle both shapes of return value.
        Common.Session.filterer.resume()
def open_commander():
    """Create and return the command bar positioned below the info display."""
    return Commander(
        Common.Session.ui.top_block,
        TopLeftAnchor(0, 8),
        Dimension('100%', '3'),
        Common.Session,
        color=Common.color('command_bar_color'),
        symbol_color=Common.color('command_bar_symbol_color'),
        autocomplete_color=Common.color('command_bar_autocomplete_color'),
        ok_color=Common.color('command_bar_ok_color'),
        error_color=Common.color('command_bar_error_color'),
        weight=-200,
        # reuses the search bar's border styling
        border=Border(Common.border('search_bar'), Common.color('search_bar_border')),
    )
def main(*args, **kwargs):
    """Entry point: set up the session, register commander targets, run the UI."""
    # stderr hack
    # When stdin and stdout refer to the same file (interactive terminal),
    # reroute stderr to error.log so tracebacks don't corrupt the UI.
    old_stderr = None
    try:
        if os.fstat(0) == os.fstat(1):
            tg = open('error.log', 'w', buffering=1)
            old_stderr = sys.stderr
            sys.stderr = tg
        Common.initialize()
        Common.Session.service_provider = AWS()
        Common.Session.replace_frame(open_context_lister())
        Common.Session.info_display.commander_hook = open_commander
        Common.Session.info_display.filterer_hook = open_filterer
        # Maps command-bar keywords (full names and abbreviations) to the
        # opener callables for the corresponding screens.
        Common.Session.commander_options = {
            'ctx': open_context_lister,
            'context': open_context_lister,
            'region': open_region_lister,
            'ssh': open_ssh_lister,
            'lc': LCResourceLister.opener,
            'launchconfiguration': LCResourceLister.opener,
            'r53': R53ResourceLister.opener,
            'route53': R53ResourceLister.opener,
            'cfn': CFNResourceLister.opener,
            'cloudformation': CFNResourceLister.opener,
            'rds': RDSResourceLister.opener,
            'lb': LBResourceLister.opener,
            'elbv2': LBResourceLister.opener,
            'loadbalancing': LBResourceLister.opener,
            'ami': AMIResourceLister.opener,
            'image': AMIResourceLister.opener,
            'ebs': EBSResourceLister.opener,
            'ec2': EC2ResourceLister.opener,
            'instance': EC2ResourceLister.opener,
            'asg': ASGResourceLister.opener,
            'autoscaling': ASGResourceLister.opener,
            'rt': RouteTableResourceLister.opener,
            'route': RouteResourceLister.opener,
            'routetable': RouteTableResourceLister.opener,
            'sg': SGResourceLister.opener,
            'securitygroup': SGResourceLister.opener,
            'subnet': SubnetResourceLister.opener,
            'tg': TargetGroupResourceLister.opener,
            'targetgroup': TargetGroupResourceLister.opener,
            'vpc': VPCResourceLister.opener,
            'dsg': DBSubnetGroupResourceLister.opener,
            'dbsubnetgroup': DBSubnetGroupResourceLister.opener,
            's3': S3ResourceLister.opener,
            'it': InstanceClassResourceLister.opener,
            'instancetype': InstanceClassResourceLister.opener,
            'instanceclass': InstanceClassResourceLister.opener,
            'key': KeyPairResourceLister.opener,
            'keypair': KeyPairResourceLister.opener,
            '?': CommanderOptionsLister.opener,
            'help': CommanderOptionsLister.opener,
        }
        Common.main()
    finally:
        # restore the original stderr if it was rerouted above
        if old_stderr is not None:
            sys.stderr.close()
            sys.stderr = old_stderr
from .common import Common, DefaultAnchor, DefaultDimension, DefaultBorder, BaseChart
from .termui.alignment import TopLeftAnchor, Dimension
from .termui.control import Border
from .context import ContextList
from .region import RegionList
from .ssh import SSHList
from .info import InfoDisplay
from .aws import AWS
from .commander import Commander, Filterer
from .resources import *
from .meta import CommanderOptionsLister
import os
import sys
import datetime
def awscheck():
    """Return True when both an AWS context and a region are selected."""
    session = Common.Session
    return bool(session.context) and bool(session.region)
def open_context_lister():
    """Build the context list frame and return it with its hotkey display."""
    lister = ContextList(
        Common.Session.ui.top_block,
        DefaultAnchor,
        DefaultDimension,
        border=DefaultBorder('context_list', 'Contexts'),
        weight=0,
    )
    return [lister, lister.hotkey_display]
def open_region_lister():
    """Build the region list frame and return it with its hotkey display."""
    lister = RegionList(
        Common.Session.ui.top_block,
        DefaultAnchor,
        DefaultDimension,
        border=DefaultBorder('region_list', 'Regions'),
        weight=0,
    )
    return [lister, lister.hotkey_display]
def open_ssh_lister():
    """Open the SSH target lister frame (delegates to SSHList.opener)."""
    return SSHList.opener()
def open_filterer():
    """Create the search/filter bar, or resume the one already on screen.

    Returns a new Filterer widget on first use. On subsequent calls it
    resumes the existing filterer and implicitly returns None.
    """
    if Common.Session.filterer is None:
        return Filterer(
            Common.Session.ui.top_block,
            TopLeftAnchor(0, 8),
            Dimension('100%', '3'),
            Common.Session,
            color=Common.color('search_bar_color'),
            symbol_color=Common.color('search_bar_symbol_color'),
            autocomplete_color=Common.color('search_bar_autocomplete_color'),
            inactive_color=Common.color('search_bar_inactive_color'),
            weight=-200,
            border=Border(Common.border('search_bar'), Common.color('search_bar_border')),
        )
    else:
        # NOTE(review): this branch returns None — the filterer hook's caller
        # appears to tolerate that when resuming; confirm.
        Common.Session.filterer.resume()
def open_commander():
    """Create the command bar widget anchored near the top of the UI."""
    return Commander(
        Common.Session.ui.top_block,
        TopLeftAnchor(0, 8),
        Dimension('100%', '3'),
        Common.Session,
        color=Common.color('command_bar_color'),
        symbol_color=Common.color('command_bar_symbol_color'),
        autocomplete_color=Common.color('command_bar_autocomplete_color'),
        ok_color=Common.color('command_bar_ok_color'),
        error_color=Common.color('command_bar_error_color'),
        weight=-200,
        # NOTE(review): reuses the search bar's border scheme — presumably
        # intentional so both bars look identical; confirm.
        border=Border(Common.border('search_bar'), Common.color('search_bar_border')),
    )
def main(*args, **kwargs):
    """Entry point: initialize the session, install command hooks, run the UI.

    args/kwargs are accepted for entry-point compatibility but unused.
    """
    # stderr hack: while the full-screen UI owns the terminal, tracebacks
    # written to the real stderr would corrupt the display, so stderr is
    # redirected to a log file for the duration of the run.
    old_stderr = None
    try:
        # NOTE(review): comparing fstat(0) and fstat(1) tests whether stdin
        # and stdout refer to the same file (i.e. not redirected) — confirm
        # this is the intended "interactive terminal" check.
        if os.fstat(0) == os.fstat(1):
            tg = open('error.log', 'w', buffering=1)  # line-buffered log
            old_stderr = sys.stderr
            sys.stderr = tg
        Common.initialize()
        Common.Session.service_provider = AWS()
        # Start on the context selection screen.
        Common.Session.replace_frame(open_context_lister())
        Common.Session.info_display.commander_hook = open_commander
        Common.Session.info_display.filterer_hook = open_filterer
        # Command-bar dispatch table: each keyword (and its aliases) maps to
        # the opener that builds the corresponding frame.
        Common.Session.commander_options = {
            'ctx': open_context_lister,
            'context': open_context_lister,
            'region': open_region_lister,
            'ssh': open_ssh_lister,
            'lc': LCResourceLister.opener,
            'launchconfiguration': LCResourceLister.opener,
            'r53': R53ResourceLister.opener,
            'route53': R53ResourceLister.opener,
            'cfn': CFNResourceLister.opener,
            'cloudformation': CFNResourceLister.opener,
            'rds': RDSResourceLister.opener,
            'lb': LBResourceLister.opener,
            'elbv2': LBResourceLister.opener,
            'loadbalancing': LBResourceLister.opener,
            'ami': AMIResourceLister.opener,
            'image': AMIResourceLister.opener,
            'ebs': EBSResourceLister.opener,
            'ec2': EC2ResourceLister.opener,
            'instance': EC2ResourceLister.opener,
            'asg': ASGResourceLister.opener,
            'autoscaling': ASGResourceLister.opener,
            'rt': RouteTableResourceLister.opener,
            'route': RouteResourceLister.opener,
            'routetable': RouteTableResourceLister.opener,
            'sg': SGResourceLister.opener,
            'securitygroup': SGResourceLister.opener,
            'subnet': SubnetResourceLister.opener,
            'tg': TargetGroupResourceLister.opener,
            'targetgroup': TargetGroupResourceLister.opener,
            'vpc': VPCResourceLister.opener,
            'dsg': DBSubnetGroupResourceLister.opener,
            'dbsubnetgroup': DBSubnetGroupResourceLister.opener,
            's3': S3ResourceLister.opener,
            'it': InstanceClassResourceLister.opener,
            'instancetype': InstanceClassResourceLister.opener,
            'instanceclass': InstanceClassResourceLister.opener,
            'key': KeyPairResourceLister.opener,
            'keypair': KeyPairResourceLister.opener,
            '?': CommanderOptionsLister.opener,
            'help': CommanderOptionsLister.opener,
        }
        Common.main()
    finally:
        # Restore the real stderr if it was redirected.
        if old_stderr is not None:
            sys.stderr.close()
            sys.stderr = old_stderr
def array_part_loops_pruning(loops, config):
    """Apply pruning on array partitioning candidate loops.

    At present, we apply the following heuristics:
    - The product of all array_part loops should be greater than the total PE number
    - TODO: Prune based on off-chip traffic

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration
    """
    # Lower bound on the PE count; -1 disables this pruning rule entirely.
    PE_lb = config["setting"][config["mode"]]["pruning"]["array_part"]["PE_num"][0]
    if PE_lb == -1:
        return list(loops)
    kept = []
    for candidate in loops:
        # The PE count is the product of the tiling factors greater than one.
        pe_count = 1
        for factor in candidate:
            if factor > 1:
                pe_count *= factor
        if pe_count >= PE_lb:
            kept.append(candidate)
    return kept
def array_part_L2_loops_pruning(loops, config):
    """Apply pruning on L2 array partitioning candidate loops.

    At present, we apply the following heuristics:
    - We only apply L2 array partitioning on parallel loops to save off-chip
      communication. We examine from outer loops to inner loops. Once we meet
      a non-parallel loop, we stop there, and require the tiling factors from
      that point on to equal their upper bounds.

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration
    """
    tuning = config["tuning"]
    coincident = tuning["array_part_L2"]["coincident"]
    # Index of the first non-parallel (non-coincident) loop.
    loop_stop = 0
    while loop_stop < len(coincident) and coincident[loop_stop]:
        loop_stop += 1
    # Required tiling factors from the first non-parallel loop downwards.
    ubs = tuning["array_part_L2"]["tilable_loops"][loop_stop:]
    return [candidate for candidate in loops if candidate[loop_stop:] == ubs]
def latency_hiding_loops_pruning(loops, config):
    """Apply pruning on latency hiding candidate loops.

    At present, we apply the following heuristics:
    - We compute the latency hiding register size and prune the candidate
      when it falls outside the pre-set [lower, upper] threshold.

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration
    """
    bounds = config["setting"][config["mode"]]["pruning"]["latency_hiding"]["reg_size"]
    lb, ub = bounds[0], bounds[1]  # -1 disables the respective bound
    kept = []
    for candidate in loops:
        # Register size is the product of all tiling factors.
        reg_size = 1
        for factor in candidate:
            reg_size *= factor
        if lb != -1 and reg_size < lb:
            continue
        if ub != -1 and reg_size > ub:
            continue
        kept.append(candidate)
    return kept
def SIMD_vectorization_PE_pruning(config):
    """Apply pruning based on the PE structures at the SIMD vectorization stage.

    At present, we apply the following heuristics:
    - We restrain the PE number within certain range
    - We restrain the PE shape (aspect ratio) for 2D arrays

    Parameters
    ----------
    config: dict
        Global configuration

    Returns
    -------
    ret: boolean
        If this configuration is to be pruned.
    """
    tuning = config["tuning"]
    pruning_setting = config["setting"][config["mode"]]["pruning"]["SIMD_vectorization"]
    PE_num_lb = pruning_setting["PE_num"][0]
    PE_num_ub = pruning_setting["PE_num"][1]
    # Total PE count is the product of the systolic-array dimensions.
    n_pe = 1
    for dim in tuning["simd"]["sa_dims"]:
        n_pe *= int(dim)
    if PE_num_lb != -1 and n_pe < PE_num_lb:
        return True
    if PE_num_ub != -1 and n_pe > PE_num_ub:
        return True
    # Bug fix: sort a *copy* of sa_dims. The original called
    # sa_dims.sort(reverse=True), which mutated config["tuning"]["simd"]
    # ["sa_dims"] in place as a hidden side effect of a boolean predicate.
    sa_dims = sorted(tuning["simd"]["sa_dims"], reverse=True)
    if len(sa_dims) > 1:
        # Aspect ratio of the PE array: longest dim over second longest.
        pe_ratio = sa_dims[0] / sa_dims[1]
        PE_ratio_ub = pruning_setting["PE_ratio"]
        if PE_ratio_ub != -1 and pe_ratio > PE_ratio_ub:
            return True
    return False
def reorder_simd_loops(loops):
    """Reorder the SIMD candidate loops (in place) for pruning.

    Each candidate loop is in the format [1, ..., 1, X]. The loops are sorted
    by the value at the shared non-one position in descending order, so that
    larger SIMD factors are evaluated first.

    Parameters
    ----------
    loops: list
        A list containing all candidate SIMD loops to be evaluated.
    """
    # Position of the first non-one factor across all candidates (-1 if none).
    pos = next(
        (i for candidate in loops
         for i, factor in enumerate(candidate) if factor != 1),
        -1,
    )
    if pos == -1:
        # Every candidate is all ones; nothing to reorder.
        return loops
    loops.sort(key=lambda candidate: candidate[pos], reverse=True)
    return loops
def SIMD_vectorization_latency_pruning(config):
    """Perform latency-based pruning at the SIMD vectorization stage.

    We have already reordered the SIMD candidate loops in descending order.
    Therefore, if the last design evaluated is slower than the opt design found
    so far, there is no chance for the rest of candidates which has a smaller
    SIMD factor to beat the opt design.
    We will stop exploration for these loops and return.
    Otherwise, if the resource usage is legal, we have already found a design that
    achieves the least latency in the current group. For the other designs with
    a smaller SIMD factor, their latency is no less than the current design.
    We will stop exploration for these loops and return.
    However, there is a chance that the designs with a smaller SIMD factor achieves
    the same latency but with less resource usage (for a comm bound design).
    At present, we ignore such cases.

    Returns
    -------
    bool
        True if exploration of the remaining candidates should stop.
    """
    last_design = config["monitor"]["last_design"]
    # latency == -1 marks a design already slower than the current optimum.
    if last_design["latency"] == -1:
        # The current design is already slower than opt., stop exploration.
        return True
    else:
        # The current design is resource-legal, stop exploration.
        if not last_design["resource"]:
            return True
return False | frontend/heterosa/optimizer_prune.py |
def array_part_loops_pruning(loops, config):
    """Apply pruning on array partitioning candidate loops.

    At present, we apply the following heuristics:
    - The product of all array_part loops should be greater than the total PE number
    - TODO: Prune based on off-chip traffic

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration

    Returns
    -------
    list
        The candidate loops that survive pruning.
    """
    pruned_loops = []
    # Lower bound on the PE count; -1 disables this rule.
    PE_lb = config["setting"][config["mode"]]["pruning"]["array_part"]["PE_num"][0]
    for loop in loops:
        if PE_lb == -1:
            pruned_loops.append(loop)
        else:
            # PE count = product of the tiling factors greater than one.
            prod = 1
            for l in loop:
                if l > 1:
                    prod *= l
            if prod < PE_lb:
                continue
            pruned_loops.append(loop)
    return pruned_loops
def array_part_L2_loops_pruning(loops, config):
    """Apply pruning on L2 array partitioning candidate loops.

    At present, we apply the following heuristics:
    - We only apply L2 array partitioning on parallel loops to save off-chip communication.
      We examine from outer loops to inner loops. Once we meet a non-parallel loop,
      we will stop from here, and set the tiling factors from here to below to maximum.

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration

    Returns
    -------
    list
        The candidate loops that survive pruning.
    """
    pruned_loops = []
    tuning = config["tuning"]
    # Find the first non-parallel (non-coincident) loop level.
    loop_stop = 0
    for c in tuning["array_part_L2"]["coincident"]:
        if not c:
            break
        loop_stop += 1
    # Tiling factors from that level on must equal their upper bounds.
    ubs = tuning["array_part_L2"]["tilable_loops"][loop_stop:]
    for loop in loops:
        # Examine [loop_stop:-1], only leave those that equal the upper bound
        loop_cut = loop[loop_stop:]
        if loop_cut != ubs:
            continue
        pruned_loops.append(loop)
    return pruned_loops
def latency_hiding_loops_pruning(loops, config):
    """Apply pruning on latency hiding candidate loops.

    At present, we apply the following heuristics:
    - We compute the latency hiding register sizes and prune it when it is
      greater or less than the pre-set threshold.

    Parameters
    ----------
    loops: list
        A list of candidate loops
    config:
        Global configuration

    Returns
    -------
    list
        The candidate loops that survive pruning.
    """
    pruned_loops = []
    # Register size bounds; -1 disables the respective bound.
    reg_size_lb = config["setting"][config["mode"]]["pruning"]["latency_hiding"][
        "reg_size"
    ][0]
    reg_size_ub = config["setting"][config["mode"]]["pruning"]["latency_hiding"][
        "reg_size"
    ][1]
    for loop in loops:
        # Register size is the product of all tiling factors.
        size = 1
        for l in loop:
            size *= l
        if reg_size_lb != -1:
            if size < reg_size_lb:
                continue
        if reg_size_ub != -1:
            if size > reg_size_ub:
                continue
        pruned_loops.append(loop)
    return pruned_loops
def SIMD_vectorization_PE_pruning(config):
    """Apply pruning based on the PE structures at the SIMD vectorization stage.

    At present, we apply the following heuristics:
    - We restrain the PE number within certain range
    - We restrain the PE shape for 2D array

    Parameters
    ----------
    config: dict
        Global configuration

    Returns
    -------
    ret: boolean
        If this configuration is to be pruned.
    """
    tuning = config["tuning"]
    ret = False
    # PE-count bounds; -1 disables the respective bound.
    PE_num_lb = config["setting"][config["mode"]]["pruning"]["SIMD_vectorization"][
        "PE_num"
    ][0]
    PE_num_ub = config["setting"][config["mode"]]["pruning"]["SIMD_vectorization"][
        "PE_num"
    ][1]
    # Total PE count is the product of the systolic-array dimensions.
    n_pe = 1
    for dim in tuning["simd"]["sa_dims"]:
        n_pe *= int(dim)
    if PE_num_lb != -1:
        if n_pe < PE_num_lb:
            return True
    if PE_num_ub != -1:
        if n_pe > PE_num_ub:
            return True
    sa_dims = tuning["simd"]["sa_dims"]
    if len(tuning["simd"]["sa_dims"]) > 1:
        # NOTE(review): this sorts config["tuning"]["simd"]["sa_dims"] in
        # place — a side effect on the caller's config; confirm nothing
        # depends on the original dimension order.
        sa_dims.sort(reverse=True)
        # Aspect ratio: longest dim over second longest.
        pe_ratio = sa_dims[0] / sa_dims[1]
        if (
            config["setting"][config["mode"]]["pruning"]["SIMD_vectorization"][
                "PE_ratio"
            ]
            != -1
        ):
            if (
                pe_ratio
                > config["setting"][config["mode"]]["pruning"]["SIMD_vectorization"][
                    "PE_ratio"
                ]
            ):
                return True
    return ret
def reorder_simd_loops(loops):
    """Reorder the simd loops for pruning.

    The input loops contains a list of candidate loops.
    For each candidate loop, it is in the format of [1, 1, X].
    We will sort the loops based on the non-one element in descending order.
    The sort happens in place; the same list object is returned.

    Parameters
    ----------
    loops: list
        A list containing all candidate SIMD loops to be evaluated.
    """
    # Find the position of the non-one element.
    pos = -1
    for loop in loops:
        for i in range(len(loop)):
            if loop[i] != 1:
                pos = i
                break
        if pos != -1:
            break
    if pos == -1:
        # All the loops are ones.
        return loops
    loops.sort(key=lambda x: x[pos], reverse=True)
    return loops
def SIMD_vectorization_latency_pruning(config):
    """Perform latency-based pruning at the SIMD vectorization stage.

    We have already reordered the SIMD candidate loops in descending order.
    Therefore, if the last design evaluated is slower than the opt design found
    so far, there is no chance for the rest of candidates which has a smaller
    SIMD factor to beat the opt design.
    We will stop exploration for these loops and return.
    Otherwise, if the resource usage is legal, we have already found a design that
    achieves the least latency in the current group. For the other designs with
    a smaller SIMD factor, their latency is no less than the current design.
    We will stop exploration for these loops and return.
    However, there is a chance that the designs with a smaller SIMD factor achieves
    the same latency but with less resource usage (for a comm bound design).
    At present, we ignore such cases.

    Returns
    -------
    bool
        True if exploration of the remaining candidates should stop.
    """
    last_design = config["monitor"]["last_design"]
    # latency == -1 marks a design already slower than the current optimum.
    if last_design["latency"] == -1:
        # The current design is already slower than opt., stop exploration.
        return True
    else:
        # The current design is resource-legal, stop exploration.
        if not last_design["resource"]:
            return True
return False | 0.704262 | 0.574395 |
import collections
import json
import operator
import os
from cros.factory.device import device_utils
from cros.factory.test import device_data
from cros.factory.test import session
from cros.factory.test.i18n import _
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.test import ui_templates
from cros.factory.test.utils import deploy_utils
from cros.factory.utils.arg_utils import Arg
# The config files should be placed in the py/test/pytests/probe/ folder.
LOCAL_CONFIG_DIR = os.path.dirname(os.path.abspath(__file__))

# Maps a comparison-operator token to the callable implementing it.
OPERATOR_MAP = {
    '==': operator.eq,
    '!=': operator.ne,
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    'in': lambda a, b: a in b,
}


def EvaluateRule(a, op_str, b):
    """Return the result of applying the operator named `op_str` to (a, b)."""
    compare = OPERATOR_MAP[op_str]
    return compare(a, b)
class ProbeTest(test_case.TestCase):
    """Run the `probe` tool against a config file and verify component counts.

    For every probed category, the number of components found is checked
    against a rule taken from device data (``component.has_<category>``,
    defaulting to exactly one) or from ``overridden_rules``. Results are
    rendered as an HTML table and optionally shown to the operator.
    """

    ARGS = [
        Arg('config_file', str,
            'Path to probe config file. This is interpreted as a path '
            'relative to `test/pytests/probe` folder.'),
        Arg('component_list', list,
            'A list of components to be verified',
            default=None),
        Arg('overridden_rules', list,
            'List of [category, cmp_function, value].',
            default=[]),
        Arg('show_ui', bool,
            'Always show the result and prompt if set to True. Always not show '
            'the result and prompt if set to False. Otherwise, only show the '
            'result and prompt when the test fails.',
            default=None),
    ]

    def setUp(self):
        """Create the DUT interface and resolve the probe config path."""
        self._dut = device_utils.CreateDUTInterface()
        self.factory_tools = deploy_utils.CreateFactoryTools(self._dut)
        self.config_file_path = os.path.join(
            LOCAL_CONFIG_DIR, self.args.config_file)

    def runTest(self):
        """Run probe, evaluate per-category rules, render results, pass/fail."""
        # Check the config file exists.
        if not os.path.exists(self.config_file_path):
            self.fail('Config file %s does not exist.' % self.config_file_path)
        # Execute Probe.
        cmd = ['probe', '-v', 'probe', '--config-file', self.config_file_path]
        if self.args.component_list is not None:
            cmd += ['--comps'] + self.args.component_list
        session.console.info('Call the command: %s', ' '.join(cmd))
        probed_results = json.loads(self.factory_tools.CheckOutput(cmd))
        # Generate the rules of each category.
        rule_map = {}
        for category in probed_results:
            expected_count = device_data.GetDeviceData(
                device_data.JoinKeys(device_data.KEY_COMPONENT, 'has_' + category))
            # Default rule: exactly one component unless device data says otherwise.
            rule_map[category] = (
                '==', int(expected_count) if expected_count is not None else 1)
        # Explicit overrides take precedence over device-data defaults.
        for category, op_str, value in self.args.overridden_rules:
            rule_map[category] = (op_str, value)
        table_html = ui_templates.Table(rows=len(probed_results) + 1, cols=4)
        title = ['Category', 'Probed Components', 'Rule', 'Status']
        for idx, content in enumerate(title):
            table_html.SetContent(0, idx, '<b>%s</b>' % content)
        # Check every category meets the rule.
        all_passed = True
        for row_idx, category in enumerate(probed_results, 1):
            count = len(probed_results[category])
            op_str, value = rule_map[category]
            status = OPERATOR_MAP[op_str](count, value)
            all_passed &= status
            # Set the table.
            counter = collections.defaultdict(int)
            for result in probed_results[category]:
                counter[result['name']] += 1
            comp_summary = '<br>'.join('%d %s found.' % (num_comp, comp_name)
                                       for comp_name, num_comp in counter.items())
            summary_str = comp_summary or 'No component found.'
            rule_str = 'count (%s) %s %s' % (count, op_str, value)
            status_str = 'passed' if status else 'failed'
            session.console.info('Category "%s" %s %s, %s.',
                                 category, summary_str, rule_str, status_str)
            table_html.SetContent(row_idx, 0, category)
            table_html.SetContent(row_idx, 1, summary_str)
            table_html.SetContent(row_idx, 2, rule_str)
            table_html.SetContent(
                row_idx, 3, '<div class=test-status-{0}>{0}</div>'.format(status_str))
        # Show the table when forced on, or by default when something failed.
        if self.args.show_ui is True or (self.args.show_ui is None and
                                         not all_passed):
            self.ui.SetState([
                table_html.GenerateHTML(), '<span class="prompt">',
                _('Press SPACE to continue'), '</span>'
            ])
            self.ui.WaitKeysOnce(test_ui.SPACE_KEY)
        if not all_passed:
self.fail() | py/test/pytests/probe/probe.py | import collections
import json
import operator
import os
from cros.factory.device import device_utils
from cros.factory.test import device_data
from cros.factory.test import session
from cros.factory.test.i18n import _
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.test import ui_templates
from cros.factory.test.utils import deploy_utils
from cros.factory.utils.arg_utils import Arg
# The config files should be placed in the py/test/pytests/probe/ folder.
LOCAL_CONFIG_DIR = os.path.dirname(os.path.abspath(__file__))
# Maps a comparison-operator token to the callable implementing it.
OPERATOR_MAP = {
    '==': operator.eq,
    '!=': operator.ne,
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    'in': lambda a, b: a in b}


def EvaluateRule(a, op_str, b):
    """Return the result of applying the operator named `op_str` to (a, b)."""
    return OPERATOR_MAP[op_str](a, b)
class ProbeTest(test_case.TestCase):
    """Probe hardware components and check per-category counts against rules.

    Rules come from device data (``component.has_<category>``, defaulting to
    exactly one component) or from the ``overridden_rules`` argument; an HTML
    results table is rendered and optionally displayed.
    """

    ARGS = [
        Arg('config_file', str,
            'Path to probe config file. This is interpreted as a path '
            'relative to `test/pytests/probe` folder.'),
        Arg('component_list', list,
            'A list of components to be verified',
            default=None),
        Arg('overridden_rules', list,
            'List of [category, cmp_function, value].',
            default=[]),
        Arg('show_ui', bool,
            'Always show the result and prompt if set to True. Always not show '
            'the result and prompt if set to False. Otherwise, only show the '
            'result and prompt when the test fails.',
            default=None),
    ]

    def setUp(self):
        """Create the DUT interface and resolve the probe config path."""
        self._dut = device_utils.CreateDUTInterface()
        self.factory_tools = deploy_utils.CreateFactoryTools(self._dut)
        self.config_file_path = os.path.join(
            LOCAL_CONFIG_DIR, self.args.config_file)

    def runTest(self):
        """Run probe, evaluate per-category rules, render results, pass/fail."""
        # Check the config file exists.
        if not os.path.exists(self.config_file_path):
            self.fail('Config file %s does not exist.' % self.config_file_path)
        # Execute Probe.
        cmd = ['probe', '-v', 'probe', '--config-file', self.config_file_path]
        if self.args.component_list is not None:
            cmd += ['--comps'] + self.args.component_list
        session.console.info('Call the command: %s', ' '.join(cmd))
        probed_results = json.loads(self.factory_tools.CheckOutput(cmd))
        # Generate the rules of each category.
        rule_map = {}
        for category in probed_results:
            expected_count = device_data.GetDeviceData(
                device_data.JoinKeys(device_data.KEY_COMPONENT, 'has_' + category))
            # Default rule: exactly one component unless device data says otherwise.
            rule_map[category] = (
                '==', int(expected_count) if expected_count is not None else 1)
        # Explicit overrides take precedence over device-data defaults.
        for category, op_str, value in self.args.overridden_rules:
            rule_map[category] = (op_str, value)
        table_html = ui_templates.Table(rows=len(probed_results) + 1, cols=4)
        title = ['Category', 'Probed Components', 'Rule', 'Status']
        for idx, content in enumerate(title):
            table_html.SetContent(0, idx, '<b>%s</b>' % content)
        # Check every category meets the rule.
        all_passed = True
        for row_idx, category in enumerate(probed_results, 1):
            count = len(probed_results[category])
            op_str, value = rule_map[category]
            status = OPERATOR_MAP[op_str](count, value)
            all_passed &= status
            # Set the table.
            counter = collections.defaultdict(int)
            for result in probed_results[category]:
                counter[result['name']] += 1
            comp_summary = '<br>'.join('%d %s found.' % (num_comp, comp_name)
                                       for comp_name, num_comp in counter.items())
            summary_str = comp_summary or 'No component found.'
            rule_str = 'count (%s) %s %s' % (count, op_str, value)
            status_str = 'passed' if status else 'failed'
            session.console.info('Category "%s" %s %s, %s.',
                                 category, summary_str, rule_str, status_str)
            table_html.SetContent(row_idx, 0, category)
            table_html.SetContent(row_idx, 1, summary_str)
            table_html.SetContent(row_idx, 2, rule_str)
            table_html.SetContent(
                row_idx, 3, '<div class=test-status-{0}>{0}</div>'.format(status_str))
        # Show the table when forced on, or by default when something failed.
        if self.args.show_ui is True or (self.args.show_ui is None and
                                         not all_passed):
            self.ui.SetState([
                table_html.GenerateHTML(), '<span class="prompt">',
                _('Press SPACE to continue'), '</span>'
            ])
            self.ui.WaitKeysOnce(test_ui.SPACE_KEY)
        if not all_passed:
self.fail() | 0.357119 | 0.200323 |
from typing import Tuple, Mapping
import random
import numpy as np
from .state import FockState
from piquasso.api.instruction import Instruction
from piquasso.api.result import Result
def vacuum(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Reset the state to the vacuum and return it wrapped in a Result."""
    state.reset()
    return Result(state=state)
def passive_linear(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a passive linear transformation to the density matrix."""
    operator: np.ndarray = instruction._all_params["passive_block"]
    # Lift the mode-space operator to the truncated Fock space.
    fock_operator = state._space.get_passive_fock_operator(
        operator,
        modes=instruction.modes,
        d=state._space.d,
        permanent_function=state._config.permanent_function,
    )
    # rho -> U rho U^dagger
    state._density_matrix = (
        fock_operator @ state._density_matrix @ fock_operator.conjugate().transpose()
    )
    return Result(state=state)
def particle_number_measurement(
    state: FockState, instruction: Instruction, shots: int
) -> Result:
    """Sample particle-number outcomes on the measured modes and collapse the state.

    Draws `shots` samples from the reduced state's Fock probability
    distribution, then projects the full state onto the outcome of the last
    sample and renormalizes.
    """
    reduced_state = state.reduced(instruction.modes)
    probability_map = reduced_state.fock_probabilities_map
    samples = random.choices(
        population=list(probability_map.keys()),
        weights=list(probability_map.values()),
        k=shots,
    )
    # NOTE: We choose the last sample for multiple shots.
    sample = samples[-1]
    normalization = _get_normalization(probability_map, sample)
    _project_to_subspace(
        state=state,
        subspace_basis=sample,
        modes=instruction.modes,
        normalization=normalization,
    )
    return Result(state=state, samples=samples)
def _get_normalization(
probability_map: Mapping[Tuple[int, ...], float], sample: Tuple[int, ...]
) -> float:
return 1 / probability_map[sample]
def _project_to_subspace(
    state: FockState,
    *,
    subspace_basis: Tuple[int, ...],
    modes: Tuple[int, ...],
    normalization: float
) -> None:
    """Project the density matrix onto `subspace_basis` and renormalize in place."""
    projected_density_matrix = _get_projected_density_matrix(
        state=state,
        subspace_basis=subspace_basis,
        modes=modes,
    )
    # Rescale so the projected state has unit trace.
    state._density_matrix = projected_density_matrix * normalization
def _get_projected_density_matrix(
    state: FockState, *, subspace_basis: Tuple[int, ...], modes: Tuple[int, ...]
) -> np.ndarray:
    """Return a copy of the density matrix with only the projected entries kept."""
    new_density_matrix = state._get_empty()
    # Indices of the entries consistent with the measured basis on `modes`.
    index = state._space.get_projection_operator_indices(
        subspace_basis=subspace_basis,
        modes=modes,
    )
    new_density_matrix[index] = state._density_matrix[index]
    return new_density_matrix
def create(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply creation operators on the instruction's modes and renormalize."""
    creation = state._space.get_creation_operator(instruction.modes)
    state._density_matrix = creation @ state._density_matrix @ creation.transpose()
    state.normalize()
    return Result(state=state)
def annihilate(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply annihilation operators on the instruction's modes and renormalize."""
    operator = state._space.get_annihilation_operator(instruction.modes)
    state._density_matrix = operator @ state._density_matrix @ operator.transpose()
    state.normalize()
    return Result(state=state)
def kerr(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a Kerr gate by phase-rotating each density-matrix entry.

    NOTE(review): only ``instruction.modes[0]`` is used, yet ``xi`` is
    iterated — every element of ``xi`` is applied to the same first mode.
    Confirm this is intended for multi-element ``xi``.
    """
    mode = instruction.modes[0]
    xi = instruction._all_params["xi"]
    for x in xi:
        for index, (basis, dual_basis) in state._space.operator_basis:
            number = basis[mode]
            dual_number = dual_basis[mode]
            # Diagonal (in Fock basis) phase factor of the Kerr evolution.
            coefficient = np.exp(
                1j
                * x
                * (number * (2 * number + 1) - dual_number * (2 * dual_number + 1))
            )
            state._density_matrix[index] *= coefficient
    return Result(state=state)
def cubic_phase(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply single-mode cubic phase gates to each addressed mode."""
    gamma = instruction._all_params["gamma"]
    hbar = state._config.hbar
    for index, mode in enumerate(instruction.modes):
        operator = state._space.get_single_mode_cubic_phase_operator(
            gamma=gamma[index], hbar=hbar
        )
        # Embed the single-mode operator into the full multimode space.
        embedded_operator = state._space.embed_matrix(
            operator,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        # rho -> U rho U^dagger
        state._density_matrix = (
            embedded_operator
            @ state._density_matrix
            @ embedded_operator.conjugate().transpose()
        )
    # Renormalize: truncation of the Fock space makes the map non-unitary.
    state.normalize()
    return Result(state=state)
def cross_kerr(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a cross-Kerr gate on the first two addressed modes."""
    modes = instruction.modes
    xi = instruction._all_params["xi"]
    for index, (basis, dual_basis) in state._space.operator_basis:
        # Phase depends on the photon-number product of the two modes.
        coefficient = np.exp(
            1j
            * xi
            * (
                basis[modes[0]] * basis[modes[1]]
                - dual_basis[modes[0]] * dual_basis[modes[1]]
            )
        )
        state._density_matrix[index] *= coefficient
    return Result(state=state)
def displacement(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply single-mode displacement gates to each addressed mode and renormalize."""
    displacement_vector = instruction._all_params["displacement_vector"]
    amplitudes = np.abs(displacement_vector)
    angles = np.angle(displacement_vector)
    for mode, r, phi in zip(instruction.modes, amplitudes, angles):
        single_mode_operator = state._space.get_single_mode_displacement_operator(
            r=r,
            phi=phi,
        )
        # Embed the single-mode operator into the full multimode space.
        embedded = state._space.embed_matrix(
            single_mode_operator,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        # rho -> U rho U^dagger
        state._density_matrix = (
            embedded @ state._density_matrix @ embedded.conjugate().transpose()
        )
    state.normalize()
    return Result(state=state)
def squeezing(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply single-mode squeezing gates to each addressed mode."""
    # Recover (r, phi) from the symplectic blocks: cosh(r) sits on the
    # passive diagonal and -e^{i phi} sinh(r) on the active diagonal.
    amplitudes = np.arccosh(np.diag(instruction._all_params["passive_block"]))
    angles = np.angle(-np.diag(instruction._all_params["active_block"]))
    for index, mode in enumerate(instruction.modes):
        operator = state._space.get_single_mode_squeezing_operator(
            r=amplitudes[index],
            phi=angles[index],
        )
        # Embed the single-mode operator into the full multimode space.
        embedded_operator = state._space.embed_matrix(
            operator,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        # rho -> U rho U^dagger
        state._density_matrix = (
            embedded_operator
            @ state._density_matrix
            @ embedded_operator.conjugate().transpose()
        )
    # Renormalize: truncation of the Fock space makes the map non-unitary.
    state.normalize()
    return Result(state=state)
def linear(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a general (active + passive) linear transformation and renormalize."""
    operator = state._space.get_linear_fock_operator(
        modes=instruction.modes,
        passive_block=instruction._all_params["passive_block"],
        active_block=instruction._all_params["active_block"],
        permanent_function=state._config.permanent_function,
    )
    # rho -> U rho U^dagger
    state._density_matrix = (
        operator @ state._density_matrix @ operator.conjugate().transpose()
    )
    state.normalize()
    return Result(state=state)
def density_matrix_instruction(
    state: FockState, instruction: Instruction, shots: int
) -> Result:
    """Set a single density-matrix entry from the instruction's parameters."""
    _add_occupation_number_basis(state, **instruction.params)
    return Result(state=state)
def _add_occupation_number_basis(
    state: FockState,
    *,
    ket: Tuple[int, ...],
    bra: Tuple[int, ...],
    coefficient: complex
) -> None:
    """Write `coefficient` into the |ket><bra| entry of the density matrix."""
    index = state._space.index(ket)
    dual_index = state._space.index(bra)
state._density_matrix[index, dual_index] = coefficient | piquasso/_backends/fock/general/calculations.py |
from typing import Tuple, Mapping
import random
import numpy as np
from .state import FockState
from piquasso.api.instruction import Instruction
from piquasso.api.result import Result
def vacuum(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Reset the state to the vacuum and return it wrapped in a Result."""
    state.reset()
    return Result(state=state)
def passive_linear(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a passive linear transformation to the density matrix."""
    operator: np.ndarray = instruction._all_params["passive_block"]
    # Lift the mode-space operator to the truncated Fock space.
    fock_operator = state._space.get_passive_fock_operator(
        operator,
        modes=instruction.modes,
        d=state._space.d,
        permanent_function=state._config.permanent_function,
    )
    # rho -> U rho U^dagger
    state._density_matrix = (
        fock_operator @ state._density_matrix @ fock_operator.conjugate().transpose()
    )
    return Result(state=state)
def particle_number_measurement(
    state: FockState, instruction: Instruction, shots: int
) -> Result:
    """Sample particle-number outcomes and collapse the state onto the last one."""
    reduced_state = state.reduced(instruction.modes)
    probability_map = reduced_state.fock_probabilities_map
    samples = random.choices(
        population=list(probability_map.keys()),
        weights=list(probability_map.values()),
        k=shots,
    )
    # NOTE: We choose the last sample for multiple shots.
    sample = samples[-1]
    normalization = _get_normalization(probability_map, sample)
    _project_to_subspace(
        state=state,
        subspace_basis=sample,
        modes=instruction.modes,
        normalization=normalization,
    )
    return Result(state=state, samples=samples)
def _get_normalization(
    probability_map: Mapping[Tuple[int, ...], float], sample: Tuple[int, ...]
) -> float:
    """Return the factor that renormalizes the state after projecting onto `sample`."""
    return 1 / probability_map[sample]
def _project_to_subspace(
    state: FockState,
    *,
    subspace_basis: Tuple[int, ...],
    modes: Tuple[int, ...],
    normalization: float
) -> None:
    """Project the density matrix onto `subspace_basis` and renormalize in place."""
    projected_density_matrix = _get_projected_density_matrix(
        state=state,
        subspace_basis=subspace_basis,
        modes=modes,
    )
    # Rescale so the projected state has unit trace.
    state._density_matrix = projected_density_matrix * normalization
def _get_projected_density_matrix(
    state: FockState, *, subspace_basis: Tuple[int, ...], modes: Tuple[int, ...]
) -> np.ndarray:
    """Return a copy of the density matrix with only the projected entries kept."""
    new_density_matrix = state._get_empty()
    # Indices of the entries consistent with the measured basis on `modes`.
    index = state._space.get_projection_operator_indices(
        subspace_basis=subspace_basis,
        modes=modes,
    )
    new_density_matrix[index] = state._density_matrix[index]
    return new_density_matrix
def create(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply creation operators on the instruction's modes and renormalize."""
    operator = state._space.get_creation_operator(instruction.modes)
    state._density_matrix = operator @ state._density_matrix @ operator.transpose()
    state.normalize()
    return Result(state=state)
def annihilate(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply annihilation operators on the instruction's modes and renormalize."""
    operator = state._space.get_annihilation_operator(instruction.modes)
    state._density_matrix = operator @ state._density_matrix @ operator.transpose()
    state.normalize()
    return Result(state=state)
def kerr(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a Kerr gate by phase-rotating each density-matrix entry.

    NOTE(review): only ``instruction.modes[0]`` is used while ``xi`` is
    iterated — confirm that applying every ``xi`` element to the same first
    mode is intended.
    """
    mode = instruction.modes[0]
    xi = instruction._all_params["xi"]
    for x in xi:
        for index, (basis, dual_basis) in state._space.operator_basis:
            number = basis[mode]
            dual_number = dual_basis[mode]
            # Diagonal (in Fock basis) phase factor of the Kerr evolution.
            coefficient = np.exp(
                1j
                * x
                * (number * (2 * number + 1) - dual_number * (2 * dual_number + 1))
            )
            state._density_matrix[index] *= coefficient
    return Result(state=state)
def cubic_phase(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply the single-mode cubic phase gate to every targeted mode, in place."""
    gammas = instruction._all_params["gamma"]
    hbar = state._config.hbar
    for position, mode in enumerate(instruction.modes):
        single_mode_operator = state._space.get_single_mode_cubic_phase_operator(
            gamma=gammas[position], hbar=hbar
        )
        # Lift the single-mode matrix to the full Hilbert space.
        full_operator = state._space.embed_matrix(
            single_mode_operator,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        state._density_matrix = (
            full_operator
            @ state._density_matrix
            @ full_operator.conjugate().transpose()
        )
    # Truncation makes the operator only approximately unitary; renormalize.
    state.normalize()
    return Result(state=state)
def cross_kerr(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply the cross-Kerr interaction between the two instruction modes, in place."""
    mode_a, mode_b = instruction.modes[0], instruction.modes[1]
    xi = instruction._all_params["xi"]
    for index, (basis, dual_basis) in state._space.operator_basis:
        # Phase from the product of occupation numbers on both modes,
        # ket contribution minus bra contribution.
        phase = (
            basis[mode_a] * basis[mode_b]
            - dual_basis[mode_a] * dual_basis[mode_b]
        )
        state._density_matrix[index] *= np.exp(1j * xi * phase)
    return Result(state=state)
def displacement(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply single-mode displacement operators, one per targeted mode, in place."""
    vector = instruction._all_params["displacement_vector"]
    # Polar decomposition of the complex displacement amplitudes.
    amplitudes = np.abs(vector)
    angles = np.angle(vector)
    for position, mode in enumerate(instruction.modes):
        matrix = state._space.get_single_mode_displacement_operator(
            r=amplitudes[position],
            phi=angles[position],
        )
        embedded = state._space.embed_matrix(
            matrix,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        state._density_matrix = (
            embedded @ state._density_matrix @ embedded.conjugate().transpose()
        )
    # Truncation makes the operator only approximately unitary; renormalize.
    state.normalize()
    return Result(state=state)
def squeezing(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply single-mode squeezing operators, one per targeted mode, in place."""
    # NOTE(review): r is read off the passive diagonal via arccosh and the sign
    # flip before np.angle suggests the active diagonal stores -e^{i*phi}*sinh(r)
    # — confirm against the symplectic-block convention used elsewhere.
    amplitudes = np.arccosh(np.diag(instruction._all_params["passive_block"]))
    angles = np.angle(-np.diag(instruction._all_params["active_block"]))
    for position, mode in enumerate(instruction.modes):
        matrix = state._space.get_single_mode_squeezing_operator(
            r=amplitudes[position],
            phi=angles[position],
        )
        embedded = state._space.embed_matrix(
            matrix,
            modes=(mode,),
            auxiliary_modes=state._get_auxiliary_modes(instruction.modes),
        )
        state._density_matrix = (
            embedded @ state._density_matrix @ embedded.conjugate().transpose()
        )
    # Truncation makes the operator only approximately unitary; renormalize.
    state.normalize()
    return Result(state=state)
def linear(state: FockState, instruction: Instruction, shots: int) -> Result:
    """Apply a general linear (passive + active) transformation on the given modes."""
    fock_operator = state._space.get_linear_fock_operator(
        modes=instruction.modes,
        passive_block=instruction._all_params["passive_block"],
        active_block=instruction._all_params["active_block"],
        permanent_function=state._config.permanent_function,
    )
    state._density_matrix = (
        fock_operator @ state._density_matrix @ fock_operator.conjugate().transpose()
    )
    # Truncation makes the operator only approximately unitary; renormalize.
    state.normalize()
    return Result(state=state)
def density_matrix_instruction(
    state: FockState, instruction: Instruction, shots: int
) -> Result:
    """Write one occupation-number matrix element specified by the instruction."""
    _add_occupation_number_basis(state, **instruction.params)
    return Result(state=state)
def _add_occupation_number_basis(
    state: FockState,
    *,
    ket: Tuple[int, ...],
    bra: Tuple[int, ...],
    coefficient: complex
) -> None:
    """Set the density-matrix entry <ket| rho |bra> to `coefficient`, in place."""
    row = state._space.index(ket)
    column = state._space.index(bra)
    state._density_matrix[row, column] = coefficient
import re
import sys
from copy import deepcopy
class Type:
    """A C++ type: a name plus an optional list of template parameters."""

    def __init__(self, name="", templateParams=None):
        self.name = name
        # Avoid a shared mutable default: each instance gets its own list.
        self.templateParams = templateParams if templateParams else []

    def trimNames(self):
        """Strip surrounding whitespace from this name and all nested ones."""
        self.name = self.name.strip()
        for param in self.templateParams:
            param.trimNames()

    def isTemplate(self):
        """Return True when the type carries template parameters."""
        return bool(self.templateParams)

    def __eq__(self, other):
        # Value equality: same class and identical contents; anything else unequal.
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False
def parseTypeString(inTypeString):
    """Parse a C++ type string such as ``map<int, string>`` into a Type tree."""
    current = Type()
    ancestors = []
    for ch in inTypeString:
        if ch == '<':
            # Descend: open a fresh template-parameter child.
            ancestors.append(current)
            current = Type()
            ancestors[-1].templateParams.append(current)
        elif ch == '>':
            # Ascend back to the enclosing type.
            current = ancestors.pop()
        elif ch == ',':
            # Sibling parameter at the current nesting level.
            current = Type()
            ancestors[-1].templateParams.append(current)
        else:
            current.name += ch
    current.trimNames()
    return current
def cleanType(inType):
    """Strip defaulted standard-library template arguments from a Type tree.

    Removes the implicit allocator/comparator parameters that compilers print
    for list/vector/set/map so only the user-visible arguments remain.
    Mutates `inType` in place and also returns it.
    """
    if not inType.isTemplate():
        return inType
    # Clean children first so the comparisons below see canonical forms.
    inType.templateParams = [cleanType(param) for param in inType.templateParams]
    # list<T, allocator<T>> / vector<T, allocator<T>>  ->  drop the allocator.
    if (inType.name == "list" or inType.name == "vector") and len(inType.templateParams) == 2:
        if inType.templateParams[-1].name == "allocator" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    # set<T, less<T>, allocator<T>>: peel the allocator first, then the comparator.
    if inType.name == "set" and len(inType.templateParams) == 3:
        if inType.templateParams[-1].name == "allocator" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    if inType.name == "set" and len(inType.templateParams) == 2:
        if inType.templateParams[-1].name == "less" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    # map<K, V, less<K>, allocator<pair<K const, V>>>: the allocator's argument
    # is the pair type, so rebuild it for the comparison.
    if inType.name == "map" and len(inType.templateParams) == 4:
        firstTypeConst = deepcopy(inType.templateParams[0])
        firstTypeConst.name += " const"
        pair = Type("pair", [firstTypeConst, inType.templateParams[1]])
        if inType.templateParams[-1].name == "allocator" and pair == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    if inType.name == "map" and len(inType.templateParams) == 3:
        if inType.templateParams[-1].name == "less" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    return inType
def cleanStd(input, stripSTD=True):
    """Remove noisy libstdc++/boost implementation details from a type string."""
    replacements = [
        # Inline-namespace tag introduced by the C++11 ABI.
        (r"\b__cxx11::", ""),
        # boost::variant pads its parameter list with void_ placeholders.
        (r",\s*boost::detail::variant::void_\b", ""),
        # Collapse the expanded std::string spelling back to "string".
        (r"\bbasic_string<char(, std::char_traits<char>, std::allocator<char> )?>",
         "string"),
    ]
    output = input
    for pattern, repl in replacements:
        output = re.sub(pattern, repl, output)
    if stripSTD:
        output = re.sub(r"\bstd::", "", output)
    return output
def formatType(curType, indent=""):
    """Render a Type tree as indented C++-like text."""
    if not curType.isTemplate():
        # Leaf type: just the (indented) name.
        return indent + curType.name
    params = curType.templateParams
    # Break onto multiple lines when any parameter is itself a template,
    # or when there are more than two parameters.
    multiline = len(params) > 2 or any(p.isTemplate() for p in params)
    if multiline:
        inner = ",\n".join(formatType(p, indent + "  ") for p in params)
        return indent + curType.name + "<\n" + inner + "\n" + indent + ">"
    inline = ", ".join(formatType(p) for p in params)
    return indent + curType.name + "<" + inline + ">"
def formatTypeString(inTypeString, clean=True):
    """Parse, optionally clean, and pretty-print a C++ type string."""
    parsed = parseTypeString(inTypeString)
    if clean:
        parsed = cleanType(parsed)
    return formatType(parsed)
def findMatchingBrace(string, startPos):
    """Return the index of the brace matching ``string[startPos]``.

    Supports ``<``, ``(`` and ``{``. Raises ValueError when the start
    character is not a supported opening brace or no matching closer exists.
    """
    pairs = {"<": ">", "(": ")", "{": "}"}
    openBrace = string[startPos]
    closeBrace = pairs.get(openBrace)
    if closeBrace is None:
        # BUG FIX: the original raised a plain string, which is a TypeError
        # in Python 3 ("exceptions must derive from BaseException").
        raise ValueError("invalid brace: %r" % openBrace)
    depth = 0
    for curPos in range(startPos, len(string)):
        c = string[curPos]
        if c == openBrace:
            depth += 1
        elif c == closeBrace:
            depth -= 1
            if depth == 0:
                return curPos
    raise ValueError("no matching brace in %r" % string)
def formatTypes(inString):
    """Replace every templated type found in `inString` with a formatted version."""
    outString = inString
    templateRE = re.compile(r"(const )?(\w|:)+(<)")
    curPos = 0
    while True:
        match = templateRE.search(inString, curPos)
        if match is None:
            break
        # Locate the '>' that closes the template opened at the match.
        endPos = findMatchingBrace(inString, match.start(3)) + 1
        typeStr = inString[match.start():endPos]
        # Note: str.replace substitutes every occurrence of this type string.
        outString = outString.replace(typeStr, "\n" + formatTypeString(typeStr))
        curPos = endPos
    return outString
import sys
from copy import deepcopy
class Type:
    """A C++ type: a name plus an optional list of template parameters."""

    def __init__(self, name="", templateParams=None):
        self.name = name
        # Avoid a shared mutable default: each instance gets its own list.
        self.templateParams = templateParams if templateParams else []

    def trimNames(self):
        """Strip surrounding whitespace from this name and all nested ones."""
        self.name = self.name.strip()
        for param in self.templateParams:
            param.trimNames()

    def isTemplate(self):
        """Return True when the type carries template parameters."""
        return bool(self.templateParams)

    def __eq__(self, other):
        # Value equality: same class and identical contents; anything else unequal.
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False
def parseTypeString(inTypeString):
    """Parse a C++ type string such as ``map<int, string>`` into a Type tree."""
    current = Type()
    ancestors = []
    for ch in inTypeString:
        if ch == '<':
            # Descend: open a fresh template-parameter child.
            ancestors.append(current)
            current = Type()
            ancestors[-1].templateParams.append(current)
        elif ch == '>':
            # Ascend back to the enclosing type.
            current = ancestors.pop()
        elif ch == ',':
            # Sibling parameter at the current nesting level.
            current = Type()
            ancestors[-1].templateParams.append(current)
        else:
            current.name += ch
    current.trimNames()
    return current
def cleanType(inType):
    """Strip defaulted standard-library template arguments from a Type tree.

    Removes the implicit allocator/comparator parameters that compilers print
    for list/vector/set/map so only the user-visible arguments remain.
    Mutates `inType` in place and also returns it.
    """
    if not inType.isTemplate():
        return inType
    # Clean children first so the comparisons below see canonical forms.
    inType.templateParams = [cleanType(param) for param in inType.templateParams]
    # list<T, allocator<T>> / vector<T, allocator<T>>  ->  drop the allocator.
    if (inType.name == "list" or inType.name == "vector") and len(inType.templateParams) == 2:
        if inType.templateParams[-1].name == "allocator" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    # set<T, less<T>, allocator<T>>: peel the allocator first, then the comparator.
    if inType.name == "set" and len(inType.templateParams) == 3:
        if inType.templateParams[-1].name == "allocator" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    if inType.name == "set" and len(inType.templateParams) == 2:
        if inType.templateParams[-1].name == "less" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    # map<K, V, less<K>, allocator<pair<K const, V>>>: the allocator's argument
    # is the pair type, so rebuild it for the comparison.
    if inType.name == "map" and len(inType.templateParams) == 4:
        firstTypeConst = deepcopy(inType.templateParams[0])
        firstTypeConst.name += " const"
        pair = Type("pair", [firstTypeConst, inType.templateParams[1]])
        if inType.templateParams[-1].name == "allocator" and pair == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    if inType.name == "map" and len(inType.templateParams) == 3:
        if inType.templateParams[-1].name == "less" and inType.templateParams[0] == inType.templateParams[-1].templateParams[0]:
            inType.templateParams.pop()
    return inType
def cleanStd(input, stripSTD=True):
    """Remove noisy libstdc++/boost implementation details from a type string."""
    replacements = [
        # Inline-namespace tag introduced by the C++11 ABI.
        (r"\b__cxx11::", ""),
        # boost::variant pads its parameter list with void_ placeholders.
        (r",\s*boost::detail::variant::void_\b", ""),
        # Collapse the expanded std::string spelling back to "string".
        (r"\bbasic_string<char(, std::char_traits<char>, std::allocator<char> )?>",
         "string"),
    ]
    output = input
    for pattern, repl in replacements:
        output = re.sub(pattern, repl, output)
    if stripSTD:
        output = re.sub(r"\bstd::", "", output)
    return output
def formatType(curType, indent=""):
    """Render a Type tree as indented C++-like text."""
    if not curType.isTemplate():
        # Leaf type: just the (indented) name.
        return indent + curType.name
    params = curType.templateParams
    # Break onto multiple lines when any parameter is itself a template,
    # or when there are more than two parameters.
    multiline = len(params) > 2 or any(p.isTemplate() for p in params)
    if multiline:
        inner = ",\n".join(formatType(p, indent + "  ") for p in params)
        return indent + curType.name + "<\n" + inner + "\n" + indent + ">"
    inline = ", ".join(formatType(p) for p in params)
    return indent + curType.name + "<" + inline + ">"
def formatTypeString(inTypeString, clean=True):
    """Parse, optionally clean, and pretty-print a C++ type string."""
    parsed = parseTypeString(inTypeString)
    if clean:
        parsed = cleanType(parsed)
    return formatType(parsed)
def findMatchingBrace(string, startPos):
    """Return the index of the brace matching ``string[startPos]``.

    Supports ``<``, ``(`` and ``{``. Raises ValueError when the start
    character is not a supported opening brace or no matching closer exists.
    """
    pairs = {"<": ">", "(": ")", "{": "}"}
    openBrace = string[startPos]
    closeBrace = pairs.get(openBrace)
    if closeBrace is None:
        # BUG FIX: the original raised a plain string, which is a TypeError
        # in Python 3 ("exceptions must derive from BaseException").
        raise ValueError("invalid brace: %r" % openBrace)
    depth = 0
    for curPos in range(startPos, len(string)):
        c = string[curPos]
        if c == openBrace:
            depth += 1
        elif c == closeBrace:
            depth -= 1
            if depth == 0:
                return curPos
    raise ValueError("no matching brace in %r" % string)
def formatTypes(inString):
    """Replace every templated type found in `inString` with a formatted version."""
    outString = inString
    templateRE = re.compile(r"(const )?(\w|:)+(<)")
    curPos = 0
    while True:
        match = templateRE.search(inString, curPos)
        if match is None:
            break
        # Locate the '>' that closes the template opened at the match.
        endPos = findMatchingBrace(inString, match.start(3)) + 1
        typeStr = inString[match.start():endPos]
        # Note: str.replace substitutes every occurrence of this type string.
        outString = outString.replace(typeStr, "\n" + formatTypeString(typeStr))
        curPos = endPos
    return outString
import pandas
import zipfile
import re
import os
from datetime import datetime
import time
import uuid
DEST_DIR = ".\\Destination"
SOURCE_DIR = '.\\Source'
SNP_SYMBOLS_FILE_PATH = ".\\snp500.txt"
DAILY_TRADE_MINUTE_TIMESTAMP = 57480000
def process_source_dir(source_dir, dest_dir, snp_symbols):
    """Convert the daily zips in `source_dir` into per-symbol output zips.

    For each archive, the stockquotes CSV and options CSV of each trading day
    are read with pandas and fanned out to `dest_dir` by the two processors.
    """
    # Map each archive path to its per-date file index.
    files_by_zip = {}
    zip_files = get_files_in_folder(source_dir)
    for curr_file in zip_files:
        file_path = os.path.join(source_dir, curr_file)
        files_by_zip[file_path] = get_files_from_zip_by_date(file_path)
    for zip_file in files_by_zip:
        zip_file_obj = zipfile.ZipFile(zip_file)
        for curr_date in files_by_zip[zip_file]:
            date_info = files_by_zip[zip_file][curr_date]
            stock_quotes_file = date_info['stockquotes']
            stock_quotes_data = pandas.read_csv(zip_file_obj.open(stock_quotes_file))
            stocks_start = time.time()
            process_stocks_file(stock_quotes_data, date_info['year'], date_info['month'], date_info['day'],
                                dest_dir, snp_symbols)
            stocks_end = time.time()
            print(f'Processing stocks took {stocks_end - stocks_start} seconds')
            options_file = date_info['options']
            options_data = pandas.read_csv(zip_file_obj.open(options_file))
            process_options_file(options_data, date_info['year'], date_info['month'], date_info['day'],
                                 dest_dir, snp_symbols)
            print(f'Processing options took {time.time() - stocks_end} seconds')
            # NOTE(review): these two breaks stop after the first date of the
            # first archive — they look like debug limits; confirm before removing.
            break
        break
def get_snp_symbols(snp_500_filename):
    """Read one ticker symbol per line and return them as a set.

    Surrounding whitespace (including the newline) is stripped from each line.
    """
    with open(snp_500_filename) as snp_file:
        return {line.strip() for line in snp_file}
def get_zip_files(zip_path):
    """Return the names of all entries inside the zip archive at `zip_path`."""
    with zipfile.ZipFile(zip_path, "r") as archive:
        return list(archive.namelist())
def get_files_in_folder(folder_path):
    """Return the entry names directly inside `folder_path` (files and dirs)."""
    return list(os.listdir(folder_path))
def get_files_from_zip_by_date(zip_path):
    """Group the CSV entries of an archive by their embedded YYYYMMDD date.

    Returns a dict keyed by 'Y_M_D' strings; each value holds the numeric
    'year'/'month'/'day' plus the entry names under 'stockquotes'/'options'.
    Entries whose names do not match the expected pattern are skipped.
    """
    files_in_date = {}
    # Raw string: '\d' in a plain literal is a SyntaxWarning on Python 3.12+.
    name_pattern = re.compile(r"(.+)_(\d{4})(\d{2})(\d{2})\.+")
    for curr_csv in get_zip_files(zip_path):
        m = name_pattern.search(curr_csv)
        if m is None:
            # Robustness fix: a non-conforming name used to raise AttributeError.
            continue
        file_type = m.group(1)
        if file_type not in ('stockquotes', 'options'):
            continue
        year = int(m.group(2))
        month = int(m.group(3))
        day = int(m.group(4))
        date_key = f'{year}_{month}_{day}'
        files_in_date.setdefault(date_key, {'year': year, 'month': month, 'day': day})
        files_in_date[date_key][file_type] = curr_csv
    return files_in_date
def process_stocks_file(stocks_data, year, month, day, dest_folder, snp_symbols):
    """Write one minute-trade zip per S&P symbol found in `stocks_data`.

    Prices are scaled by 10000 (the integer price convention used throughout
    this file) and written as a single CSV row per symbol under
    dest_folder/equity/usa/minute/<symbol>/.
    """
    print(f'Handling stocks for {day}/{month}/{year}')
    for index, row in stocks_data.iterrows():
        symbol = row['symbol']
        if symbol not in snp_symbols:
            continue
        print(f'Handling the stock {symbol} at {day}/{month}/{year}')
        open_price = row['open'] * 10000
        high_price = row['high'] * 10000
        low_price = row['low'] * 10000
        close_price = row['close'] * 10000
        volume = row['volume']
        zip_dir = os.path.join(dest_folder, 'equity', 'usa', 'minute', symbol.lower())
        try:
            # exist_ok avoids the racy exists()/makedirs() pair of the original.
            os.makedirs(zip_dir, exist_ok=True)
        except Exception as e:
            print("directory exception:", e)
            continue
        zip_path = os.path.join(zip_dir, f'{year}{month:02}{day:02}_trade.zip')
        stockquote_filename = f'{year}{month:02}{day:02}_{symbol.lower()}_minute_trade.csv'
        # Context manager guarantees the archive is closed even on write errors.
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file_handle:
            zip_file_handle.writestr(
                stockquote_filename,
                f'{DAILY_TRADE_MINUTE_TIMESTAMP},{open_price},{high_price},{low_price},'
                f'{close_price},{volume}')
def process_options_file(options_data, year, month, day, dest_folder, snp_symbols):
    """Write per-symbol option openinterest/quote/trade zips from `options_data`.

    Rows are expected grouped by UnderlyingSymbol: a symbol change closes the
    previous symbol's three zip handles and opens new ones. Prices are scaled
    by 10000, matching the stock processor.
    """
    print(f'Handling options for {day}/{month}/{year}')
    file_prefix = f'{year}{month:02}{day:02}'
    # "{}" survives the f-string so the name can be .format()-ed per data kind.
    format_str = "{}"
    zip_format_string = f'{file_prefix}_{format_str}_american.zip'
    curr_stock_symbol = ''
    open_interest_zip_handle = None
    quote_zip_handle = None
    trade_zip_handle = None
    option_index = 0
    for index, row in options_data.iterrows():
        # NOTE(review): caps processing at ~50 symbols — looks like a debug
        # limit; confirm before removing.
        if option_index > 50:
            break
        stock_symbol = row['UnderlyingSymbol']
        if stock_symbol in snp_symbols:
            if stock_symbol != curr_stock_symbol:
                print(f'Handling the options for {stock_symbol} on {day}/{month}/{year}')
                # Close the previous symbol's archives before opening new ones.
                if open_interest_zip_handle:
                    open_interest_zip_handle.close()
                if quote_zip_handle:
                    quote_zip_handle.close()
                if trade_zip_handle:
                    trade_zip_handle.close()
                output_path = os.path.join(dest_folder, 'option', 'usa', 'minute', stock_symbol.lower())
                dir_created = True
                try:
                    if not os.path.exists(output_path):
                        os.makedirs(output_path)
                except Exception as e:
                    print("directory exception:", e)
                    dir_created = False
                if dir_created:
                    option_index += 1
                    curr_stock_symbol = stock_symbol
                    open_interest_zip_path = os.path.join(output_path, zip_format_string.format("openinterest"))
                    open_interest_zip_handle = zipfile.ZipFile(open_interest_zip_path, 'w', zipfile.ZIP_DEFLATED)
                    quote_zip_path = os.path.join(output_path, zip_format_string.format("quote"))
                    quote_zip_handle = zipfile.ZipFile(quote_zip_path, 'w', zipfile.ZIP_DEFLATED)
                    trade_zip_path = os.path.join(output_path, zip_format_string.format("trade"))
                    trade_zip_handle = zipfile.ZipFile(trade_zip_path, 'w', zipfile.ZIP_DEFLATED)
            # NOTE(review): if directory creation failed above, rows may still
            # be written into the previous symbol's handles — verify intent.
            if open_interest_zip_handle and quote_zip_handle and trade_zip_handle:
                expiration_date = datetime.strptime(row['Expiration'], "%m/%d/%Y")
                csv_file_template = f'{file_prefix}_{stock_symbol.lower()}_minute_{format_str}_american_' \
                                    f'{row["Type"]}_{int(float(row["Strike"]) * 10000)}_{expiration_date.year}' \
                                    f'{expiration_date.month:02}{expiration_date.day:02}.csv'
                open_interest_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{row["OpenInterest"]}'
                open_interest_csv = csv_file_template.format("openinterest")
                open_interest_zip_handle.writestr(open_interest_csv, open_interest_row)
                # Quote row duplicates bid/ask across OHLC columns and splits
                # the day's volume evenly between the bid and ask sides.
                option_quote_bid = row['Bid'] * 10000
                option_quote_ask = row['Ask'] * 10000
                option_quote_half_volume = int(row['Volume'] / 2)
                iv = row['IV']
                quote_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{option_quote_bid},{option_quote_bid},{option_quote_bid}' \
                            f',{option_quote_bid},{option_quote_half_volume},{option_quote_ask},{option_quote_ask},' \
                            f'{option_quote_ask},{option_quote_ask},{option_quote_half_volume},{iv}'
                quote_csv = csv_file_template.format("quote")
                quote_zip_handle.writestr(quote_csv, quote_row)
                option_trade_last = row['Last'] * 10000
                trade_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{option_trade_last},{option_trade_last},' \
                            f'{option_trade_last},{option_trade_last},{row["Volume"]}'
                trade_csv = csv_file_template.format("trade")
                trade_zip_handle.writestr(trade_csv, trade_row)
    # Flush the handles of the last processed symbol.
    if open_interest_zip_handle:
        open_interest_zip_handle.close()
    if quote_zip_handle:
        quote_zip_handle.close()
    if trade_zip_handle:
        trade_zip_handle.close()
if __name__ == '__main__':
    # Entry point: load the S&P 500 symbol list, then convert every source zip.
    start_time = time.time()
    snp_500_symbols = get_snp_symbols(SNP_SYMBOLS_FILE_PATH)
    process_source_dir(SOURCE_DIR, DEST_DIR, snp_500_symbols)
    elapsed = time.time() - start_time
    print("Processing took", elapsed, "seconds")
import zipfile
import re
import os
from datetime import datetime
import time
import uuid
DEST_DIR = ".\\Destination"
SOURCE_DIR = '.\\Source'
SNP_SYMBOLS_FILE_PATH = ".\\snp500.txt"
DAILY_TRADE_MINUTE_TIMESTAMP = 57480000
def process_source_dir(source_dir, dest_dir, snp_symbols):
    """Convert the daily zips in `source_dir` into per-symbol output zips.

    For each archive, the stockquotes CSV and options CSV of each trading day
    are read with pandas and fanned out to `dest_dir` by the two processors.
    """
    # Map each archive path to its per-date file index.
    files_by_zip = {}
    zip_files = get_files_in_folder(source_dir)
    for curr_file in zip_files:
        file_path = os.path.join(source_dir, curr_file)
        files_by_zip[file_path] = get_files_from_zip_by_date(file_path)
    for zip_file in files_by_zip:
        zip_file_obj = zipfile.ZipFile(zip_file)
        for curr_date in files_by_zip[zip_file]:
            date_info = files_by_zip[zip_file][curr_date]
            stock_quotes_file = date_info['stockquotes']
            stock_quotes_data = pandas.read_csv(zip_file_obj.open(stock_quotes_file))
            stocks_start = time.time()
            process_stocks_file(stock_quotes_data, date_info['year'], date_info['month'], date_info['day'],
                                dest_dir, snp_symbols)
            stocks_end = time.time()
            print(f'Processing stocks took {stocks_end - stocks_start} seconds')
            options_file = date_info['options']
            options_data = pandas.read_csv(zip_file_obj.open(options_file))
            process_options_file(options_data, date_info['year'], date_info['month'], date_info['day'],
                                 dest_dir, snp_symbols)
            print(f'Processing options took {time.time() - stocks_end} seconds')
            # NOTE(review): these two breaks stop after the first date of the
            # first archive — they look like debug limits; confirm before removing.
            break
        break
def get_snp_symbols(snp_500_filename):
    """Read one ticker symbol per line and return them as a set.

    Surrounding whitespace (including the newline) is stripped from each line.
    """
    with open(snp_500_filename) as snp_file:
        return {line.strip() for line in snp_file}
def get_zip_files(zip_path):
    """Return the names of all entries inside the zip archive at `zip_path`."""
    with zipfile.ZipFile(zip_path, "r") as archive:
        return list(archive.namelist())
def get_files_in_folder(folder_path):
    """Return the entry names directly inside `folder_path` (files and dirs)."""
    return list(os.listdir(folder_path))
def get_files_from_zip_by_date(zip_path):
    """Group the CSV entries of an archive by their embedded YYYYMMDD date.

    Returns a dict keyed by 'Y_M_D' strings; each value holds the numeric
    'year'/'month'/'day' plus the entry names under 'stockquotes'/'options'.
    Entries whose names do not match the expected pattern are skipped.
    """
    files_in_date = {}
    # Raw string: '\d' in a plain literal is a SyntaxWarning on Python 3.12+.
    name_pattern = re.compile(r"(.+)_(\d{4})(\d{2})(\d{2})\.+")
    for curr_csv in get_zip_files(zip_path):
        m = name_pattern.search(curr_csv)
        if m is None:
            # Robustness fix: a non-conforming name used to raise AttributeError.
            continue
        file_type = m.group(1)
        if file_type not in ('stockquotes', 'options'):
            continue
        year = int(m.group(2))
        month = int(m.group(3))
        day = int(m.group(4))
        date_key = f'{year}_{month}_{day}'
        files_in_date.setdefault(date_key, {'year': year, 'month': month, 'day': day})
        files_in_date[date_key][file_type] = curr_csv
    return files_in_date
def process_stocks_file(stocks_data, year, month, day, dest_folder, snp_symbols):
    """Write one minute-trade zip per S&P symbol found in `stocks_data`.

    Prices are scaled by 10000 (the integer price convention used throughout
    this file) and written as a single CSV row per symbol under
    dest_folder/equity/usa/minute/<symbol>/.
    """
    print(f'Handling stocks for {day}/{month}/{year}')
    for index, row in stocks_data.iterrows():
        symbol = row['symbol']
        if symbol not in snp_symbols:
            continue
        print(f'Handling the stock {symbol} at {day}/{month}/{year}')
        open_price = row['open'] * 10000
        high_price = row['high'] * 10000
        low_price = row['low'] * 10000
        close_price = row['close'] * 10000
        volume = row['volume']
        zip_dir = os.path.join(dest_folder, 'equity', 'usa', 'minute', symbol.lower())
        try:
            # exist_ok avoids the racy exists()/makedirs() pair of the original.
            os.makedirs(zip_dir, exist_ok=True)
        except Exception as e:
            print("directory exception:", e)
            continue
        zip_path = os.path.join(zip_dir, f'{year}{month:02}{day:02}_trade.zip')
        stockquote_filename = f'{year}{month:02}{day:02}_{symbol.lower()}_minute_trade.csv'
        # Context manager guarantees the archive is closed even on write errors.
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file_handle:
            zip_file_handle.writestr(
                stockquote_filename,
                f'{DAILY_TRADE_MINUTE_TIMESTAMP},{open_price},{high_price},{low_price},'
                f'{close_price},{volume}')
def process_options_file(options_data, year, month, day, dest_folder, snp_symbols):
    """Write per-symbol option openinterest/quote/trade zips from `options_data`.

    Rows are expected grouped by UnderlyingSymbol: a symbol change closes the
    previous symbol's three zip handles and opens new ones. Prices are scaled
    by 10000, matching the stock processor.
    """
    print(f'Handling options for {day}/{month}/{year}')
    file_prefix = f'{year}{month:02}{day:02}'
    # "{}" survives the f-string so the name can be .format()-ed per data kind.
    format_str = "{}"
    zip_format_string = f'{file_prefix}_{format_str}_american.zip'
    curr_stock_symbol = ''
    open_interest_zip_handle = None
    quote_zip_handle = None
    trade_zip_handle = None
    option_index = 0
    for index, row in options_data.iterrows():
        # NOTE(review): caps processing at ~50 symbols — looks like a debug
        # limit; confirm before removing.
        if option_index > 50:
            break
        stock_symbol = row['UnderlyingSymbol']
        if stock_symbol in snp_symbols:
            if stock_symbol != curr_stock_symbol:
                print(f'Handling the options for {stock_symbol} on {day}/{month}/{year}')
                # Close the previous symbol's archives before opening new ones.
                if open_interest_zip_handle:
                    open_interest_zip_handle.close()
                if quote_zip_handle:
                    quote_zip_handle.close()
                if trade_zip_handle:
                    trade_zip_handle.close()
                output_path = os.path.join(dest_folder, 'option', 'usa', 'minute', stock_symbol.lower())
                dir_created = True
                try:
                    if not os.path.exists(output_path):
                        os.makedirs(output_path)
                except Exception as e:
                    print("directory exception:", e)
                    dir_created = False
                if dir_created:
                    option_index += 1
                    curr_stock_symbol = stock_symbol
                    open_interest_zip_path = os.path.join(output_path, zip_format_string.format("openinterest"))
                    open_interest_zip_handle = zipfile.ZipFile(open_interest_zip_path, 'w', zipfile.ZIP_DEFLATED)
                    quote_zip_path = os.path.join(output_path, zip_format_string.format("quote"))
                    quote_zip_handle = zipfile.ZipFile(quote_zip_path, 'w', zipfile.ZIP_DEFLATED)
                    trade_zip_path = os.path.join(output_path, zip_format_string.format("trade"))
                    trade_zip_handle = zipfile.ZipFile(trade_zip_path, 'w', zipfile.ZIP_DEFLATED)
            # NOTE(review): if directory creation failed above, rows may still
            # be written into the previous symbol's handles — verify intent.
            if open_interest_zip_handle and quote_zip_handle and trade_zip_handle:
                expiration_date = datetime.strptime(row['Expiration'], "%m/%d/%Y")
                csv_file_template = f'{file_prefix}_{stock_symbol.lower()}_minute_{format_str}_american_' \
                                    f'{row["Type"]}_{int(float(row["Strike"]) * 10000)}_{expiration_date.year}' \
                                    f'{expiration_date.month:02}{expiration_date.day:02}.csv'
                open_interest_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{row["OpenInterest"]}'
                open_interest_csv = csv_file_template.format("openinterest")
                open_interest_zip_handle.writestr(open_interest_csv, open_interest_row)
                # Quote row duplicates bid/ask across OHLC columns and splits
                # the day's volume evenly between the bid and ask sides.
                option_quote_bid = row['Bid'] * 10000
                option_quote_ask = row['Ask'] * 10000
                option_quote_half_volume = int(row['Volume'] / 2)
                iv = row['IV']
                quote_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{option_quote_bid},{option_quote_bid},{option_quote_bid}' \
                            f',{option_quote_bid},{option_quote_half_volume},{option_quote_ask},{option_quote_ask},' \
                            f'{option_quote_ask},{option_quote_ask},{option_quote_half_volume},{iv}'
                quote_csv = csv_file_template.format("quote")
                quote_zip_handle.writestr(quote_csv, quote_row)
                option_trade_last = row['Last'] * 10000
                trade_row = f'{DAILY_TRADE_MINUTE_TIMESTAMP},{option_trade_last},{option_trade_last},' \
                            f'{option_trade_last},{option_trade_last},{row["Volume"]}'
                trade_csv = csv_file_template.format("trade")
                trade_zip_handle.writestr(trade_csv, trade_row)
    # Flush the handles of the last processed symbol.
    if open_interest_zip_handle:
        open_interest_zip_handle.close()
    if quote_zip_handle:
        quote_zip_handle.close()
    if trade_zip_handle:
        trade_zip_handle.close()
if __name__ == '__main__':
    # Entry point: load the S&P 500 symbol list, then convert every source zip.
    start_time = time.time()
    snp_500_symbols = get_snp_symbols(SNP_SYMBOLS_FILE_PATH)
    process_source_dir(SOURCE_DIR, DEST_DIR, snp_500_symbols)
    elapsed = time.time() - start_time
    print("Processing took", elapsed, "seconds")
import os
import argparse
import time
import yaml
import ast
from functools import reduce
from PIL import Image
import cv2
import numpy as np
import glob
import paddle
from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride
from visualize import visualize_box_mask
from paddle.inference import Config
from paddle.inference import create_predictor
# Architectures this deployment script knows how to pre/post-process.
# (Despite the original "Global dictionary" comment, this is a set.)
SUPPORT_MODELS = {
    'YOLO',
    'RCNN',
    'SSD',
    'FCOS',
    'SOLOv2',
    'TTFNet',
}
class Detector(object):
    """Wraps a Paddle inference predictor with pre- and post-processing.

    Args:
        pred_config (PredictConfig): parsed infer_cfg.yml of the model
        model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml
        use_gpu (bool): whether use gpu
    """

    def __init__(self, pred_config, model_dir, use_gpu=False):
        self.pred_config = pred_config
        self.predictor = load_predictor(
            model_dir,
            min_subgraph_size=self.pred_config.min_subgraph_size,
            use_gpu=use_gpu)

    def preprocess(self, im):
        """Build the configured preprocess pipeline and run it on `im`."""
        preprocess_ops = []
        for op_info in self.pred_config.preprocess_infos:
            new_op_info = op_info.copy()
            op_type = new_op_info.pop('type')
            # eval maps the config's type name to an imported preprocess class.
            # NOTE(review): eval on config data is unsafe for untrusted yml files.
            preprocess_ops.append(eval(op_type)(**new_op_info))
        im, im_info = preprocess(im, preprocess_ops,
                                 self.pred_config.input_shape)
        inputs = create_inputs(im, im_info)
        return inputs

    def postprocess(self, np_boxes, np_masks, inputs, threshold=0.5):
        # postprocess output of predictor
        # NOTE(review): `threshold` is currently unused here; score filtering,
        # if any, happens elsewhere.
        results = {}
        if self.pred_config.arch in ['Face']:
            # Rescale normalized Face boxes back to original image coordinates.
            # NOTE(review): the h/w naming here is crossed (w gets h/scale_y);
            # verify the scaling axes against the model's output layout.
            h, w = inputs['im_shape']
            scale_y, scale_x = inputs['scale_factor']
            w, h = float(h) / scale_y, float(w) / scale_x
            np_boxes[:, 2] *= h
            np_boxes[:, 3] *= w
            np_boxes[:, 4] *= h
            np_boxes[:, 5] *= w
        results['boxes'] = np_boxes
        if np_masks is not None:
            results['masks'] = np_masks
        return results

    def predict(self,
                image,
                threshold=0.5,
                warmup=0,
                repeats=1,
                run_benchmark=False):
        '''
        Args:
            image (str/np.ndarray): path of image/ np.ndarray read by cv2
            threshold (float): threshold of predicted box' score
        Returns:
            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
                            matix element:[class, score, x_min, y_min, x_max, y_max]
                            MaskRCNN's results include 'masks': np.ndarray:
                            shape: [N, im_h, im_w]
        '''
        inputs = self.preprocess(image)
        np_boxes, np_masks = None, None
        # Copy each prepared array into its matching input tensor.
        input_names = self.predictor.get_input_names()
        for i in range(len(input_names)):
            input_tensor = self.predictor.get_input_handle(input_names[i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])
        # Untimed warmup iterations (also used to trigger TensorRT calibration).
        for i in range(warmup):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_handle(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.pred_config.mask:
                masks_tensor = self.predictor.get_output_handle(output_names[2])
                np_masks = masks_tensor.copy_to_cpu()
        # Timed iterations; the average is reported below.
        t1 = time.time()
        for i in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_handle(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.pred_config.mask:
                masks_tensor = self.predictor.get_output_handle(output_names[2])
                np_masks = masks_tensor.copy_to_cpu()
        t2 = time.time()
        ms = (t2 - t1) * 1000.0 / repeats
        print("Inference: {} ms per batch image".format(ms))
        # do not perform postprocess in benchmark mode
        results = []
        if not run_benchmark:
            # Fewer than 6 output elements means no complete box was produced.
            if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
                print('[WARNNING] No object detected.')
                results = {'boxes': np.array([])}
            else:
                results = self.postprocess(
                    np_boxes, np_masks, inputs, threshold=threshold)
        return results
def create_inputs(im, im_info):
    """Pack a preprocessed image and its metadata into the predictor feed dict.

    Args:
        im (np.ndarray): preprocessed image tensor  # assumed (C, H, W) -- confirm
        im_info (dict): must provide 'im_shape' and 'scale_factor'
    Returns:
        dict: batch-of-one float32 arrays keyed by input tensor name
    """
    return {
        'image': np.array((im, )).astype('float32'),
        'im_shape': np.array((im_info['im_shape'], )).astype('float32'),
        'scale_factor': np.array((im_info['scale_factor'], )).astype('float32'),
    }
class PredictConfig():
    """Holds the preprocess/postprocess settings parsed from infer_cfg.yml.

    Args:
        model_dir (str): directory containing infer_cfg.yml
    """

    def __init__(self, model_dir):
        # parsing Yaml config for Preprocess
        deploy_file = os.path.join(model_dir, 'infer_cfg.yml')
        with open(deploy_file) as f:
            yml_conf = yaml.safe_load(f)
        # Raises ValueError for unsupported architectures before reading fields.
        self.check_model(yml_conf)
        self.arch = yml_conf['arch']
        self.preprocess_infos = yml_conf['Preprocess']
        self.min_subgraph_size = yml_conf['min_subgraph_size']
        self.labels = yml_conf['label_list']
        # 'mask' is optional in the config; default to no mask outputs.
        self.mask = False
        if 'mask' in yml_conf:
            self.mask = yml_conf['mask']
        self.input_shape = yml_conf['image_shape']
        self.print_config()

    def check_model(self, yml_conf):
        """Validate the configured arch against SUPPORT_MODELS.

        Raises:
            ValueError: loaded model not in supported model type
        """
        # Substring match: e.g. 'YOLOv3' is accepted because it contains 'YOLO'.
        for support_model in SUPPORT_MODELS:
            if support_model in yml_conf['arch']:
                return True
        raise ValueError("Unsupported arch: {}, expect {}".format(yml_conf[
            'arch'], SUPPORT_MODELS))

    def print_config(self):
        """Log the parsed architecture and preprocess pipeline."""
        print('----------- Model Configuration -----------')
        print('%s: %s' % ('Model Arch', self.arch))
        print('%s: ' % ('Transform Order'))
        for op_info in self.preprocess_infos:
            print('--%s: %s' % ('transform op', op_info['type']))
        print('--------------------------------------------')
def load_predictor(model_dir, batch_size=1, use_gpu=False, min_subgraph_size=3):
    """set AnalysisConfig, generate AnalysisPredictor

    Args:
        model_dir (str): root path of model.pdmodel and model.pdiparams
        use_gpu (bool): whether use gpu
    Returns:
        predictor (PaddlePredictor): AnalysisPredictor
    Raises:
        ValueError: predict by TensorRT need use_gpu == True.
    """
    # This script always runs TensorRT INT8 calibration, hence the fixed mode;
    # as a consequence use_gpu=False always raises below.
    run_mode = 'trt_int8'
    if not use_gpu and not run_mode == 'fluid':
        raise ValueError(
            "Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}"
            .format(run_mode, use_gpu))
    config = Config(
        os.path.join(model_dir, 'model.pdmodel'),
        os.path.join(model_dir, 'model.pdiparams'))
    precision_map = {
        'trt_int8': Config.Precision.Int8,
        'trt_fp32': Config.Precision.Float32,
        'trt_fp16': Config.Precision.Half
    }
    if use_gpu:
        # initial GPU memory(M), device ID
        config.enable_use_gpu(200, 0)
        # optimize graph and fuse op
        config.switch_ir_optim(True)
    else:
        config.disable_gpu()
    if run_mode in precision_map.keys():
        # use_calib_mode=True makes Paddle generate the INT8 calibration table.
        config.enable_tensorrt_engine(
            workspace_size=1 << 10,
            max_batch_size=batch_size,
            min_subgraph_size=min_subgraph_size,
            precision_mode=precision_map[run_mode],
            use_static=False,
            use_calib_mode=True)
    # disable print log when predict
    config.disable_glog_info()
    # enable shared memory
    config.enable_memory_optim()
    # disable feed, fetch OP, needed by zero_copy_run
    config.switch_use_feed_fetch_ops(False)
    predictor = create_predictor(config)
    return predictor
def print_arguments(args):
    """Print each CLI argument as 'name: value', sorted by name."""
    print('----------- Running Arguments -----------')
    for name, val in sorted(vars(args).items()):
        print('%s: %s' % (name, val))
    print('------------------------------------------')
def predict_image_dir(detector):
    """Run detection on every .jpg directly under FLAGS.image_dir."""
    for image_path in glob.glob(FLAGS.image_dir + '/*.jpg'):
        print('image_file is', image_path)
        detector.predict(image_path, threshold=0.5)
def main():
    """Build the predictor from FLAGS and run it over the image directory."""
    config = PredictConfig(FLAGS.model_dir)
    det = Detector(config, FLAGS.model_dir, use_gpu=FLAGS.use_gpu)
    # predict from image
    if FLAGS.image_dir != '':
        predict_image_dir(det)
if __name__ == '__main__':
    # Fix: removed stray debug statement print('err?') left between the
    # argument definitions, and stripped dataset residue fused onto the
    # final line.
    paddle.enable_static()
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--model_dir",
        type=str,
        default=None,
        help=("Directory include:'model.pdiparams', 'model.pdmodel', "
              "'infer_cfg.yml', created by tools/export_model.py."),
        required=True)
    parser.add_argument(
        "--image_dir", type=str, default='', help="Directory of image file.")
    parser.add_argument(
        "--use_gpu",
        type=ast.literal_eval,
        default=False,
        help="Whether to predict with GPU.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory of output visualization files.")
    FLAGS = parser.parse_args()
    print_arguments(FLAGS)
    main()
import os
import argparse
import time
import yaml
import ast
from functools import reduce
from PIL import Image
import cv2
import numpy as np
import glob
import paddle
from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride
from visualize import visualize_box_mask
from paddle.inference import Config
from paddle.inference import create_predictor
# Global set of supported architecture name fragments; check_model()
# accepts any config whose 'arch' string contains one of these substrings.
# (Despite the original "Global dictionary" comment, this is a set literal.)
SUPPORT_MODELS = {
    'YOLO',
    'RCNN',
    'SSD',
    'FCOS',
    'SOLOv2',
    'TTFNet',
}
class Detector(object):
    """Runs one exported detection model end to end:
    preprocess -> predictor.run() -> postprocess.

    Args:
        config (object): config of model, defined by `Config(model_dir)`
        model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml
        use_gpu (bool): whether use gpu
    """

    def __init__(self, pred_config, model_dir, use_gpu=False):
        self.pred_config = pred_config
        self.predictor = load_predictor(
            model_dir,
            min_subgraph_size=self.pred_config.min_subgraph_size,
            use_gpu=use_gpu)

    def preprocess(self, im):
        """Apply the YAML-declared transform pipeline and build the feed dict."""
        preprocess_ops = []
        for op_info in self.pred_config.preprocess_infos:
            new_op_info = op_info.copy()
            op_type = new_op_info.pop('type')
            # Instantiate the transform class (Resize, NormalizeImage, ...)
            # named in the config.  NOTE(review): eval() on config-provided
            # names is safe only for trusted infer_cfg.yml files.
            preprocess_ops.append(eval(op_type)(**new_op_info))
        im, im_info = preprocess(im, preprocess_ops,
                                 self.pred_config.input_shape)
        inputs = create_inputs(im, im_info)
        return inputs

    def postprocess(self, np_boxes, np_masks, inputs, threshold=0.5):
        # postprocess output of predictor
        results = {}
        if self.pred_config.arch in ['Face']:
            # Face models: rescale box coordinates by the preprocessing
            # scale factors before reporting.
            h, w = inputs['im_shape']
            scale_y, scale_x = inputs['scale_factor']
            w, h = float(h) / scale_y, float(w) / scale_x
            np_boxes[:, 2] *= h
            np_boxes[:, 3] *= w
            np_boxes[:, 4] *= h
            np_boxes[:, 5] *= w
        results['boxes'] = np_boxes
        if np_masks is not None:
            results['masks'] = np_masks
        return results

    def predict(self,
                image,
                threshold=0.5,
                warmup=0,
                repeats=1,
                run_benchmark=False):
        '''
        Args:
            image (str/np.ndarray): path of image/ np.ndarray read by cv2
            threshold (float): threshold of predicted box' score
            warmup (int): untimed predictor runs before the timed loop
            repeats (int): timed runs averaged for the reported latency
            run_benchmark (bool): when True, skip postprocess and return []
        Returns:
            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,
                            matrix element:[class, score, x_min, y_min, x_max, y_max]
                            MaskRCNN's results include 'masks': np.ndarray:
                            shape: [N, im_h, im_w]
        '''
        inputs = self.preprocess(image)
        np_boxes, np_masks = None, None
        input_names = self.predictor.get_input_names()
        for i in range(len(input_names)):
            input_tensor = self.predictor.get_input_handle(input_names[i])
            input_tensor.copy_from_cpu(inputs[input_names[i]])
        # Warmup iterations: outputs are fetched but overwritten below.
        for i in range(warmup):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_handle(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.pred_config.mask:
                masks_tensor = self.predictor.get_output_handle(output_names[2])
                np_masks = masks_tensor.copy_to_cpu()
        # Timed loop; latency averaged over `repeats`.
        t1 = time.time()
        for i in range(repeats):
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            boxes_tensor = self.predictor.get_output_handle(output_names[0])
            np_boxes = boxes_tensor.copy_to_cpu()
            if self.pred_config.mask:
                masks_tensor = self.predictor.get_output_handle(output_names[2])
                np_masks = masks_tensor.copy_to_cpu()
        t2 = time.time()
        ms = (t2 - t1) * 1000.0 / repeats
        print("Inference: {} ms per batch image".format(ms))
        # do not perform postprocess in benchmark mode
        results = []
        if not run_benchmark:
            # Fewer than 6 values cannot hold a single [class, score, box].
            if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
                print('[WARNNING] No object detected.')
                results = {'boxes': np.array([])}
            else:
                results = self.postprocess(
                    np_boxes, np_masks, inputs, threshold=threshold)
        return results
def create_inputs(im, im_info):
    """Pack the preprocessed image and its metadata into feed tensors.

    Args:
        im (np.ndarray): preprocessed image
        im_info (dict): holds 'im_shape' and 'scale_factor'
    Returns:
        dict: batched float32 arrays keyed by predictor input name
    """
    def as_batch(value):
        # Prepend a batch dimension of 1 and force float32.
        return np.array((value, )).astype('float32')

    return {
        'image': as_batch(im),
        'im_shape': as_batch(im_info['im_shape']),
        'scale_factor': as_batch(im_info['scale_factor']),
    }
class PredictConfig():
    """set config of preprocess, postprocess and visualize

    Parses <model_dir>/infer_cfg.yml and exposes its fields as attributes.

    Args:
        model_dir (str): root path of model.yml
    """

    def __init__(self, model_dir):
        # parsing Yaml config for Preprocess
        deploy_file = os.path.join(model_dir, 'infer_cfg.yml')
        with open(deploy_file) as f:
            yml_conf = yaml.safe_load(f)
        self.check_model(yml_conf)
        self.arch = yml_conf['arch']
        self.preprocess_infos = yml_conf['Preprocess']
        self.min_subgraph_size = yml_conf['min_subgraph_size']
        self.labels = yml_conf['label_list']
        # 'mask' is optional in the YAML; default to no mask outputs.
        self.mask = False
        if 'mask' in yml_conf:
            self.mask = yml_conf['mask']
        self.input_shape = yml_conf['image_shape']
        self.print_config()

    def check_model(self, yml_conf):
        """
        Raises:
            ValueError: loaded model not in supported model type
        """
        # Substring match: e.g. arch 'YOLOv3' is accepted via 'YOLO'.
        for support_model in SUPPORT_MODELS:
            if support_model in yml_conf['arch']:
                return True
        raise ValueError("Unsupported arch: {}, expect {}".format(yml_conf[
            'arch'], SUPPORT_MODELS))

    def print_config(self):
        """Print the parsed model configuration to stdout."""
        print('----------- Model Configuration -----------')
        print('%s: %s' % ('Model Arch', self.arch))
        print('%s: ' % ('Transform Order'))
        for op_info in self.preprocess_infos:
            print('--%s: %s' % ('transform op', op_info['type']))
        print('--------------------------------------------')
def load_predictor(model_dir, batch_size=1, use_gpu=False, min_subgraph_size=3):
    """set AnalysisConfig, generate AnalysisPredictor

    Builds a Paddle inference predictor hard-wired for TensorRT INT8
    calibration (run_mode below is fixed to 'trt_int8').

    Args:
        model_dir (str): directory holding model.pdmodel / model.pdiparams
        batch_size (int): max batch size handed to the TensorRT engine
        use_gpu (bool): whether use gpu
        min_subgraph_size (int): minimum subgraph size for TensorRT fusion
    Returns:
        predictor (PaddlePredictor): AnalysisPredictor
    Raises:
        ValueError: predict by TensorRT need use_gpu == True.
    """
    # This calibration script always runs in TensorRT INT8 mode.
    run_mode = 'trt_int8'
    if not use_gpu and not run_mode == 'fluid':
        raise ValueError(
            "Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}"
            .format(run_mode, use_gpu))
    config = Config(
        os.path.join(model_dir, 'model.pdmodel'),
        os.path.join(model_dir, 'model.pdiparams'))
    precision_map = {
        'trt_int8': Config.Precision.Int8,
        'trt_fp32': Config.Precision.Float32,
        'trt_fp16': Config.Precision.Half
    }
    if use_gpu:
        # initial GPU memory(M), device ID
        config.enable_use_gpu(200, 0)
        # optimize graph and fuse op
        config.switch_ir_optim(True)
    else:
        config.disable_gpu()
    if run_mode in precision_map.keys():
        # NOTE(review): workspace_size=1 << 10 is only 1 KiB; TensorRT
        # examples commonly use 1 << 30 -- confirm this is intentional.
        config.enable_tensorrt_engine(
            workspace_size=1 << 10,
            max_batch_size=batch_size,
            min_subgraph_size=min_subgraph_size,
            precision_mode=precision_map[run_mode],
            use_static=False,
            use_calib_mode=True)
    # disable print log when predict
    config.disable_glog_info()
    # enable shared memory
    config.enable_memory_optim()
    # disable feed, fetch OP, needed by zero_copy_run
    config.switch_use_feed_fetch_ops(False)
    predictor = create_predictor(config)
    return predictor
def print_arguments(args):
    """Print each CLI argument as 'name: value', sorted by name."""
    print('----------- Running Arguments -----------')
    for name, val in sorted(vars(args).items()):
        print('%s: %s' % (name, val))
    print('------------------------------------------')
def predict_image_dir(detector):
    """Run detection on every .jpg directly under FLAGS.image_dir."""
    for image_path in glob.glob(FLAGS.image_dir + '/*.jpg'):
        print('image_file is', image_path)
        detector.predict(image_path, threshold=0.5)
def main():
    """Build the predictor from FLAGS and run it over the image directory."""
    config = PredictConfig(FLAGS.model_dir)
    det = Detector(config, FLAGS.model_dir, use_gpu=FLAGS.use_gpu)
    # predict from image
    if FLAGS.image_dir != '':
        predict_image_dir(det)
if __name__ == '__main__':
    # Fix: removed stray debug statement print('err?') left between the
    # argument definitions, and stripped dataset residue fused onto the
    # final line.
    paddle.enable_static()
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--model_dir",
        type=str,
        default=None,
        help=("Directory include:'model.pdiparams', 'model.pdmodel', "
              "'infer_cfg.yml', created by tools/export_model.py."),
        required=True)
    parser.add_argument(
        "--image_dir", type=str, default='', help="Directory of image file.")
    parser.add_argument(
        "--use_gpu",
        type=ast.literal_eval,
        default=False,
        help="Whether to predict with GPU.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory of output visualization files.")
    FLAGS = parser.parse_args()
    print_arguments(FLAGS)
    main()
import queue as Q
import numpy as np
import math
from util import *
class Graph:
    """Undirected weighted graph kept in three synchronized forms
    (edge dict, adjacency list, adjacency matrix) with Prim-based
    spanning-tree / spanning-forest heuristics.

    Fixes vs. original: identity comparisons with None, loop-variable
    shadowing in read_file, translated docstring, and removal of dataset
    residue fused onto the final line.
    """

    def __init__(self, file=None, numVertex=0):
        self.V = numVertex      # vertex count
        self.E = 0              # edge count (set by read_file)
        self.maxWeight = 0      # running sum of edge costs read from file
        self.file = file
        self.edges = {}         # (i, j) -> cost, stored in both orientations
        self.adj_matrix = [[0 for x in range(self.V)] for y in range(self.V)]
        self.adj_list = {}      # i -> {neighbour: cost}
        if file is not None:
            self.read_file(file)
        self.subtrees = {}      # root -> {'cost', 'edges'} of the last forest

    def read_file(self, file):
        """Load a graph file: header '<V> <E>', then E lines '<i> <j> <cost>'
        with 1-based vertex ids."""
        with open(file, 'r') as fh:
            self.V, self.E = map(int, fh.readline().split())
            self.adj_matrix = [[0 for x in range(self.V)]
                               for y in range(self.V)]
            for _ in range(self.E):
                i, j, c = map(int, fh.readline().split())
                self.addEdge(i - 1, j - 1, c)
                self.addEdge(j - 1, i - 1, c)
                self.maxWeight += c

    def addEdge(self, i, j, c):
        """Add or update edge (i, j) with cost c in every representation."""
        self.edges[(i, j)] = c
        self.edges[(j, i)] = c
        self.adj_list.setdefault(i, {})[j] = c
        self.adj_list.setdefault(j, {})[i] = c
        self.adj_matrix[i][j] = c
        self.adj_matrix[j][i] = c

    def removeEdge(self, i, j):
        """Remove edge (i, j) from every representation (no-op if absent)."""
        self.edges.pop((i, j), None)
        self.edges.pop((j, i), None)
        self.adj_list[i].pop(j, None)
        self.adj_list[j].pop(i, None)
        self.adj_matrix[i][j] = 0
        self.adj_matrix[j][i] = 0

    def mst(self, v=0, roots=[], marked=None):
        """Prim's MST from vertex v (or roots[0] when several roots are
        given).  `marked` may pre-mark vertices as already in the tree.

        Returns {'root', 'cost', 'edges'}; edges are (parent, child) pairs.
        Note: the mutable default `roots=[]` is never mutated here.
        """
        st_root = v
        if len(roots) > 1:
            st_root = roots[0]
            v = roots[0]
        tree_cost = 0
        used = 0
        if marked is not None:
            inMST = marked
            used = sum(marked)
        else:
            inMST = [False] * self.V
        key = {}
        tree = {}
        heap = Q.PriorityQueue()
        for i in range(self.V):
            tree[i] = -1
            key[i] = math.inf
        key[v] = 0
        heap.put((key[v], v))
        while used != self.V:
            h = heap.get()
            u = h[1]
            w = h[0]
            if inMST[u]:
                # Stale heap entry; vertex was reached more cheaply already.
                continue
            inMST[u] = True
            used += 1
            tree_cost += w
            for adj in self.adj_list[u].items():
                v = adj[0]
                w = adj[1]
                if (not inMST[v]) and (key[v] > w):
                    key[v] = w
                    heap.put((key[v], v))
                    tree[v] = u
        spanning_tree = {
            'root': st_root,
            'cost': tree_cost,
            'edges': set()
        }
        # Roots never report a parent edge.
        for v in roots:
            tree[v] = -1
        for edge in tree.items():
            if edge[1] == -1:
                continue
            spanning_tree['edges'].add((edge[1], edge[0]))
        return spanning_tree

    def rooted_trees(self, roots):
        """Join all roots with temporary zero-cost edges, build one MST
        (the bottleneck tree), then split it into one subtree per root.

        Returns (subtrees, bottleneck_tree).
        """
        self.subtrees = {}
        rootEdges = []
        for i in range(len(roots) - 1):
            for j in range(i + 1, len(roots)):
                rootEdges.append((roots[i], roots[j]))
                rootEdges.append((roots[j], roots[i]))
                self.addEdge(roots[i], roots[j], 0)
        bottleneck_tree = self.mst(roots=roots)
        for root in roots:
            self.subtrees[root] = {
                'cost': 0,
                'edges': set(),
            }
            # BFS from the root collecting the tree edges reachable from it.
            q = Q.Queue()
            q.put(root)
            while not q.empty():
                v = q.get()
                for edge in bottleneck_tree['edges']:
                    if (v in edge) and (edge not in self.subtrees[root]['edges']):
                        self.subtrees[root]['edges'].add(edge)
                        self.subtrees[root]['cost'] += self.adj_matrix[edge[0]][edge[1]]
                        if v == edge[0]:
                            q.put(edge[1])
                        else:
                            q.put(edge[0])
        # Drop the temporary zero-cost root-to-root edges.
        for i in range(len(roots) - 1):
            for j in range(i + 1, len(roots)):
                self.removeEdge(roots[i], roots[j])
        return self.subtrees, bottleneck_tree

    def greedy_random_sf(self, roots):
        """Grow one tree per root; at each step the currently cheapest tree
        claims its cheapest frontier vertex.  Returns the subtree dict."""
        self.subtrees = {}
        heapDict = {}
        inST = [False] * self.V
        used = len(roots)
        key = {}
        cur_tree = roots[0]
        for root in roots:
            inST[root] = True
            key[root] = [math.inf] * self.V
            heapDict[root] = Q.PriorityQueue()
            self.subtrees[root] = {
                'cost': 0,
                'edges': set()
            }
            for adj in self.adj_list[root].items():
                # item = (cost, (v, parent[v]))
                key[root][adj[0]] = adj[1]
                item = (adj[1], (adj[0], root))
                heapDict[root].put(item)
        while used != self.V:
            item = heapDict[cur_tree].get()
            c = item[0]
            v, parent = item[1]
            if inST[v]:
                continue
            inST[v] = True
            used += 1
            self.subtrees[cur_tree]['cost'] += c
            self.subtrees[cur_tree]['edges'].add((parent, v))
            for adj in self.adj_list[v].items():
                i = adj[0]
                c = adj[1]
                if (not inST[i]) and (key[cur_tree][i] > c):
                    key[cur_tree][i] = c
                    item = (c, (i, v))
                    heapDict[cur_tree].put(item)
            # Hand the turn to the tree with the smallest accumulated cost.
            for root in roots:
                cost = self.subtrees[root]['cost']
                if cost <= self.subtrees[cur_tree]['cost']:
                    cur_tree = root
        return self.subtrees

    def improved_random_sf(self, roots):
        """Like greedy_random_sf, but the next tree is the one minimizing
        (cost so far + cheapest frontier edge), via util.min_set."""
        self.subtrees = {}
        heapDict = {}
        inST = [False] * self.V
        used = len(roots)
        key = {}
        cur_tree = roots[0]
        target = 0
        for root in roots:
            inST[root] = True
            key[root] = [math.inf] * self.V
            heapDict[root] = Q.PriorityQueue()
            self.subtrees[root] = {
                'cost': 0,
                'edges': set()
            }
            for adj in self.adj_list[root].items():
                # item = (cost, (v, root))
                key[root][adj[0]] = adj[1]
                item = (adj[1], (adj[0], root))
                heapDict[root].put(item)
        # selecting first tree
        possible_trees = set()
        for root in roots:
            if len(heapDict[root].queue) > 0:
                min_vertex = heapDict[root].queue[0]
                item = (root, min_vertex[0])
                possible_trees.add(item)
        cur_tree = min_set(possible_trees)[0]
        possible_trees.clear()
        while used != self.V:
            item = heapDict[cur_tree].get()
            c = item[0]
            v, parent = item[1]
            if inST[v]:
                continue
            inST[v] = True
            used += 1
            self.subtrees[cur_tree]['cost'] += c
            self.subtrees[cur_tree]['edges'].add((parent, v))
            for adj in self.adj_list[v].items():
                i = adj[0]
                c = adj[1]
                if (not inST[i]) and (key[cur_tree][i] > c):
                    key[cur_tree][i] = c
                    item = (c, (i, v))
                    heapDict[cur_tree].put(item)
            # Re-pick the tree minimizing cost-so-far + cheapest frontier.
            for root in roots:
                if len(heapDict[root].queue) > 0:
                    min_vertex = heapDict[root].queue[0]
                    item = (root, self.subtrees[root]['cost'] + min_vertex[0])
                    possible_trees.add(item)
            cur_tree = min_set(possible_trees)[0]
            possible_trees.clear()
        return self.subtrees

    def get_prob(self, roots):
        """For each vertex return a probability distribution over `roots`,
        inversely proportional to edge cost to each root; a root maps to
        itself with probability 1."""
        probs = []
        for i in range(self.V):
            dist_sum = 0
            prob = [0] * len(roots)
            if i in roots:
                prob[roots.index(i)] = 1
                probs.append(prob)
                continue
            for idx, r in enumerate(roots):
                dist_sum += self.adj_list[i].setdefault(r, 0)
                prob[idx] = self.adj_list[i].setdefault(r, 0)
            for idx in range(len(prob)):
                if prob[idx] != 0:
                    prob[idx] = (dist_sum - prob[idx]) / ((len(prob) - 1) * dist_sum)
            probs.append(prob)
        return probs

    def mst_from_probs(self, roots, probs):
        """Randomly assign each vertex to a root using `probs`, then build
        one MST per root over its assigned vertices."""
        vertex_list = {}
        spanning_tree = {}
        for root in roots:
            spanning_tree[root] = None
            vertex_list[root] = [True] * self.V
        for idx, prob in enumerate(probs):
            tree = np.random.choice(roots, p=prob)
            vertex_list[tree][idx] = False
        for root in roots:
            spanning_tree[root] = self.mst(root, marked=vertex_list[root])
        return spanning_tree

    def make_complete(self):
        """Complete the graph by adding missing edges with cost
        maxWeight + 1 (heavier than any real edge)."""
        for i in range(self.V - 1):
            for j in range(i + 1, self.V):
                if (i, j) not in self.edges and (j, i) not in self.edges:
                    self.addEdge(i, j, self.maxWeight + 1)

    def make_uncomplete(self):
        """Undo make_complete() by removing every artificial edge."""
        for i in range(self.V - 1):
            for j in range(i + 1, self.V):
                if self.adj_matrix[i][j] == self.maxWeight + 1:
                    self.removeEdge(i, j)

    def __repr__(self):
        ret = 'Grafo\n\t |V| => '+str(self.V)+'\n\t |E| => '+str(self.E)+'\n Matrix de adjacencia\n'
        for i in self.adj_matrix:
            ret += str(i)+'\n'
        return ret


# Salvaged from the corrupted trailing line of this dump chunk: the first
# import of the following module copy.
import queue as Q
import numpy as np
import math
from util import *
class Graph:
    """Undirected weighted graph kept in three synchronized forms
    (edge dict, adjacency list, adjacency matrix) with Prim-based
    spanning-tree / spanning-forest heuristics.

    Fixes vs. original: identity comparisons with None, loop-variable
    shadowing in read_file, translated docstring, and removal of dataset
    residue fused onto the final line.
    """

    def __init__(self, file=None, numVertex=0):
        self.V = numVertex      # vertex count
        self.E = 0              # edge count (set by read_file)
        self.maxWeight = 0      # running sum of edge costs read from file
        self.file = file
        self.edges = {}         # (i, j) -> cost, stored in both orientations
        self.adj_matrix = [[0 for x in range(self.V)] for y in range(self.V)]
        self.adj_list = {}      # i -> {neighbour: cost}
        if file is not None:
            self.read_file(file)
        self.subtrees = {}      # root -> {'cost', 'edges'} of the last forest

    def read_file(self, file):
        """Load a graph file: header '<V> <E>', then E lines '<i> <j> <cost>'
        with 1-based vertex ids."""
        with open(file, 'r') as fh:
            self.V, self.E = map(int, fh.readline().split())
            self.adj_matrix = [[0 for x in range(self.V)]
                               for y in range(self.V)]
            for _ in range(self.E):
                i, j, c = map(int, fh.readline().split())
                self.addEdge(i - 1, j - 1, c)
                self.addEdge(j - 1, i - 1, c)
                self.maxWeight += c

    def addEdge(self, i, j, c):
        """Add or update edge (i, j) with cost c in every representation."""
        self.edges[(i, j)] = c
        self.edges[(j, i)] = c
        self.adj_list.setdefault(i, {})[j] = c
        self.adj_list.setdefault(j, {})[i] = c
        self.adj_matrix[i][j] = c
        self.adj_matrix[j][i] = c

    def removeEdge(self, i, j):
        """Remove edge (i, j) from every representation (no-op if absent)."""
        self.edges.pop((i, j), None)
        self.edges.pop((j, i), None)
        self.adj_list[i].pop(j, None)
        self.adj_list[j].pop(i, None)
        self.adj_matrix[i][j] = 0
        self.adj_matrix[j][i] = 0

    def mst(self, v=0, roots=[], marked=None):
        """Prim's MST from vertex v (or roots[0] when several roots are
        given).  `marked` may pre-mark vertices as already in the tree.

        Returns {'root', 'cost', 'edges'}; edges are (parent, child) pairs.
        Note: the mutable default `roots=[]` is never mutated here.
        """
        st_root = v
        if len(roots) > 1:
            st_root = roots[0]
            v = roots[0]
        tree_cost = 0
        used = 0
        if marked is not None:
            inMST = marked
            used = sum(marked)
        else:
            inMST = [False] * self.V
        key = {}
        tree = {}
        heap = Q.PriorityQueue()
        for i in range(self.V):
            tree[i] = -1
            key[i] = math.inf
        key[v] = 0
        heap.put((key[v], v))
        while used != self.V:
            h = heap.get()
            u = h[1]
            w = h[0]
            if inMST[u]:
                # Stale heap entry; vertex was reached more cheaply already.
                continue
            inMST[u] = True
            used += 1
            tree_cost += w
            for adj in self.adj_list[u].items():
                v = adj[0]
                w = adj[1]
                if (not inMST[v]) and (key[v] > w):
                    key[v] = w
                    heap.put((key[v], v))
                    tree[v] = u
        spanning_tree = {
            'root': st_root,
            'cost': tree_cost,
            'edges': set()
        }
        # Roots never report a parent edge.
        for v in roots:
            tree[v] = -1
        for edge in tree.items():
            if edge[1] == -1:
                continue
            spanning_tree['edges'].add((edge[1], edge[0]))
        return spanning_tree

    def rooted_trees(self, roots):
        """Join all roots with temporary zero-cost edges, build one MST
        (the bottleneck tree), then split it into one subtree per root.

        Returns (subtrees, bottleneck_tree).
        """
        self.subtrees = {}
        rootEdges = []
        for i in range(len(roots) - 1):
            for j in range(i + 1, len(roots)):
                rootEdges.append((roots[i], roots[j]))
                rootEdges.append((roots[j], roots[i]))
                self.addEdge(roots[i], roots[j], 0)
        bottleneck_tree = self.mst(roots=roots)
        for root in roots:
            self.subtrees[root] = {
                'cost': 0,
                'edges': set(),
            }
            # BFS from the root collecting the tree edges reachable from it.
            q = Q.Queue()
            q.put(root)
            while not q.empty():
                v = q.get()
                for edge in bottleneck_tree['edges']:
                    if (v in edge) and (edge not in self.subtrees[root]['edges']):
                        self.subtrees[root]['edges'].add(edge)
                        self.subtrees[root]['cost'] += self.adj_matrix[edge[0]][edge[1]]
                        if v == edge[0]:
                            q.put(edge[1])
                        else:
                            q.put(edge[0])
        # Drop the temporary zero-cost root-to-root edges.
        for i in range(len(roots) - 1):
            for j in range(i + 1, len(roots)):
                self.removeEdge(roots[i], roots[j])
        return self.subtrees, bottleneck_tree

    def greedy_random_sf(self, roots):
        """Grow one tree per root; at each step the currently cheapest tree
        claims its cheapest frontier vertex.  Returns the subtree dict."""
        self.subtrees = {}
        heapDict = {}
        inST = [False] * self.V
        used = len(roots)
        key = {}
        cur_tree = roots[0]
        for root in roots:
            inST[root] = True
            key[root] = [math.inf] * self.V
            heapDict[root] = Q.PriorityQueue()
            self.subtrees[root] = {
                'cost': 0,
                'edges': set()
            }
            for adj in self.adj_list[root].items():
                # item = (cost, (v, parent[v]))
                key[root][adj[0]] = adj[1]
                item = (adj[1], (adj[0], root))
                heapDict[root].put(item)
        while used != self.V:
            item = heapDict[cur_tree].get()
            c = item[0]
            v, parent = item[1]
            if inST[v]:
                continue
            inST[v] = True
            used += 1
            self.subtrees[cur_tree]['cost'] += c
            self.subtrees[cur_tree]['edges'].add((parent, v))
            for adj in self.adj_list[v].items():
                i = adj[0]
                c = adj[1]
                if (not inST[i]) and (key[cur_tree][i] > c):
                    key[cur_tree][i] = c
                    item = (c, (i, v))
                    heapDict[cur_tree].put(item)
            # Hand the turn to the tree with the smallest accumulated cost.
            for root in roots:
                cost = self.subtrees[root]['cost']
                if cost <= self.subtrees[cur_tree]['cost']:
                    cur_tree = root
        return self.subtrees

    def improved_random_sf(self, roots):
        """Like greedy_random_sf, but the next tree is the one minimizing
        (cost so far + cheapest frontier edge), via util.min_set."""
        self.subtrees = {}
        heapDict = {}
        inST = [False] * self.V
        used = len(roots)
        key = {}
        cur_tree = roots[0]
        target = 0
        for root in roots:
            inST[root] = True
            key[root] = [math.inf] * self.V
            heapDict[root] = Q.PriorityQueue()
            self.subtrees[root] = {
                'cost': 0,
                'edges': set()
            }
            for adj in self.adj_list[root].items():
                # item = (cost, (v, root))
                key[root][adj[0]] = adj[1]
                item = (adj[1], (adj[0], root))
                heapDict[root].put(item)
        # selecting first tree
        possible_trees = set()
        for root in roots:
            if len(heapDict[root].queue) > 0:
                min_vertex = heapDict[root].queue[0]
                item = (root, min_vertex[0])
                possible_trees.add(item)
        cur_tree = min_set(possible_trees)[0]
        possible_trees.clear()
        while used != self.V:
            item = heapDict[cur_tree].get()
            c = item[0]
            v, parent = item[1]
            if inST[v]:
                continue
            inST[v] = True
            used += 1
            self.subtrees[cur_tree]['cost'] += c
            self.subtrees[cur_tree]['edges'].add((parent, v))
            for adj in self.adj_list[v].items():
                i = adj[0]
                c = adj[1]
                if (not inST[i]) and (key[cur_tree][i] > c):
                    key[cur_tree][i] = c
                    item = (c, (i, v))
                    heapDict[cur_tree].put(item)
            # Re-pick the tree minimizing cost-so-far + cheapest frontier.
            for root in roots:
                if len(heapDict[root].queue) > 0:
                    min_vertex = heapDict[root].queue[0]
                    item = (root, self.subtrees[root]['cost'] + min_vertex[0])
                    possible_trees.add(item)
            cur_tree = min_set(possible_trees)[0]
            possible_trees.clear()
        return self.subtrees

    def get_prob(self, roots):
        """For each vertex return a probability distribution over `roots`,
        inversely proportional to edge cost to each root; a root maps to
        itself with probability 1."""
        probs = []
        for i in range(self.V):
            dist_sum = 0
            prob = [0] * len(roots)
            if i in roots:
                prob[roots.index(i)] = 1
                probs.append(prob)
                continue
            for idx, r in enumerate(roots):
                dist_sum += self.adj_list[i].setdefault(r, 0)
                prob[idx] = self.adj_list[i].setdefault(r, 0)
            for idx in range(len(prob)):
                if prob[idx] != 0:
                    prob[idx] = (dist_sum - prob[idx]) / ((len(prob) - 1) * dist_sum)
            probs.append(prob)
        return probs

    def mst_from_probs(self, roots, probs):
        """Randomly assign each vertex to a root using `probs`, then build
        one MST per root over its assigned vertices."""
        vertex_list = {}
        spanning_tree = {}
        for root in roots:
            spanning_tree[root] = None
            vertex_list[root] = [True] * self.V
        for idx, prob in enumerate(probs):
            tree = np.random.choice(roots, p=prob)
            vertex_list[tree][idx] = False
        for root in roots:
            spanning_tree[root] = self.mst(root, marked=vertex_list[root])
        return spanning_tree

    def make_complete(self):
        """Complete the graph by adding missing edges with cost
        maxWeight + 1 (heavier than any real edge)."""
        for i in range(self.V - 1):
            for j in range(i + 1, self.V):
                if (i, j) not in self.edges and (j, i) not in self.edges:
                    self.addEdge(i, j, self.maxWeight + 1)

    def make_uncomplete(self):
        """Undo make_complete() by removing every artificial edge."""
        for i in range(self.V - 1):
            for j in range(i + 1, self.V):
                if self.adj_matrix[i][j] == self.maxWeight + 1:
                    self.removeEdge(i, j)

    def __repr__(self):
        ret = 'Grafo\n\t |V| => '+str(self.V)+'\n\t |E| => '+str(self.E)+'\n Matrix de adjacencia\n'
        for i in self.adj_matrix:
            ret += str(i)+'\n'
        return ret
from optparse import OptionParser
import os.path
import sys
import urllib2
from config import AWS_KEY, SECRET_KEY
from amazonproduct.api import API
# Amazon item-lookup IdType values selectable on the command line.
ASIN = 'ASIN'
EAN = 'EAN'
SKU = 'SKU'
UPC = 'UPC'
def fetch_image(url, dest_path):
    """
    Downloads image and saves it to ``dest_path``.

    Fix: fetch before opening the destination, and close the file via a
    context manager -- the original leaked the handle and left an empty
    file behind when ``urlopen`` raised.
    """
    data = urllib2.urlopen(url).read()
    with open(dest_path, 'wb') as fp:
        fp.write(data)
if __name__ == '__main__':
parser = OptionParser(__doc__.strip())
parser.set_defaults(id_type=EAN, verbose=True)
parser.add_option('--ean', action='store_const', dest='id_type', const=EAN,
help='ID is an European Article Number (EAN) [default]')
parser.add_option('--asin', action='store_const', dest='id_type', const=ASIN,
help='ID is an Amazon Standard Identification Number (ASIN).')
parser.add_option('--upc', action='store_const', dest='id_type', const=UPC,
help='ID is an Universal Product Code (UPC).')
parser.add_option('--sku', action='store_const', dest='id_type', const=SKU,
help='ID is an Stock Keeping Unit (SKU).')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output.')
(options, ids) = parser.parse_args(sys.argv[1:])
if len(ids) == 0:
parser.error('No IDs specified!')
api = API(AWS_KEY, SECRET_KEY, 'de')
params = {
'ResponseGroup' : 'Images',
'SearchIndex' : 'All',
'IdType' : options.id_type,
}
# When IdType equals ASIN, SearchIndex cannot be present.
if options.id_type == ASIN:
del params['SearchIndex']
for id in ids:
id = id.replace('-', '')
if options.verbose: print 'Fetching info for %s...' % id
root = api.item_lookup(id, **params)
#~ from lxml import etree
#~ print etree.tostring(root, pretty_print=True)
url = root.Items.Item.LargeImage.URL.pyval
name, ext = os.path.splitext(url)
path = '%s%s' % (id, ext)
if options.verbose: print 'Downloading %s to %s ...' % (url, path)
fetch_image(url, path) | examples/get-cover.py | from optparse import OptionParser
import os.path
import sys
import urllib2
from config import AWS_KEY, SECRET_KEY
from amazonproduct.api import API
# Amazon item-lookup IdType values selectable on the command line.
ASIN = 'ASIN'
EAN = 'EAN'
SKU = 'SKU'
UPC = 'UPC'
def fetch_image(url, dest_path):
    """
    Downloads image and saves it to ``dest_path``.

    Fix: fetch before opening the destination, and close the file via a
    context manager -- the original leaked the handle and left an empty
    file behind when ``urlopen`` raised.
    """
    data = urllib2.urlopen(url).read()
    with open(dest_path, 'wb') as fp:
        fp.write(data)
if __name__ == '__main__':
parser = OptionParser(__doc__.strip())
parser.set_defaults(id_type=EAN, verbose=True)
parser.add_option('--ean', action='store_const', dest='id_type', const=EAN,
help='ID is an European Article Number (EAN) [default]')
parser.add_option('--asin', action='store_const', dest='id_type', const=ASIN,
help='ID is an Amazon Standard Identification Number (ASIN).')
parser.add_option('--upc', action='store_const', dest='id_type', const=UPC,
help='ID is an Universal Product Code (UPC).')
parser.add_option('--sku', action='store_const', dest='id_type', const=SKU,
help='ID is an Stock Keeping Unit (SKU).')
parser.add_option('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output.')
(options, ids) = parser.parse_args(sys.argv[1:])
if len(ids) == 0:
parser.error('No IDs specified!')
api = API(AWS_KEY, SECRET_KEY, 'de')
params = {
'ResponseGroup' : 'Images',
'SearchIndex' : 'All',
'IdType' : options.id_type,
}
# When IdType equals ASIN, SearchIndex cannot be present.
if options.id_type == ASIN:
del params['SearchIndex']
for id in ids:
id = id.replace('-', '')
if options.verbose: print 'Fetching info for %s...' % id
root = api.item_lookup(id, **params)
#~ from lxml import etree
#~ print etree.tostring(root, pretty_print=True)
url = root.Items.Item.LargeImage.URL.pyval
name, ext = os.path.splitext(url)
path = '%s%s' % (id, ext)
if options.verbose: print 'Downloading %s to %s ...' % (url, path)
fetch_image(url, path) | 0.273089 | 0.070017 |
import itertools
import pytest
import uuid
from web3 import Web3
from web3._utils.caching import (
generate_cache_key,
)
from web3.middleware import (
construct_error_generator_middleware,
construct_result_generator_middleware,
construct_simple_cache_middleware,
)
from web3.providers.base import (
BaseProvider,
)
# Bare Web3 instance with an empty middleware stack, so each test installs
# exactly the middlewares it wants.
@pytest.fixture
def w3_base():
    return Web3(provider=BaseProvider(), middlewares=[])
# Middleware answering both fake RPC endpoints with a fresh UUID per call;
# repeated identical responses therefore indicate a cache hit.
@pytest.fixture
def result_generator_middleware():
    return construct_result_generator_middleware({
        'fake_endpoint': lambda *_: str(uuid.uuid4()),
        'not_whitelisted': lambda *_: str(uuid.uuid4()),
    })
# w3_base with the UUID result generator installed.
@pytest.fixture
def w3(w3_base, result_generator_middleware):
    w3_base.middleware_onion.add(result_generator_middleware)
    return w3_base
def test_simple_cache_middleware_pulls_from_cache(w3):
    """A pre-seeded cache entry is returned instead of hitting the provider."""
    seeded_key = generate_cache_key(('fake_endpoint', [1]))

    def cache_class():
        return {seeded_key: {'result': 'value-a'}}

    middleware = construct_simple_cache_middleware(
        cache_class=cache_class,
        rpc_whitelist={'fake_endpoint'},
    )
    w3.middleware_onion.add(middleware)
    assert w3.manager.request_blocking('fake_endpoint', [1]) == 'value-a'
def test_simple_cache_middleware_populates_cache(w3):
    """The first response is cached per (method, params) pair."""
    middleware = construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    )
    w3.middleware_onion.add(middleware)
    first = w3.manager.request_blocking('fake_endpoint', [])
    assert w3.manager.request_blocking('fake_endpoint', []) == first
    assert w3.manager.request_blocking('fake_endpoint', [1]) != first
def test_simple_cache_middleware_does_not_cache_none_responses(w3_base):
    """None results must not be cached: the callback runs once per call."""
    call_count = itertools.count()
    w3 = w3_base

    def result_cb(method, params):
        next(call_count)
        return None

    w3.middleware_onion.add(construct_result_generator_middleware({
        'fake_endpoint': result_cb,
    }))
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    for _ in range(2):
        w3.manager.request_blocking('fake_endpoint', [])
    assert next(call_count) == 2
def test_simple_cache_middleware_does_not_cache_error_responses(w3_base):
    """Error responses are never cached: each call raises a fresh message."""
    w3 = w3_base
    w3.middleware_onion.add(construct_error_generator_middleware({
        'fake_endpoint': lambda *_: 'msg-{0}'.format(str(uuid.uuid4())),
    }))
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    messages = []
    for _ in range(2):
        with pytest.raises(ValueError) as err:
            w3.manager.request_blocking('fake_endpoint', [])
        messages.append(str(err))
    assert messages[0] != messages[1]
def test_simple_cache_middleware_does_not_cache_endpoints_not_in_whitelist(w3):
    """Endpoints outside rpc_whitelist bypass the cache entirely.

    Fix: stripped dataset residue fused onto the final assertion line.
    """
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    result_a = w3.manager.request_blocking('not_whitelisted', [])
    result_b = w3.manager.request_blocking('not_whitelisted', [])
    assert result_a != result_b

# Salvaged from the corrupted trailing line of this dump chunk: the first
# import of the following module copy.
import itertools
import pytest
import uuid
from web3 import Web3
from web3._utils.caching import (
generate_cache_key,
)
from web3.middleware import (
construct_error_generator_middleware,
construct_result_generator_middleware,
construct_simple_cache_middleware,
)
from web3.providers.base import (
BaseProvider,
)
# Bare Web3 instance with an empty middleware stack, so each test installs
# exactly the middlewares it wants.
@pytest.fixture
def w3_base():
    return Web3(provider=BaseProvider(), middlewares=[])
# Middleware answering both fake RPC endpoints with a fresh UUID per call;
# repeated identical responses therefore indicate a cache hit.
@pytest.fixture
def result_generator_middleware():
    return construct_result_generator_middleware({
        'fake_endpoint': lambda *_: str(uuid.uuid4()),
        'not_whitelisted': lambda *_: str(uuid.uuid4()),
    })
# w3_base with the UUID result generator installed.
@pytest.fixture
def w3(w3_base, result_generator_middleware):
    w3_base.middleware_onion.add(result_generator_middleware)
    return w3_base
def test_simple_cache_middleware_pulls_from_cache(w3):
    """A pre-seeded cache entry is returned instead of hitting the provider."""
    seeded_key = generate_cache_key(('fake_endpoint', [1]))

    def cache_class():
        return {seeded_key: {'result': 'value-a'}}

    middleware = construct_simple_cache_middleware(
        cache_class=cache_class,
        rpc_whitelist={'fake_endpoint'},
    )
    w3.middleware_onion.add(middleware)
    assert w3.manager.request_blocking('fake_endpoint', [1]) == 'value-a'
def test_simple_cache_middleware_populates_cache(w3):
    """The first response is cached per (method, params) pair."""
    middleware = construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    )
    w3.middleware_onion.add(middleware)
    first = w3.manager.request_blocking('fake_endpoint', [])
    assert w3.manager.request_blocking('fake_endpoint', []) == first
    assert w3.manager.request_blocking('fake_endpoint', [1]) != first
def test_simple_cache_middleware_does_not_cache_none_responses(w3_base):
    """None results must not be cached: the callback runs once per call."""
    call_count = itertools.count()
    w3 = w3_base

    def result_cb(method, params):
        next(call_count)
        return None

    w3.middleware_onion.add(construct_result_generator_middleware({
        'fake_endpoint': result_cb,
    }))
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    for _ in range(2):
        w3.manager.request_blocking('fake_endpoint', [])
    assert next(call_count) == 2
def test_simple_cache_middleware_does_not_cache_error_responses(w3_base):
    """Error responses are never cached: each call raises a fresh message."""
    w3 = w3_base
    w3.middleware_onion.add(construct_error_generator_middleware({
        'fake_endpoint': lambda *_: 'msg-{0}'.format(str(uuid.uuid4())),
    }))
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    messages = []
    for _ in range(2):
        with pytest.raises(ValueError) as err:
            w3.manager.request_blocking('fake_endpoint', [])
        messages.append(str(err))
    assert messages[0] != messages[1]
def test_simple_cache_middleware_does_not_cache_endpoints_not_in_whitelist(w3):
    """Endpoints outside rpc_whitelist bypass the cache entirely.

    Fix: stripped dataset residue fused onto the final assertion line.
    """
    w3.middleware_onion.add(construct_simple_cache_middleware(
        cache_class=dict,
        rpc_whitelist={'fake_endpoint'},
    ))
    result_a = w3.manager.request_blocking('not_whitelisted', [])
    result_b = w3.manager.request_blocking('not_whitelisted', [])
    assert result_a != result_b
import requests
import os
from pandas.io.json import json_normalize
import datetime
import base64
## import API use key, API user and base URL from Jenkins variables
# NOTE(review): when a variable is missing these handlers only print and
# fall through, so the later use of api_key/api_user/base_url raises
# NameError.  Consider re-raising or sys.exit() -- confirm intended
# behaviour.
try:
    api_key = os.environ['AUTH_API_KEY']
except KeyError:
    print("API-key is missing")
try:
    api_user = os.environ['AUTH_API_USER']
except KeyError:
    print("API-user is missing")
try:
    base_url = os.environ['BASE_URL']
except KeyError:
    print("Base URL is missing")
# Accumulator for the exported question rows and the export endpoint URL.
kysymykset=[]
url = "https://"+base_url+"/api/export/v1/kysymykset"
# One load timestamp is stamped onto every row of this run.
loadtime=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Accept'] = 'application/json'
### encode API user and API key into the request headers (HTTP Basic auth)
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')
def keycheck(x,y):
if x in y:
return y[x]
else:
return None
def makerow_kysymykset():
return {
"kysymys_en": None,
"kysymysryhma_en": None,
"kysymysryhma_sv": None,
"jatkokysymys": None,
"jarjestys": None,
"kysymysryhma_fi": None,
"kategoria": None,
"kategoria.rahoitusmallikysymys": None,
"kategoria.taustakysymyksen_tyyppi": None,
"jatkokysymys_kysymysid": None,
"kysymysryhmaid": None,
"kysymysid": None,
"vastaustyyppi": None,
"kysymys_sv": None,
"valtakunnallinen": None,
"kysymys_fi": None,
"loadtime": None,
"source": None
}
## Not checking the status just downloading
##
##
response = requests.get(url, headers=reqheaders).json()
for kysymys in response ['data']:
if kysymys["kategoria"] != None:
row = makerow_kysymykset()
row["kysymys_en"] = keycheck("kysymys_en",kysymys)
row["kysymysryhma_en"] = keycheck("kysymysryhma_en",kysymys)
row["kysymysryhma_sv"] = keycheck("kysymysryhma_sv",kysymys)
if "jarjestys" in kysymys:
if kysymys["jarjestys"] == None:
row["jarjestys"] = -1
else:
row["jarjestys"] = kysymys["jarjestys"]
else:
row["jarjestys"] = -1
row["jatkokysymys"] = keycheck("jatkokysymys",kysymys)
row["kysymysryhma_fi"] = keycheck("kysymysryhma_fi",kysymys)
row["kategoria"] = keycheck("kategoria",kysymys)
row["kategoria.rahoitusmallikysymys"] = keycheck("rahoitusmallikysymys",kysymys["kategoria"])
row["kategoria.taustakysymyksen_tyyppi"] = keycheck("taustakysymyksen_tyyppi",kysymys["kategoria"])
row["jatkokysymys_kysymysid"] = keycheck("jatkokysymys_kysymysid",kysymys)
row["kysymysryhmaid"] = keycheck("kysymysryhmaid",kysymys)
row["kysymysid"] = keycheck("kysymysid",kysymys)
row["vastaustyyppi"] = keycheck("vastaustyyppi",kysymys)
row["kysymys_sv"] = keycheck("kysymys_sv",kysymys)
row["valtakunnallinen"] = keycheck("valtakunnallinen",kysymys)
row["kysymys_fi"] = keycheck("kysymys_fi",kysymys)
row["source"] = url
row["loadtime"] = str(loadtime)
kysymykset.append(row)
else:
row = makerow_kysymykset()
row["kysymys_en"] = keycheck("kysymys_en",kysymys)
row["kysymysryhma_en"] = keycheck("kysymysryhma_en",kysymys)
row["kysymysryhma_sv"] = keycheck("kysymysryhma_sv",kysymys)
if "jarjestys" in kysymys:
if kysymys["jarjestys"] == None:
row["jarjestys"] = -1
else:
row["jarjestys"] = kysymys["jarjestys"]
else:
row["jarjestys"] = -1
row["jatkokysymys"] = keycheck("jatkokysymys",kysymys)
row["kysymysryhma_fi"] = keycheck("kysymysryhma_fi",kysymys)
row["jatkokysymys_kysymysid"] = keycheck("jatkokysymys_kysymysid",kysymys)
row["kysymysryhmaid"] = keycheck("kysymysryhmaid",kysymys)
row["kysymysid"] = keycheck("kysymysid",kysymys)
row["vastaustyyppi"] = keycheck("vastaustyyppi",kysymys)
row["kysymys_sv"] = keycheck("kysymys_sv",kysymys)
row["valtakunnallinen"] = keycheck("valtakunnallinen",kysymys)
row["kysymys_fi"] = keycheck("kysymys_fi",kysymys)
row["source"] = url
row["loadtime"] = str(loadtime)
kysymykset.append(row)
data = json_normalize(kysymykset)
# DATA to csv for import to MSSQL - can be used also for BULK inserting
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/kysymykset.csv', sep=';', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$') | pdi_integrations/arvo/python_scripts/get_arvo_kysymykset.py | import requests
import os
from pandas.io.json import json_normalize
import datetime
import base64
## import API use key, API user and base URL from Jenkins variables
try:
api_key = os.environ['AUTH_API_KEY']
except KeyError:
print("API-key is missing")
try:
api_user = os.environ['AUTH_API_USER']
except KeyError:
print("API-user is missing")
try:
base_url = os.environ['BASE_URL']
except KeyError:
print("Base URL is missing")
kysymykset=[]
url = "https://"+base_url+"/api/export/v1/kysymykset"
loadtime=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Accept'] = 'application/json'
### encode API user and API key tothe request headers
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')
def keycheck(x,y):
if x in y:
return y[x]
else:
return None
def makerow_kysymykset():
return {
"kysymys_en": None,
"kysymysryhma_en": None,
"kysymysryhma_sv": None,
"jatkokysymys": None,
"jarjestys": None,
"kysymysryhma_fi": None,
"kategoria": None,
"kategoria.rahoitusmallikysymys": None,
"kategoria.taustakysymyksen_tyyppi": None,
"jatkokysymys_kysymysid": None,
"kysymysryhmaid": None,
"kysymysid": None,
"vastaustyyppi": None,
"kysymys_sv": None,
"valtakunnallinen": None,
"kysymys_fi": None,
"loadtime": None,
"source": None
}
## Not checking the status just downloading
##
##
response = requests.get(url, headers=reqheaders).json()
for kysymys in response ['data']:
if kysymys["kategoria"] != None:
row = makerow_kysymykset()
row["kysymys_en"] = keycheck("kysymys_en",kysymys)
row["kysymysryhma_en"] = keycheck("kysymysryhma_en",kysymys)
row["kysymysryhma_sv"] = keycheck("kysymysryhma_sv",kysymys)
if "jarjestys" in kysymys:
if kysymys["jarjestys"] == None:
row["jarjestys"] = -1
else:
row["jarjestys"] = kysymys["jarjestys"]
else:
row["jarjestys"] = -1
row["jatkokysymys"] = keycheck("jatkokysymys",kysymys)
row["kysymysryhma_fi"] = keycheck("kysymysryhma_fi",kysymys)
row["kategoria"] = keycheck("kategoria",kysymys)
row["kategoria.rahoitusmallikysymys"] = keycheck("rahoitusmallikysymys",kysymys["kategoria"])
row["kategoria.taustakysymyksen_tyyppi"] = keycheck("taustakysymyksen_tyyppi",kysymys["kategoria"])
row["jatkokysymys_kysymysid"] = keycheck("jatkokysymys_kysymysid",kysymys)
row["kysymysryhmaid"] = keycheck("kysymysryhmaid",kysymys)
row["kysymysid"] = keycheck("kysymysid",kysymys)
row["vastaustyyppi"] = keycheck("vastaustyyppi",kysymys)
row["kysymys_sv"] = keycheck("kysymys_sv",kysymys)
row["valtakunnallinen"] = keycheck("valtakunnallinen",kysymys)
row["kysymys_fi"] = keycheck("kysymys_fi",kysymys)
row["source"] = url
row["loadtime"] = str(loadtime)
kysymykset.append(row)
else:
row = makerow_kysymykset()
row["kysymys_en"] = keycheck("kysymys_en",kysymys)
row["kysymysryhma_en"] = keycheck("kysymysryhma_en",kysymys)
row["kysymysryhma_sv"] = keycheck("kysymysryhma_sv",kysymys)
if "jarjestys" in kysymys:
if kysymys["jarjestys"] == None:
row["jarjestys"] = -1
else:
row["jarjestys"] = kysymys["jarjestys"]
else:
row["jarjestys"] = -1
row["jatkokysymys"] = keycheck("jatkokysymys",kysymys)
row["kysymysryhma_fi"] = keycheck("kysymysryhma_fi",kysymys)
row["jatkokysymys_kysymysid"] = keycheck("jatkokysymys_kysymysid",kysymys)
row["kysymysryhmaid"] = keycheck("kysymysryhmaid",kysymys)
row["kysymysid"] = keycheck("kysymysid",kysymys)
row["vastaustyyppi"] = keycheck("vastaustyyppi",kysymys)
row["kysymys_sv"] = keycheck("kysymys_sv",kysymys)
row["valtakunnallinen"] = keycheck("valtakunnallinen",kysymys)
row["kysymys_fi"] = keycheck("kysymys_fi",kysymys)
row["source"] = url
row["loadtime"] = str(loadtime)
kysymykset.append(row)
data = json_normalize(kysymykset)
# DATA to csv for import to MSSQL - can be used also for BULK inserting
data.to_csv(path_or_buf='D:/pdi_integrations/data/arvo/kysymykset.csv', sep=';', na_rep='',
header=True, index=False, mode='w', encoding='utf-8-sig', quoting=2,
quotechar='"', line_terminator='\n' , escapechar='$') | 0.082993 | 0.076236 |
import paho.mqtt.client as mqttClient
import mariadb
import decoder
import json
import os
import datetime
from threading import Event
# The Things Network MQQT broker credentials
broker_endpoint = os.getenv("BROKER_ADDRESS")
port = os.getenv("BROKER_PORT")
user = os.getenv("BROKER_USER")
password = os.getenv("BROKER_PASSWORD")
# Database environment variables
db_user = os.getenv("DB_USER")
db_password = <PASSWORD>("DB_PASSWORD")
db_endpoint = os.getenv("DB_ENDPOINT")
db_port = os.getenv("DB_PORT")
db_db = os.getenv("DB_DB")
# Database tables
db_json_table = os.getenv("DB_JSON_TABLE", "raw_json")
db_metadata_table = os.getenv("DB_METADATA_TABLE", "metadata")
db_positional_table = os.getenv("DB_POSITIONAL_TABLE", "positional")
db_sensor_data_table = os.getenv("DB_SENSOR_DATA_TABLE", "sensor_data")
db_transmissional_data_table = os.getenv("DB_TRANSMISSIONAL_DATA_TABLE", "transmissional_data")
# Check if we have all needed environment keys
if not any([broker_endpoint, port, user, password, db_user, db_password, db_endpoint, db_port, db_db, db_json_table]):
print("Missing environment variables, check your docker compose file.")
os._exit(1)
try:
# Try connecting to the database
conn = mariadb.connect(
user=db_user,
password=<PASSWORD>,
host=db_endpoint,
port=int(db_port),
database=db_db
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB Platform: {e}")
os._exit(1)
def on_connect(client, userdata, flags, rc):
if rc == 0:
# Connection succesfull
print("Connected to broker!")
# Subscribe to all events
print("Subscribing to main topic...")
client.subscribe("#")
print("Subcribed!")
else:
print(f"Connection failed (rc: {rc})")
def ingest(payload_json):
# Convert json object to string to dump into our database
payload_json_str = str(json.dumps(payload_json, indent=4, sort_keys=False))
try:
# Get cursor and write to table
cursor = conn.cursor()
# Insert raw JSON into raw_json table
cursor.execute(
f"INSERT INTO {db_json_table} (id, json) VALUES (?, ?)", (0, payload_json_str)
)
# Commit to database
conn.commit()
except mariadb.Error as e:
print(f"Error inserting JSON message: {e}")
conn.rollback()
return
# Get next ID from our metadata table
try:
cursor = conn.cursor()
cursor.execute(
f"SELECT id FROM {db_metadata_table} ORDER BY id DESC LIMIT 1"
)
last_id = cursor.fetchall() # This will look like [(n,)]
if not last_id: # rows = [] There are no new rows
next_id = 0
else:
next_id = int(last_id[0][0]) + 1
except mariadb.Error as e:
print(f"Error calculating ID: {e}")
# We have no idea what the next id should be, just error out and log
return
# timestamp timestamp [0000-00-00 00:00:00]
timestamp = payload_json["received_at"].split(".")[0].replace("T", " ")
try:
uplink_msg = payload_json["uplink_message"]
except KeyError:
print("Received a message without uplink message... ignoring it.")
return
payload = payload_json["uplink_message"]["frm_payload"]
# device_id tinytext
device_id = payload_json["end_device_ids"]["device_id"]
# TODO: Decode weather data based on device or payload type?
decoded_payload = decoder.decode(device_id, payload)
# The payload doesn't match the device or the device is unknown
if not any(decoded_payload):
print(f"Decoder for device {device_id} returned blank, not storing to database!")
return
try:
# Get cursor and write to table
cursor = conn.cursor()
# This is our decoded data from the payload
decoded = decoded_payload[1]
if decoded_payload[0] == "lht":
cursor.execute(
f"INSERT INTO {db_sensor_data_table} (id, light_log_scale, light_lux, temperature, humidity, pressure, battery_status, battery_voltage, work_mode) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(next_id, None, decoded["light"], decoded["temp"], decoded["humidity"], None, decoded["battery_status"], decoded["battery_voltage"], decoded["mode"])
)
elif decoded_payload[0] in [ "py", "lopy" ]:
cursor.execute(
f"INSERT INTO {db_sensor_data_table} (id, light_log_scale, light_lux, temperature, humidity, pressure, battery_status, battery_voltage, work_mode) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(next_id, decoded["light"], None, decoded["temp"], None, decoded["pressure"], None, None, None)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting sensor_data: {e}")
return
# We push these values to the metadata table:
# application_id tinytext
application_id = payload_json["end_device_ids"]["application_ids"]["application_id"]
# gateway_id tinytext
gateway_id = payload_json["uplink_message"]["rx_metadata"][0]["gateway_ids"]["gateway_id"]
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_metadata_table} (id, timestamp, device, application, gateway) VALUES (?, ?, ?, ?, ?)",
(next_id, timestamp, device_id, application_id, gateway_id)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting metadata: {e}")
return
# latitude float
# print(f"The payload_json = {payload_json}")
latitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["latitude"]
# longitude float
longitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["longitude"]
try:
# altitude float
altitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["altitude"]
except KeyError:
# This sensor doesn't have altitude for some reason, just set it to to None
altitude = None
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_positional_table} (id, latitude, longitude, altitude) VALUES (?, ?, ?, ?)",
(next_id, latitude, longitude, altitude)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting positional data: {e}")
return
# rssi
rssi = payload_json["uplink_message"]["rx_metadata"][0]["rssi"]
try:
# snr
snr = float(payload_json["uplink_message"]["rx_metadata"][0]["snr"])
except KeyError:
# This message doesn't have SNR for some reason, ignore it
snr = None # If you put None here SQL will get upset and throw an error
# spreading_factor
spreading_factor = payload_json["uplink_message"]["settings"]["data_rate"]["lora"]["spreading_factor"]
# consumed_airtime
consumed_airtime = payload_json["uplink_message"]["consumed_airtime"].replace("s", "")
# bandwidth
bandwidth = payload_json["uplink_message"]["settings"]["data_rate"]["lora"]["bandwidth"]
# frequency
frequency = payload_json["uplink_message"]["settings"]["frequency"]
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_transmissional_data_table} (id, rssi, snr, spreading_factor, consumed_airtime, bandwidth, frequency) VALUES (?, ?, ?, ?, ?, ?, ?)",
(next_id, int(rssi), snr, int(spreading_factor), float(consumed_airtime), int(bandwidth), int(frequency))
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting transmissional data: {e}")
return
# If something failed, this won't be reached, so we're sure everything's safe now
conn.commit()
print("{} Added new data to database!".format(datetime.datetime.now().strftime("%H:%M:%S %d-%b-%Y")))
def on_message(client, userdata, message):
try:
payload_json = json.loads(message.payload)
except ValueError as e:
print(f"Error parsing message from {client}")
return
ingest(payload_json)
client = mqttClient.Client() # create new instance
# Use HTTPS with 8883
client.tls_set()
# Authenticate to TTN and setup callback functions
client.username_pw_set(user, password=password) # set username and password
client.on_connect = on_connect # attach function to callback
client.on_message = on_message # attach function to callback
# Connect and start event loop
client.connect(broker_endpoint, int(port), 60) # connect to broker
client.loop_start() # start the loop
while True:
Event().wait() | mqtt.py | import paho.mqtt.client as mqttClient
import mariadb
import decoder
import json
import os
import datetime
from threading import Event
# The Things Network MQQT broker credentials
broker_endpoint = os.getenv("BROKER_ADDRESS")
port = os.getenv("BROKER_PORT")
user = os.getenv("BROKER_USER")
password = os.getenv("BROKER_PASSWORD")
# Database environment variables
db_user = os.getenv("DB_USER")
db_password = <PASSWORD>("DB_PASSWORD")
db_endpoint = os.getenv("DB_ENDPOINT")
db_port = os.getenv("DB_PORT")
db_db = os.getenv("DB_DB")
# Database tables
db_json_table = os.getenv("DB_JSON_TABLE", "raw_json")
db_metadata_table = os.getenv("DB_METADATA_TABLE", "metadata")
db_positional_table = os.getenv("DB_POSITIONAL_TABLE", "positional")
db_sensor_data_table = os.getenv("DB_SENSOR_DATA_TABLE", "sensor_data")
db_transmissional_data_table = os.getenv("DB_TRANSMISSIONAL_DATA_TABLE", "transmissional_data")
# Check if we have all needed environment keys
if not any([broker_endpoint, port, user, password, db_user, db_password, db_endpoint, db_port, db_db, db_json_table]):
print("Missing environment variables, check your docker compose file.")
os._exit(1)
try:
# Try connecting to the database
conn = mariadb.connect(
user=db_user,
password=<PASSWORD>,
host=db_endpoint,
port=int(db_port),
database=db_db
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB Platform: {e}")
os._exit(1)
def on_connect(client, userdata, flags, rc):
if rc == 0:
# Connection succesfull
print("Connected to broker!")
# Subscribe to all events
print("Subscribing to main topic...")
client.subscribe("#")
print("Subcribed!")
else:
print(f"Connection failed (rc: {rc})")
def ingest(payload_json):
# Convert json object to string to dump into our database
payload_json_str = str(json.dumps(payload_json, indent=4, sort_keys=False))
try:
# Get cursor and write to table
cursor = conn.cursor()
# Insert raw JSON into raw_json table
cursor.execute(
f"INSERT INTO {db_json_table} (id, json) VALUES (?, ?)", (0, payload_json_str)
)
# Commit to database
conn.commit()
except mariadb.Error as e:
print(f"Error inserting JSON message: {e}")
conn.rollback()
return
# Get next ID from our metadata table
try:
cursor = conn.cursor()
cursor.execute(
f"SELECT id FROM {db_metadata_table} ORDER BY id DESC LIMIT 1"
)
last_id = cursor.fetchall() # This will look like [(n,)]
if not last_id: # rows = [] There are no new rows
next_id = 0
else:
next_id = int(last_id[0][0]) + 1
except mariadb.Error as e:
print(f"Error calculating ID: {e}")
# We have no idea what the next id should be, just error out and log
return
# timestamp timestamp [0000-00-00 00:00:00]
timestamp = payload_json["received_at"].split(".")[0].replace("T", " ")
try:
uplink_msg = payload_json["uplink_message"]
except KeyError:
print("Received a message without uplink message... ignoring it.")
return
payload = payload_json["uplink_message"]["frm_payload"]
# device_id tinytext
device_id = payload_json["end_device_ids"]["device_id"]
# TODO: Decode weather data based on device or payload type?
decoded_payload = decoder.decode(device_id, payload)
# The payload doesn't match the device or the device is unknown
if not any(decoded_payload):
print(f"Decoder for device {device_id} returned blank, not storing to database!")
return
try:
# Get cursor and write to table
cursor = conn.cursor()
# This is our decoded data from the payload
decoded = decoded_payload[1]
if decoded_payload[0] == "lht":
cursor.execute(
f"INSERT INTO {db_sensor_data_table} (id, light_log_scale, light_lux, temperature, humidity, pressure, battery_status, battery_voltage, work_mode) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(next_id, None, decoded["light"], decoded["temp"], decoded["humidity"], None, decoded["battery_status"], decoded["battery_voltage"], decoded["mode"])
)
elif decoded_payload[0] in [ "py", "lopy" ]:
cursor.execute(
f"INSERT INTO {db_sensor_data_table} (id, light_log_scale, light_lux, temperature, humidity, pressure, battery_status, battery_voltage, work_mode) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(next_id, decoded["light"], None, decoded["temp"], None, decoded["pressure"], None, None, None)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting sensor_data: {e}")
return
# We push these values to the metadata table:
# application_id tinytext
application_id = payload_json["end_device_ids"]["application_ids"]["application_id"]
# gateway_id tinytext
gateway_id = payload_json["uplink_message"]["rx_metadata"][0]["gateway_ids"]["gateway_id"]
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_metadata_table} (id, timestamp, device, application, gateway) VALUES (?, ?, ?, ?, ?)",
(next_id, timestamp, device_id, application_id, gateway_id)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting metadata: {e}")
return
# latitude float
# print(f"The payload_json = {payload_json}")
latitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["latitude"]
# longitude float
longitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["longitude"]
try:
# altitude float
altitude = payload_json["uplink_message"]["rx_metadata"][0]["location"]["altitude"]
except KeyError:
# This sensor doesn't have altitude for some reason, just set it to to None
altitude = None
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_positional_table} (id, latitude, longitude, altitude) VALUES (?, ?, ?, ?)",
(next_id, latitude, longitude, altitude)
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting positional data: {e}")
return
# rssi
rssi = payload_json["uplink_message"]["rx_metadata"][0]["rssi"]
try:
# snr
snr = float(payload_json["uplink_message"]["rx_metadata"][0]["snr"])
except KeyError:
# This message doesn't have SNR for some reason, ignore it
snr = None # If you put None here SQL will get upset and throw an error
# spreading_factor
spreading_factor = payload_json["uplink_message"]["settings"]["data_rate"]["lora"]["spreading_factor"]
# consumed_airtime
consumed_airtime = payload_json["uplink_message"]["consumed_airtime"].replace("s", "")
# bandwidth
bandwidth = payload_json["uplink_message"]["settings"]["data_rate"]["lora"]["bandwidth"]
# frequency
frequency = payload_json["uplink_message"]["settings"]["frequency"]
try:
# Get cursor and write to table
cursor = conn.cursor()
cursor.execute(
f"INSERT INTO {db_transmissional_data_table} (id, rssi, snr, spreading_factor, consumed_airtime, bandwidth, frequency) VALUES (?, ?, ?, ?, ?, ?, ?)",
(next_id, int(rssi), snr, int(spreading_factor), float(consumed_airtime), int(bandwidth), int(frequency))
)
except mariadb.Error as e:
conn.rollback()
print(f"MariaDB error inserting transmissional data: {e}")
return
# If something failed, this won't be reached, so we're sure everything's safe now
conn.commit()
print("{} Added new data to database!".format(datetime.datetime.now().strftime("%H:%M:%S %d-%b-%Y")))
def on_message(client, userdata, message):
try:
payload_json = json.loads(message.payload)
except ValueError as e:
print(f"Error parsing message from {client}")
return
ingest(payload_json)
client = mqttClient.Client() # create new instance
# Use HTTPS with 8883
client.tls_set()
# Authenticate to TTN and setup callback functions
client.username_pw_set(user, password=password) # set username and password
client.on_connect = on_connect # attach function to callback
client.on_message = on_message # attach function to callback
# Connect and start event loop
client.connect(broker_endpoint, int(port), 60) # connect to broker
client.loop_start() # start the loop
while True:
Event().wait() | 0.21626 | 0.048226 |
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import logging
from sqlalchemy import MetaData
from ...reg import reg_object1
from ...reg.result import *
from .stuff.backwardcompat import *
from .stuff import models
from .stuff.sheet_funcs import get_value, get_index
def suit_name(colname, column_names):
i = 1
while "{0}.{1}".format(colname, i) in column_names:
i += 1
return "{0}.{1}".format(colname, i)
def insert_dict(names, bind_dict):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (table_name, keys, values)
def reg_sheet(sh, runtime, i, FILE=None):
sheet_dict = {
'_file': FILE,
# '_fileprocessing': FILE,
'name': sh.name,
'seq': i,
'ncols': sh.ncols,
'nrows': sh.nrows,
'visible': sh.visibility,
}
session = runtime.get('session')
if session:
SHEET = reg_object1(session, models.Sheet, sheet_dict, FILE)
else:
SHEET = set_object(sheet_dict, FILE)
return SHEET, session
def proceed_sheet(sh, runtime, i, FILE=None):
SHEET, session = reg_sheet(sh, runtime, i, FILE)
options = runtime.get('options', {})
flush = options.get('flush')
tablename = options.get('tablename', '_'.join(sh.name.strip().split()))
prefix = options.get('prefix', 'xls')
isolated = options.get('isolated')
names_row = options.get('names_row')
names = options.get('names', [])
ncols = options.get('ncols', sh.ncols)
nrows = options.get('nrows', sh.nrows)
start_from = options.get('start_from', 0 if names_row is None else names_row + 1)
decimal_point = options.get('decimal_point', '.')
if flush:
session.flush()
if isolated:
tablename = '{0}_{1}_{2}'.format(FILE.id, tablename, i)
if prefix:
tablename = '{0}_{1}'.format(prefix, tablename)
sh_column_names = []
w = len(str(ncols))
# Выбираем названия колонок, если указана соответствующая строка
if names_row is not None and sh.nrows > names_row:
for i in range(min(ncols, sh.ncols)):
# colname = sh.cell(names_row, i).value
colname = get_value(sh, names_row, i)
if colname:
if isinstance(colname, float):
colname = 'cf_{0}'.format(colname)
if names and colname not in names:
reg_warning(SHEET, "Extra column name '{0}' found in the sheet!".format(colname))
else:
colname = 'col_{0:0{width}}'.format(i, width=w)
if colname in sh_column_names:
if colname in names:
reg_warning(SHEET, "Possibly another column '{0}' will get lost!".format(colname))
colname = suit_name(colname, sh_column_names)
sh_column_names.append(colname)
# Расширяем ряд sh_column_names до ncols если sh.ncols < ncols
sh_column_names = sh_column_names + ['col_{0:0{width}}'.format(j, width=w) for j in range(len(sh_column_names), ncols)]
# Определяемся с колонкой, по которой будут выбираться строки для записи
col_index = None
check_name = options.get('check_name')
check_column = options.get('check_column')
if check_name:
if check_name in sh_column_names:
col_index = sh_column_names.index(check_name)
else:
msg = "Column '{0}' is not in the list of the column names".format(check_name)
logging.warning(msg)
reg_warning(SHEET, msg)
if check_column:
if col_index is None:
col_index = get_index(check_column)
else:
msg = "Parameters 'check_name' and 'check_column' set simultaneously!"
logging.warning(msg)
reg_warning(SHEET, msg)
# Выбираем из таблицы название колонок, если требуется сопоставление
if names:
metadata = MetaData(session.bind, reflect=True)
if tablename in metadata.tables.keys():
mtable = metadata.tables.get(tablename)
column_names0 = [j.name for j in mtable.c][4:]
else:
column_names0 = names
else:
column_names0 = sh_column_names
# Добавляем системные колонки
column_names = ['sh_dir', 'sh_file', 'sh_sheet', 'sh_sheets_id', 'sh_y'] + column_names0
tablecols = len(column_names)
sql = 'CREATE TABLE IF NOT EXISTS "{0}" ("{1}");'.format(tablename, '","'.join(column_names))
session.execute(sql)
reg_debug(SHEET, column_names)
# В общем, случается такой баг, что если обрабатываются два файла с одинаковым именем листа,
# и во втором листе будет больше колонок, то получим ошибку
if sh.nrows > start_from:
qmarks = ['?' for j in range(tablecols)]
sql = 'INSERT INTO "{0}" VALUES ({1});'.format(tablename, ','.join(qmarks))
for i in range(start_from, sh.nrows):
needful = get_value(sh, i, col_index) if col_index is not None else True
if needful:
sh_values = sh.row_values(i, 0, ncols)
if decimal_point != '.':
sh_values = map(lambda x: str(x).replace('.', decimal_point) if isinstance(x, float) else x, sh_values)
if names:
bind_params = []
for j in column_names0:
if j in sh_column_names:
bind_params.append(sh_values[sh_column_names.index(j)])
else:
bind_params.append(None)
else:
bind_params = sh_values + [None for j in range(len(sh_values), ncols)]
bind_params = [FILE._dir.name, FILE.name, SHEET.name, SHEET.id, i] + bind_params
try:
session.bind.execute(sql, bind_params)
reg_ok(SHEET)
except Exception as e:
reg_exception(SHEET, e)
# status.error = "Error during handle sheet '{0}'!".format(SHEET.name) | index/handlers/books0p3/sheet.py |
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import logging
from sqlalchemy import MetaData
from ...reg import reg_object1
from ...reg.result import *
from .stuff.backwardcompat import *
from .stuff import models
from .stuff.sheet_funcs import get_value, get_index
def suit_name(colname, column_names):
i = 1
while "{0}.{1}".format(colname, i) in column_names:
i += 1
return "{0}.{1}".format(colname, i)
def insert_dict(names, bind_dict):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (table_name, keys, values)
def reg_sheet(sh, runtime, i, FILE=None):
sheet_dict = {
'_file': FILE,
# '_fileprocessing': FILE,
'name': sh.name,
'seq': i,
'ncols': sh.ncols,
'nrows': sh.nrows,
'visible': sh.visibility,
}
session = runtime.get('session')
if session:
SHEET = reg_object1(session, models.Sheet, sheet_dict, FILE)
else:
SHEET = set_object(sheet_dict, FILE)
return SHEET, session
def proceed_sheet(sh, runtime, i, FILE=None):
SHEET, session = reg_sheet(sh, runtime, i, FILE)
options = runtime.get('options', {})
flush = options.get('flush')
tablename = options.get('tablename', '_'.join(sh.name.strip().split()))
prefix = options.get('prefix', 'xls')
isolated = options.get('isolated')
names_row = options.get('names_row')
names = options.get('names', [])
ncols = options.get('ncols', sh.ncols)
nrows = options.get('nrows', sh.nrows)
start_from = options.get('start_from', 0 if names_row is None else names_row + 1)
decimal_point = options.get('decimal_point', '.')
if flush:
session.flush()
if isolated:
tablename = '{0}_{1}_{2}'.format(FILE.id, tablename, i)
if prefix:
tablename = '{0}_{1}'.format(prefix, tablename)
sh_column_names = []
w = len(str(ncols))
# Выбираем названия колонок, если указана соответствующая строка
if names_row is not None and sh.nrows > names_row:
for i in range(min(ncols, sh.ncols)):
# colname = sh.cell(names_row, i).value
colname = get_value(sh, names_row, i)
if colname:
if isinstance(colname, float):
colname = 'cf_{0}'.format(colname)
if names and colname not in names:
reg_warning(SHEET, "Extra column name '{0}' found in the sheet!".format(colname))
else:
colname = 'col_{0:0{width}}'.format(i, width=w)
if colname in sh_column_names:
if colname in names:
reg_warning(SHEET, "Possibly another column '{0}' will get lost!".format(colname))
colname = suit_name(colname, sh_column_names)
sh_column_names.append(colname)
# Расширяем ряд sh_column_names до ncols если sh.ncols < ncols
sh_column_names = sh_column_names + ['col_{0:0{width}}'.format(j, width=w) for j in range(len(sh_column_names), ncols)]
# Определяемся с колонкой, по которой будут выбираться строки для записи
col_index = None
check_name = options.get('check_name')
check_column = options.get('check_column')
if check_name:
if check_name in sh_column_names:
col_index = sh_column_names.index(check_name)
else:
msg = "Column '{0}' is not in the list of the column names".format(check_name)
logging.warning(msg)
reg_warning(SHEET, msg)
if check_column:
if col_index is None:
col_index = get_index(check_column)
else:
msg = "Parameters 'check_name' and 'check_column' set simultaneously!"
logging.warning(msg)
reg_warning(SHEET, msg)
# Выбираем из таблицы название колонок, если требуется сопоставление
if names:
metadata = MetaData(session.bind, reflect=True)
if tablename in metadata.tables.keys():
mtable = metadata.tables.get(tablename)
column_names0 = [j.name for j in mtable.c][4:]
else:
column_names0 = names
else:
column_names0 = sh_column_names
# Добавляем системные колонки
column_names = ['sh_dir', 'sh_file', 'sh_sheet', 'sh_sheets_id', 'sh_y'] + column_names0
tablecols = len(column_names)
sql = 'CREATE TABLE IF NOT EXISTS "{0}" ("{1}");'.format(tablename, '","'.join(column_names))
session.execute(sql)
reg_debug(SHEET, column_names)
# В общем, случается такой баг, что если обрабатываются два файла с одинаковым именем листа,
# и во втором листе будет больше колонок, то получим ошибку
if sh.nrows > start_from:
qmarks = ['?' for j in range(tablecols)]
sql = 'INSERT INTO "{0}" VALUES ({1});'.format(tablename, ','.join(qmarks))
for i in range(start_from, sh.nrows):
needful = get_value(sh, i, col_index) if col_index is not None else True
if needful:
sh_values = sh.row_values(i, 0, ncols)
if decimal_point != '.':
sh_values = map(lambda x: str(x).replace('.', decimal_point) if isinstance(x, float) else x, sh_values)
if names:
bind_params = []
for j in column_names0:
if j in sh_column_names:
bind_params.append(sh_values[sh_column_names.index(j)])
else:
bind_params.append(None)
else:
bind_params = sh_values + [None for j in range(len(sh_values), ncols)]
bind_params = [FILE._dir.name, FILE.name, SHEET.name, SHEET.id, i] + bind_params
try:
session.bind.execute(sql, bind_params)
reg_ok(SHEET)
except Exception as e:
reg_exception(SHEET, e)
# status.error = "Error during handle sheet '{0}'!".format(SHEET.name) | 0.374905 | 0.165762 |
from io import BytesIO
from buidl.helper import (
encode_varint,
hash256,
int_to_little_endian,
read_varint,
read_varstr,
)
from buidl.siphash import SipHash_2_4
BASIC_FILTER_TYPE = 0
GOLOMB_P = 19
GOLOMB_M = int(round(1.497137 * 2 ** GOLOMB_P))
def _siphash(key, value):
    """Return the 64-bit SipHash-2-4 digest of value under a 16-byte key."""
    if len(key) != 16:
        raise ValueError("Key should be 16 bytes")
    hasher = SipHash_2_4(key)
    hasher.update(value)
    return hasher.hash()
def hash_to_range(key, value, f):
    """Map value uniformly onto [0, f) using the multiply-shift trick
    over the 64-bit SipHash-2-4 of value."""
    return (_siphash(key, value) * f) >> 64
def hashed_items(key, items):
    """Hash every item into the range [0, n * GOLOMB_M) and return the
    hashes in sorted order."""
    f = len(items) * GOLOMB_M
    return sorted(hash_to_range(key, item, f) for item in items)
def encode_golomb(x, p):
    """Golomb-Rice encode x with parameter p as a list of 0/1 bits.

    The quotient x >> p is written in unary (that many 1s, then a 0),
    followed by the low p bits of x, most significant bit first.
    """
    unary = [1] * (x >> p) + [0]
    remainder = [(x >> shift) & 1 for shift in range(p - 1, -1, -1)]
    return unary + remainder
def decode_golomb(bits, p):
    """Inverse of encode_golomb.

    Consumes one codeword destructively from the front of bits and
    returns the decoded number.
    """
    # Unary quotient: count 1s up to (and including) the terminating 0.
    quotient = 0
    while bits.pop(0) != 0:
        quotient += 1
    # p-bit remainder, most significant bit first.
    remainder = 0
    for _ in range(p):
        remainder = (remainder << 1) | (1 if bits.pop(0) else 0)
    return (quotient << p) + remainder
def pack_bits(bits):
    """Pack a list of 0/1 bits into bytes, MSB first, zero-padded to a
    whole number of bytes.

    Note: pads the caller's list in place, as the original did.
    """
    bits += [0] * (-len(bits) % 8)
    value = 0
    for bit in bits:
        value = (value << 1) | (1 if bit else 0)
    return value.to_bytes(len(bits) // 8, "big")
def unpack_bits(byte_string):
    """Expand a byte-string into a flat list of 0/1 bits, MSB first."""
    bits = []
    for byte in byte_string:
        for shift in range(7, -1, -1):
            bits.append((byte >> shift) & 1)
    return bits
def serialize_gcs(sorted_items):
    """Serialize sorted hashes as a varint count followed by the
    Golomb-Rice coded deltas between consecutive items."""
    bit_stream = []
    previous = 0
    for item in sorted_items:
        bit_stream.extend(encode_golomb(item - previous, GOLOMB_P))
        previous = item
    return encode_varint(len(sorted_items)) + pack_bits(bit_stream)
def encode_gcs(key, items):
    """Return the Golomb-coded-set byte-string for the sorted hashes of
    items under key."""
    return serialize_gcs(hashed_items(key, items))
def decode_gcs(key, gcs):
    """Recover the sorted item hashes from a Golomb-coded-set byte-string.

    The key parameter is unused here; it is kept for API symmetry with
    encode_gcs.
    """
    stream = BytesIO(gcs)
    count = read_varint(stream)
    bits = unpack_bits(stream.read())
    hashes = []
    running = 0
    for _ in range(count):
        running += decode_golomb(bits, GOLOMB_P)
        hashes.append(running)
    return hashes
class CompactFilter:
    """A Golomb-coded set of item hashes supporting membership queries."""

    def __init__(self, key, hashes):
        self.key = key
        self.hashes = set(hashes)
        # Hash range for compute_hash; mirrors hashed_items' n * GOLOMB_M.
        self.f = len(self.hashes) * GOLOMB_M

    def __repr__(self):
        body = "".join(f"{h.hex()}\n" for h in sorted(self.hashes))
        return f"{self.key.hex()}:\n\n" + body

    def __eq__(self, other):
        if self.key != other.key:
            return False
        return sorted(self.hashes) == sorted(other.hashes)

    @classmethod
    def parse(cls, key, filter_bytes):
        """Reconstruct the filter from its serialized GCS bytes."""
        return cls(key, decode_gcs(key, filter_bytes))

    def hash(self):
        return hash256(self.serialize())

    def serialize(self):
        return serialize_gcs(sorted(self.hashes))

    def compute_hash(self, raw_script_pubkey):
        return hash_to_range(self.key, raw_script_pubkey, self.f)

    def __contains__(self, script_pubkey):
        return self.compute_hash(script_pubkey.raw_serialize()) in self.hashes
class GetCFiltersMessage:
    """P2P getcfilters message requesting compact filters for a block range."""

    command = b"getcfilters"
    define_network = False

    def __init__(self, filter_type=BASIC_FILTER_TYPE, start_height=1, stop_hash=None):
        if stop_hash is None:
            raise RuntimeError("A stop hash is required")
        self.filter_type = filter_type
        self.start_height = start_height
        self.stop_hash = stop_hash

    def serialize(self):
        """Wire format: filter-type byte, little-endian start height,
        reversed stop hash."""
        parts = [
            self.filter_type.to_bytes(1, "big"),
            int_to_little_endian(self.start_height, 4),
            self.stop_hash[::-1],
        ]
        return b"".join(parts)
class CFilterMessage:
    """P2P cfilter message carrying one block's compact filter."""

    command = b"cfilter"
    define_network = False

    def __init__(self, filter_type, block_hash, filter_bytes):
        self.filter_type = filter_type
        self.block_hash = block_hash
        self.filter_bytes = filter_bytes
        # The SipHash key is the first 16 bytes of the block hash in
        # little-endian (reversed) order.
        self.cf = CompactFilter.parse(block_hash[::-1][:16], filter_bytes)

    def __eq__(self, other):
        return (self.filter_type, self.block_hash, self.filter_bytes) == (
            other.filter_type,
            other.block_hash,
            other.filter_bytes,
        )

    @classmethod
    def parse(cls, s):
        """Parse a cfilter payload from a byte stream."""
        filter_type = s.read(1)[0]
        block_hash = s.read(32)[::-1]
        filter_bytes = read_varstr(s)
        return cls(filter_type, block_hash, filter_bytes)

    def hash(self):
        return hash256(self.filter_bytes)

    def __contains__(self, script_pubkey):
        return script_pubkey in self.cf
class GetCFHeadersMessage:
    """P2P getcfheaders message requesting compact filter headers.

    Raises RuntimeError if no stop_hash is supplied.
    """

    command = b"getcfheaders"
    define_network = False

    def __init__(self, filter_type=BASIC_FILTER_TYPE, start_height=0, stop_hash=None):
        self.filter_type = filter_type
        self.start_height = start_height
        if stop_hash is None:
            # Fix: the original raised a bare RuntimeError with no message,
            # unlike the sibling message classes.
            raise RuntimeError("A stop hash is required")
        self.stop_hash = stop_hash

    def serialize(self):
        """Wire format: filter-type byte, little-endian start height,
        reversed stop hash."""
        result = self.filter_type.to_bytes(1, "big")
        result += int_to_little_endian(self.start_height, 4)
        result += self.stop_hash[::-1]
        return result
class CFHeadersMessage:
    """P2P cfheaders message: a chain of filter hashes ending at stop_hash."""

    command = b"cfheaders"
    define_network = False

    def __init__(self, filter_type, stop_hash, previous_filter_header, filter_hashes):
        self.filter_type = filter_type
        self.stop_hash = stop_hash
        self.previous_filter_header = previous_filter_header
        self.filter_hashes = filter_hashes
        # Fold each filter hash into the running header:
        #   header_i = hash256(filter_hash_i || header_{i-1})
        header = previous_filter_header
        for fh in filter_hashes:
            header = hash256(fh + header)
        self.last_header = header

    def __repr__(self):
        hashes = "".join(f"{fh.hex()}\n" for fh in self.filter_hashes)
        return (
            f"up to {self.stop_hash.hex()}\n"
            f"starting from {self.previous_filter_header.hex()}\n\n" + hashes
        )

    @classmethod
    def parse(cls, s):
        """Parse a cfheaders payload from a byte stream."""
        filter_type = s.read(1)[0]
        stop_hash = s.read(32)[::-1]
        previous_filter_header = s.read(32)
        count = read_varint(s)
        filter_hashes = [s.read(32) for _ in range(count)]
        return cls(filter_type, stop_hash, previous_filter_header, filter_hashes)
class GetCFCheckPointMessage:
    """P2P getcfcheckpt message requesting filter-header checkpoints."""

    command = b"getcfcheckpt"
    define_network = False

    def __init__(self, filter_type=BASIC_FILTER_TYPE, stop_hash=None):
        if stop_hash is None:
            raise RuntimeError("Need a stop hash")
        self.filter_type = filter_type
        self.stop_hash = stop_hash

    def serialize(self):
        """Wire format: filter-type byte followed by the reversed stop hash."""
        return self.filter_type.to_bytes(1, "big") + self.stop_hash[::-1]
class CFCheckPointMessage:
command = b"cfcheckpt"
define_network = False
def __init__(self, filter_type, stop_hash, filter_headers):
self.filter_type = filter_type
self.stop_hash = stop_hash
self.filter_headers = filter_headers
def __repr__(self):
result = f"up to {self.stop_hash.hex()}\n\n"
for fh in self.filter_headers:
result += f"{fh.hex()}\n"
return result
@classmethod
def parse(cls, s):
filter_type = s.read(1)[0]
stop_hash = s.read(32)[::-1]
filter_headers_length = read_varint(s)
filter_headers = []
for _ in range(filter_headers_length):
filter_headers.append(s.read(32))
return cls(filter_type, stop_hash, filter_headers) | buidl/compactfilter.py | from io import BytesIO
from buidl.helper import (
encode_varint,
hash256,
int_to_little_endian,
read_varint,
read_varstr,
)
from buidl.siphash import SipHash_2_4
BASIC_FILTER_TYPE = 0
GOLOMB_P = 19
GOLOMB_M = int(round(1.497137 * 2 ** GOLOMB_P))
def _siphash(key, value):
if len(key) != 16:
raise ValueError("Key should be 16 bytes")
sip = SipHash_2_4(key)
sip.update(value)
return sip.hash()
def hash_to_range(key, value, f):
"""Returns a number between 0 and f-1, uniformly distributed.
Uses siphash-2-4."""
return _siphash(key, value) * f >> 64
def hashed_items(key, items):
n = len(items)
f = n * GOLOMB_M
result = []
for item in items:
result.append(hash_to_range(key, item, f))
return sorted(result)
def encode_golomb(x, p):
"""converts a number x to a golomb-encoded array of 0's and 1's"""
# quotient when dividing x by 2^p
q = x >> p
# q 1's and a 0 at the end
result = [1] * q + [0]
# the last p bits of x
result += [x & (1 << (p - i - 1)) > 0 for i in range(p)]
return result
def decode_golomb(bits, p):
"""converts a golomb-encoded array of 0's and 1's to a number"""
q = 0
while bits[0] != 0:
q += 1
bits.pop(0)
bits.pop(0)
r = 0
for _ in range(p):
r <<= 1
if bits.pop(0) == 1:
r |= 1
return (q << p) + r
def pack_bits(bits):
"""converts bits to a byte-string"""
num_bytes = len(bits)
bits += [0] * (-num_bytes % 8)
result = 0
for bit in bits:
result <<= 1
if bit:
result |= 1
return result.to_bytes(len(bits) // 8, "big")
def unpack_bits(byte_string):
bits = []
for byte in byte_string:
for _ in range(8):
if byte & 0x80:
bits.append(1)
else:
bits.append(0)
byte <<= 1
return bits
def serialize_gcs(sorted_items):
last_value = 0
result = []
for item in sorted_items:
delta = item - last_value
result += encode_golomb(delta, GOLOMB_P)
last_value = item
return encode_varint(len(sorted_items)) + pack_bits(result)
def encode_gcs(key, items):
"""Returns the golomb-coded-set byte-string which is the sorted
hashes of the items"""
sorted_items = hashed_items(key, items)
return serialize_gcs(sorted_items)
def decode_gcs(key, gcs):
"""Returns the sorted hashes of the items from the golomb-coded-set"""
s = BytesIO(gcs)
num_items = read_varint(s)
bits = unpack_bits(s.read())
items = []
current = 0
for _ in range(num_items):
delta = decode_golomb(bits, GOLOMB_P)
current += delta
items.append(current)
return items
class CompactFilter:
def __init__(self, key, hashes):
self.key = key
self.hashes = set(hashes)
self.f = len(self.hashes) * GOLOMB_M
def __repr__(self):
result = f"{self.key.hex()}:\n\n"
for h in sorted(list(self.hashes)):
result += f"{h.hex()}\n"
return result
def __eq__(self, other):
return self.key == other.key and sorted(list(self.hashes)) == sorted(
list(other.hashes)
)
@classmethod
def parse(cls, key, filter_bytes):
return cls(key, set(decode_gcs(key, filter_bytes)))
def hash(self):
return hash256(self.serialize())
def serialize(self):
return serialize_gcs(sorted(list(self.hashes)))
def compute_hash(self, raw_script_pubkey):
return hash_to_range(self.key, raw_script_pubkey, self.f)
def __contains__(self, script_pubkey):
raw_script_pubkey = script_pubkey.raw_serialize()
return self.compute_hash(raw_script_pubkey) in self.hashes
class GetCFiltersMessage:
command = b"getcfilters"
define_network = False
def __init__(self, filter_type=BASIC_FILTER_TYPE, start_height=1, stop_hash=None):
self.filter_type = filter_type
self.start_height = start_height
if stop_hash is None:
raise RuntimeError("A stop hash is required")
self.stop_hash = stop_hash
def serialize(self):
result = self.filter_type.to_bytes(1, "big")
result += int_to_little_endian(self.start_height, 4)
result += self.stop_hash[::-1]
return result
class CFilterMessage:
command = b"cfilter"
define_network = False
def __init__(self, filter_type, block_hash, filter_bytes):
self.filter_type = filter_type
self.block_hash = block_hash
self.filter_bytes = filter_bytes
self.cf = CompactFilter.parse(block_hash[::-1][:16], filter_bytes)
def __eq__(self, other):
return (
self.filter_type == other.filter_type
and self.block_hash == other.block_hash
and self.filter_bytes == other.filter_bytes
)
@classmethod
def parse(cls, s):
filter_type = s.read(1)[0]
block_hash = s.read(32)[::-1]
filter_bytes = read_varstr(s)
return cls(filter_type, block_hash, filter_bytes)
def hash(self):
return hash256(self.filter_bytes)
def __contains__(self, script_pubkey):
return script_pubkey in self.cf
class GetCFHeadersMessage:
command = b"getcfheaders"
define_network = False
def __init__(self, filter_type=BASIC_FILTER_TYPE, start_height=0, stop_hash=None):
self.filter_type = filter_type
self.start_height = start_height
if stop_hash is None:
raise RuntimeError
self.stop_hash = stop_hash
def serialize(self):
result = self.filter_type.to_bytes(1, "big")
result += int_to_little_endian(self.start_height, 4)
result += self.stop_hash[::-1]
return result
class CFHeadersMessage:
command = b"cfheaders"
define_network = False
def __init__(self, filter_type, stop_hash, previous_filter_header, filter_hashes):
self.filter_type = filter_type
self.stop_hash = stop_hash
self.previous_filter_header = previous_filter_header
self.filter_hashes = filter_hashes
current = self.previous_filter_header
for filter_hash in self.filter_hashes:
current = hash256(filter_hash + current)
self.last_header = current
def __repr__(self):
result = f"up to {self.stop_hash.hex()}\nstarting from {self.previous_filter_header.hex()}\n\n"
for fh in self.filter_hashes:
result += f"{fh.hex()}\n"
return result
@classmethod
def parse(cls, s):
filter_type = s.read(1)[0]
stop_hash = s.read(32)[::-1]
previous_filter_header = s.read(32)
filter_hashes_length = read_varint(s)
filter_hashes = []
for _ in range(filter_hashes_length):
filter_hashes.append(s.read(32))
return cls(filter_type, stop_hash, previous_filter_header, filter_hashes)
class GetCFCheckPointMessage:
command = b"getcfcheckpt"
define_network = False
def __init__(self, filter_type=BASIC_FILTER_TYPE, stop_hash=None):
self.filter_type = filter_type
if stop_hash is None:
raise RuntimeError("Need a stop hash")
self.stop_hash = stop_hash
def serialize(self):
result = self.filter_type.to_bytes(1, "big")
result += self.stop_hash[::-1]
return result
class CFCheckPointMessage:
command = b"cfcheckpt"
define_network = False
def __init__(self, filter_type, stop_hash, filter_headers):
self.filter_type = filter_type
self.stop_hash = stop_hash
self.filter_headers = filter_headers
def __repr__(self):
result = f"up to {self.stop_hash.hex()}\n\n"
for fh in self.filter_headers:
result += f"{fh.hex()}\n"
return result
@classmethod
def parse(cls, s):
filter_type = s.read(1)[0]
stop_hash = s.read(32)[::-1]
filter_headers_length = read_varint(s)
filter_headers = []
for _ in range(filter_headers_length):
filter_headers.append(s.read(32))
return cls(filter_type, stop_hash, filter_headers) | 0.753739 | 0.403684 |
from rest_framework.test import APIClient
from tests.utils import decode_content
def test_model_list_response_unfiltered():
    """Without a fields filter, the list endpoint returns every field."""
    content = decode_content(APIClient().get('/snippets/'))
    assert content == [
        {
            'id': 1,
            'title': 'Fork bomb',
            'code': ':(){ :|: & };:',
            'linenos': False,
            'language': 'bash',
        },
        {
            'id': 2,
            'title': 'French flag',
            'code': "print((u'\x1b[3%s;1m\u2588'*78+u'\n')%((4,)*26+(7,)*26+(1,)*26)*30)",
            'linenos': False,
            'language': 'python',
        },
    ]
def test_model_detail_response_unfiltered():
    """Without a fields filter, the detail endpoint returns every field."""
    content = decode_content(APIClient().get('/snippets/3/'))
    assert content == {
        'id': 3,
        'title': 'Russian roulette',
        'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
        'linenos': False,
        'language': 'bash',
    }
def test_model_list_response_filtered_includes():
    """`?fields=` keeps only the named fields on every list item."""
    content = decode_content(APIClient().get('/snippets/?fields=title,language'))
    assert content == [
        {'title': 'Fork bomb', 'language': 'bash'},
        {'title': 'French flag', 'language': 'python'},
    ]
def test_model_detail_response_filtered_includes():
    """`?fields=` keeps only the named fields on a detail response."""
    content = decode_content(APIClient().get('/snippets/3/?fields=title,language'))
    assert content == {'title': 'Russian roulette', 'language': 'bash'}
def test_model_list_response_filtered_excludes():
    """`?fields!=` drops the named fields from every list item."""
    content = decode_content(APIClient().get('/snippets/?fields!=code,language'))
    assert content == [
        {'id': 1, 'title': 'Fork bomb', 'linenos': False},
        {'id': 2, 'title': 'French flag', 'linenos': False},
    ]
def test_model_detail_response_filtered_excludes():
    """`?fields!=` drops the named fields from a detail response."""
    content = decode_content(APIClient().get('/snippets/3/?fields!=id,linenos,code'))
    assert content == {'title': 'Russian roulette', 'language': 'bash'}
def test_model_response_filtered_with_some_bogus_fields():
    """Unknown field names in the filter are silently ignored."""
    content = decode_content(APIClient().get('/snippets/3/?fields=title,spam,eggs'))
    assert content == {'title': 'Russian roulette'}
def test_model_response_filtered_with_only_bogus_fields():
    """A filter naming no real fields yields an empty object."""
    content = decode_content(APIClient().get('/snippets/3/?fields=blah'))
    assert content == {}
def test_model_response_filtered_with_multiple_fields_in_separate_query_args():
    """Repeated `fields` query args are merged together."""
    content = decode_content(
        APIClient().get('/snippets/3/?fields=title&fields=linenos,language')
    )
    assert content == {
        'title': 'Russian roulette',
        'linenos': False,
        'language': 'bash',
    }
def test_model_response_filtered_with_include_and_exclude():
    """Include and exclude filters can be combined in one request."""
    content = decode_content(APIClient().get('/snippets/3/?fields=id&fields!=language'))
    assert content == {'id': 3}
def test_model_exclude_wins_for_ambiguous_filtering():
response = APIClient().get('/snippets/3/?fields=id,title,code&fields!=id')
expected = {
'title': 'Russian roulette',
'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
}
content = decode_content(response)
assert content == expected | tests/test_userfields_with_modelserializer.py | from rest_framework.test import APIClient
from tests.utils import decode_content
def test_model_list_response_unfiltered():
response = APIClient().get('/snippets/')
expected = [
{
'id': 1,
'title': 'Fork bomb',
'code': ':(){ :|: & };:',
'linenos': False,
'language': 'bash',
},
{
'id': 2,
'title': 'French flag',
'code': "print((u'\x1b[3%s;1m\u2588'*78+u'\n')%((4,)*26+(7,)*26+(1,)*26)*30)",
'linenos': False,
'language': 'python',
},
]
content = decode_content(response)
assert content == expected
def test_model_detail_response_unfiltered():
response = APIClient().get('/snippets/3/')
expected = {
'id': 3,
'title': 'Russian roulette',
'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
'linenos': False,
'language': 'bash',
}
content = decode_content(response)
assert content == expected
def test_model_list_response_filtered_includes():
response = APIClient().get('/snippets/?fields=title,language')
expected = [
{
'title': 'Fork bomb',
'language': 'bash',
},
{
'title': 'French flag',
'language': 'python',
},
]
content = decode_content(response)
assert content == expected
def test_model_detail_response_filtered_includes():
response = APIClient().get('/snippets/3/?fields=title,language')
expected = {
'title': 'Russian roulette',
'language': 'bash',
}
content = decode_content(response)
assert content == expected
def test_model_list_response_filtered_excludes():
response = APIClient().get('/snippets/?fields!=code,language')
expected = [
{
'id': 1,
'title': 'Fork bomb',
'linenos': False,
},
{
'id': 2,
'title': 'French flag',
'linenos': False,
},
]
content = decode_content(response)
assert content == expected
def test_model_detail_response_filtered_excludes():
response = APIClient().get('/snippets/3/?fields!=id,linenos,code')
expected = {
'title': 'Russian roulette',
'language': 'bash',
}
content = decode_content(response)
assert content == expected
def test_model_response_filtered_with_some_bogus_fields():
response = APIClient().get('/snippets/3/?fields=title,spam,eggs')
expected = {
'title': 'Russian roulette',
}
content = decode_content(response)
assert content == expected
def test_model_response_filtered_with_only_bogus_fields():
response = APIClient().get('/snippets/3/?fields=blah')
expected = {}
content = decode_content(response)
assert content == expected
def test_model_response_filtered_with_multiple_fields_in_separate_query_args():
response = APIClient().get('/snippets/3/?fields=title&fields=linenos,language')
expected = {
'title': 'Russian roulette',
'linenos': False,
'language': 'bash',
}
content = decode_content(response)
assert content == expected
def test_model_response_filtered_with_include_and_exclude():
response = APIClient().get('/snippets/3/?fields=id&fields!=language')
expected = {
'id': 3,
}
content = decode_content(response)
assert content == expected
def test_model_exclude_wins_for_ambiguous_filtering():
response = APIClient().get('/snippets/3/?fields=id,title,code&fields!=id')
expected = {
'title': 'Russian roulette',
'code': '[ $[ $RANDOM % 6 ] == 0 ] && rm -rf / || echo "click"',
}
content = decode_content(response)
assert content == expected | 0.47171 | 0.491212 |
from forse.tools.nn_tools import *
from forse.tools.img_tools import *
from forse.tools.mix_tools import *
from keras.models import Sequential, Model, load_model
from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization
from keras.layers import Reshape, Dense, Input
from keras.layers import LeakyReLU, Dropout, Flatten, ZeroPadding2D
from keras.optimizers import Adam
from keras import losses
import numpy as np
import os
from keras import backend as K
# Modified from:
# https://github.com/eriklindernoren/Keras-GAN.git
class DCGAN:
    """DCGAN-style image-to-image network (generator + discriminator).

    Modified from https://github.com/eriklindernoren/Keras-GAN.git
    """

    def __init__(self, output_directory, img_size):
        self.img_size = img_size          # (height, width)
        self.channels = 1                 # grayscale patches
        self.kernel_size = 5
        self.output_directory = output_directory

    def smooth_accuracy(self, y_true, y_pred):
        """Accuracy after rounding both predictions and targets."""
        return K.mean(K.equal(K.round(y_true), K.round(y_pred)))

    def build_generator(self):
        """Encoder-decoder generator: two stride-2 downsamples, two
        upsamples, tanh output with `self.channels` channels."""
        img_shape = (self.img_size[0], self.img_size[1], self.channels)
        model = Sequential()
        model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same", strides=2))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same", strides=2))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same"))
        model.add(Activation("tanh"))
        img_in = Input(shape=img_shape)
        return Model(img_in, model(img_in))

    def build_discriminator(self):
        """Convolutional real-vs-generated classifier with sigmoid output."""
        img_shape = (self.img_size[0], self.img_size[1], self.channels)
        model = Sequential()
        model.add(Conv2D(64, kernel_size=self.kernel_size, strides=1,
                         input_shape=img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.5))
        model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same"))
        # NOTE(review): no LeakyReLU after this conv, unlike every other
        # conv block -- possibly an oversight, but adding one would change
        # the trained architecture; confirm before fixing.
        model.add(BatchNormalization(momentum=0.5))
        model.add(Conv2D(256, kernel_size=self.kernel_size, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Flatten())
        model.add(Dropout(0.25))
        model.add(Dense(1, activation='sigmoid'))
        img = Input(shape=img_shape)
        return Model(img, model(img))

    def build_gan(self):
        """Compile the discriminator, the generator, and the stacked
        generator->discriminator model used for generator updates."""
        img_shape = (self.img_size[0], self.img_size[1], self.channels)
        optimizer = Adam(0.0002, 0.5)
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        self.generator = self.build_generator()
        self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
        z = Input(shape=img_shape)
        img = self.generator(z)
        # Freeze the discriminator while the generator trains through
        # the combined model.
        self.discriminator.trainable = False
        valid = self.discriminator(img)
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def train(self, epochs, patches_file, batch_size=32, save_interval=100, seed=4324):
        """Alternate generator/discriminator updates for `epochs` steps,
        checkpointing both models every `save_interval` epochs.

        Fixes vs. the original: models were saved TWICE per checkpoint
        (duplicated save calls); unused locals (`accs`, loss values and
        the unused `gen_imgs_test` prediction) removed.
        """
        self.build_gan()
        X_train, X_test, Y_train, Y_test = load_training_set(patches_file, seed=seed)
        print("Training Data Shape: ", X_train.shape)
        half_batch = batch_size // 2
        self.discriminator.summary()
        save_path = self.output_directory + "/models"
        for epoch in range(epochs):
            # Generator step: push the (frozen) discriminator toward "real".
            ind_batch = np.random.randint(0, X_train.shape[0], batch_size)
            self.combined.train_on_batch(X_train[ind_batch], np.ones((batch_size, 1)))
            # Discriminator step: half real targets, half generated images.
            target_real = np.ones((half_batch, 1))
            target_fake = np.zeros((half_batch, 1))
            idxX = np.random.randint(0, X_train.shape[0], half_batch)
            idxY = np.random.randint(0, X_train.shape[0], half_batch)
            gen_imgs = self.generator.predict(X_train[idxX])
            self.discriminator.train_on_batch(Y_train[idxY], target_real)
            self.discriminator.train_on_batch(gen_imgs, target_fake)
            if epoch % save_interval == 0:
                print(epoch)
                if not os.path.exists(save_path):
                    os.makedirs(save_path)
                self.discriminator.save(save_path + '/discrim_' + str(epoch) + '.h5')
                self.generator.save(save_path + '/generat_' + str(epoch) + '.h5')
from forse.tools.img_tools import *
from forse.tools.mix_tools import *
from keras.models import Sequential, Model, load_model
from keras.layers import UpSampling2D, Conv2D, Activation, BatchNormalization
from keras.layers import Reshape, Dense, Input
from keras.layers import LeakyReLU, Dropout, Flatten, ZeroPadding2D
from keras.optimizers import Adam
from keras import losses
import numpy as np
import os
from keras import backend as K
# Modified from:
# https://github.com/eriklindernoren/Keras-GAN.git
class DCGAN:
def __init__(self, output_directory, img_size):
self.img_size = img_size
self.channels = 1
self.kernel_size = 5
self.output_directory = output_directory
def smooth_accuracy(self, y_true, y_pred):
return K.mean(K.equal(K.round(y_true), K.round(y_pred)))
def build_generator(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
model = Sequential()
model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same")) # 64x64x64
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same", strides=2)) #32x32x128
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(256, kernel_size=self.kernel_size, padding="same", strides=2)) #16x16x256
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=self.kernel_size, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=self.kernel_size, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(self.channels, kernel_size=self.kernel_size, padding="same"))
model.add(Activation("tanh"))
img_in = Input(shape=img_shape)
img_out = model(img_in)
return Model(img_in, img_out)
def build_discriminator(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
model = Sequential()
model.add(Conv2D(64, kernel_size=self.kernel_size, strides=1, input_shape=img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.5))
model.add(Conv2D(256, kernel_size=self.kernel_size, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Flatten())
model.add(Dropout(0.25))
model.add(Dense(1, activation='sigmoid'))
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
def build_gan(self):
img_shape = (self.img_size[0], self.img_size[1], self.channels)
optimizer = Adam(0.0002, 0.5)
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.generator = self.build_generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
z = Input(shape=img_shape)
img = self.generator(z)
self.discriminator.trainable = False
valid = self.discriminator(img)
self.combined = Model(z, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def train(self, epochs, patches_file, batch_size=32, save_interval=100, seed=4324):
self.build_gan()
X_train, X_test, Y_train, Y_test = load_training_set(patches_file, seed=seed)
print("Training Data Shape: ", X_train.shape)
half_batch = batch_size // 2
accs = []
self.discriminator.summary()
for epoch in range(epochs):
ind_batch = np.random.randint(0, X_train.shape[0], batch_size)
g_loss = self.combined.train_on_batch(X_train[ind_batch], np.ones((batch_size, 1)))
target_real = np.ones((half_batch, 1))
target_fake = np.zeros((half_batch, 1))
idxX = np.random.randint(0, X_train.shape[0], half_batch)
idxY = np.random.randint(0, X_train.shape[0], half_batch)
imgs = Y_train[idxY]
gen_imgs = self.generator.predict(X_train[idxX])
d_loss_real = self.discriminator.train_on_batch(imgs, target_real)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, target_fake)
if epoch % (save_interval) == 0:
print(epoch)
gen_imgs_test = self.generator.predict(X_test)
save_path = self.output_directory + "/models"
if not os.path.exists(save_path):
os.makedirs(save_path)
self.discriminator.save(save_path + '/discrim_'+str(epoch)+'.h5')
self.generator.save(save_path + '/generat_'+str(epoch)+'.h5')
self.discriminator.save(save_path + '/discrim_'+str(epoch)+'.h5')
self.generator.save(save_path + '/generat_'+str(epoch)+'.h5') | 0.841923 | 0.392046 |
import pathlib
import os
import sys
from tempfile import TemporaryDirectory
import pytest
from osbuild.buildroot import BuildRoot
from osbuild.monitor import LogMonitor, NullMonitor
from osbuild.pipeline import detect_host_runner
from osbuild.util import linux
from ..test import TestBase
@pytest.fixture(name="tempdir")
def tempdir_fixture():
    """Provide a throwaway working directory, removed after the test."""
    with TemporaryDirectory(prefix="lvm2-") as path:
        yield path
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_basic(tempdir):
    """BuildRoot runs commands repeatedly and reports their exit codes."""
    var = pathlib.Path(tempdir, "var")
    var.mkdir()
    libdir = os.path.abspath(os.curdir)
    monitor = NullMonitor(sys.stderr.fileno())
    with BuildRoot("/", detect_host_runner(), libdir, var) as root:
        # `.run` must be usable more than once on the same root
        for _ in range(2):
            assert root.run(["/usr/bin/true"], monitor).returncode == 0
        assert root.run(["/usr/bin/false"], monitor).returncode != 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_runner_fail(tempdir):
    """A nonexistent runner makes `.run` fail, and the failure output is
    both returned and written through the LogMonitor."""
    runner = "org.osbuild.nonexistantrunner"
    libdir = os.path.abspath(os.curdir)
    var = pathlib.Path(tempdir, "var")
    var.mkdir()
    logfile = os.path.join(tempdir, "log.txt")
    with BuildRoot("/", runner, libdir, var) as root, \
            open(logfile, "w") as log_fd:
        monitor = LogMonitor(log_fd.fileno())
        result = root.run(["/usr/bin/true"], monitor)
        assert result.returncode == 1
    with open(logfile) as f:
        logged = f.read()
    assert logged
    assert result.output
    assert logged == result.output
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_output(tempdir):
    """A command's stdout is captured into the run result's output."""
    var = pathlib.Path(tempdir, "var")
    var.mkdir()
    message = "42. cats are superior to dogs"
    monitor = NullMonitor(sys.stderr.fileno())
    libdir = os.path.abspath(os.curdir)
    with BuildRoot("/", detect_host_runner(), libdir, var) as root:
        result = root.run(["/usr/bin/echo", message], monitor)
    assert result.returncode == 0
    assert message in result.output.strip()
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_bind_mounts(tempdir):
    """Read-only binds are mounted ro; plain binds stay writable."""
    var = pathlib.Path(tempdir, "var")
    var.mkdir()
    rw_data = pathlib.Path(tempdir, "data")
    rw_data.mkdir()
    scripts = os.path.join(TestBase.locate_test_data(), "scripts")
    monitor = NullMonitor(sys.stderr.fileno())
    libdir = os.path.abspath(os.curdir)
    ro_binds = [f"{scripts}:/scripts"]
    with BuildRoot("/", detect_host_runner(), libdir, var) as root:
        # /scripts itself is bound read-only -> the "ro" check passes
        r = root.run(["/scripts/mount_flags.py", "/scripts", "ro"],
                     monitor, readonly_binds=ro_binds)
        assert r.returncode == 0
        # /rw-data is a writable bind -> the "ro" check must fail
        r = root.run(["/scripts/mount_flags.py", "/rw-data", "ro"],
                     monitor, binds=[f"{rw_data}:/rw-data"],
                     readonly_binds=ro_binds)
        assert r.returncode == 1
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not os.path.exists("/sys/fs/selinux"), reason="no SELinux")
def test_selinuxfs_ro(tempdir):
    """/sys/fs/selinux must never be writable inside the container.

    RPM and other tools must not assume the host's policy is the valid
    policy for the tree being built.
    """
    var = pathlib.Path(tempdir, "var")
    var.mkdir()
    scripts = os.path.join(TestBase.locate_test_data(), "scripts")
    monitor = NullMonitor(sys.stderr.fileno())
    libdir = os.path.abspath(os.curdir)
    with BuildRoot("/", detect_host_runner(), libdir, var) as root:
        cmd = ["/scripts/mount_flags.py", "/sys/fs/selinux", "ro"]
        r = root.run(cmd, monitor, readonly_binds=[f"{scripts}:/scripts"])
    assert r.returncode == 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_proc_overrides(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
cmdline = "is-this-the-real-world"
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.proc.cmdline = cmdline
r = root.run(["cat", "/proc/cmdline"], monitor)
assert r.returncode == 0
assert cmdline in r.output.strip()
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_timeout(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.run(["/bin/sleep", "1"], monitor, timeout=2)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_env_isolation(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
# Set some env variable to make sure it is not leaked into
# the container
os.environ["OSBUILD_TEST_ENV_ISOLATION"] = "42"
with BuildRoot("/", runner, libdir, var) as root:
cmd = ["/bin/sh", "-c", "/usr/bin/env > /ipc/env.txt"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "env.txt")) as f:
data = f.read().strip()
assert data
have = dict(map(lambda x: x.split("=", 1), data.split("\n")))
allowed = [
"_", # added by `env` itself
"container",
"LC_CTYPE",
"PATH",
"PWD",
"PYTHONPATH",
"PYTHONUNBUFFERED",
"SHLVL", # added by the shell wrapper
"TERM",
]
for k in have:
assert k in allowed
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_caps(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
def run_and_get_caps():
cmd = ["/bin/sh", "-c", "cat /proc/self/status > /ipc/status"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "status"), encoding="utf-8") as f:
data = f.readlines()
assert data
print(data)
perm = list(filter(lambda x: x.startswith("CapEff"), data))
assert perm and len(perm) == 1
perm = perm[0]
perm = perm[7:].strip() # strip "CapEff"
print(perm)
caps = linux.cap_mask_to_set(int(perm, base=16))
return caps
# check case of `BuildRoot.caps` is `None`, i.e. don't drop capabilities,
# thus the effective capabilities should be the bounding set
assert root.caps is None
bound_set = linux.cap_bound_set()
caps = run_and_get_caps()
assert caps == bound_set
# drop everything but `CAP_SYS_ADMIN`
assert "CAP_SYS_ADMIN" in bound_set
enable = set(["CAP_SYS_ADMIN"])
disable = bound_set - enable
root.caps = enable
caps = run_and_get_caps()
for e in enable:
assert e in caps
for d in disable:
assert d not in caps | test/mod/test_buildroot.py |
import pathlib
import os
import sys
from tempfile import TemporaryDirectory
import pytest
from osbuild.buildroot import BuildRoot
from osbuild.monitor import LogMonitor, NullMonitor
from osbuild.pipeline import detect_host_runner
from osbuild.util import linux
from ..test import TestBase
@pytest.fixture(name="tempdir")
def tempdir_fixture():
with TemporaryDirectory(prefix="lvm2-") as tmp:
yield tmp
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_basic(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 0
# Test we can use `.run` multiple times
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 0
r = root.run(["/usr/bin/false"], monitor)
assert r.returncode != 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_runner_fail(tempdir):
runner = "org.osbuild.nonexistantrunner"
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
logfile = os.path.join(tempdir, "log.txt")
with BuildRoot("/", runner, libdir, var) as root, \
open(logfile, "w") as log:
monitor = LogMonitor(log.fileno())
r = root.run(["/usr/bin/true"], monitor)
assert r.returncode == 1
with open(logfile) as f:
log = f.read()
assert log
assert r.output
assert log == r.output
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_output(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
data = "42. cats are superior to dogs"
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
r = root.run(["/usr/bin/echo", data], monitor)
assert r.returncode == 0
assert data in r.output.strip()
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_bind_mounts(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
rw_data = pathlib.Path(tempdir, "data")
rw_data.mkdir()
scripts = os.path.join(TestBase.locate_test_data(), "scripts")
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
ro_binds = [f"{scripts}:/scripts"]
cmd = ["/scripts/mount_flags.py",
"/scripts",
"ro"]
r = root.run(cmd, monitor, readonly_binds=ro_binds)
assert r.returncode == 0
cmd = ["/scripts/mount_flags.py",
"/rw-data",
"ro"]
binds = [f"{rw_data}:/rw-data"]
r = root.run(cmd, monitor, binds=binds, readonly_binds=ro_binds)
assert r.returncode == 1
@pytest.mark.skipif(not TestBase.have_test_data(), reason="no test-data access")
@pytest.mark.skipif(not os.path.exists("/sys/fs/selinux"), reason="no SELinux")
def test_selinuxfs_ro(tempdir):
# /sys/fs/selinux must never be writable in the container
# because RPM and other tools must not assume the policy
# of the host is the valid policy
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
scripts = os.path.join(TestBase.locate_test_data(), "scripts")
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
ro_binds = [f"{scripts}:/scripts"]
cmd = ["/scripts/mount_flags.py",
"/sys/fs/selinux",
"ro"]
r = root.run(cmd, monitor, readonly_binds=ro_binds)
assert r.returncode == 0
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_proc_overrides(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
cmdline = "is-this-the-real-world"
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.proc.cmdline = cmdline
r = root.run(["cat", "/proc/cmdline"], monitor)
assert r.returncode == 0
assert cmdline in r.output.strip()
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_timeout(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
root.run(["/bin/sleep", "1"], monitor, timeout=2)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
with pytest.raises(TimeoutError):
root.run(["/bin/sleep", "1"], monitor, timeout=0.1)
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_env_isolation(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
# Set some env variable to make sure it is not leaked into
# the container
os.environ["OSBUILD_TEST_ENV_ISOLATION"] = "42"
with BuildRoot("/", runner, libdir, var) as root:
cmd = ["/bin/sh", "-c", "/usr/bin/env > /ipc/env.txt"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "env.txt")) as f:
data = f.read().strip()
assert data
have = dict(map(lambda x: x.split("=", 1), data.split("\n")))
allowed = [
"_", # added by `env` itself
"container",
"LC_CTYPE",
"PATH",
"PWD",
"PYTHONPATH",
"PYTHONUNBUFFERED",
"SHLVL", # added by the shell wrapper
"TERM",
]
for k in have:
assert k in allowed
@pytest.mark.skipif(not TestBase.can_bind_mount(), reason="root only")
def test_caps(tempdir):
runner = detect_host_runner()
libdir = os.path.abspath(os.curdir)
var = pathlib.Path(tempdir, "var")
var.mkdir()
ipc = pathlib.Path(tempdir, "ipc")
ipc.mkdir()
monitor = NullMonitor(sys.stderr.fileno())
with BuildRoot("/", runner, libdir, var) as root:
def run_and_get_caps():
cmd = ["/bin/sh", "-c", "cat /proc/self/status > /ipc/status"]
r = root.run(cmd, monitor, binds=[f"{ipc}:/ipc"])
assert r.returncode == 0
with open(os.path.join(ipc, "status"), encoding="utf-8") as f:
data = f.readlines()
assert data
print(data)
perm = list(filter(lambda x: x.startswith("CapEff"), data))
assert perm and len(perm) == 1
perm = perm[0]
perm = perm[7:].strip() # strip "CapEff"
print(perm)
caps = linux.cap_mask_to_set(int(perm, base=16))
return caps
# check case of `BuildRoot.caps` is `None`, i.e. don't drop capabilities,
# thus the effective capabilities should be the bounding set
assert root.caps is None
bound_set = linux.cap_bound_set()
caps = run_and_get_caps()
assert caps == bound_set
# drop everything but `CAP_SYS_ADMIN`
assert "CAP_SYS_ADMIN" in bound_set
enable = set(["CAP_SYS_ADMIN"])
disable = bound_set - enable
root.caps = enable
caps = run_and_get_caps()
for e in enable:
assert e in caps
for d in disable:
assert d not in caps | 0.331336 | 0.235746 |
from typing import List
from detector.clone import ClonePart
from detector.clone.CloneGroup import CloneGroup
class Filter:
"""
Filter clones fully covered by other clones.
"""
NAME_COMPARATOR = 'NAME_COMPARATOR'
NAME_UNIT_COMPARATOR = 'NAME_UNIT_COMPARATOR'
def __init__(self):
self.l1 = None
self.l2 = None
self.filtered: List[CloneGroup] = []
def get__filtered(self):
return self.filtered
def add_in_filter(self, current_clone_group: CloneGroup):
for earlier_clone_group in self.filtered:
if self.contains_in(current_clone_group, earlier_clone_group):
return
if self.contains_in(earlier_clone_group, current_clone_group):
self.filtered.remove(earlier_clone_group)
self.filtered.append(current_clone_group)
def contains_in(self, first: CloneGroup, second: CloneGroup):
if first.get__group_length() > second.get__group_length():
return False
first_parts: List[ClonePart] = first.get__parts()
second_parts: List[ClonePart] = second.get__parts()
self.l1 = first.get__group_length()
self.l2 = second.get__group_length()
return self.contains(first_parts, second_parts, self.NAME_UNIT_COMPARATOR) and \
self.contains(first_parts, second_parts, self.NAME_COMPARATOR)
def contains(self, container, list1, comparator):
container_index = 0
list_index = 0
while container_index < len(container) and list_index < len(list1):
container_part: ClonePart = container[container_index]
list_part: ClonePart = list1[list_index]
if comparator == self.NAME_COMPARATOR:
compare = self.compare_by_filename(container_part, list_part)
else:
compare = self.compare_by_filename_and_unit(container_part, list_part)
if compare == 0:
if list_index + 1 == len(list1):
return True
list_index += 1
elif compare < 0:
if container_index + 1 == len(container):
return False
container_index += 1
else:
return False
def compare_by_filename(self, part1: ClonePart, part2: ClonePart):
filename_1 = part1.get__filename()
filename_2 = part2.get__filename()
if filename_1 == filename_2:
return 0
elif filename_1 < filename_2:
return -1
else:
return 1
def compare_by_filename_and_unit(self, part1: ClonePart, part2: ClonePart):
compare = self.compare_by_filename(part1, part2)
if compare == 0:
if part1.get__unit_start() <= part2.get__unit_start():
if part2.get__unit_start() + self.l2 <= part1.get__unit_start() + self.l1:
return 0
else:
return -1
else:
return 1
else:
return compare | Original/detector/clone/Filter.py | from typing import List
from detector.clone import ClonePart
from detector.clone.CloneGroup import CloneGroup
class Filter:
"""
Filter clones fully covered by other clones.
"""
NAME_COMPARATOR = 'NAME_COMPARATOR'
NAME_UNIT_COMPARATOR = 'NAME_UNIT_COMPARATOR'
def __init__(self):
self.l1 = None
self.l2 = None
self.filtered: List[CloneGroup] = []
def get__filtered(self):
return self.filtered
def add_in_filter(self, current_clone_group: CloneGroup):
for earlier_clone_group in self.filtered:
if self.contains_in(current_clone_group, earlier_clone_group):
return
if self.contains_in(earlier_clone_group, current_clone_group):
self.filtered.remove(earlier_clone_group)
self.filtered.append(current_clone_group)
def contains_in(self, first: CloneGroup, second: CloneGroup):
if first.get__group_length() > second.get__group_length():
return False
first_parts: List[ClonePart] = first.get__parts()
second_parts: List[ClonePart] = second.get__parts()
self.l1 = first.get__group_length()
self.l2 = second.get__group_length()
return self.contains(first_parts, second_parts, self.NAME_UNIT_COMPARATOR) and \
self.contains(first_parts, second_parts, self.NAME_COMPARATOR)
def contains(self, container, list1, comparator):
container_index = 0
list_index = 0
while container_index < len(container) and list_index < len(list1):
container_part: ClonePart = container[container_index]
list_part: ClonePart = list1[list_index]
if comparator == self.NAME_COMPARATOR:
compare = self.compare_by_filename(container_part, list_part)
else:
compare = self.compare_by_filename_and_unit(container_part, list_part)
if compare == 0:
if list_index + 1 == len(list1):
return True
list_index += 1
elif compare < 0:
if container_index + 1 == len(container):
return False
container_index += 1
else:
return False
def compare_by_filename(self, part1: ClonePart, part2: ClonePart):
filename_1 = part1.get__filename()
filename_2 = part2.get__filename()
if filename_1 == filename_2:
return 0
elif filename_1 < filename_2:
return -1
else:
return 1
def compare_by_filename_and_unit(self, part1: ClonePart, part2: ClonePart):
compare = self.compare_by_filename(part1, part2)
if compare == 0:
if part1.get__unit_start() <= part2.get__unit_start():
if part2.get__unit_start() + self.l2 <= part1.get__unit_start() + self.l1:
return 0
else:
return -1
else:
return 1
else:
return compare | 0.563498 | 0.195805 |
from pathlib import Path
import requests
import colorama
colorama.init()
# Function for determining whether we are using a locally
# stored book or the online book
def getBook():
bookPath = Path.cwd() / 'romeoJuliet.txt'
if bookPath.exists():
print("Using locally stored book")
bookFile = open(bookPath, 'r')
book = bookFile.read()
bookFile.close()
else:
print("Using online book")
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
res.raise_for_status()
book = res.text
return book
# Handles input for the beginning menu
def menu(flag):
print("\033[093m1: Start from the beginning")
print("2: Start at a bookmark")
if (flag == 0): # Special case for if book has already been saved
print("3: Save book locally")
print("0: Exit")
print("Please enter a selection: \033[0m", end='')
sel = input()
# Input validation
if sel != '1' and sel != '2' and sel != '3' and sel != '0':
print("\033[091mERROR: \033[0mIncorrect input detected")
sel = menu(flag)
elif sel == '3' and flag == 1: # Special case for if book has already been saved
print("\033[091mERROR: \033[0mIncorrect input detected")
sel = menu(flag)
return sel
# Beginning Menu
print("\033[091mWelcome to Romeo and Juliet")
sel = menu(0)
# Download handler
if sel == '3':
bookPath = Path.cwd() / 'romeoJuliet.txt'
if bookPath.exists():
print("\033[091mERROR: \033[0mromeoJuliet.txt already exists")
else:
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
res.raise_for_status()
bookFile = open('romeoJuliet.txt', 'wb')
for chunk in res.iter_content(100000):
bookFile.write(chunk)
print("Book saved as romeoJuliet.txt")
sel = menu(1)
# If sel == '0' quit program
if sel != '0':
book = getBook()
# Default is that our starting position is at the beginning
i = 0
# Bookmark Handler
if sel == '2':
filePath = Path.cwd() / 'bookmark.txt'
if filePath.exists():
bookMarkFile = open(filePath, 'r')
i = int(bookMarkFile.read())
if (i < len(book) - 1):
print("\033[092mStarting from position %s out of %s\033[0m" %
(i, len(book)))
else:
print("\033[091mERROR: \033[0mBookmark is past length of text")
print("\033[092mStarting from the beginning\033[0m")
i = 0
else:
print("\033[091mERROR: \033[0mIt doesn't appear a bookmark exists")
print("\033[092mStarting from the beginning\033[0m")
# Main reading section
inp = ''
while inp.lower() != 'x':
j = 0
for k in range(i, len(book)):
if book[k] == '\n':
j += 1
if j == 20:
l = i
i = k
break
elif k == (len(book) - 1):
l = i
i = k
print ("\033[092mPosition %s out of %s\033[0m" % (l, len(book)))
print(book[l:i])
print("\033[093mPress enter to continue, \"B\" to go back a passage, or \"X\" to exit: \033[0m", end='')
inp = input()
# Error handler for end of text
while i == len(book) - 1 and inp.lower() != 'x' and inp.lower() != 'b':
print('\033[091mERROR: \033[0mYou are at the end')
print("\033[093mEnter \"B\" to go back a passage, or \"X\" to exit: \033[0m", end='')
inp = input()
# Going backwards
if inp.lower() == 'b':
i = l
while i == 0 and inp.lower() == 'b':
print('\033[091mERROR: \033[0mYou are at the beginning')
print("\033[093mPress enter to continue or \"X\" to exit: \033[0m", end='')
inp = input()
if i > 0:
i = l
j = 0
for k in range(i, 0, -1):
if book[k] == '\n':
j += 1
if j == 20:
i = k
break
elif k == 1:
i = 0
# Bookmark creation and program exit
if inp.lower() == 'x':
print("\033[093mCreate a bookmark? (y/n): \033[0m", end='')
inp = input()
if inp.lower() == 'y':
print("\033[092mCreating a bookmark at position %s out of %s in bookmark.txt\033[0m" % (
l, len(book)))
bookMarkFile = open(Path.cwd() / 'bookmark.txt', 'w')
bookMarkFile.write(str(l))
bookMarkFile.close()
break | romeoJuliet.py | from pathlib import Path
import requests
import colorama
colorama.init()
# Function for determining whether we are using a locally
# stored book or the online book
def getBook():
bookPath = Path.cwd() / 'romeoJuliet.txt'
if bookPath.exists():
print("Using locally stored book")
bookFile = open(bookPath, 'r')
book = bookFile.read()
bookFile.close()
else:
print("Using online book")
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
res.raise_for_status()
book = res.text
return book
# Handles input for the beginning menu
def menu(flag):
print("\033[093m1: Start from the beginning")
print("2: Start at a bookmark")
if (flag == 0): # Special case for if book has already been saved
print("3: Save book locally")
print("0: Exit")
print("Please enter a selection: \033[0m", end='')
sel = input()
# Input validation
if sel != '1' and sel != '2' and sel != '3' and sel != '0':
print("\033[091mERROR: \033[0mIncorrect input detected")
sel = menu(flag)
elif sel == '3' and flag == 1: # Special case for if book has already been saved
print("\033[091mERROR: \033[0mIncorrect input detected")
sel = menu(flag)
return sel
# Beginning Menu
print("\033[091mWelcome to Romeo and Juliet")
sel = menu(0)
# Download handler
if sel == '3':
bookPath = Path.cwd() / 'romeoJuliet.txt'
if bookPath.exists():
print("\033[091mERROR: \033[0mromeoJuliet.txt already exists")
else:
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
res.raise_for_status()
bookFile = open('romeoJuliet.txt', 'wb')
for chunk in res.iter_content(100000):
bookFile.write(chunk)
print("Book saved as romeoJuliet.txt")
sel = menu(1)
# If sel == '0' quit program
if sel != '0':
book = getBook()
# Default is that our starting position is at the beginning
i = 0
# Bookmark Handler
if sel == '2':
filePath = Path.cwd() / 'bookmark.txt'
if filePath.exists():
bookMarkFile = open(filePath, 'r')
i = int(bookMarkFile.read())
if (i < len(book) - 1):
print("\033[092mStarting from position %s out of %s\033[0m" %
(i, len(book)))
else:
print("\033[091mERROR: \033[0mBookmark is past length of text")
print("\033[092mStarting from the beginning\033[0m")
i = 0
else:
print("\033[091mERROR: \033[0mIt doesn't appear a bookmark exists")
print("\033[092mStarting from the beginning\033[0m")
# Main reading section
inp = ''
while inp.lower() != 'x':
j = 0
for k in range(i, len(book)):
if book[k] == '\n':
j += 1
if j == 20:
l = i
i = k
break
elif k == (len(book) - 1):
l = i
i = k
print ("\033[092mPosition %s out of %s\033[0m" % (l, len(book)))
print(book[l:i])
print("\033[093mPress enter to continue, \"B\" to go back a passage, or \"X\" to exit: \033[0m", end='')
inp = input()
# Error handler for end of text
while i == len(book) - 1 and inp.lower() != 'x' and inp.lower() != 'b':
print('\033[091mERROR: \033[0mYou are at the end')
print("\033[093mEnter \"B\" to go back a passage, or \"X\" to exit: \033[0m", end='')
inp = input()
# Going backwards
if inp.lower() == 'b':
i = l
while i == 0 and inp.lower() == 'b':
print('\033[091mERROR: \033[0mYou are at the beginning')
print("\033[093mPress enter to continue or \"X\" to exit: \033[0m", end='')
inp = input()
if i > 0:
i = l
j = 0
for k in range(i, 0, -1):
if book[k] == '\n':
j += 1
if j == 20:
i = k
break
elif k == 1:
i = 0
# Bookmark creation and program exit
if inp.lower() == 'x':
print("\033[093mCreate a bookmark? (y/n): \033[0m", end='')
inp = input()
if inp.lower() == 'y':
print("\033[092mCreating a bookmark at position %s out of %s in bookmark.txt\033[0m" % (
l, len(book)))
bookMarkFile = open(Path.cwd() / 'bookmark.txt', 'w')
bookMarkFile.write(str(l))
bookMarkFile.close()
break | 0.249447 | 0.329607 |
class Commission(object):
"""
手续费标准,包含如下属性
* self.buycost:买进手续费
* self.sellcost:卖出手续费
* self.unit:手续费单位
"""
def __init__(self, buycost=0.001, sellcost=0.002, unit="perValue"):
"""
初始化开仓、平仓的成本和单位
Args:
buycost (float): 开仓手续费
sellcost (float): 平仓手续费
unit (str): 手续费单位,可选值'perValue'或'perShare'
Examples:
>> commission = Commission()
>> commission = Commission(0.0005, 0.0005, 'perShare')
"""
self.buycost = float(buycost)
self.sellcost = float(sellcost)
if unit not in ['perValue', 'perShare']:
raise ValueError('Exception in "Commission": unit must be perValue or perShare!')
self.unit = unit
def calculate_stock_commission(self, price, direction):
"""
计算股票每股手续费
Args:
price (float): 成交价
direction (int): 交易方向,1为买入,-1为卖出
Returns:
float: 每股手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_stock_commission(10.00, 1)
>> commission.calculate_stock_commission(10.00, -1)
"""
if direction > 0:
if self.unit == 'perValue':
cost_per_share = price * self.buycost
else:
cost_per_share = self.buycost
else:
if self.unit == 'perValue':
cost_per_share = price * self.sellcost
else:
cost_per_share = self.sellcost
return cost_per_share
def calculate_futures_commission(self, market_value, offset_flag='open'):
"""
计算期货手续费
Args:
market_value (float): 市值
offset_flag (basestring): 开仓或平仓
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_futures_commission(10000.00)
"""
cost = self.buycost if offset_flag == 'open' else self.sellcost
if self.unit == 'perValue':
return cost * market_value
else:
return cost
def calculate_otc_fund_commission(self, cash, order_type='purchase'):
"""
计算场外基金手续费
Args:
cash (float): 总金额
order_type (str): 下单类型, 'purchase' or 'redeem'
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_futures_commission(10000.00)
"""
if self.unit == 'perShare':
raise Exception('The commission of OTC Fund account can not be set in "perShare" mode! ')
if order_type == 'purchase':
return cash * self.buycost / (1. + self.buycost)
else:
return cash * self.sellcost
def calculate_index_commission(self, market_value, offset_flag=None):
"""
计算指数交易手续费
Args:
market_value (float): 总金额
offset_flag (str): 下单类型, 'open' or 'close'分别对应开平
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_index_commission(10000.00)
"""
if self.unit == 'perShare':
raise Exception('The commission of index account can not be set in "perShare" mode! ')
if offset_flag == 'open':
return round(market_value * self.buycost, 2)
else:
return round(market_value * self.sellcost, 2)
def __repr__(self):
return "{class_name}(buycost={buycost}, sellcost = {sellcost}, " \
"unit = {unit}".format(class_name=self.__class__.__name__, buycost=self.buycost,
sellcost=self.sellcost, unit=self.unit)
class Slippage(object):
def __init__(self, value=0, unit="perValue"):
"""
初始化滑点的值和单位
Args:
value (float): 滑点值
unit (str): 滑点单位,可选值'perValue'或'perShare'
Examples:
>> slippage = Slippage()
>> slippage = Slippage(0.01, 'perShare')
"""
self.value = float(value)
if unit not in ['perValue', 'perShare']:
raise ValueError('Exception in "Slippage": unit must be perValue or perShare!')
self.unit = unit
def calculate_stock_slippage(self, price):
"""
计算股票滑点
Args:
price (float): 成交价
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_stock_slippage(10.00)
"""
if self.unit == 'perValue':
slippage_per_share = price * self.value
else:
slippage_per_share = self.value
return slippage_per_share
def calculate_futures_slippage(self, market_value):
"""
计算期货滑点
Args:
market_value (float): 市值
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_futures_slippage(10000.00)
"""
if self.unit == 'perValue':
return self.value * market_value
else:
return self.value
def calculate_index_slippage(self, market_value):
"""
计算指数账户交易滑点
Args:
market_value (float): 市值
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_index_slippage(10000.00)
"""
if self.unit == 'perValue':
return self.value * market_value
else:
return self.value
def __repr__(self):
return "{class_name}(value = {value},unit = {unit})".format(
class_name=self.__class__.__name__,
value=self.value,
unit=self.unit) | lib/trade/cost.py | class Commission(object):
"""
手续费标准,包含如下属性
* self.buycost:买进手续费
* self.sellcost:卖出手续费
* self.unit:手续费单位
"""
def __init__(self, buycost=0.001, sellcost=0.002, unit="perValue"):
"""
初始化开仓、平仓的成本和单位
Args:
buycost (float): 开仓手续费
sellcost (float): 平仓手续费
unit (str): 手续费单位,可选值'perValue'或'perShare'
Examples:
>> commission = Commission()
>> commission = Commission(0.0005, 0.0005, 'perShare')
"""
self.buycost = float(buycost)
self.sellcost = float(sellcost)
if unit not in ['perValue', 'perShare']:
raise ValueError('Exception in "Commission": unit must be perValue or perShare!')
self.unit = unit
def calculate_stock_commission(self, price, direction):
"""
计算股票每股手续费
Args:
price (float): 成交价
direction (int): 交易方向,1为买入,-1为卖出
Returns:
float: 每股手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_stock_commission(10.00, 1)
>> commission.calculate_stock_commission(10.00, -1)
"""
if direction > 0:
if self.unit == 'perValue':
cost_per_share = price * self.buycost
else:
cost_per_share = self.buycost
else:
if self.unit == 'perValue':
cost_per_share = price * self.sellcost
else:
cost_per_share = self.sellcost
return cost_per_share
def calculate_futures_commission(self, market_value, offset_flag='open'):
"""
计算期货手续费
Args:
market_value (float): 市值
offset_flag (basestring): 开仓或平仓
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_futures_commission(10000.00)
"""
cost = self.buycost if offset_flag == 'open' else self.sellcost
if self.unit == 'perValue':
return cost * market_value
else:
return cost
def calculate_otc_fund_commission(self, cash, order_type='purchase'):
"""
计算场外基金手续费
Args:
cash (float): 总金额
order_type (str): 下单类型, 'purchase' or 'redeem'
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_futures_commission(10000.00)
"""
if self.unit == 'perShare':
raise Exception('The commission of OTC Fund account can not be set in "perShare" mode! ')
if order_type == 'purchase':
return cash * self.buycost / (1. + self.buycost)
else:
return cash * self.sellcost
def calculate_index_commission(self, market_value, offset_flag=None):
"""
计算指数交易手续费
Args:
market_value (float): 总金额
offset_flag (str): 下单类型, 'open' or 'close'分别对应开平
Returns:
float: 手续费成本
Examples:
>> commission = Commission()
>> commission.calculate_index_commission(10000.00)
"""
if self.unit == 'perShare':
raise Exception('The commission of index account can not be set in "perShare" mode! ')
if offset_flag == 'open':
return round(market_value * self.buycost, 2)
else:
return round(market_value * self.sellcost, 2)
def __repr__(self):
return "{class_name}(buycost={buycost}, sellcost = {sellcost}, " \
"unit = {unit}".format(class_name=self.__class__.__name__, buycost=self.buycost,
sellcost=self.sellcost, unit=self.unit)
class Slippage(object):
def __init__(self, value=0, unit="perValue"):
"""
初始化滑点的值和单位
Args:
value (float): 滑点值
unit (str): 滑点单位,可选值'perValue'或'perShare'
Examples:
>> slippage = Slippage()
>> slippage = Slippage(0.01, 'perShare')
"""
self.value = float(value)
if unit not in ['perValue', 'perShare']:
raise ValueError('Exception in "Slippage": unit must be perValue or perShare!')
self.unit = unit
def calculate_stock_slippage(self, price):
"""
计算股票滑点
Args:
price (float): 成交价
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_stock_slippage(10.00)
"""
if self.unit == 'perValue':
slippage_per_share = price * self.value
else:
slippage_per_share = self.value
return slippage_per_share
def calculate_futures_slippage(self, market_value):
"""
计算期货滑点
Args:
market_value (float): 市值
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_futures_slippage(10000.00)
"""
if self.unit == 'perValue':
return self.value * market_value
else:
return self.value
def calculate_index_slippage(self, market_value):
"""
计算指数账户交易滑点
Args:
market_value (float): 市值
Returns:
float: 滑点成本
Examples:
>> slippage = Slippage()
>> slippage.calculate_index_slippage(10000.00)
"""
if self.unit == 'perValue':
return self.value * market_value
else:
return self.value
def __repr__(self):
return "{class_name}(value = {value},unit = {unit})".format(
class_name=self.__class__.__name__,
value=self.value,
unit=self.unit) | 0.60964 | 0.356083 |
import importlib
from rest_framework import serializers
class InvalidValue(Exception):
def __init__(self, ref, expected_type=None):
message = "The value for %s is invalid." % ref
if expected_type is not None:
message += " Expected type or types %s." % expected_type
super().__init__(message)
class InvalidMetaValue(Exception):
def __init__(self, value, expected_type=None):
message = "Invalid value for %s on the serializer's Meta class." % value
if expected_type is not None:
message += " Expected type or types %s." % expected_type
super().__init__(message)
class MissingSerializerField(Exception):
def __init__(self, field):
message = (
"The field %s does not exist in the list of fields available "
"on the serializer." % field)
super().__init__(message)
class CannotExpandField(Exception):
def __init__(self, field):
message = "The field %s cannot be expanded" % field
super().__init__(message)
class PolymorphicNonPolymorphicSerializer(serializers.Serializer):
"""
A :obj:`rest_framework.serializers.Serializer` uses different serializers
depending on the instance being serialized.
The name of this :obj:`rest_framework.serializers.Serializer` is not a joke.
Typically, with Polymorphic serializers, the serializer will serialize
based on the type of instance where each instance that it can serialize
must be a child of a PolymorphicModel. Here, we loosen that requirement,
and allow the serializer to conditionally serialize an instance where that
instance need not be a child of a Polymorphic model.
"""
def _import_serializer_cls(self, module_path):
module_name = ".".join(module_path.split(".")[:-1])
class_name = module_path.split(".")[-1]
module = importlib.import_module(module_name)
return getattr(module, class_name)
def _configure_serializer_cls(self, value):
if isinstance(value, str):
cls = self._import_serializer_cls(value)
return self._configure_serializer_cls(cls)
assert issubclass(value, serializers.BaseSerializer), (
"%s is not a serializer class or a module path to a serializer "
"class." % value)
return value
def _find_config_for_instance(self, instance):
if not hasattr(self, "choices"):
raise Exception(
"Extensions of PolymorphicNonPolymorphicSerializer must define "
"a Meta attribute on the serializer with a `choices` attribute."
)
for k, v in self.choices.items():
if isinstance(instance, k):
return v
raise Exception(
"PolymorphicNonPolymorphicSerializer not configured to "
"serialize type %s." % type(instance)
)
def to_representation(self, instance):
config = self._find_config_for_instance(instance)
options = {}
instance_type = getattr(instance, "type", None)
if isinstance(config, (list, tuple)):
if len(config) != 1 and len(config) != 2 and len(config) != 3:
raise Exception("Invalid choice provided.")
serializer_cls = self._configure_serializer_cls(config[0])
if len(config) == 2:
if type(config[1]) is dict:
options = config[1]
else:
assert type(config[1]) is str, "Invalid choice."
instance_type = config[1]
elif len(config) == 3:
assert type(config[1]) is str, "Invalid choice."
assert type(config[2]) is dict, \
"Serializer keyword arguments must be a dict."
instance_type = config[1]
options = config[2]
else:
serializer_cls = self._configure_serializer_cls(config[0])
data = serializer_cls(instance, **options).data
data["type"] = instance_type
return data
class EnhancedModelSerializer(serializers.ModelSerializer):
"""
An extremely powerful extension of Django REST Framework's
:obj:`serializers.ModelSerializer` that provides additional useful behavior
for constructing tailored responses to fit the needs of the application.
The :obj:`EnhancedModelSerializer` provides (4) implementations that can
be used to fine tune the behavior of a single serializer to fit multiple
different use cases. These implementations are:
(1) HTTP Field Toggling
-----------------------
This implementation allows the definitions of already defined fields on
the serializer to change depending on the context's HTTP request method
that the serializer is being used for.
This is extremely important when we want to use the same serializer for
both read/write operations where the serializer has foreign key or M2M
relationships.
The behavior toggling can be defined by including a `http_toggle`
:obj:`dict` on the serializer's Meta class which instructs the serializer
how to change field behavior for the provided HTTP methods.
Example:
~~~~~~~
Let's assume with have a model `Child` and a model `Parent`, where `Child`
points to `Parent` by the means of a `ForeignKey` relationship:
class Child(db.Model):
name = models.CharField()
parent = models.ForeignKey(to=Parent, reverse_name="children")
class Parent(db.Model):
name = models.CharField()
Now, when we are sending PATCH/POST requests to either update a `Child`
instance or create a new `Child` instance, it is useful to specify the
`Parent` instance by it's Primary Key:
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": 1, "name": "Jack"}
However, when we want to send a GET request to either list all the instances
of `Child` or a single instance of `Child`, we want the parent to be
represented by a nested serializer.
This toggling can be accomplished by specifying the `http_toggle` attribute
on the associated serializer's Meta class to toggle the field to another
definition on specific HTTP requests:
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = ParentSerializer()
class Meta:
model = Child
http_toggle = {
'parent': {
('POST', 'PATCH'): (
serializers.PrimaryKeyRelatedField,
{"queryset": Parent.objects.all()}'
)
}
}
Now, when we send a POST/PATCH request to the endpoints associated with
the `Child` model, we can specify the `Parent` by it's primary key - but
still get the full serialized `Parent` on GET requests.
(2) Explicit Field Expansion
----------------------------
This implementation allows fields to be toggled in the presence of an
`expand` argument supplied to the serializer's __init__ method.
Example:
~~~~~~~
Returning to the above example, suppose that we want the default field
definition for `parent` on the `ChildSerializer` to be a
:obj:`serializers.PrimaryKeyRelatedField`, but want to use the nested
`ParentSerializer` in certain situations.
This toggling can be accomplished by specifying the `expand` attribute on
the associated serializer's Meta class to expand the field by using a new
field definition when the `expand=True` argument is supplied to the
serializer.
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = serializers.PrimaryKeyRelatedField(
queryset=Parent.objects.all(),
)
class Meta:
model = Child
expand = {
'parent': ParentSerializer
}
Now, when we reference the serializer as `ChildSerializer(expand=True)`,
the expanded fields will be used in place of the default fields.
class SchoolSerializer(serializers.ModelSerializer):
children = ChildSerializer(many=True, expand=True)
class Meta:
model = School
Note that we can also expand explicit fields, as shown here:
class SchoolSerializer(serializers.ModelSerializer):
children = ChildSerializer(many=True, expand=['parent'])
class Meta:
model = School
(3) Field Response Rendering
----------------------------
This implementation is critically important to developing an API response
contract that is consistent for a frontend client to use.
Returning to the example from implementation (1), we are using a
:obj:`serializers.PrimaryKeyRelatedField` to allow the creation/updating
of `Child` instances referencing a `Parent` instance by it's PK.
Implementation (1) allows us to toggle the field for read/write HTTP
methods, but it does not allow us to render a consistent response between
the two.
For example, if we send a POST request to "/children" with the JSON body
{ "parent": 1, "name": "Jack" }, the response of the POST request will
still reference the serialized form of the created `Child` as
{ "parent": 1, "name": "Jack" }.
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": 1, "name": "Jack"}
If we want the full serialized parent, we have to send a GET request to
"/children/<pk>".
In order to render consistent responses between the GET and POST/PATCH
methods, we can use this implementation to render the full serialized
`Parent` on responses of POST and PATCH methods:
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": {"id": 1, "name": "<NAME>." }, "name": "Jack"}
This can be done by identifying how fields should be handled for responses
of ALL request types by including `response` attribute on the serializer's
Meta class.
Example:
~~~~~~~
To achieve the Request/Response pattern shown above, we can do the
following:
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = ParentSerializer()
class Meta:
model = Child
http_toggle = {
'parent': {
('POST', 'PATCH'): (
serializers.PrimaryKeyRelatedField,
{"queryset": Parent.objects.all()}'
)
}
}
response = {
'parent': ParentSerializer
}
Now, all responses will include the full serialized `Parent` instance, while
we can still reference the `Parent` instance by PK for POST and PATCH
requests.
(4) Explicit Field Nesting
--------------------------
This implementation allows fields to be included or excluded based on
whether or not the serializer is nested inside of another serializer.
With this implementation, fields that are listed by the `nested_fields`
attribute of the serializer's Meta class will be the only fields included
if `nested=True` is provided to the serializer on initialization.
Example:
~~~~~~~
Returning to the above example, suppose that when we send a GET to either
"/children" or "/children/<pk>" that we want to include the full serialized
child. However, when we send a request to "/schools/<pk>/" or "/schools"
we want the serialized `School` to include a condensed version of the nested
`ChildSerializer`.
We can accomplish this by specifying the fields that we want to use in
a condensed form of the `ChildSerializer` by the `nested_fields` attribute
on the serializer's Meta class:
class ChildSerializer(EnhancedModelSerializer):
id = serializers.IntegerField()
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.EmailField()
class Meta:
model = Child
fields = ('id', 'first_name', 'last_name', 'email')
nested_fields = ('id', 'email')
class ParentSerializer(serializers.ModelSerializer):
child = ChildSerializer(nested=True)
id = serializers.IntegerField()
class Meta:
fields = ('id', 'child')
TODO:
----
While this serializer class is wildly useful, it's API can be improved
substantially.
"""
def __init__(self, *args, **kwargs):
self._response = kwargs.pop('response', False)
self._nested = kwargs.pop('nested', False)
self._expand = kwargs.pop('expand', [])
if not isinstance(self._expand, (bool, list, tuple)):
raise InvalidValue('expand', expected_type=(bool, list, tuple))
super().__init__(*args, **kwargs)
# Fields that depend on HTTP methods have the lowest precedence.
toggle_when_nested = getattr(
self.Meta, 'http_toggle_when_nested', False)
if (hasattr(self.Meta, 'http_toggle')
and (self._nested is False or toggle_when_nested)):
if not isinstance(self.Meta.http_toggle, dict):
raise InvalidMetaValue('http_toggle', expected_type=dict)
for field, config in self.Meta.http_toggle.items():
if not isinstance(config, dict):
raise InvalidMetaValue(
'http_toggle.<value>', expected_type=dict)
if field not in self.fields:
raise MissingSerializerField(field)
if self.context_request_method is not None:
definition = None
for k, v in config.items():
if isinstance(k, tuple):
if self.context_request_method.lower() in [
n.lower() for n in k]:
definition = v
break
elif isinstance(k, str):
if self.context_request_method.lower() == k.lower():
definition = v
break
else:
raise InvalidValue(
'http_toggle.<field>.<key>',
expected_type=(tuple, str)
)
if definition is not None:
self.fields[field] = self._instantiate_field(definition)
# Fields that are explicitly used to render responses take precedence
# over HTTP toggled field behaviors - but not behaviors that are
# controlled on instantiation of the serializer (collapsing, expanding,
# nesting).
response_fields = getattr(self.Meta, 'response', {})
if not isinstance(response_fields, dict):
raise InvalidMetaValue('response', expected_type=dict)
if self._response is True:
for k, v in response_fields.items():
if k not in self.fields:
raise MissingSerializerField(k)
self.fields[k] = self._instantiate_field(v)
# Fields that are included/excluded based on the nesting of the
# serializer take precedence over all.
nested_fields = getattr(self.Meta, 'nested_fields', [])
if not isinstance(nested_fields, (list, tuple)):
raise InvalidMetaValue(
'nested_fields', expected_type=(list, tuple))
if len(nested_fields) != 0 and self._nested is True:
new_fields = {}
for field_name in nested_fields:
if field_name not in self.fields:
raise MissingSerializerField(field_name)
new_fields[field_name] = self.fields[field_name]
self.fields = new_fields
# If fields are explicitly expanded, they take precedence over the
# field behavior defined that is dependent on the HTTP request
# method and field behavior that is defined from nesting.
expandable = getattr(self.Meta, 'expand', {})
if not isinstance(expandable, dict):
raise InvalidMetaValue('expand', expected_type=dict)
if (self._expand is True
or (isinstance(self._expand, (tuple, list))
and len(self._expand) != 0)):
for k, v in expandable.items():
if k not in self.fields:
raise MissingSerializerField(k)
if isinstance(self._expand, bool):
if self._expand is True:
self.fields[k] = self._instantiate_field(v)
elif k in self._expand:
self.fields[k] = self._instantiate_field(v)
def _import_serializer_cls(self, module_path):
module_name = ".".join(module_path.split(".")[:-1])
class_name = module_path.split(".")[-1]
module = importlib.import_module(module_name)
return getattr(module, class_name)
def _instantiate_field(self, definition):
# In the case that the serializer is provided by it's module path,
# to avoid circular imports, this method will dynamically import that
# serializer.
if not isinstance(definition, (list, tuple)):
serializer_cls = definition
if isinstance(definition, str):
serializer_cls = self._import_serializer_cls(definition)
return serializer_cls()
else:
if (len(definition) != 2
or not isinstance(definition[1], dict)):
raise Exception(
"Could not instantiate a serializer from the provided "
"definition."
)
serializer_cls = definition[0]
if isinstance(definition[0], str):
serializer_cls = self._import_serializer_cls(definition[0])
return serializer_cls(**definition[1])
@property
def context_request_method(self):
if 'request' in self.context:
return self.context['request'].method
return None
def to_representation(self, instance):
if not self._response and getattr(self.Meta, 'response', {}):
serializer = self.__class__(
instance,
response=True,
nested=self._nested,
expand=self._expand,
context=self.context,
)
return serializer.data
return super().to_representation(instance) | src/greenbudget/lib/rest_framework_utils/serializers.py | import importlib
from rest_framework import serializers
class InvalidValue(Exception):
def __init__(self, ref, expected_type=None):
message = "The value for %s is invalid." % ref
if expected_type is not None:
message += " Expected type or types %s." % expected_type
super().__init__(message)
class InvalidMetaValue(Exception):
def __init__(self, value, expected_type=None):
message = "Invalid value for %s on the serializer's Meta class." % value
if expected_type is not None:
message += " Expected type or types %s." % expected_type
super().__init__(message)
class MissingSerializerField(Exception):
def __init__(self, field):
message = (
"The field %s does not exist in the list of fields available "
"on the serializer." % field)
super().__init__(message)
class CannotExpandField(Exception):
def __init__(self, field):
message = "The field %s cannot be expanded" % field
super().__init__(message)
class PolymorphicNonPolymorphicSerializer(serializers.Serializer):
"""
A :obj:`rest_framework.serializers.Serializer` uses different serializers
depending on the instance being serialized.
The name of this :obj:`rest_framework.serializers.Serializer` is not a joke.
Typically, with Polymorphic serializers, the serializer will serialize
based on the type of instance where each instance that it can serialize
must be a child of a PolymorphicModel. Here, we loosen that requirement,
and allow the serializer to conditionally serialize an instance where that
instance need not be a child of a Polymorphic model.
"""
def _import_serializer_cls(self, module_path):
module_name = ".".join(module_path.split(".")[:-1])
class_name = module_path.split(".")[-1]
module = importlib.import_module(module_name)
return getattr(module, class_name)
def _configure_serializer_cls(self, value):
if isinstance(value, str):
cls = self._import_serializer_cls(value)
return self._configure_serializer_cls(cls)
assert issubclass(value, serializers.BaseSerializer), (
"%s is not a serializer class or a module path to a serializer "
"class." % value)
return value
def _find_config_for_instance(self, instance):
if not hasattr(self, "choices"):
raise Exception(
"Extensions of PolymorphicNonPolymorphicSerializer must define "
"a Meta attribute on the serializer with a `choices` attribute."
)
for k, v in self.choices.items():
if isinstance(instance, k):
return v
raise Exception(
"PolymorphicNonPolymorphicSerializer not configured to "
"serialize type %s." % type(instance)
)
def to_representation(self, instance):
config = self._find_config_for_instance(instance)
options = {}
instance_type = getattr(instance, "type", None)
if isinstance(config, (list, tuple)):
if len(config) != 1 and len(config) != 2 and len(config) != 3:
raise Exception("Invalid choice provided.")
serializer_cls = self._configure_serializer_cls(config[0])
if len(config) == 2:
if type(config[1]) is dict:
options = config[1]
else:
assert type(config[1]) is str, "Invalid choice."
instance_type = config[1]
elif len(config) == 3:
assert type(config[1]) is str, "Invalid choice."
assert type(config[2]) is dict, \
"Serializer keyword arguments must be a dict."
instance_type = config[1]
options = config[2]
else:
serializer_cls = self._configure_serializer_cls(config[0])
data = serializer_cls(instance, **options).data
data["type"] = instance_type
return data
class EnhancedModelSerializer(serializers.ModelSerializer):
"""
An extremely powerful extension of Django REST Framework's
:obj:`serializers.ModelSerializer` that provides additional useful behavior
for constructing tailored responses to fit the needs of the application.
The :obj:`EnhancedModelSerializer` provides (4) implementations that can
be used to fine tune the behavior of a single serializer to fit multiple
different use cases. These implementations are:
(1) HTTP Field Toggling
-----------------------
This implementation allows the definitions of already defined fields on
the serializer to change depending on the context's HTTP request method
that the serializer is being used for.
This is extremely important when we want to use the same serializer for
both read/write operations where the serializer has foreign key or M2M
relationships.
The behavior toggling can be defined by including a `http_toggle`
:obj:`dict` on the serializer's Meta class which instructs the serializer
how to change field behavior for the provided HTTP methods.
Example:
~~~~~~~
Let's assume with have a model `Child` and a model `Parent`, where `Child`
points to `Parent` by the means of a `ForeignKey` relationship:
class Child(db.Model):
name = models.CharField()
parent = models.ForeignKey(to=Parent, reverse_name="children")
class Parent(db.Model):
name = models.CharField()
Now, when we are sending PATCH/POST requests to either update a `Child`
instance or create a new `Child` instance, it is useful to specify the
`Parent` instance by it's Primary Key:
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": 1, "name": "Jack"}
However, when we want to send a GET request to either list all the instances
of `Child` or a single instance of `Child`, we want the parent to be
represented by a nested serializer.
This toggling can be accomplished by specifying the `http_toggle` attribute
on the associated serializer's Meta class to toggle the field to another
definition on specific HTTP requests:
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = ParentSerializer()
class Meta:
model = Child
http_toggle = {
'parent': {
('POST', 'PATCH'): (
serializers.PrimaryKeyRelatedField,
{"queryset": Parent.objects.all()}'
)
}
}
Now, when we send a POST/PATCH request to the endpoints associated with
the `Child` model, we can specify the `Parent` by it's primary key - but
still get the full serialized `Parent` on GET requests.
(2) Explicit Field Expansion
----------------------------
This implementation allows fields to be toggled in the presence of an
`expand` argument supplied to the serializer's __init__ method.
Example:
~~~~~~~
Returning to the above example, suppose that we want the default field
definition for `parent` on the `ChildSerializer` to be a
:obj:`serializers.PrimaryKeyRelatedField`, but want to use the nested
`ParentSerializer` in certain situations.
This toggling can be accomplished by specifying the `expand` attribute on
the associated serializer's Meta class to expand the field by using a new
field definition when the `expand=True` argument is supplied to the
serializer.
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = serializers.PrimaryKeyRelatedField(
queryset=Parent.objects.all(),
)
class Meta:
model = Child
expand = {
'parent': ParentSerializer
}
Now, when we reference the serializer as `ChildSerializer(expand=True)`,
the expanded fields will be used in place of the default fields.
class SchoolSerializer(serializers.ModelSerializer):
children = ChildSerializer(many=True, expand=True)
class Meta:
model = School
Note that we can also expand explicit fields, as shown here:
class SchoolSerializer(serializers.ModelSerializer):
children = ChildSerializer(many=True, expand=['parent'])
class Meta:
model = School
(3) Field Response Rendering
----------------------------
This implementation is critically important to developing an API response
contract that is consistent for a frontend client to use.
Returning to the example from implementation (1), we are using a
:obj:`serializers.PrimaryKeyRelatedField` to allow the creation/updating
of `Child` instances referencing a `Parent` instance by it's PK.
Implementation (1) allows us to toggle the field for read/write HTTP
methods, but it does not allow us to render a consistent response between
the two.
For example, if we send a POST request to "/children" with the JSON body
{ "parent": 1, "name": "Jack" }, the response of the POST request will
still reference the serialized form of the created `Child` as
{ "parent": 1, "name": "Jack" }.
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": 1, "name": "Jack"}
If we want the full serialized parent, we have to send a GET request to
"/children/<pk>".
In order to render consistent responses between the GET and POST/PATCH
methods, we can use this implementation to render the full serialized
`Parent` on responses of POST and PATCH methods:
Request: POST "/children" { "parent": 1, "name": "Jack" }
Response: 201 {"parent": {"id": 1, "name": "<NAME>." }, "name": "Jack"}
This can be done by identifying how fields should be handled for responses
of ALL request types by including `response` attribute on the serializer's
Meta class.
Example:
~~~~~~~
To achieve the Request/Response pattern shown above, we can do the
following:
class ParentSerializer(serializers.Serializer):
name = serializers.CharField()
class Meta:
model = Parent
fields = ('id', 'name')
class ChildSerializer(EnhancedModelSerializer):
name = serializers.CharField()
parent = ParentSerializer()
class Meta:
model = Child
http_toggle = {
'parent': {
('POST', 'PATCH'): (
serializers.PrimaryKeyRelatedField,
{"queryset": Parent.objects.all()}'
)
}
}
response = {
'parent': ParentSerializer
}
Now, all responses will include the full serialized `Parent` instance, while
we can still reference the `Parent` instance by PK for POST and PATCH
requests.
(4) Explicit Field Nesting
--------------------------
This implementation allows fields to be included or excluded based on
whether or not the serializer is nested inside of another serializer.
With this implementation, fields that are listed by the `nested_fields`
attribute of the serializer's Meta class will be the only fields included
if `nested=True` is provided to the serializer on initialization.
Example:
~~~~~~~
Returning to the above example, suppose that when we send a GET to either
"/children" or "/children/<pk>" that we want to include the full serialized
child. However, when we send a request to "/schools/<pk>/" or "/schools"
we want the serialized `School` to include a condensed version of the nested
`ChildSerializer`.
We can accomplish this by specifying the fields that we want to use in
a condensed form of the `ChildSerializer` by the `nested_fields` attribute
on the serializer's Meta class:
class ChildSerializer(EnhancedModelSerializer):
id = serializers.IntegerField()
first_name = serializers.CharField()
last_name = serializers.CharField()
email = serializers.EmailField()
class Meta:
model = Child
fields = ('id', 'first_name', 'last_name', 'email')
nested_fields = ('id', 'email')
class ParentSerializer(serializers.ModelSerializer):
child = ChildSerializer(nested=True)
id = serializers.IntegerField()
class Meta:
fields = ('id', 'child')
TODO:
----
While this serializer class is wildly useful, it's API can be improved
substantially.
"""
def __init__(self, *args, **kwargs):
self._response = kwargs.pop('response', False)
self._nested = kwargs.pop('nested', False)
self._expand = kwargs.pop('expand', [])
if not isinstance(self._expand, (bool, list, tuple)):
raise InvalidValue('expand', expected_type=(bool, list, tuple))
super().__init__(*args, **kwargs)
# Fields that depend on HTTP methods have the lowest precedence.
toggle_when_nested = getattr(
self.Meta, 'http_toggle_when_nested', False)
if (hasattr(self.Meta, 'http_toggle')
and (self._nested is False or toggle_when_nested)):
if not isinstance(self.Meta.http_toggle, dict):
raise InvalidMetaValue('http_toggle', expected_type=dict)
for field, config in self.Meta.http_toggle.items():
if not isinstance(config, dict):
raise InvalidMetaValue(
'http_toggle.<value>', expected_type=dict)
if field not in self.fields:
raise MissingSerializerField(field)
if self.context_request_method is not None:
definition = None
for k, v in config.items():
if isinstance(k, tuple):
if self.context_request_method.lower() in [
n.lower() for n in k]:
definition = v
break
elif isinstance(k, str):
if self.context_request_method.lower() == k.lower():
definition = v
break
else:
raise InvalidValue(
'http_toggle.<field>.<key>',
expected_type=(tuple, str)
)
if definition is not None:
self.fields[field] = self._instantiate_field(definition)
# Fields that are explicitly used to render responses take precedence
# over HTTP toggled field behaviors - but not behaviors that are
# controlled on instantiation of the serializer (collapsing, expanding,
# nesting).
response_fields = getattr(self.Meta, 'response', {})
if not isinstance(response_fields, dict):
raise InvalidMetaValue('response', expected_type=dict)
if self._response is True:
for k, v in response_fields.items():
if k not in self.fields:
raise MissingSerializerField(k)
self.fields[k] = self._instantiate_field(v)
# Fields that are included/excluded based on the nesting of the
# serializer take precedence over all.
nested_fields = getattr(self.Meta, 'nested_fields', [])
if not isinstance(nested_fields, (list, tuple)):
raise InvalidMetaValue(
'nested_fields', expected_type=(list, tuple))
if len(nested_fields) != 0 and self._nested is True:
new_fields = {}
for field_name in nested_fields:
if field_name not in self.fields:
raise MissingSerializerField(field_name)
new_fields[field_name] = self.fields[field_name]
self.fields = new_fields
# If fields are explicitly expanded, they take precedence over the
# field behavior defined that is dependent on the HTTP request
# method and field behavior that is defined from nesting.
expandable = getattr(self.Meta, 'expand', {})
if not isinstance(expandable, dict):
raise InvalidMetaValue('expand', expected_type=dict)
if (self._expand is True
or (isinstance(self._expand, (tuple, list))
and len(self._expand) != 0)):
for k, v in expandable.items():
if k not in self.fields:
raise MissingSerializerField(k)
if isinstance(self._expand, bool):
if self._expand is True:
self.fields[k] = self._instantiate_field(v)
elif k in self._expand:
self.fields[k] = self._instantiate_field(v)
def _import_serializer_cls(self, module_path):
module_name = ".".join(module_path.split(".")[:-1])
class_name = module_path.split(".")[-1]
module = importlib.import_module(module_name)
return getattr(module, class_name)
def _instantiate_field(self, definition):
# In the case that the serializer is provided by it's module path,
# to avoid circular imports, this method will dynamically import that
# serializer.
if not isinstance(definition, (list, tuple)):
serializer_cls = definition
if isinstance(definition, str):
serializer_cls = self._import_serializer_cls(definition)
return serializer_cls()
else:
if (len(definition) != 2
or not isinstance(definition[1], dict)):
raise Exception(
"Could not instantiate a serializer from the provided "
"definition."
)
serializer_cls = definition[0]
if isinstance(definition[0], str):
serializer_cls = self._import_serializer_cls(definition[0])
return serializer_cls(**definition[1])
@property
def context_request_method(self):
if 'request' in self.context:
return self.context['request'].method
return None
def to_representation(self, instance):
if not self._response and getattr(self.Meta, 'response', {}):
serializer = self.__class__(
instance,
response=True,
nested=self._nested,
expand=self._expand,
context=self.context,
)
return serializer.data
return super().to_representation(instance) | 0.901217 | 0.274218 |
from compiler import *
ui_strings = [
("music_volume", "Music Volume:"),
("sound_volume", "Sound Volume:"),
("mouse_sensitivity", "Mouse Sensitivity:"),
("invert_mouse_y_axis", "Invert Mouse Y Axis"),
("enabled", "Enabled"),
("disabled", "Disabled"),
("damage_to_player", "Damage to Player:"),
("reduced_to_1_over_4_easiest", "Reduced to 1/4 (Easiest)"),
("reduced_to_1_over_2_easy", "Reduced to 1/2 (Easy)"),
("damage_to_friends", "Damage to Friends:"),
("reduced_to_1_over_2_easiest", "Reduced to 1/2 (Easiest)"),
("reduced_to_3_over_4_easy", "Reduced to 3/4 (Easy)"),
("normal", "Normal"),
("combat_ai", "Combat AI:"),
("combat_speed", "Combat Speed:"),
("good", "Good"),
("average_caps", "Average"),
("poor", "Poor"),
("faster", "Faster"),
("slower", "Slower"),
("control_block_direction", "Control Block Direction:"),
("automatic_recommended", "Automatic"),
("manual_easy", "Manual (Easy)"),
("manual_hard", "Manual (Hard)"),
("by_mouse_movement", "By mouse movement"),
("control_attack_direction", "Control Attack Direction:"),
("lance_control", "Lance Control:"),
("by_relative_enemy_position", "By relative enemy position"),
("by_inverse_mouse_movement", "By inverse mouse movement"),
("battle_size", "Battle Size:"),
("show_attack_direction", "Show Attack Direction"),
("show_targeting_reticule", "Show Targeting Reticle"),
("show_names_of_friendly_troops", "Show Banners on Friendly Troops"),
("report_damage", "Report Damage"),
("report_shot_difficulty", "Report Shot Difficulty"),
("difficulty_rating_percentage", "Difficulty Rating = %d%%"),
("controls", "Controls"),
("video_options", "Video Options"),
("done", "Done"),
("factions", "Factions"),
("item_itemname", "Item - %s"),
("prop_propname", "Prop - %s"),
("unknown_unknownname", "Unknown - %s"),
("entry_point_entrypointname", "Entry Point %d"),
("passage_menu_item_passagename", "Passage (menu item %d)"),
("plant_plantname", "Plant - %s"),
("export_file_for_character_playername_already_exists_overwrite_it", "Export file for character %s already exists. Overwrite it?"),
("yes", "Yes"),
("no", "No"),
("set_save_file_name", "Enter a name for this save-game:"),
("enter_new_name", "Enter a new name:"),
("export_character", "Export Character"),
("import_character", "Import Character"),
("character_playername_exported_successfully", "Character %s exported successfully."),
("character_playername_imported_successfully", "Character %s imported successfully."),
("unable_to_open_import_file", "Unable to open import file."),
("are_you_sure_you_want_to_import_the_character", "Are you sure you want to import the character?"),
("unable_to_find_character_import_file", "Unable to find character import file."),
("mount_and_blade_is_running_in_trial_mode_please_buy_the_game_for_importing_a_character", "Mount&Blade is running in trial mode. Please buy the game for importing a character."),
("change_skin", "Skin"),
("change_hair", "Hair"),
("change_hair_color", "Hair Color"),
("change_beard", "Beard"),
("tutorial", "Tutorial"),
("tutorial_face_generator", "Adjust your character's face using the buttons and the sliders. To rotate the head, click on it and drag the mouse."),
("restore", "Load"),
("cancel", "Cancel"),
("delete", "Delete"),
("confirm_delete_game", "Are you sure you want to delete this game?"),
("error_removing_file", "Error removing file..."),
("day_datedisplay", "Day %d (%d:%d%d)"),
("reset_changes", "Reset Changes"),
("weapon_proficiencies", "Proficiencies"),
("skills", "Skills"),
("attributes", "Attributes"),
("enter_name_here", "*Enter Name Here*"),
("edit_face", "Click to edit face"),
("statistics", "Statistics"),
("next", "Next"),
("prev", "Prev"),
("learn", "Learn"),
("question_saving_policy", "What will the game's saving policy be?"),
("saving_policy_realistic", "Realistic! No quitting without saving!"),
("saving_policy_nonrealistic", "Allow me to quit without saving."),
("tutorial_character_generation", "Now enter your name and distribute your attribute, skill and weapon points. You can click on various elements on the screen to learn how each one will affect your character."),
("str", "STR"),
("agi", "AGI"),
("int", "INT"),
("cha", "CHA"),
("at_learning_limit", "(At learning limit)"),
("not_enough_skill_points_to_learn", "(Not enough skill points to learn)"),
("strength", "strength"),
("agility", "agility"),
("intelligence", "intelligence"),
("charisma", "charisma"),
("not_enough_attributetype_to_learn_this_skill", "(Not enough %s to learn this skill)"),
("explanation_one_handed_weapon", "Covers usage of one handed swords, axes and blunt weapons."),
("explanation_two_handed_weapon", "Covers usage of two handed swords, great axes and mauls."),
("explanation_polearm", "Covers usage of pole weapons like spears, lances, staffs, etc."),
("explanation_archery", "Covers usage of bows."),
("explanation_crossbow", "Covers usage of crossbows."),
("explanation_throwing", "Covers usage of thrown weapons like javelins, darts, stones etc."),
("explanation_firearms", "Covers usage of pistols and muskets."),
("explanation_strength", "Strength: Every point adds +1 to hit points. The following skills can not be developed beyond 1/3 of Strength: ironflesh, Power-strike, Power-throw, Power-draw."),
("explanation_agility", "Agility: Each point gives five weapon points and slightly increases movement speed. The following skills can not be developed beyond 1/3 of Agility: weapon-master, Shield, Athletics, Riding, Horse archery, Looting."),
("explanation_intelligence", "Intelligence: Every point to intelligence immediately gives one extra skill point. The following skills can not be developed beyond 1/3 of Intelligence: Trainer, Tracking, Tactics, Path finding, Spotting, Inventory Management, Wound treatment, Surgery, First-aid, Engineer, Persuasion."),
("explanation_charisma", "Charisma: Each point increases your party size limit by +1. The following skills can not be developed beyond 1/3 of Charisma: Prisoner Management, Leadership, Trade."),
("level", "Level: %d"),
("xp", "Experience: %d"),
("next_level_at", "Next level at: %d"),
("health_player", "Health: %d/%d"),
("health", "Health: %d"),
("attribute_points", "Attribute points: %d"),
("skill_points", "Skill points: %d"),
("weapon_points", "Weapon points: %d"),
("mission_losses_none", " none."),
("mission_losses_wounded", "wounded :"),
("mission_losses_killed", "killed :"),
("party_losses", "%s : %d wounded --- %d killed of %d."),
("casualties_sustained", "Casualties sustained:"),
("advantage_change", "Advantage change = %c%d "),
("overall_battle_casualties", "Overall battle casualties:"),
("advantage_outnumbered", " You are hopelessly outnumbered."),
("advantage_major_disadvantage", " You have a major disadvantage."),
("advantage_slight_disadvantage", " You are slightly disadvantaged."),
("advantage_balanced", " The situation is balanced."),
("advantage_fair_advantage", " You have a fair advantage for winning."),
("advantage_greatly_favored", " The odds of battle favor you greatly."),
("tactical_advantage", "Tactical advantage: %d (%s)"),
("order_group", "Order group:"),
("question_save_changes", "You have made changes to the objects. Do you want to save changes?"),
("yes_save", "Yes, save"),
("no_discard_changes", "No, discard changes"),
("everyone_control", "Everyone!"),
("everyone_around_control", "Nearby Soldiers!"),
("others_control", "Others!"),
("question_give_up_fight", "Give up the fight?"),
("give_up", "Give up"),
("keep_fighting", "Keep fighting"),
("question_leave_area", "Leave Area"),
("cant_retreat_there_are_enemies_nearby", "Can't retreat. There are enemies nearby!"),
("question_retreat_battle", "Retreat battle?"),
("retreated_battle", "%s has been routed."),
("retreated_battle", "%s has fled from the battlefield."),
("retreat", "Retreat"),
("talk", "Talk"),
("duel", "Duel"),
("mount", "Mount"),
("riding_skill_not_adequate_to_mount", "(Riding skill not adequate to mount)"),
("dismount", "Dismount"),
("exit", "Exit"),
("door_to", "Door to "),
("open", "Open"),
("equip", "Equip"),
("baggage", "Baggage"),
("access_inventory", "Access inventory"),
("chest", "Chest"),
("passage", "Passage"),
("go", "Go"),
("retreat_battle", "Retreat Battle"),
("leave_area", "Leave Area"),
("reports", "Reports"),
("camp", "Camp"),
("terrain", "Terrain"),
("quests", "Notes"),
("inventory", "Inventory"),
("character", "Character"),
("party", "Party"),
("paused", "Paused"),
("click_left_button_to_cancel_wait", "Waiting... (Left click to return)"),
("midnight", "Midnight"),
("late_night", "Late night"),
("dawn", "Dawn"),
("early_morning", "Early morning"),
("morning", "Morning"),
("noon", "Noon"),
("afternoon", "Afternoon"),
("late_afternoon", "Late afternoon"),
("dusk", "Dusk"),
("evening", "Evening"),
("midnight", "Midnight"),
("level_limit_reached", "Level Limit Reached!"),
("explanation_level_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to Level 8. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("time_limit_reached", "Time Limit Reached!"),
("explanation_time_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to 30 game days. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("target_lost", "Target lost"),
("waiting", "Waiting."),
("travelling_to", "Travelling to "),
("following", "Following "),
("accompanying", "Accompanying "),
("running_from", "Running from "),
("patrolling", "Patrolling"),
("patrolling_around", "Patrolling around "),
("holding", "Holding"),
("travelling", "Travelling"),
("fighting_against", "Fighting against "),
("speed_equals", "Speed = %2.1f"),
("defenders", "Garrison:"),
("prisoners", "Prisoners:"),
("1_hour", "1 hour"),
("n_hours", "%d hours"),
("between_hours", "%d - %d hours"),
("combatants", "Combatants: %d"),
("party_size", "Party size: %d"),
("party_size_between", "Party size: %d - %d"),
("merchant", "Merchant"),
("return", "Return"),
("no_cost", "No cost"),
("rename", "Rename"),
("use", "Use"),
("destroy", "Destroy"),
("destructible_target", "Destructible target"),
("tutorial_inventory", "This is the trade screen. Hold down control key while clicking on an item to quickly purchase or sell it."),
("head_armor", "Head Armor: %d"),
("body_armor", "Body Armor: %d"),
("leg_armor", "Leg Armor: %d"),
("encumbrance", "Encumbrance: %2.1f"),
("you_dont_have_value", "You don't have %s."),
("merchant_cant_afford_value", "%s: I can't afford %s. I have only %s."),
("merchant_pay_whatever", "Alright, just pay whatever you can."),
("merchant_think_of_something_else", "Hmm. Let us think of something else."),
("dumping_value_items", "%d items will be permanently lost, are you sure?"),
("dumping_value_item", "One item will be permanently lost, are you sure?"),
("question_slaughter_food_and_eat", "Slaughter this %s and eat it?"),
("money_value", "Money: %s"),
("dump", "Discard"),
("outfit", "Outfit"),
("arms", "Arms"),
("horse", "Horse"),
("food", "Food"),
("reclaim_your_sold_goods", "Reclaim your sold goods before buying that!"),
("return_your_bought_goods", "Return your bought goods before selling that!"),
("polearm_no_shield", "Polearm (No shield)"),
("polearm", "Polearm"),
("two_handed", "Two-handed"),
("two_handed_one_handed", "Two-handed/One-handed"),
("one_handed", "One-handed"),
("return_price", "Return price: %d"),
("sell_price", "Sell price: %d"),
("reclaim_price", "Reclaim price: %d"),
("buying_price", "Buying price: %d"),
("default_item", "Default item"),
("buying_price_free", "Buying price: Free"),
("weight", "Weight: %2.1f"),
("plus_value_to_head_armor", "+%d to head armor"),
("plus_value_to_body_armor", "+%d to body armor"),
("plus_value_to_leg_armor", "+%d to leg armor"),
("swing", "Swing: %d%s"),
("damage", "Damage: %d%s"),
("thrust", "Thrust: %d%s"),
("accuracy", "Accuracy: %d"),
("speed_rating", "Speed rating: %d"),
("value_to_damage", "%c%d to damage"),
("value_to_morale", "+%1.1f to party morale"),
("resistance", "Resistance: %d"),
("size", "Size: %d"),
("weapon_reach", "Weapon reach: %d"),
("armor", "Armor: %d"),
("speed", "Speed: %d"),
("maneuver", "Maneuver: %d"),
("charge", "Charge: %d"),
("hit_points", "Hit Points: %d/%d"),
("requires_value_difficulty", "Requires %s: %d"),
("bonus_against_shields", "Bonus against shields"),
("cant_be_used_to_block", "Can't be used to block"),
("troop_cant_use_item", "%s: I can't use that item!"),
("notification_riding_skill_not_enough", "Your riding skill is not high enough to mount this horse."),
("notification_requirements_not_met", "You don't have the required skills or attributes for this weapon."),
("notification_payment_value", "You must pay %s."),
("notification_payment_receive_value", "You will receive %s."),
("one_handed_weapons", "One Handed Weapons"),
("two_handed_weapons", "Two Handed Weapons"),
("polearms", "Polearms"),
("archery", "Archery"),
("crossbows", "Crossbows"),
("throwing", "Throwing"),
("firearms", "Firearms"),
("reset", "Reset"),
("release_one", "Release one"),
("move_up", "Move Up"),
("move_down", " Move Down "),
("upgrade_one", "Upgrade one"),
("party_skills", "Party Skills"),
("morale", "Morale: %s"),
("terrible", "Terrible"),
("very_low", "Very low"),
("low", "Low"),
("below_average", "Below average"),
("average", "Average"),
("above_average", "Above average"),
("high", "High"),
("very_high", "Very high"),
("excellent", "Excellent"),
("starving", "Starving! %d%%"),
("weekly_cost_value", "Weekly cost: %s"),
("company", "Company: %d / %d"),
("prisoners_equal_value", "Prisoners: %d / %d"),
("choose_prisoners", "Choose Prisoners"),
("choose_companions", "Choose Companions"),
("rescued_prisoners", "Rescued Prisoners"),
("captured_enemies", "Captured Enemies"),
("disband", "Disband"),
("take_prisoner", "Take prisoner"),
("take_back", "Take back"),
("give", "Give"),
("take", "Take"),
("sell", "Sell"),
("hire", "Hire"),
("notification_cant_hire", "(Can't hire: not enough money)"),
("uncapture", "Release"),
("capture", "Capture"),
("party_capcity_reached", "(Party capacity reached)"),
("all", " all"),
("joining_cost_weekly_wage", "Joining cost: %d, Weekly wage: %d"),
("weekly_wage", "Weekly wage: %d denars"),
("price", "Price: %d"),
("number_ready_to_upgrade", "%d ready to be upgraded."),
("upgrade_to_value", " Upgrade to %s (%dd)"),
("notification_no_slot_for_upgrade", "No slot for upgrading to %s!"),
("shield_broken", "Shield broken."),
("shield_cracked", "Shield cracked."),
("shield_deformed", "Shield deformed."),
("you_hit_a_friendly_troop", "You hit a friendly troop!"),
("hit_shield_on_back", "Hit shield on back!"),
("delivered_couched_lance_damage", "Delivered couched lance damage!"),
("received_couched_lance_damage", "Received couched lance damage!"),
("speed_bonus_plus", "Speed bonus: +%d%%"),
("speed_bonus", "Speed bonus: %d%%"),
("cant_reload_this_weapon_on_horseback", "Can't reload this weapon on horseback."),
("no_more_bolts", "No more bolts..."),
("you_are_not_carrying_any_bolts", "You are not carrying any bolts."),
("no_more_arrows", "No more arrows..."),
("you_are_not_carrying_any_arrows", "You are not carrying any arrows."),
("head_shot", "Head shot!"),
("delivered_number_damage", "Delivered %d damage."),
("delivered_number_damage_to_horse", "Delivered %d damage to horse."),
("horse_charged_for_number_damage", "Horse charged for %d damage."),
("received_number_damage", "Received %d damage."),
("horse_received_number_damage", "Horse received %d damage."),
("value_killed_teammate", "%s has killed a teammate!"),
("horse_fell_dead", "Horse fell dead..."),
("horse_crippled", "Horse crippled..."),
("shot_difficulty", "Shot difficulty: %2.1f"),
("you_have_improved_your_proficiency_in_value_to_number", "You have improved your proficiency in %s to %d."),
("your_proficiency_in_value_has_improved_by_number_to_number", "Your proficiency in %s has improved by +%d to %d."),
("value_killed_by_value", "%s killed by %s."),
("value_fell_dead", "%s fell dead."),
("value_knocked_unconscious_by_value", "%s knocked unconscious by %s."),
("value_fell_unconscious", "%s fell unconscious."),
("troop_routed", "%s has been routed."),
("troop_panicked", "%s has panicked."),
("troop_fled", "%s has fled the battle."),
("you_got_number_experience", "You got %d experience."),
("you_have_advanced_to_level_number", "You have advanced to level %d."),
("value_has_advanced_to_level_number", "%s has advanced to level %d."),
("you_got_value", "You got %s."),
("new_quest_taken", "New quest taken: %s."),
("quest_completed_value", "Quest completed: %s."),
("quest_succeeded_value", "Quest succeeded: %s."),
("quest_failed_value", "Quest failed: %s."),
("quest_concluded_value", "Quest concluded: %s."),
("quest_cancelled_value", "Quest cancelled: %s."),
("lost_value", " (Lost: %s)"),
("items_lost", " (Items lost:"),
("party_has_nothing_to_eat", "Party has nothing to eat!"),
("days_training_is_complete", "Day's training is complete..."),
("total_experience_gained_through_training_number", "Total experience gained through training: %d"),
("some_soldiers_are_ready_to_upgrade", "Some soldiers are ready to upgrade."),
("number_of_companions_exceeds_leadership_limit", " Number of companions exceeds leadership limit."),
("number_of_prisoners_exceeds_prisoner_management_limit", " Number of prisoners exceeds prisoner management limit."),
("party_morale_is_low", " Party morale is low!"),
("and_one_space", " and"),
("has_deserted_the_party", " has deserted the party."),
("have_deserted_the_party", " have deserted the party."),
("weekly_report", "Weekly report"),
("shared_number_experience_within_party", "Shared %d experience within party."),
("got_item_value", "Got item: %s."),
("game_saved_successfully", "Game saved successfully."),
("autosaving", "Autosaving..."),
("quick_saving", "Quick-saving..."),
("cant_quick_save", "Can't Quick-save during battle..."),
("screenshot_taken_to_value", "Screenshot is saved to %s"),
("screenshot_failed", "Can't save screenshot."),
("value_joined_your_party", "%s joined your party."),
("value_joined_party_as_prisoner", "%s joined party as prisoner."),
("value_has_joined_party", "%s has joined party."),
("value_has_been_taken_prisoner", "%s has been taken prisoner."),
("value_left_the_party", "%s left the party."),
("number_values_left_the_party", "%d %s(s) left the party."),
("number_value_left_the_party", "%d %s left the party."),
("your_relations_with_value_has_improved_from_number_to_number", "Your relations with %s has improved from %d to %d."),
("your_relations_with_value_has_deteriorated_from_number_to_number", "Your relations with %s has deteriorated from %d to %d."),
("you_lost_value", "You lost %s."),
("lost_item_value", "Lost item: %s."),
("got_number_value", "Got %d %s."),
("lost_number_value", "Lost %d %s."),
("set_default_keys", "Set default keys"),
("undo_changes", "Undo changes"),
("press_a_key", "Press a key"),
("return_to_game", "Return to Game"),
("options", "Options"),
("save_and_exit", "Save & Exit"),
("save", "Save"),
("save_as", "Save As"),
("quit_without_saving", "Quit without Saving"),
("empty_slot", "Empty Slot"),
("game_saved", "Game saved..."),
("confirm_overwrite", "Savegame for %s will be overwritten. Are you sure?"),
("dynamic_lighting", "Dynamic Lighting"),
("character_shadows", "Character Shadows"),
("grass_density", "Grass Density:"),
("environment_shadows", "Environment Shadows"),
("realistic_shadows_on_plants", "Realistic Shadows on Plants:"),
("particle_systems", "Particle Systems"),
("gamma", "Monitor Gamma:"),
("character_detail", "Character Detail:"),
("character_shadow_detail", "Character Shadow Detail:"),
("blood_stains", "Blood Stains:"),
("on", "On"),
("off", "Off"),
("near_player_only", "Near player only"),
("default", "Default"),
("3d_grass", "3D Grass:"),
("number_of_ragdolls", "Number of Rag Dolls:"),
("number_of_corpses", "Number of Corpses:"),
("unlimited", "Unlimited"),
("anisotropic_filtering", "Anisotropic Filtering"),
("fast_water_reflection", "Fast Water Reflections"),
("maximum_framerate", "Max. Frame-rate:"),
("show_framerate", "Show Frame-rate:"),
("estimated_performance", "Estimated Performance: %d%%"),
("change_graphics_settings_explanation", "Some changes you have made will take effect when you enter a new area."),
("start_tutorial", "Play Tutorial"),
("start_a_new_game", "Start a New Game"),
("restore_a_saved_game", "Load Game"),
("exit_to_windows", "Exit"),
("credits", "Credits"),
("version_value", "v%s"),
("active_quests", "Active Quests"),
("finished_quests", "Finished Quests"),
("given_on_date", "Given on: %s"),
("days_since_given", "Days since given: %d"),
("quest_progression_number", "Quest progression: %d%%"),
("too_many_quests", "Too many quests"),
("ok", "OK"),
("move_forward", "Move Forward"),
("move_backward", "Move Backward"),
("move_left", "Move Left"),
("move_right", "Move Right"),
("action", "Action"),
("jump", "Jump"),
("attack", "Attack"),
("parry_then_attack", "Counter Attack"),
("defend", "Defend"),
("kick", "Kick"),
("equip_weapon_1", "Equip Item 1"),
("equip_weapon_2", "Equip Item 2"),
("equip_weapon_3", "Equip Item 3"),
("equip_weapon_4", "Equip Item 4"),
("equip_next_weapon", "Equip Next Weapon"),
("equip_next_shield", "Equip Next Shield"),
("sheath_weapon", "Sheath Weapon"),
("character_window", "Character Window"),
("inventory_window", "Inventory Window"),
("party_window", "Party Window"),
("quests_window", "Quests Window"),
("game_log_window", "Game Log Window"),
("leave_location_retreat", "Leave Location/Retreat"),
("zoom", "Zoom"),
("view_outfit", "View Outfit"),
("toggle_first_person_view", "Toggle First Person View"),
("view_orders", "View Orders"),
("quick_save", "Quick Save"),
("no_key_assigned", "No key assigned"),
("new_enemies_have_arrived", "New enemies have arrived."),
("reinforcements_have_arrived", "Reinforcements have arrived."),
("report_casualties", "Report Casualties"),
("report_experience", "Report Experience"),
("current_level_value", "Current Level: %d"),
("base_attribute_value", "Base Attribute: %s"),
("battle_controls", "Battle Controls"),
("map_controls", "Map Controls"),
("general_controls", "General Controls"),
("zoom_in", "Zoom In"),
("zoom_out", "Zoom Out"),
("wait", "Wait"),
("take_screenshot", "Take Screenshot"),
("randomize", "Randomize"),
("hint", "Hint"),
("press_left_mouse_button_to_continue", "Press left mouse button to continue..."),
("loot", "Loot"),
("chest", "Chest"),
("cut_short", "c"),
("pierce_short", "p"),
("blunt_short", "b"),
("battle", "Battle"),
("siege", "Siege"),
("troops", "Troops:"),
("loading_module_info_file", "Loading Module Info File..."),
("processing_ini_file", "Processing INI File..."),
("loading_music", "Loading Music..."),
("loading_data", "Loading Data..."),
("loading_setting_data", "Loading Setting Data..."),
("loading_textures", "Loading Textures..."),
("finished", "Finished."),
("creating_game", "Creating Game..."),
("loading_savegame_file", "Loading Savegame File..."),
("loading_map_file", "Loading Map File..."),
("initializing_map", "Initializing Map..."),
("launching_game", "Launching Game..."),
("capital_battle", "BATTLE:"),
("capital_versus", "--VERSUS--"),
("tracks", "Tracks"),
("battleground", "Battleground"),
("order_1", "Select Order 1"),
("order_2", "Select Order 2"),
("order_3", "Select Order 3"),
("order_4", "Select Order 4"),
("order_5", "Select Order 5"),
("order_6", "Select Order 6"),
("order_button_hold_this_position", "Hold this position"),
("order_button_follow_me", "Follow me"),
("order_button_charge", "Charge"),
("order_button_stand_ground", "Stand ground"),
("order_button_retreat", "Retreat"),
("order_button_advance", "Advance ten paces"),
("order_button_fall_back", "Fall back ten paces"),
("order_button_spread_out", "Spread out"),
("order_button_stand_closer", "Stand closer"),
("order_button_mount_horses", "Mount horses"),
("order_button_dismount", "Dismount"),
("order_button_hold_fire", "Hold your fire"),
("order_button_fire_at_will", "Fire at will"),
("order_button_use_blunt_weapons", "Use only blunt weapons"),
("order_button_use_any_weapon", "Use weapons at will"),
("order_button_movement_orders", "Movement orders"),
("order_button_formation_orders", "Formation orders"),
("order_button_fire_orders", "Fire orders"),
("follow_me_e_", "%s, follow me!"),
("charge_e_", "%s, charge!!!"),
("stand_ground_e_", "%s, stand ground!"),
("retreat_e_", "%s, retreat!"),
("mount_horses_e_", "%s, mount horses!"),
("dismount_e_", "%s, dismount!"),
("advance_e_", "%s, advance ten paces!"),
("fall_back_e_", "%s, fall back ten paces!"),
("stand_closer_e_", "%s, stand closer!"),
("spread_out_e_", "%s, spread out!"),
("use_blunt_weapons_e_", "%s, use only blunt weapons!"),
("use_any_weapon_e_", "%s, use weapons at will!"),
("hold_fire_e_", "%s, hold your fire!"),
("fire_at_will_e_", "%s, fire at will!"),
("hold_this_position_e_", "%s, hold this position!"),
("infantry", "Infantry"),
("archers", "Archers"),
("cavalry", "Cavalry"),
("companions", "Companions"),
("everyone_hear_me", "Everyone, hear me!"),
("everyone", "Everyone"),
("everyone_around_me", "Nearby Soldiers"),
("str_hear_me", "%s, hear me!"),
("str_and_str", "%s and %s"),
("str_comma_str", "%s, %s"),
("need_to_learn_prisoner_management", "You need to learn Prisoner Management skill in order to take prisoners."),
("game_log", "Game Log"),
("recent_messages", "Recent Messages"),
("custom_battle", "Custom Battle"),
("player", "Player"),
("value_denars", "%d denars"),
("back", "Back"),
("forward", "Forward"),
("display_on_map", "Show On Map"),
("info_pages", "Game Concepts"),
("troops2", "Characters"),
("locations", "Locations"),
("click_button_to_view_note", "Click on a link to view the notes"),
("this_page_contains_no_information", "This page contains no information"),
("other_pages_that_link_here", "Other pages that link here: "),
("report_is_value_days_old", " (Report is %d days old)"),
("report_is_current", " (Report is current)"),
("button_party_member_healthy_total", "%s (%d/%d)"),
("button_party_member_total", "%s (%d)"),
("button_party_member_hero_percentage_wounded", "%s (%d%% - Wounded)"),
("button_party_member_hero_percentage", "%s (%d%%)"),
("percentage_value", "%d%%"),
("full", "Full"),
("quick", "Quick"),
("none", "None"),
("change", "Change"),
("how_to_change", "How to change this?"),
("change_directx_explanation", "You can change the render method between DirectX 7 and DirectX 9 by clicking on the Configure button at the launch menu that comes up when you first start the game."),
("dropping_picking_up", "Dropping %s; picking up %s."),
("dropping", "Dropping %s."),
("picking_up", "Picking up %s."),
("unable_to_take", "Unable to take that."),
("age", "Age"),
("cannot_be_used_on_horseback", "Cannot be used on horseback"),
("enable_vertex_shaders2", "Render Method:"),
("screen_size2", "Screen Resolution:"),
("use_desktop_resolution2", "Use Desktop Resolution"),
("shadow_quality2", "Shadow Quality:"),
("m_low2", "Low"),
("m_high2", "High"),
("m_ultra_high2", "Ultra High"),
("off2", "Off"),
("group_header", "Class of troop"),
("group_rename", "Rename group"),
("group_1", "Infantry"),
("group_2", "Archers"),
("group_3", "Cavalry"),
("group_4", "Unnamed 1"),
("group_5", "Unnamed 2"),
("group_6", "Unnamed 3"),
("group_7", "Unnamed 4"),
("group_8", "Unnamed 5"),
("group_9", "Unnamed 6"),
("group_rename", "Rename Group"),
("group_close", "Close"),
("party_b_group_information", "%s belongs to %s group"),
("thrown_or_s", "Thrown/%s"),
("ranged_damage", "Ranged: %d%s"),
("overall_quality", "Overall Quality"),
("shader_quality", "Shader Quality:"),
("flora_lod_detail", "Tree Detail:"),
("flora_degrade_distance", "Tree Degrade Distance:"),
("antialiasing", "AntiAliasing:"),
("use_depth_effects", "Use Depth Effects"),
("hdr_mode", "HDR Mode:"),
("autoexpore", "Auto-exposure"),
("choose_profile", "Choose Profile"),
("create", "Create"),
("edit", "Edit"),
("join_game", "Join a Game"),
("host_game", "Host a Game"),
("custom", "Custom"),
("medium", "Medium"),
("male", "Male"),
("female", "Female"),
("gender", "Choose Gender:"),
("edit_profile", "Edit Profile"),
("new_profile", "New Profile"),
("enter_username", "Enter Username:"),
("invalid_username", "Usernames may only contain letters, numbers or _ - * [ ] ~ characters."),
("confirmation", "Are you sure?"),
("multiplayer", "Multiplayer"),
("server_name", "Server"),
("module_name", "Module"),
("game_type", "Game Type"),
("map_name", "Map"),
("ping", "Ping"),
("dedicated", "Dedicated"),
("number_of_players", "Players"),
("password_protected", "Password"),
("connect", "Connect"),
("local_area_network", "Local Area Network"),
("internet", "Internet"),
("favorites", "Favorites"),
("source", "Source:"),
("server_password", "Server Password:"),
("refresh", "Refresh"),
("start_search", "Start Search"),
("add_to_favorites", "Add to Favorites"),
("remove_from_favorites", "Remove from Favorites"),
("use_speedtree", "Use Speedtree"),
("use_instancing", "Use Instancing"),
("error", "Error"),
("error_server_full", "Server is full."),
("error_server_full_for_non_private", "Server is full for players without a private member password."),
("error_server_password_incorrect", "Incorrect password."),
("error_incorrect_serial", "Incorrect serial number."),
("error_incorrect_authorization_key", "Incorrect authorization key."),
("error_banned_from_server", "You are banned from this server."),
("error_username_taken", "Your profile name is used by another player."),
("error_authentication_failed", "Authentication failed."),
("unable_to_connect_to_server", "Unable to connect to server."),
("connection_to_server_is_lost", "Connection to server is lost."),
("kicked_from_server", "Kicked from server."),
("switch_to_module_question", "This server is running another module than the one you are currently running. Do you want Mount&Blade to switch to this module?"),
("download_module_question", "This server is running a module that is not installed on your computer. Would you like to visit the download site for this module now?"),
("download_mb_new_version_question", "This server is running a newer version (%d.%d%d%d) of Mount&Blade than the one you are currently running (%d.%d%d%d). Would you like to visit TaleWorlds download site now?"),
("download_mb_old_version_question", "This server is running an older version (%d.%d%d%d) of Mount&Blade and than the one you are currently running (%d.%d%d%d)."),
("download_module_new_version_question", "This server is running a newer version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d). Would you like to visit the download site for this module now?"),
("download_module_old_version_question", "This server is running an older version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d)."),
("authenticating_with_steam", "Authenticating with Steam..."),
("validating_serial_number", "Validating serial number..."),
("scanning_lan", "Scanning local area network..."),
("retrieving_servers", "Retrieving server list..."),
("shield_size2", "Size: %dx%d"),
("click_to_view_notes", "Click to view notes"),
("retrieving_server_infos", "Retrieving information from servers (%d)..."),
("connecting_to_server", "Connecting to server..."),
("requesting_to_join_the_game", "Requesting to join the game..."),
("loading", "Loading..."),
("group_value_control", "Group %d!"),
("drop_weapon", "Drop Weapon"),
("multiplayer_message_all", "Send Message to Everyone"),
("multiplayer_message_team", "Send Message to Team"),
("command_line", "Command Line"),
("use_ranged_weapon_as_melee", "Toggle Weapon Mode"),
("send_message_all", "Send Message to Everyone"),
("send_message_team", "Send Message to Team"),
("select", "Select"),
("context_menu", "Context Menu"),
("round_starts_in_value_seconds", "Round starts in %d seconds..."),
("watching_value", "Following %s"),
("capital_spec", "SPEC"),
("capital_dead", "DEAD"),
("instancing_error1", "Could not lock Instance Buffer (size: %d), Disabled mesh-instancing (Error Code: %d)"),
("instancing_error2", "Could not fit instanced objects, Disabled mesh-instancing"),
("by_keyboard", "By movement keys"),
("combat_speed_slowest", "Slowest"),
("combat_speed_slower", "Slower"),
("combat_speed_normal", "Normal"),
("combat_speed_faster", "Faster"),
("combat_speed_fastest", "Fastest"),
("module_newer_than_application", "The module you have selected requires a newer version of the game."),
("module_older_than_application", "The module you have selected requires an older version of the game."),
("unbalanced", "Unbalanced"),
("can_crush_through_blocks", "Can crush through blocks"),
("turn_camera_with_horse", "Turn Camera with Horse in First Person:"),
("widescreen_mode_on", "Multiple Screen Mode Enabled"),
("widescreen_mode_off", "Multiple Screen Mode Disabled"),
("notification_cant_upgrade", "(Can't upgrade: not enough money)"),
("turn_never", "Never"),
("turn_ranged_only", "Ranged only"),
("turn_melee_only", "Melee only"),
("turn_always", "Always"),
("general_options", "General Options"),
("vac_enabled", "Valve Anti Cheat Enabled"),
("campaign_ai", "Campaign AI:"),
("downloading_map", "Downloading map (%d KB)"),
("download_completed", "Download completed."),
("server_filter", "Server filter"),
("has_players", "Has players"),
("is_not_full", "Not full"),
("is_password_free", "No password"),
("native_only", "Native only"),
("ping_limit", "Ping limit"),
("filter_info", "%d games and %d players filtered"),
("is_version_compatible", "Compatible with module"),
("ttnet_account", "TTNET Oyun account"),
("username", "Username"),
("password", "Password"),
("error_incorrect_username_or_password", "Incorrect username or password"),
("validating_account", "Validating account..."),
("plase_enter_your_serial_key", "Please enter your serial key"),
("texture_detail2", "Texture Detail:"),
("antialiasing2", "Antialiasing:"),
("napoleonic_key_does_not_exist", "This mod requires the Napoleonic Wars DLC to play!"),
("delete_module_workshop", "Are you sure you want to unsubscribe from this module?"),
("delete_module", "Are you sure you want to delete the module?"),
("delete_native_module", "You cannot delete native mods."),
("incompatible_module", "This server is incompatible with your current module. You can use the configuration utility to change module."),
] | module_ui_strings.py | from compiler import *
ui_strings = [
("music_volume", "Music Volume:"),
("sound_volume", "Sound Volume:"),
("mouse_sensitivity", "Mouse Sensitivity:"),
("invert_mouse_y_axis", "Invert Mouse Y Axis"),
("enabled", "Enabled"),
("disabled", "Disabled"),
("damage_to_player", "Damage to Player:"),
("reduced_to_1_over_4_easiest", "Reduced to 1/4 (Easiest)"),
("reduced_to_1_over_2_easy", "Reduced to 1/2 (Easy)"),
("damage_to_friends", "Damage to Friends:"),
("reduced_to_1_over_2_easiest", "Reduced to 1/2 (Easiest)"),
("reduced_to_3_over_4_easy", "Reduced to 3/4 (Easy)"),
("normal", "Normal"),
("combat_ai", "Combat AI:"),
("combat_speed", "Combat Speed:"),
("good", "Good"),
("average_caps", "Average"),
("poor", "Poor"),
("faster", "Faster"),
("slower", "Slower"),
("control_block_direction", "Control Block Direction:"),
("automatic_recommended", "Automatic"),
("manual_easy", "Manual (Easy)"),
("manual_hard", "Manual (Hard)"),
("by_mouse_movement", "By mouse movement"),
("control_attack_direction", "Control Attack Direction:"),
("lance_control", "Lance Control:"),
("by_relative_enemy_position", "By relative enemy position"),
("by_inverse_mouse_movement", "By inverse mouse movement"),
("battle_size", "Battle Size:"),
("show_attack_direction", "Show Attack Direction"),
("show_targeting_reticule", "Show Targeting Reticle"),
("show_names_of_friendly_troops", "Show Banners on Friendly Troops"),
("report_damage", "Report Damage"),
("report_shot_difficulty", "Report Shot Difficulty"),
("difficulty_rating_percentage", "Difficulty Rating = %d%%"),
("controls", "Controls"),
("video_options", "Video Options"),
("done", "Done"),
("factions", "Factions"),
("item_itemname", "Item - %s"),
("prop_propname", "Prop - %s"),
("unknown_unknownname", "Unknown - %s"),
("entry_point_entrypointname", "Entry Point %d"),
("passage_menu_item_passagename", "Passage (menu item %d)"),
("plant_plantname", "Plant - %s"),
("export_file_for_character_playername_already_exists_overwrite_it", "Export file for character %s already exists. Overwrite it?"),
("yes", "Yes"),
("no", "No"),
("set_save_file_name", "Enter a name for this save-game:"),
("enter_new_name", "Enter a new name:"),
("export_character", "Export Character"),
("import_character", "Import Character"),
("character_playername_exported_successfully", "Character %s exported successfully."),
("character_playername_imported_successfully", "Character %s imported successfully."),
("unable_to_open_import_file", "Unable to open import file."),
("are_you_sure_you_want_to_import_the_character", "Are you sure you want to import the character?"),
("unable_to_find_character_import_file", "Unable to find character import file."),
("mount_and_blade_is_running_in_trial_mode_please_buy_the_game_for_importing_a_character", "Mount&Blade is running in trial mode. Please buy the game for importing a character."),
("change_skin", "Skin"),
("change_hair", "Hair"),
("change_hair_color", "Hair Color"),
("change_beard", "Beard"),
("tutorial", "Tutorial"),
("tutorial_face_generator", "Adjust your character's face using the buttons and the sliders. To rotate the head, click on it and drag the mouse."),
("restore", "Load"),
("cancel", "Cancel"),
("delete", "Delete"),
("confirm_delete_game", "Are you sure you want to delete this game?"),
("error_removing_file", "Error removing file..."),
("day_datedisplay", "Day %d (%d:%d%d)"),
("reset_changes", "Reset Changes"),
("weapon_proficiencies", "Proficiencies"),
("skills", "Skills"),
("attributes", "Attributes"),
("enter_name_here", "*Enter Name Here*"),
("edit_face", "Click to edit face"),
("statistics", "Statistics"),
("next", "Next"),
("prev", "Prev"),
("learn", "Learn"),
("question_saving_policy", "What will the game's saving policy be?"),
("saving_policy_realistic", "Realistic! No quitting without saving!"),
("saving_policy_nonrealistic", "Allow me to quit without saving."),
("tutorial_character_generation", "Now enter your name and distribute your attribute, skill and weapon points. You can click on various elements on the screen to learn how each one will affect your character."),
("str", "STR"),
("agi", "AGI"),
("int", "INT"),
("cha", "CHA"),
("at_learning_limit", "(At learning limit)"),
("not_enough_skill_points_to_learn", "(Not enough skill points to learn)"),
("strength", "strength"),
("agility", "agility"),
("intelligence", "intelligence"),
("charisma", "charisma"),
("not_enough_attributetype_to_learn_this_skill", "(Not enough %s to learn this skill)"),
("explanation_one_handed_weapon", "Covers usage of one handed swords, axes and blunt weapons."),
("explanation_two_handed_weapon", "Covers usage of two handed swords, great axes and mauls."),
("explanation_polearm", "Covers usage of pole weapons like spears, lances, staffs, etc."),
("explanation_archery", "Covers usage of bows."),
("explanation_crossbow", "Covers usage of crossbows."),
("explanation_throwing", "Covers usage of thrown weapons like javelins, darts, stones etc."),
("explanation_firearms", "Covers usage of pistols and muskets."),
("explanation_strength", "Strength: Every point adds +1 to hit points. The following skills can not be developed beyond 1/3 of Strength: ironflesh, Power-strike, Power-throw, Power-draw."),
("explanation_agility", "Agility: Each point gives five weapon points and slightly increases movement speed. The following skills can not be developed beyond 1/3 of Agility: weapon-master, Shield, Athletics, Riding, Horse archery, Looting."),
("explanation_intelligence", "Intelligence: Every point to intelligence immediately gives one extra skill point. The following skills can not be developed beyond 1/3 of Intelligence: Trainer, Tracking, Tactics, Path finding, Spotting, Inventory Management, Wound treatment, Surgery, First-aid, Engineer, Persuasion."),
("explanation_charisma", "Charisma: Each point increases your party size limit by +1. The following skills can not be developed beyond 1/3 of Charisma: Prisoner Management, Leadership, Trade."),
("level", "Level: %d"),
("xp", "Experience: %d"),
("next_level_at", "Next level at: %d"),
("health_player", "Health: %d/%d"),
("health", "Health: %d"),
("attribute_points", "Attribute points: %d"),
("skill_points", "Skill points: %d"),
("weapon_points", "Weapon points: %d"),
("mission_losses_none", " none."),
("mission_losses_wounded", "wounded :"),
("mission_losses_killed", "killed :"),
("party_losses", "%s : %d wounded --- %d killed of %d."),
("casualties_sustained", "Casualties sustained:"),
("advantage_change", "Advantage change = %c%d "),
("overall_battle_casualties", "Overall battle casualties:"),
("advantage_outnumbered", " You are hopelessly outnumbered."),
("advantage_major_disadvantage", " You have a major disadvantage."),
("advantage_slight_disadvantage", " You are slightly disadvantaged."),
("advantage_balanced", " The situation is balanced."),
("advantage_fair_advantage", " You have a fair advantage for winning."),
("advantage_greatly_favored", " The odds of battle favor you greatly."),
("tactical_advantage", "Tactical advantage: %d (%s)"),
("order_group", "Order group:"),
("question_save_changes", "You have made changes to the objects. Do you want to save changes?"),
("yes_save", "Yes, save"),
("no_discard_changes", "No, discard changes"),
("everyone_control", "Everyone!"),
("everyone_around_control", "Nearby Soldiers!"),
("others_control", "Others!"),
("question_give_up_fight", "Give up the fight?"),
("give_up", "Give up"),
("keep_fighting", "Keep fighting"),
("question_leave_area", "Leave Area"),
("cant_retreat_there_are_enemies_nearby", "Can't retreat. There are enemies nearby!"),
("question_retreat_battle", "Retreat battle?"),
("retreated_battle", "%s has been routed."),
("retreated_battle", "%s has fled from the battlefield."),
("retreat", "Retreat"),
("talk", "Talk"),
("duel", "Duel"),
("mount", "Mount"),
("riding_skill_not_adequate_to_mount", "(Riding skill not adequate to mount)"),
("dismount", "Dismount"),
("exit", "Exit"),
("door_to", "Door to "),
("open", "Open"),
("equip", "Equip"),
("baggage", "Baggage"),
("access_inventory", "Access inventory"),
("chest", "Chest"),
("passage", "Passage"),
("go", "Go"),
("retreat_battle", "Retreat Battle"),
("leave_area", "Leave Area"),
("reports", "Reports"),
("camp", "Camp"),
("terrain", "Terrain"),
("quests", "Notes"),
("inventory", "Inventory"),
("character", "Character"),
("party", "Party"),
("paused", "Paused"),
("click_left_button_to_cancel_wait", "Waiting... (Left click to return)"),
("midnight", "Midnight"),
("late_night", "Late night"),
("dawn", "Dawn"),
("early_morning", "Early morning"),
("morning", "Morning"),
("noon", "Noon"),
("afternoon", "Afternoon"),
("late_afternoon", "Late afternoon"),
("dusk", "Dusk"),
("evening", "Evening"),
("midnight", "Midnight"),
("level_limit_reached", "Level Limit Reached!"),
("explanation_level_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to Level 8. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("time_limit_reached", "Time Limit Reached!"),
("explanation_time_limit", "Hail Adventurer, Mount&Blade has not been activated yet and is running in trial mode. In this mode, the game is limited to 30 game days. In order to continue playing, please restart the game and activate it with your 16-digit serial key which is included in your boxed copy. After activating, you can continue playing right from here. Now, Mount&Blade will save your game and exit."),
("target_lost", "Target lost"),
("waiting", "Waiting."),
("travelling_to", "Travelling to "),
("following", "Following "),
("accompanying", "Accompanying "),
("running_from", "Running from "),
("patrolling", "Patrolling"),
("patrolling_around", "Patrolling around "),
("holding", "Holding"),
("travelling", "Travelling"),
("fighting_against", "Fighting against "),
("speed_equals", "Speed = %2.1f"),
("defenders", "Garrison:"),
("prisoners", "Prisoners:"),
("1_hour", "1 hour"),
("n_hours", "%d hours"),
("between_hours", "%d - %d hours"),
("combatants", "Combatants: %d"),
("party_size", "Party size: %d"),
("party_size_between", "Party size: %d - %d"),
("merchant", "Merchant"),
("return", "Return"),
("no_cost", "No cost"),
("rename", "Rename"),
("use", "Use"),
("destroy", "Destroy"),
("destructible_target", "Destructible target"),
("tutorial_inventory", "This is the trade screen. Hold down control key while clicking on an item to quickly purchase or sell it."),
("head_armor", "Head Armor: %d"),
("body_armor", "Body Armor: %d"),
("leg_armor", "Leg Armor: %d"),
("encumbrance", "Encumbrance: %2.1f"),
("you_dont_have_value", "You don't have %s."),
("merchant_cant_afford_value", "%s: I can't afford %s. I have only %s."),
("merchant_pay_whatever", "All right, just pay whatever you can."),
("merchant_think_of_something_else", "Hmm. Let us think of something else."),
("dumping_value_items", "%d items will be permanently lost, are you sure?"),
("dumping_value_item", "One item will be permanently lost, are you sure?"),
("question_slaughter_food_and_eat", "Slaughter this %s and eat it?"),
("money_value", "Money: %s"),
("dump", "Discard"),
("outfit", "Outfit"),
("arms", "Arms"),
("horse", "Horse"),
("food", "Food"),
("reclaim_your_sold_goods", "Reclaim your sold goods before buying that!"),
("return_your_bought_goods", "Return your bought goods before selling that!"),
("polearm_no_shield", "Polearm (No shield)"),
("polearm", "Polearm"),
("two_handed", "Two-handed"),
("two_handed_one_handed", "Two-handed/One-handed"),
("one_handed", "One-handed"),
("return_price", "Return price: %d"),
("sell_price", "Sell price: %d"),
("reclaim_price", "Reclaim price: %d"),
("buying_price", "Buying price: %d"),
("default_item", "Default item"),
("buying_price_free", "Buying price: Free"),
("weight", "Weight: %2.1f"),
("plus_value_to_head_armor", "+%d to head armor"),
("plus_value_to_body_armor", "+%d to body armor"),
("plus_value_to_leg_armor", "+%d to leg armor"),
("swing", "Swing: %d%s"),
("damage", "Damage: %d%s"),
("thrust", "Thrust: %d%s"),
("accuracy", "Accuracy: %d"),
("speed_rating", "Speed rating: %d"),
("value_to_damage", "%c%d to damage"),
("value_to_morale", "+%1.1f to party morale"),
("resistance", "Resistance: %d"),
("size", "Size: %d"),
("weapon_reach", "Weapon reach: %d"),
("armor", "Armor: %d"),
("speed", "Speed: %d"),
("maneuver", "Maneuver: %d"),
("charge", "Charge: %d"),
("hit_points", "Hit Points: %d/%d"),
("requires_value_difficulty", "Requires %s: %d"),
("bonus_against_shields", "Bonus against shields"),
("cant_be_used_to_block", "Can't be used to block"),
("troop_cant_use_item", "%s: I can't use that item!"),
("notification_riding_skill_not_enough", "Your riding skill is not high enough to mount this horse."),
("notification_requirements_not_met", "You don't have the required skills or attributes for this weapon."),
("notification_payment_value", "You must pay %s."),
("notification_payment_receive_value", "You will receive %s."),
("one_handed_weapons", "One Handed Weapons"),
("two_handed_weapons", "Two Handed Weapons"),
("polearms", "Polearms"),
("archery", "Archery"),
("crossbows", "Crossbows"),
("throwing", "Throwing"),
("firearms", "Firearms"),
("reset", "Reset"),
("release_one", "Release one"),
("move_up", "Move Up"),
("move_down", " Move Down "),
("upgrade_one", "Upgrade one"),
("party_skills", "Party Skills"),
("morale", "Morale: %s"),
("terrible", "Terrible"),
("very_low", "Very low"),
("low", "Low"),
("below_average", "Below average"),
("average", "Average"),
("above_average", "Above average"),
("high", "High"),
("very_high", "Very high"),
("excellent", "Excellent"),
("starving", "Starving! %d%%"),
("weekly_cost_value", "Weekly cost: %s"),
("company", "Company: %d / %d"),
("prisoners_equal_value", "Prisoners: %d / %d"),
("choose_prisoners", "Choose Prisoners"),
("choose_companions", "Choose Companions"),
("rescued_prisoners", "Rescued Prisoners"),
("captured_enemies", "Captured Enemies"),
("disband", "Disband"),
("take_prisoner", "Take prisoner"),
("take_back", "Take back"),
("give", "Give"),
("take", "Take"),
("sell", "Sell"),
("hire", "Hire"),
("notification_cant_hire", "(Can't hire: not enough money)"),
("uncapture", "Release"),
("capture", "Capture"),
("party_capcity_reached", "(Party capacity reached)"),
("all", " all"),
("joining_cost_weekly_wage", "Joining cost: %d, Weekly wage: %d"),
("weekly_wage", "Weekly wage: %d denars"),
("price", "Price: %d"),
("number_ready_to_upgrade", "%d ready to be upgraded."),
("upgrade_to_value", " Upgrade to %s (%dd)"),
("notification_no_slot_for_upgrade", "No slot for upgrading to %s!"),
("shield_broken", "Shield broken."),
("shield_cracked", "Shield cracked."),
("shield_deformed", "Shield deformed."),
("you_hit_a_friendly_troop", "You hit a friendly troop!"),
("hit_shield_on_back", "Hit shield on back!"),
("delivered_couched_lance_damage", "Delivered couched lance damage!"),
("received_couched_lance_damage", "Received couched lance damage!"),
("speed_bonus_plus", "Speed bonus: +%d%%"),
("speed_bonus", "Speed bonus: %d%%"),
("cant_reload_this_weapon_on_horseback", "Can't reload this weapon on horseback."),
("no_more_bolts", "No more bolts..."),
("you_are_not_carrying_any_bolts", "You are not carrying any bolts."),
("no_more_arrows", "No more arrows..."),
("you_are_not_carrying_any_arrows", "You are not carrying any arrows."),
("head_shot", "Head shot!"),
("delivered_number_damage", "Delivered %d damage."),
("delivered_number_damage_to_horse", "Delivered %d damage to horse."),
("horse_charged_for_number_damage", "Horse charged for %d damage."),
("received_number_damage", "Received %d damage."),
("horse_received_number_damage", "Horse received %d damage."),
("value_killed_teammate", "%s has killed a teammate!"),
("horse_fell_dead", "Horse fell dead..."),
("horse_crippled", "Horse crippled..."),
("shot_difficulty", "Shot difficulty: %2.1f"),
("you_have_improved_your_proficiency_in_value_to_number", "You have improved your proficiency in %s to %d."),
("your_proficiency_in_value_has_improved_by_number_to_number", "Your proficiency in %s has improved by +%d to %d."),
("value_killed_by_value", "%s killed by %s."),
("value_fell_dead", "%s fell dead."),
("value_knocked_unconscious_by_value", "%s knocked unconscious by %s."),
("value_fell_unconscious", "%s fell unconscious."),
("troop_routed", "%s has been routed."),
("troop_panicked", "%s has panicked."),
("troop_fled", "%s has fled the battle."),
("you_got_number_experience", "You got %d experience."),
("you_have_advanced_to_level_number", "You have advanced to level %d."),
("value_has_advanced_to_level_number", "%s has advanced to level %d."),
("you_got_value", "You got %s."),
("new_quest_taken", "New quest taken: %s."),
("quest_completed_value", "Quest completed: %s."),
("quest_succeeded_value", "Quest succeeded: %s."),
("quest_failed_value", "Quest failed: %s."),
("quest_concluded_value", "Quest concluded: %s."),
("quest_cancelled_value", "Quest cancelled: %s."),
("lost_value", " (Lost: %s)"),
("items_lost", " (Items lost:"),
("party_has_nothing_to_eat", "Party has nothing to eat!"),
("days_training_is_complete", "Day's training is complete..."),
("total_experience_gained_through_training_number", "Total experience gained through training: %d"),
("some_soldiers_are_ready_to_upgrade", "Some soldiers are ready to upgrade."),
("number_of_companions_exceeds_leadership_limit", " Number of companions exceeds leadership limit."),
("number_of_prisoners_exceeds_prisoner_management_limit", " Number of prisoners exceeds prisoner management limit."),
("party_morale_is_low", " Party morale is low!"),
("and_one_space", " and"),
("has_deserted_the_party", " has deserted the party."),
("have_deserted_the_party", " have deserted the party."),
("weekly_report", "Weekly report"),
("shared_number_experience_within_party", "Shared %d experience within party."),
("got_item_value", "Got item: %s."),
("game_saved_successfully", "Game saved successfully."),
("autosaving", "Autosaving..."),
("quick_saving", "Quick-saving..."),
("cant_quick_save", "Can't Quick-save during battle..."),
("screenshot_taken_to_value", "Screenshot is saved to %s"),
("screenshot_failed", "Can't save screenshot."),
("value_joined_your_party", "%s joined your party."),
("value_joined_party_as_prisoner", "%s joined party as prisoner."),
("value_has_joined_party", "%s has joined party."),
("value_has_been_taken_prisoner", "%s has been taken prisoner."),
("value_left_the_party", "%s left the party."),
("number_values_left_the_party", "%d %s(s) left the party."),
("number_value_left_the_party", "%d %s left the party."),
("your_relations_with_value_has_improved_from_number_to_number", "Your relations with %s has improved from %d to %d."),
("your_relations_with_value_has_deteriorated_from_number_to_number", "Your relations with %s has deteriorated from %d to %d."),
("you_lost_value", "You lost %s."),
("lost_item_value", "Lost item: %s."),
("got_number_value", "Got %d %s."),
("lost_number_value", "Lost %d %s."),
("set_default_keys", "Set default keys"),
("undo_changes", "Undo changes"),
("press_a_key", "Press a key"),
("return_to_game", "Return to Game"),
("options", "Options"),
("save_and_exit", "Save & Exit"),
("save", "Save"),
("save_as", "Save As"),
("quit_without_saving", "Quit without Saving"),
("empty_slot", "Empty Slot"),
("game_saved", "Game saved..."),
("confirm_overwrite", "Savegame for %s will be overwritten. Are you sure?"),
("dynamic_lighting", "Dynamic Lighting"),
("character_shadows", "Character Shadows"),
("grass_density", "Grass Density:"),
("environment_shadows", "Environment Shadows"),
("realistic_shadows_on_plants", "Realistic Shadows on Plants:"),
("particle_systems", "Particle Systems"),
("gamma", "Monitor Gamma:"),
("character_detail", "Character Detail:"),
("character_shadow_detail", "Character Shadow Detail:"),
("blood_stains", "Blood Stains:"),
("on", "On"),
("off", "Off"),
("near_player_only", "Near player only"),
("default", "Default"),
("3d_grass", "3D Grass:"),
("number_of_ragdolls", "Number of Rag Dolls:"),
("number_of_corpses", "Number of Corpses:"),
("unlimited", "Unlimited"),
("anisotropic_filtering", "Anisotropic Filtering"),
("fast_water_reflection", "Fast Water Reflections"),
("maximum_framerate", "Max. Frame-rate:"),
("show_framerate", "Show Frame-rate:"),
("estimated_performance", "Estimated Performance: %d%%"),
("change_graphics_settings_explanation", "Some changes you have made will take effect when you enter a new area."),
("start_tutorial", "Play Tutorial"),
("start_a_new_game", "Start a New Game"),
("restore_a_saved_game", "Load Game"),
("exit_to_windows", "Exit"),
("credits", "Credits"),
("version_value", "v%s"),
("active_quests", "Active Quests"),
("finished_quests", "Finished Quests"),
("given_on_date", "Given on: %s"),
("days_since_given", "Days since given: %d"),
("quest_progression_number", "Quest progression: %d%%"),
("too_many_quests", "Too many quests"),
("ok", "OK"),
("move_forward", "Move Forward"),
("move_backward", "Move Backward"),
("move_left", "Move Left"),
("move_right", "Move Right"),
("action", "Action"),
("jump", "Jump"),
("attack", "Attack"),
("parry_then_attack", "Counter Attack"),
("defend", "Defend"),
("kick", "Kick"),
("equip_weapon_1", "Equip Item 1"),
("equip_weapon_2", "Equip Item 2"),
("equip_weapon_3", "Equip Item 3"),
("equip_weapon_4", "Equip Item 4"),
("equip_next_weapon", "Equip Next Weapon"),
("equip_next_shield", "Equip Next Shield"),
("sheath_weapon", "Sheath Weapon"),
("character_window", "Character Window"),
("inventory_window", "Inventory Window"),
("party_window", "Party Window"),
("quests_window", "Quests Window"),
("game_log_window", "Game Log Window"),
("leave_location_retreat", "Leave Location/Retreat"),
("zoom", "Zoom"),
("view_outfit", "View Outfit"),
("toggle_first_person_view", "Toggle First Person View"),
("view_orders", "View Orders"),
("quick_save", "Quick Save"),
("no_key_assigned", "No key assigned"),
("new_enemies_have_arrived", "New enemies have arrived."),
("reinforcements_have_arrived", "Reinforcements have arrived."),
("report_casualties", "Report Casualties"),
("report_experience", "Report Experience"),
("current_level_value", "Current Level: %d"),
("base_attribute_value", "Base Attribute: %s"),
("battle_controls", "Battle Controls"),
("map_controls", "Map Controls"),
("general_controls", "General Controls"),
("zoom_in", "Zoom In"),
("zoom_out", "Zoom Out"),
("wait", "Wait"),
("take_screenshot", "Take Screenshot"),
("randomize", "Randomize"),
("hint", "Hint"),
("press_left_mouse_button_to_continue", "Press left mouse button to continue..."),
("loot", "Loot"),
("chest", "Chest"),
("cut_short", "c"),
("pierce_short", "p"),
("blunt_short", "b"),
("battle", "Battle"),
("siege", "Siege"),
("troops", "Troops:"),
("loading_module_info_file", "Loading Module Info File..."),
("processing_ini_file", "Processing INI File..."),
("loading_music", "Loading Music..."),
("loading_data", "Loading Data..."),
("loading_setting_data", "Loading Setting Data..."),
("loading_textures", "Loading Textures..."),
("finished", "Finished."),
("creating_game", "Creating Game..."),
("loading_savegame_file", "Loading Savegame File..."),
("loading_map_file", "Loading Map File..."),
("initializing_map", "Initializing Map..."),
("launching_game", "Launching Game..."),
("capital_battle", "BATTLE:"),
("capital_versus", "--VERSUS--"),
("tracks", "Tracks"),
("battleground", "Battleground"),
("order_1", "Select Order 1"),
("order_2", "Select Order 2"),
("order_3", "Select Order 3"),
("order_4", "Select Order 4"),
("order_5", "Select Order 5"),
("order_6", "Select Order 6"),
("order_button_hold_this_position", "Hold this position"),
("order_button_follow_me", "Follow me"),
("order_button_charge", "Charge"),
("order_button_stand_ground", "Stand ground"),
("order_button_retreat", "Retreat"),
("order_button_advance", "Advance ten paces"),
("order_button_fall_back", "Fall back ten paces"),
("order_button_spread_out", "Spread out"),
("order_button_stand_closer", "Stand closer"),
("order_button_mount_horses", "Mount horses"),
("order_button_dismount", "Dismount"),
("order_button_hold_fire", "Hold your fire"),
("order_button_fire_at_will", "Fire at will"),
("order_button_use_blunt_weapons", "Use only blunt weapons"),
("order_button_use_any_weapon", "Use weapons at will"),
("order_button_movement_orders", "Movement orders"),
("order_button_formation_orders", "Formation orders"),
("order_button_fire_orders", "Fire orders"),
("follow_me_e_", "%s, follow me!"),
("charge_e_", "%s, charge!!!"),
("stand_ground_e_", "%s, stand ground!"),
("retreat_e_", "%s, retreat!"),
("mount_horses_e_", "%s, mount horses!"),
("dismount_e_", "%s, dismount!"),
("advance_e_", "%s, advance ten paces!"),
("fall_back_e_", "%s, fall back ten paces!"),
("stand_closer_e_", "%s, stand closer!"),
("spread_out_e_", "%s, spread out!"),
("use_blunt_weapons_e_", "%s, use only blunt weapons!"),
("use_any_weapon_e_", "%s, use weapons at will!"),
("hold_fire_e_", "%s, hold your fire!"),
("fire_at_will_e_", "%s, fire at will!"),
("hold_this_position_e_", "%s, hold this position!"),
("infantry", "Infantry"),
("archers", "Archers"),
("cavalry", "Cavalry"),
("companions", "Companions"),
("everyone_hear_me", "Everyone, hear me!"),
("everyone", "Everyone"),
("everyone_around_me", "Nearby Soldiers"),
("str_hear_me", "%s, hear me!"),
("str_and_str", "%s and %s"),
("str_comma_str", "%s, %s"),
("need_to_learn_prisoner_management", "You need to learn Prisoner Management skill in order to take prisoners."),
("game_log", "Game Log"),
("recent_messages", "Recent Messages"),
("custom_battle", "Custom Battle"),
("player", "Player"),
("value_denars", "%d denars"),
("back", "Back"),
("forward", "Forward"),
("display_on_map", "Show On Map"),
("info_pages", "Game Concepts"),
("troops2", "Characters"),
("locations", "Locations"),
("click_button_to_view_note", "Click on a link to view the notes"),
("this_page_contains_no_information", "This page contains no information"),
("other_pages_that_link_here", "Other pages that link here: "),
("report_is_value_days_old", " (Report is %d days old)"),
("report_is_current", " (Report is current)"),
("button_party_member_healthy_total", "%s (%d/%d)"),
("button_party_member_total", "%s (%d)"),
("button_party_member_hero_percentage_wounded", "%s (%d%% - Wounded)"),
("button_party_member_hero_percentage", "%s (%d%%)"),
("percentage_value", "%d%%"),
("full", "Full"),
("quick", "Quick"),
("none", "None"),
("change", "Change"),
("how_to_change", "How to change this?"),
("change_directx_explanation", "You can change the render method between DirectX 7 and DirectX 9 by clicking on the Configure button at the launch menu that comes up when you first start the game."),
("dropping_picking_up", "Dropping %s; picking up %s."),
("dropping", "Dropping %s."),
("picking_up", "Picking up %s."),
("unable_to_take", "Unable to take that."),
("age", "Age"),
("cannot_be_used_on_horseback", "Cannot be used on horseback"),
("enable_vertex_shaders2", "Render Method:"),
("screen_size2", "Screen Resolution:"),
("use_desktop_resolution2", "Use Desktop Resolution"),
("shadow_quality2", "Shadow Quality:"),
("m_low2", "Low"),
("m_high2", "High"),
("m_ultra_high2", "Ultra High"),
("off2", "Off"),
("group_header", "Class of troop"),
("group_rename", "Rename group"),
("group_1", "Infantry"),
("group_2", "Archers"),
("group_3", "Cavalry"),
("group_4", "Unnamed 1"),
("group_5", "Unnamed 2"),
("group_6", "Unnamed 3"),
("group_7", "Unnamed 4"),
("group_8", "Unnamed 5"),
("group_9", "Unnamed 6"),
("group_rename", "Rename Group"),
("group_close", "Close"),
("party_b_group_information", "%s belongs to %s group"),
("thrown_or_s", "Thrown/%s"),
("ranged_damage", "Ranged: %d%s"),
("overall_quality", "Overall Quality"),
("shader_quality", "Shader Quality:"),
("flora_lod_detail", "Tree Detail:"),
("flora_degrade_distance", "Tree Degrade Distance:"),
("antialiasing", "AntiAliasing:"),
("use_depth_effects", "Use Depth Effects"),
("hdr_mode", "HDR Mode:"),
("autoexpore", "Auto-exposure"),
("choose_profile", "Choose Profile"),
("create", "Create"),
("edit", "Edit"),
("join_game", "Join a Game"),
("host_game", "Host a Game"),
("custom", "Custom"),
("medium", "Medium"),
("male", "Male"),
("female", "Female"),
("gender", "Choose Gender:"),
("edit_profile", "Edit Profile"),
("new_profile", "New Profile"),
("enter_username", "Enter Username:"),
("invalid_username", "Usernames may only contain letters, numbers or _ - * [ ] ~ characters."),
("confirmation", "Are you sure?"),
("multiplayer", "Multiplayer"),
("server_name", "Server"),
("module_name", "Module"),
("game_type", "Game Type"),
("map_name", "Map"),
("ping", "Ping"),
("dedicated", "Dedicated"),
("number_of_players", "Players"),
("password_protected", "Password"),
("connect", "Connect"),
("local_area_network", "Local Area Network"),
("internet", "Internet"),
("favorites", "Favorites"),
("source", "Source:"),
("server_password", "Server Password:"),
("refresh", "Refresh"),
("start_search", "Start Search"),
("add_to_favorites", "Add to Favorites"),
("remove_from_favorites", "Remove from Favorites"),
("use_speedtree", "Use Speedtree"),
("use_instancing", "Use Instancing"),
("error", "Error"),
("error_server_full", "Server is full."),
("error_server_full_for_non_private", "Server is full for players without a private member password."),
("error_server_password_incorrect", "Incorrect password."),
("error_incorrect_serial", "Incorrect serial number."),
("error_incorrect_authorization_key", "Incorrect authorization key."),
("error_banned_from_server", "You are banned from this server."),
("error_username_taken", "Your profile name is used by another player."),
("error_authentication_failed", "Authentication failed."),
("unable_to_connect_to_server", "Unable to connect to server."),
("connection_to_server_is_lost", "Connection to server is lost."),
("kicked_from_server", "Kicked from server."),
("switch_to_module_question", "This server is running another module than the one you are currently running. Do you want Mount&Blade to switch to this module?"),
("download_module_question", "This server is running a module that is not installed on your computer. Would you like to visit the download site for this module now?"),
("download_mb_new_version_question", "This server is running a newer version (%d.%d%d%d) of Mount&Blade than the one you are currently running (%d.%d%d%d). Would you like to visit TaleWorlds download site now?"),
("download_mb_old_version_question", "This server is running an older version (%d.%d%d%d) of Mount&Blade and than the one you are currently running (%d.%d%d%d)."),
("download_module_new_version_question", "This server is running a newer version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d). Would you like to visit the download site for this module now?"),
("download_module_old_version_question", "This server is running an older version (%d.%d%d%d) of the current module than the one you are running (%d.%d%d%d)."),
("authenticating_with_steam", "Authenticating with Steam..."),
("validating_serial_number", "Validating serial number..."),
("scanning_lan", "Scanning local area network..."),
("retrieving_servers", "Retrieving server list..."),
("shield_size2", "Size: %dx%d"),
("click_to_view_notes", "Click to view notes"),
("retrieving_server_infos", "Retrieving information from servers (%d)..."),
("connecting_to_server", "Connecting to server..."),
("requesting_to_join_the_game", "Requesting to join the game..."),
("loading", "Loading..."),
("group_value_control", "Group %d!"),
("drop_weapon", "Drop Weapon"),
("multiplayer_message_all", "Send Message to Everyone"),
("multiplayer_message_team", "Send Message to Team"),
("command_line", "Command Line"),
("use_ranged_weapon_as_melee", "Toggle Weapon Mode"),
("send_message_all", "Send Message to Everyone"),
("send_message_team", "Send Message to Team"),
("select", "Select"),
("context_menu", "Context Menu"),
("round_starts_in_value_seconds", "Round starts in %d seconds..."),
("watching_value", "Following %s"),
("capital_spec", "SPEC"),
("capital_dead", "DEAD"),
("instancing_error1", "Could not lock Instance Buffer (size: %d), Disabled mesh-instancing (Error Code: %d)"),
("instancing_error2", "Could not fit instanced objects, Disabled mesh-instancing"),
("by_keyboard", "By movement keys"),
("combat_speed_slowest", "Slowest"),
("combat_speed_slower", "Slower"),
("combat_speed_normal", "Normal"),
("combat_speed_faster", "Faster"),
("combat_speed_fastest", "Fastest"),
("module_newer_than_application", "The module you have selected requires a newer version of the game."),
("module_older_than_application", "The module you have selected requires an older version of the game."),
("unbalanced", "Unbalanced"),
("can_crush_through_blocks", "Can crush through blocks"),
("turn_camera_with_horse", "Turn Camera with Horse in First Person:"),
("widescreen_mode_on", "Multiple Screen Mode Enabled"),
("widescreen_mode_off", "Multiple Screen Mode Disabled"),
("notification_cant_upgrade", "(Can't upgrade: not enough money)"),
("turn_never", "Never"),
("turn_ranged_only", "Ranged only"),
("turn_melee_only", "Melee only"),
("turn_always", "Always"),
("general_options", "General Options"),
("vac_enabled", "Valve Anti Cheat Enabled"),
("campaign_ai", "Campaign AI:"),
("downloading_map", "Downloading map (%d KB)"),
("download_completed", "Download completed."),
("server_filter", "Server filter"),
("has_players", "Has players"),
("is_not_full", "Not full"),
("is_password_free", "No password"),
("native_only", "Native only"),
("ping_limit", "Ping limit"),
("filter_info", "%d games and %d players filtered"),
("is_version_compatible", "Compatible with module"),
("ttnet_account", "TTNET Oyun account"),
("username", "Username"),
("password", "Password"),
("error_incorrect_username_or_password", "Incorrect username or password"),
("validating_account", "Validating account..."),
("plase_enter_your_serial_key", "Please enter your serial key"),
("texture_detail2", "Texture Detail:"),
("antialiasing2", "Antialiasing:"),
("napoleonic_key_does_not_exist", "This mod requires the Napoleonic Wars DLC to play!"),
("delete_module_workshop", "Are you sure you want to unsubscribe from this module?"),
("delete_module", "Are you sure you want to delete the module?"),
("delete_native_module", "You cannot delete native mods."),
("incompatible_module", "This server is incompatible with your current module. You can use the configuration utility to change module."),
] | 0.441914 | 0.283174 |
from enum import IntEnum
import struct
from matplotlib.patches import Ellipse
import numpy as np
class Options(IntEnum):
"""
Option flags for ROIs.
"""
SPLINE_FIT = 1
DOUBLE_HEADED = 2
OUTLINE = 4
OVERLAY_LABELS = 8
OVERLAY_NAMES = 16
OVERLAY_BACKGROUNDS = 32
OVERLAY_BOLD = 64
SUB_PIXEL_RESOLUTION = 128
DRAW_OFFSET = 256
def encode_roi(roi_type, points, file_handler):
"""
Function which encodes a single ImageJ ROI.
The following ROI types are supported:
===== =========== =========
Type Description Supported
===== =========== =========
0 Polygon Yes
1 Rect Yes
2 Oval Yes
3 Line Yes
4 Freeline Yes
5 Polyline No
6 NoRoi No
7 Freehand Yes
8 Traced No
9 Angle No
10 Point No
===== =========== =========
.. note::
This function does not support any options.
:param roi_type: The roi type
:param points: The xy coordinates
:param file_handler: A file-like object
"""
points = np.asarray(points).astype(np.int32)
y_coords = points[:, 0]
x_coords = points[:, 1]
n_coords = 0
top, left, bottom, right = 0, 0, 0, 0
x1_coord, y1_coord, x2_coord, y2_coord = 0, 0, 0, 0
if roi_type in [0, 4, 7]:
n_coords = len(points)
elif roi_type == 1:
top, bottom = np.min(y_coords), np.max(y_coords)
left, right = np.min(x_coords), np.max(x_coords)
y_coords = []
x_coords = []
elif roi_type == 2:
bottom, top = np.min(y_coords), np.max(y_coords)
left, right = np.min(x_coords), np.max(x_coords)
y_coords = []
x_coords = []
elif roi_type == 3:
y1_coord, y2_coord = y_coords
x1_coord, x2_coord = x_coords
y_coords = []
x_coords = []
data = struct.pack(
'>4shcchhhhhffffhiiihhcchii%sh' % (n_coords * 2),
b'Iout', # magic number
227, # version
bytes([roi_type]), # roi type
b'0',
top, # top
left, # left
bottom, # bottom
right, # right
n_coords, # NCoordinates
x1_coord, # x1 (straight line) | x (double rect) | size (npoints)
y1_coord, # y1 (straight line) | y (double rect) | size (npoints)
x2_coord, # x2 (straight line) | width (double rect) | size (npoints)
y2_coord, # y2 (straight line) | height (double rect) | size (npoints)
0, # stroke width
0, # ShapeRoi size
0, # stroke color
0, # fill color
0, # subtype
0, # options
bytes([0]), # arrow style or aspect ratio
bytes([0]), # arrow head size
0, # rounded rect arc size
0, # position
0, # header2 offset
*x_coords,
*y_coords
)
file_handler.write(data)
def decode_roi(file_handler):
# pylint: disable=R0914,W0612
"""
Function which decodes a single ImageJ ROI.
The following ROI types are supported:
===== ======== =========
ID Type Supported
===== ======== =========
0 Polygon Yes
1 Rect Yes
2 Oval Yes
3 Line Yes
4 Freeline Yes
5 Polyline No
6 NoRoi No
7 Freehand Yes
8 Traced No
9 Angle No
10 Point No
===== ======== =========
.. note::
This function only supports the SUB_PIXEL_RESOLUTION option.
:param file_handler: A file-like object
:return: (RoiType, Points) tuple
"""
(
magic, version, roi_type, _, top, left, bottom, right, n_coords,
x1_coord, y1_coord, x2_coord, y2_coord,
stroke_width, shape_roi_size, stroke_color, fill_color, subtype, options,
arrow_style, arrow_head_size, rect_arc_size, position, header2_offset
) = struct.unpack('>4shcchhhhhffffhiiihhcchii', file_handler.read(64))
roi_type = ord(roi_type)
if magic != b'Iout':
raise IOError('Magic number not found')
if not 0 <= roi_type < 11:
raise ValueError('roireader: ROI type %s not supported' % roi_type)
if roi_type not in [0, 1, 2, 3, 4, 7]:
raise ValueError('roireader: ROI type %s not supported' % roi_type)
if shape_roi_size > 0:
raise ValueError(
'roireader: Shape ROI size {} not supported (!= 0)'.format(shape_roi_size)
)
if subtype != 0:
raise ValueError('roireader: ROI subtype {} not supported (!= 0)'.format(subtype))
if options & Options.SUB_PIXEL_RESOLUTION:
cformat = '>%sf' % n_coords
ctype = np.float32
else:
cformat = '>%sh' % n_coords
ctype = np.int16
if roi_type in [0, 4, 7]: # polygon, freeline, freehand
points = np.empty((n_coords, 2), dtype=ctype)
points[:, 1] = struct.unpack(cformat, file_handler.read(n_coords * 2))
points[:, 0] = struct.unpack(cformat, file_handler.read(n_coords * 2))
points[:, 1] += left
points[:, 0] += top
elif roi_type == 1: # rect
points = np.empty((4, 2), dtype=ctype)
points[0, :] = (top, left)
points[1, :] = (top, right)
points[2, :] = (bottom, right)
points[3, :] = (bottom, left)
elif roi_type == 2: # oval
height = bottom - top
width = right - left
points = Ellipse(
(top + (height // 2), left + (width // 2)), height, width
).get_verts()
elif roi_type == 3: # line
points = np.empty((2, 2), dtype=ctype)
points[0, :] = (y1_coord, x1_coord)
points[1, :] = (y2_coord, x2_coord)
return (roi_type, points) | src/napari_ijroi_reader/ijroi_utils.py | from enum import IntEnum
import struct
from matplotlib.patches import Ellipse
import numpy as np
class Options(IntEnum):
"""
Option flags for ROIs.
"""
SPLINE_FIT = 1
DOUBLE_HEADED = 2
OUTLINE = 4
OVERLAY_LABELS = 8
OVERLAY_NAMES = 16
OVERLAY_BACKGROUNDS = 32
OVERLAY_BOLD = 64
SUB_PIXEL_RESOLUTION = 128
DRAW_OFFSET = 256
def encode_roi(roi_type, points, file_handler):
"""
Function which encodes a single ImageJ ROI.
The following ROI types are supported:
===== =========== =========
Type Description Supported
===== =========== =========
0 Polygon Yes
1 Rect Yes
2 Oval Yes
3 Line Yes
4 Freeline Yes
5 Polyline No
6 NoRoi No
7 Freehand Yes
8 Traced No
9 Angle No
10 Point No
===== =========== =========
.. note::
This function does not support any options.
:param roi_type: The roi type
:param points: The xy coordinates
:param file_handler: A file-like object
"""
points = np.asarray(points).astype(np.int32)
y_coords = points[:, 0]
x_coords = points[:, 1]
n_coords = 0
top, left, bottom, right = 0, 0, 0, 0
x1_coord, y1_coord, x2_coord, y2_coord = 0, 0, 0, 0
if roi_type in [0, 4, 7]:
n_coords = len(points)
elif roi_type == 1:
top, bottom = np.min(y_coords), np.max(y_coords)
left, right = np.min(x_coords), np.max(x_coords)
y_coords = []
x_coords = []
elif roi_type == 2:
bottom, top = np.min(y_coords), np.max(y_coords)
left, right = np.min(x_coords), np.max(x_coords)
y_coords = []
x_coords = []
elif roi_type == 3:
y1_coord, y2_coord = y_coords
x1_coord, x2_coord = x_coords
y_coords = []
x_coords = []
data = struct.pack(
'>4shcchhhhhffffhiiihhcchii%sh' % (n_coords * 2),
b'Iout', # magic number
227, # version
bytes([roi_type]), # roi type
b'0',
top, # top
left, # left
bottom, # bottom
right, # right
n_coords, # NCoordinates
x1_coord, # x1 (straight line) | x (double rect) | size (npoints)
y1_coord, # y1 (straight line) | y (double rect) | size (npoints)
x2_coord, # x2 (straight line) | width (double rect) | size (npoints)
y2_coord, # y2 (straight line) | height (double rect) | size (npoints)
0, # stroke width
0, # ShapeRoi size
0, # stroke color
0, # fill color
0, # subtype
0, # options
bytes([0]), # arrow style or aspect ratio
bytes([0]), # arrow head size
0, # rounded rect arc size
0, # position
0, # header2 offset
*x_coords,
*y_coords
)
file_handler.write(data)
def decode_roi(file_handler):
# pylint: disable=R0914,W0612
"""
Function which decodes a single ImageJ ROI.
The following ROI types are supported:
===== ======== =========
ID Type Supported
===== ======== =========
0 Polygon Yes
1 Rect Yes
2 Oval Yes
3 Line Yes
4 Freeline Yes
5 Polyline No
6 NoRoi No
7 Freehand Yes
8 Traced No
9 Angle No
10 Point No
===== ======== =========
.. note::
This function only supports the SUB_PIXEL_RESOLUTION option.
:param file_handler: A file-like object
:return: (RoiType, Points) tuple
"""
(
magic, version, roi_type, _, top, left, bottom, right, n_coords,
x1_coord, y1_coord, x2_coord, y2_coord,
stroke_width, shape_roi_size, stroke_color, fill_color, subtype, options,
arrow_style, arrow_head_size, rect_arc_size, position, header2_offset
) = struct.unpack('>4shcchhhhhffffhiiihhcchii', file_handler.read(64))
roi_type = ord(roi_type)
if magic != b'Iout':
raise IOError('Magic number not found')
if not 0 <= roi_type < 11:
raise ValueError('roireader: ROI type %s not supported' % roi_type)
if roi_type not in [0, 1, 2, 3, 4, 7]:
raise ValueError('roireader: ROI type %s not supported' % roi_type)
if shape_roi_size > 0:
raise ValueError(
'roireader: Shape ROI size {} not supported (!= 0)'.format(shape_roi_size)
)
if subtype != 0:
raise ValueError('roireader: ROI subtype {} not supported (!= 0)'.format(subtype))
if options & Options.SUB_PIXEL_RESOLUTION:
cformat = '>%sf' % n_coords
ctype = np.float32
else:
cformat = '>%sh' % n_coords
ctype = np.int16
if roi_type in [0, 4, 7]: # polygon, freeline, freehand
points = np.empty((n_coords, 2), dtype=ctype)
points[:, 1] = struct.unpack(cformat, file_handler.read(n_coords * 2))
points[:, 0] = struct.unpack(cformat, file_handler.read(n_coords * 2))
points[:, 1] += left
points[:, 0] += top
elif roi_type == 1: # rect
points = np.empty((4, 2), dtype=ctype)
points[0, :] = (top, left)
points[1, :] = (top, right)
points[2, :] = (bottom, right)
points[3, :] = (bottom, left)
elif roi_type == 2: # oval
height = bottom - top
width = right - left
points = Ellipse(
(top + (height // 2), left + (width // 2)), height, width
).get_verts()
elif roi_type == 3: # line
points = np.empty((2, 2), dtype=ctype)
points[0, :] = (y1_coord, x1_coord)
points[1, :] = (y2_coord, x2_coord)
return (roi_type, points) | 0.725843 | 0.335596 |
import optparse
import os
import sys
import tempfile
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build/android/gyp/util'))
import build_utils
JAVA_PACKAGE_PREFIX = 'org/chromium/'
def JarSources(src_dir, src_files, jar_path):
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.src_dir so the .java file paths in the jar are correct.
jar_cwd = src_dir
jar_path = os.path.abspath(jar_path)
if os.path.exists(jar_path):
jar_cmd = ['jar', 'uf', jar_path]
else:
jar_cmd = ['jar', 'cf', jar_path]
jar_cmd.extend(src_files)
build_utils.CheckOutput(jar_cmd, cwd=jar_cwd)
# Uncompress source jars so that they can be combined with other sources
def UnzipSourceJar(jar, unzipped_jar_path):
if os.path.exists(jar):
jar_cmd = ['jar', 'xf', os.path.abspath(jar)]
build_utils.CheckOutput(jar_cmd, cwd=unzipped_jar_path)
else:
raise Exception('Jar file does not exist %s' % jar)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--src-search-dirs', action="append",
help='A list of directories that should be searched'
' for the source files.')
parser.add_option('--src-files', action="append",
help='A list of source files to jar.')
parser.add_option('--src-jars', action="append",
help='A list of source jars to include in addition to source files.')
parser.add_option('--src-list-files', action="append",
help='A list of files that contain a list of sources,'
' e.g. a list of \'.sources\' files generated by GN.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
# A temporary directory to put the output of jar files.
unzipped_jar_path = None
if options.src_jars:
unzipped_jar_path = tempfile.mkdtemp(dir=os.path.dirname(options.jar_path))
jar_list = []
for gn_list in options.src_jars:
jar_list.extend(build_utils.ParseGnList(gn_list))
for jar in jar_list:
UnzipSourceJar(jar, unzipped_jar_path)
src_search_dirs = []
for gn_src_search_dirs in options.src_search_dirs:
src_search_dirs.extend(build_utils.ParseGnList(gn_src_search_dirs))
src_list_files = []
if options.src_list_files:
for gn_src_list_file in options.src_list_files:
src_list_files.extend(build_utils.ParseGnList(gn_src_list_file))
src_files = []
for gn_src_files in options.src_files:
src_files.extend(build_utils.ParseGnList(gn_src_files))
# Add files from --source_list_files
for src_list_file in src_list_files:
with open(src_list_file, 'r') as f:
src_files.extend(f.read().splitlines())
# Preprocess source files by removing any prefix that comes before
# the Java package name.
for i, s in enumerate(src_files):
prefix_position = s.find(JAVA_PACKAGE_PREFIX)
if prefix_position != -1:
src_files[i] = s[prefix_position:]
# Create a dictionary that maps every source directory
# to source files that it contains.
dir_to_files_map = {}
# Initialize the map.
for src_search_dir in src_search_dirs:
dir_to_files_map[src_search_dir] = []
# Fill the map.
for src_file in src_files:
number_of_file_instances = 0
for src_search_dir in src_search_dirs:
if os.path.isfile(os.path.join(src_search_dir, src_file)):
number_of_file_instances += 1
dir_to_files_map[src_search_dir].append(src_file)
if (number_of_file_instances > 1):
raise Exception(
'There is more than one instance of file %s in %s'
% (src_file, src_search_dirs))
if (number_of_file_instances < 1):
raise Exception(
'Unable to find file %s in %s' % (src_file, src_search_dirs))
# Delete the old output file if any.
if os.path.isfile(options.jar_path):
os.remove(options.jar_path)
# Jar the sources from every source search directory.
for src_search_dir in src_search_dirs:
if len(dir_to_files_map[src_search_dir]) > 0:
JarSources(src_search_dir, dir_to_files_map[src_search_dir],
options.jar_path)
else:
raise Exception(
'Directory %s does not contain any files and can be'
' removed from the list of directories to search' % src_search_dir)
# Jar additional src jars
if unzipped_jar_path:
JarSources(unzipped_jar_path, ['.'], options.jar_path)
if options.depfile:
deps = []
for src_dir in src_search_dirs:
for root, _, filenames in os.walk(src_dir):
deps.extend(os.path.join(root, f) for f in filenames)
# Srcjar deps already captured in GN rules (no need to list them here).
build_utils.WriteDepfile(options.depfile, options.jar_path, deps)
# Clean up temporary output directory.
if unzipped_jar_path:
build_utils.DeleteDirectory(unzipped_jar_path)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main()) | components/cronet/tools/jar_src.py |
import optparse
import os
import sys
import tempfile
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build/android/gyp/util'))
import build_utils
JAVA_PACKAGE_PREFIX = 'org/chromium/'
def JarSources(src_dir, src_files, jar_path):
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.src_dir so the .java file paths in the jar are correct.
jar_cwd = src_dir
jar_path = os.path.abspath(jar_path)
if os.path.exists(jar_path):
jar_cmd = ['jar', 'uf', jar_path]
else:
jar_cmd = ['jar', 'cf', jar_path]
jar_cmd.extend(src_files)
build_utils.CheckOutput(jar_cmd, cwd=jar_cwd)
# Uncompress source jars so that they can be combined with other sources
def UnzipSourceJar(jar, unzipped_jar_path):
if os.path.exists(jar):
jar_cmd = ['jar', 'xf', os.path.abspath(jar)]
build_utils.CheckOutput(jar_cmd, cwd=unzipped_jar_path)
else:
raise Exception('Jar file does not exist %s' % jar)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--src-search-dirs', action="append",
help='A list of directories that should be searched'
' for the source files.')
parser.add_option('--src-files', action="append",
help='A list of source files to jar.')
parser.add_option('--src-jars', action="append",
help='A list of source jars to include in addition to source files.')
parser.add_option('--src-list-files', action="append",
help='A list of files that contain a list of sources,'
' e.g. a list of \'.sources\' files generated by GN.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
# A temporary directory to put the output of jar files.
unzipped_jar_path = None
if options.src_jars:
unzipped_jar_path = tempfile.mkdtemp(dir=os.path.dirname(options.jar_path))
jar_list = []
for gn_list in options.src_jars:
jar_list.extend(build_utils.ParseGnList(gn_list))
for jar in jar_list:
UnzipSourceJar(jar, unzipped_jar_path)
src_search_dirs = []
for gn_src_search_dirs in options.src_search_dirs:
src_search_dirs.extend(build_utils.ParseGnList(gn_src_search_dirs))
src_list_files = []
if options.src_list_files:
for gn_src_list_file in options.src_list_files:
src_list_files.extend(build_utils.ParseGnList(gn_src_list_file))
src_files = []
for gn_src_files in options.src_files:
src_files.extend(build_utils.ParseGnList(gn_src_files))
# Add files from --source_list_files
for src_list_file in src_list_files:
with open(src_list_file, 'r') as f:
src_files.extend(f.read().splitlines())
# Preprocess source files by removing any prefix that comes before
# the Java package name.
for i, s in enumerate(src_files):
prefix_position = s.find(JAVA_PACKAGE_PREFIX)
if prefix_position != -1:
src_files[i] = s[prefix_position:]
# Create a dictionary that maps every source directory
# to source files that it contains.
dir_to_files_map = {}
# Initialize the map.
for src_search_dir in src_search_dirs:
dir_to_files_map[src_search_dir] = []
# Fill the map.
for src_file in src_files:
number_of_file_instances = 0
for src_search_dir in src_search_dirs:
if os.path.isfile(os.path.join(src_search_dir, src_file)):
number_of_file_instances += 1
dir_to_files_map[src_search_dir].append(src_file)
if (number_of_file_instances > 1):
raise Exception(
'There is more than one instance of file %s in %s'
% (src_file, src_search_dirs))
if (number_of_file_instances < 1):
raise Exception(
'Unable to find file %s in %s' % (src_file, src_search_dirs))
# Delete the old output file if any.
if os.path.isfile(options.jar_path):
os.remove(options.jar_path)
# Jar the sources from every source search directory.
for src_search_dir in src_search_dirs:
if len(dir_to_files_map[src_search_dir]) > 0:
JarSources(src_search_dir, dir_to_files_map[src_search_dir],
options.jar_path)
else:
raise Exception(
'Directory %s does not contain any files and can be'
' removed from the list of directories to search' % src_search_dir)
# Jar additional src jars
if unzipped_jar_path:
JarSources(unzipped_jar_path, ['.'], options.jar_path)
if options.depfile:
deps = []
for src_dir in src_search_dirs:
for root, _, filenames in os.walk(src_dir):
deps.extend(os.path.join(root, f) for f in filenames)
# Srcjar deps already captured in GN rules (no need to list them here).
build_utils.WriteDepfile(options.depfile, options.jar_path, deps)
# Clean up temporary output directory.
if unzipped_jar_path:
build_utils.DeleteDirectory(unzipped_jar_path)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main()) | 0.247714 | 0.097176 |
from pathlib import Path
import pandas as pd
import random
from Object.Transporter import Transporter
def transporter_data(trans_num, trans_manager, graph):
fileName = r".\Data\transporter_data.xlsx"
fileObj = Path(fileName)
if not fileObj.is_file():
trans_df = pd.read_excel(r"transporter_list.xlsx")
trans_data = zip(trans_df['size'], trans_df['availble_weight'], trans_df['work_speed'], trans_df['empty_speed'], trans_df['turn_speed'])
trans_execl = pd.DataFrame(
{
'no': [],
'size': [],
'available_weight': [],
'work_speed': [],
'empty_speed': [],
'turn_speed': []
})
no_list = []
size_list = []
available_weight_list = []
work_speed_list = []
empty_speed_list = []
turn_speed_list = []
for no in range(trans_num):
p = random.random()
if p < 0.07:
p = 0
elif p < 0.18:
p = 1
elif p < 0.29:
p = 2
elif p < 0.47:
p = 3
elif p < 0.66:
p = 4
elif p < 0.81:
p = 5
elif p < 0.92:
p = 6
else:
p = 7
model, size, a_w, w_s, e_s, t_s = trans_df.loc[p]
# print(size, type(size), type(w_s), type(a_w))
w_s, e_s, t_s = trans_mpers(w_s, e_s, t_s)
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
trans_manager.add_trans(Transporter(no, size, a_w, w_s, e_s, t_s, graph, color))
no_list.append(no)
size_list.append(size)
available_weight_list.append(a_w)
work_speed_list.append(w_s)
empty_speed_list.append(e_s)
turn_speed_list.append(t_s)
trans_execl['no'] = no_list
trans_execl['size'] = size_list
trans_execl['available_weight'] = available_weight_list
trans_execl['work_speed'] = work_speed_list
trans_execl['empty_speed'] = empty_speed_list
trans_execl['turn_speed'] = turn_speed_list
trans_execl.to_excel(fileName)
else:
# else문 실행
trans_df = pd.read_excel(r".\Data\transporter_data.xlsx")
for i in range(len(trans_df)):
# print("trans_df.loc[]: ", trans_df.loc[i])
no, size, a_w, w_s, e_s, t_s = trans_df[['no', 'size', 'available_weight', 'work_speed', 'empty_speed', 'turn_speed']].loc[i]
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
trans_manager.add_trans(Transporter(no, size, a_w, w_s, e_s, t_s, graph, color))
# 미터 퍼 세크으로 변환
def trans_mpers(w_s, e_s, t_s):
w = w_s * 1000 // 3600
e = e_s * 1000 // 3600
t = t_s * 1000 // 3600
return w, e, t | transporter/Data_create/create_trans.py | from pathlib import Path
import pandas as pd
import random
from Object.Transporter import Transporter
def transporter_data(trans_num, trans_manager, graph):
fileName = r".\Data\transporter_data.xlsx"
fileObj = Path(fileName)
if not fileObj.is_file():
trans_df = pd.read_excel(r"transporter_list.xlsx")
trans_data = zip(trans_df['size'], trans_df['availble_weight'], trans_df['work_speed'], trans_df['empty_speed'], trans_df['turn_speed'])
trans_execl = pd.DataFrame(
{
'no': [],
'size': [],
'available_weight': [],
'work_speed': [],
'empty_speed': [],
'turn_speed': []
})
no_list = []
size_list = []
available_weight_list = []
work_speed_list = []
empty_speed_list = []
turn_speed_list = []
for no in range(trans_num):
p = random.random()
if p < 0.07:
p = 0
elif p < 0.18:
p = 1
elif p < 0.29:
p = 2
elif p < 0.47:
p = 3
elif p < 0.66:
p = 4
elif p < 0.81:
p = 5
elif p < 0.92:
p = 6
else:
p = 7
model, size, a_w, w_s, e_s, t_s = trans_df.loc[p]
# print(size, type(size), type(w_s), type(a_w))
w_s, e_s, t_s = trans_mpers(w_s, e_s, t_s)
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
trans_manager.add_trans(Transporter(no, size, a_w, w_s, e_s, t_s, graph, color))
no_list.append(no)
size_list.append(size)
available_weight_list.append(a_w)
work_speed_list.append(w_s)
empty_speed_list.append(e_s)
turn_speed_list.append(t_s)
trans_execl['no'] = no_list
trans_execl['size'] = size_list
trans_execl['available_weight'] = available_weight_list
trans_execl['work_speed'] = work_speed_list
trans_execl['empty_speed'] = empty_speed_list
trans_execl['turn_speed'] = turn_speed_list
trans_execl.to_excel(fileName)
else:
# else문 실행
trans_df = pd.read_excel(r".\Data\transporter_data.xlsx")
for i in range(len(trans_df)):
# print("trans_df.loc[]: ", trans_df.loc[i])
no, size, a_w, w_s, e_s, t_s = trans_df[['no', 'size', 'available_weight', 'work_speed', 'empty_speed', 'turn_speed']].loc[i]
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
trans_manager.add_trans(Transporter(no, size, a_w, w_s, e_s, t_s, graph, color))
# 미터 퍼 세크으로 변환
def trans_mpers(w_s, e_s, t_s):
w = w_s * 1000 // 3600
e = e_s * 1000 // 3600
t = t_s * 1000 // 3600
return w, e, t | 0.106836 | 0.18228 |
import frappe
from frappe.model.document import Document
from ..lms_sketch.lms_sketch import LMSSketch
class ContestEntry(LMSSketch):
def to_dict(self, include_author=False):
owner = self.get_owner()
return {
"name": self.name,
"image_url": self.get_image_url(mode="s"),
"s_image_url": self.get_image_url(mode="s"),
"m_image_url": self.get_image_url(mode="m"),
"is_submitted": self.is_submitted,
"author": {
"full_name": owner.full_name,
"username": owner.username
}
}
@property
def title(self):
# XXX-Anand: fix this
return "Pookkalam"
@property
def sketch_id(self):
"""Returns the unique id to reference the contest sketch.
This will be x-{name}
"""
return 'x-' + self.name
@property
def runtime(self):
return "joy"
def get_ratings(self, user=None):
"""Returns the ratings for this entry from the specified user as a dict.
The `user` defaults to the current user is not specified.
The output would be in the following format:
{
"Novelty": 5,
"Color Composition": 4,
"Complexity": 3
}
The keys would be the categories under which the entry is rated on
and the value is the rating given by the user in the scale of 0 to 5.
Returns None if no rating is providied by the user for this entry.
"""
filters = {
"user": user or frappe.session.user,
"entry": self.name
}
name = frappe.db.get_value(
"Contest Entry Review",
filters=filters,
fieldname="name")
if not name:
return
review = frappe.get_doc("Contest Entry Review", name)
return {rating.category: rating.rating for rating in review.ratings}
def update_ratings(self, ratings, user=None):
"""Updates the ratings for this entry by a user.
"""
user = user or frappe.session.user
ratings_list = [{"category": k, "rating": v} for k, v in ratings.items()]
filters = {
"user": user,
"entry": self.name
}
name = frappe.db.get_value(
"Contest Entry Review",
filters=filters,
fieldname="name")
if name is None:
doc = frappe.get_doc({
"doctype": "Contest Entry Review",
"entry": self.name,
"user" : user,
})
doc.update({"ratings": ratings_list})
doc.insert(ignore_permissions=True)
else:
doc = frappe.get_doc("Contest Entry Review", name)
doc.update({"ratings": ratings_list})
doc.save(ignore_permissions=True) | mon_school/mon_school/doctype/contest_entry/contest_entry.py |
import frappe
from frappe.model.document import Document
from ..lms_sketch.lms_sketch import LMSSketch
class ContestEntry(LMSSketch):
    """A sketch submitted as a contest entry.

    Extends LMSSketch with rating support backed by the
    "Contest Entry Review" doctype.
    """

    def to_dict(self, include_author=False):
        """Return a JSON-serializable summary of this entry.

        NOTE(review): `include_author` is accepted but never used, and
        "image_url" uses mode="s" (same as "s_image_url") — presumably
        an intentional fallback to the small image; confirm.
        """
        owner = self.get_owner()
        return {
            "name": self.name,
            "image_url": self.get_image_url(mode="s"),
            "s_image_url": self.get_image_url(mode="s"),
            "m_image_url": self.get_image_url(mode="m"),
            "is_submitted": self.is_submitted,
            "author": {
                "full_name": owner.full_name,
                "username": owner.username,
            },
        }

    @property
    def title(self):
        # XXX-Anand: fix this — the contest title is hard-coded.
        return "Pookkalam"

    @property
    def sketch_id(self):
        """Return the unique id to reference the contest sketch: x-{name}."""
        return 'x-' + self.name

    @property
    def runtime(self):
        return "joy"

    def get_ratings(self, user=None):
        """Return the ratings for this entry from the specified user as a dict.

        The `user` defaults to the current session user when not
        specified. Output format:

            {"Novelty": 5, "Color Composition": 4, "Complexity": 3}

        Keys are the categories under which the entry is rated; values
        are the ratings on a 0 to 5 scale. Returns None if no rating is
        provided by the user for this entry.
        """
        filters = {
            "user": user or frappe.session.user,
            "entry": self.name,
        }
        name = frappe.db.get_value(
            "Contest Entry Review",
            filters=filters,
            fieldname="name")
        if not name:
            return
        review = frappe.get_doc("Contest Entry Review", name)
        return {rating.category: rating.rating for rating in review.ratings}

    def update_ratings(self, ratings, user=None):
        """Update the ratings for this entry by a user.

        `ratings` is a dict mapping category name to rating value.
        Creates the "Contest Entry Review" document on first review,
        otherwise overwrites the ratings on the existing review.
        """
        user = user or frappe.session.user
        ratings_list = [{"category": k, "rating": v} for k, v in ratings.items()]
        filters = {
            "user": user,
            "entry": self.name,
        }
        name = frappe.db.get_value(
            "Contest Entry Review",
            filters=filters,
            fieldname="name")
        if name is None:
            doc = frappe.get_doc({
                "doctype": "Contest Entry Review",
                "entry": self.name,
                "user": user,
            })
            doc.update({"ratings": ratings_list})
            doc.insert(ignore_permissions=True)
        else:
            doc = frappe.get_doc("Contest Entry Review", name)
            doc.update({"ratings": ratings_list})
            doc.save(ignore_permissions=True)
import requests
import json
import random
import time
# The address of your first wallet
addressA = "Fill me in!"
# The address of your second wallet
addressB = "Fill me in!"

# Wallet addresses here are exactly 99 characters long; bail out early
# if the placeholders above were not replaced.
if len(addressA) != 99 or len(addressB) != 99:
    print("Please fill in your addresses and re-run the script.")
    quit()

# walletd JSON-RPC endpoints for each wallet.
walletdPortA = "8070"
walletdAddressA = "127.0.0.1"
walletdPortB = "8071"
walletdAddressB = "127.0.0.1"

# RPC passwords (placeholders — fill in before running).
rpcPasswordA = "<PASSWORD>"
rpcPasswordB = "<PASSWORD>"
def sendTransaction(host, port, rpcPassword, **kwargs):
    """Send one transaction via walletd's JSON-RPC `sendTransaction` method.

    `kwargs` are passed through as the RPC params. Returns True on
    success, False if the RPC returned an error; the error/result is
    printed either way.
    """
    payload = {
        'jsonrpc': '2.0',
        'method': "sendTransaction",
        # BUG FIX: the original had a redacted literal (`<PASSWORD>`)
        # here, which is not valid Python; the RPC password must come
        # from the rpcPassword parameter.
        'password': rpcPassword,
        'id': 'test',
        'params': kwargs
    }
    url = 'http://' + host + ':' + port + '/json_rpc'
    response = requests.post(url, data=json.dumps(payload),
                             headers={'content-type': 'application/json'}).json()
    if 'error' in response:
        print(response['error'])
        return False
    else:
        print(response['result'])
        return True
def sendTXs(host, port, rpcPassword, sender, receiver):
    """Continuously send transactions of exponentially growing amounts
    from `sender` to `receiver`.

    On a failed send, sleeps with exponential backoff (doubling from
    1 ms) and restarts the amount ladder; the backoff resets after any
    successful send. Never returns.
    """
    def loop():
        # Amount ladder: 1000, 10000, ... up to just under 1e11.
        n = 1000
        while n < 100000000000:
            yield n
            n *= 10

    sleepAmount = 0.001
    while True:
        for i in loop():
            # give it a bit more randomness, maybe this helps
            amount = random.randint(i, i + 10000)
            params = {'transfers': [{'address': receiver, 'amount': amount}],
                      'fee': 10,
                      'anonymity': 5,
                      'changeAddress': sender}
            if not sendTransaction(host, port, rpcPassword, **params):
                time.sleep(sleepAmount)
                print("Sleeping for " + str(sleepAmount) + " seconds...")
                sleepAmount *= 2
                break
            else:
                sleepAmount = 0.001
# Ping-pong funds between the two wallets forever.
# NOTE(review): sendTXs never returns, so the second call (and every
# later iteration of this loop) is unreachable — confirm intent.
while True:
    sendTXs(walletdAddressA, walletdPortA, rpcPasswordA, sender=addressA, receiver=addressB)
    sendTXs(walletdAddressB, walletdPortB, rpcPasswordB, sender=addressB, receiver=addressA)
import json
import random
import time
# The address of your first wallet
addressA = "Fill me in!"
# The address of your second wallet
addressB = "Fill me in!"
if len(addressA) != 99 or len(addressB) != 99:
print("Please fill in your addresses and re-run the script.")
quit()
walletdPortA = "8070"
walletdAddressA = "127.0.0.1"
walletdPortB = "8071"
walletdAddressB = "127.0.0.1"
rpcPasswordA = "<PASSWORD>"
rpcPasswordB = "<PASSWORD>"
def sendTransaction(host, port, rpcPassword, **kwargs):
payload = {
'jsonrpc': '2.0',
'method': "sendTransaction",
'password': <PASSWORD>,
'id': 'test',
'params': kwargs
}
url = 'http://' + host + ':' + port + '/json_rpc'
response = requests.post(url, data=json.dumps(payload),
headers={'content-type': 'application/json'}).json()
if 'error' in response:
print(response['error'])
return False
else:
print(response['result'])
return True
def sendTXs(host, port, rpcPassword, sender, receiver):
def loop():
n = 1000
while(n < 100000000000):
yield n
n *= 10
sleepAmount = 0.001
while True:
for i in loop():
# give it a bit more randomness, maybe this helps
amount = random.randint(i, i+10000)
params = {'transfers': [{'address': receiver, 'amount': amount}],
'fee': 10,
'anonymity': 5,
'changeAddress': sender}
if not sendTransaction(host, port, rpcPassword, **params):
time.sleep(sleepAmount)
print("Sleeping for " + str(sleepAmount) + " seconds...")
sleepAmount *= 2
break
else:
sleepAmount = 0.001
while True:
sendTXs(walletdAddressA, walletdPortA, rpcPasswordA, sender=addressA, receiver=addressB)
sendTXs(walletdAddressB, walletdPortB, rpcPasswordB, sender=addressB, receiver=addressA) | 0.311217 | 0.132346 |
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer for Cherwell (CHR) polling station/address data."""

    council_id = "CHR"
    addresses_name = (
        "2021-04-16T12:59:18.049121/Re-submission - Democracy_Club__06May2021.tsv"
    )
    stations_name = (
        "2021-04-16T12:59:18.049121/Re-submission - Democracy_Club__06May2021.tsv"
    )
    elections = ["2021-05-06"]
    csv_delimiter = "\t"
    csv_encoding = "windows-1252"

    # Manual (easting, northing) corrections keyed by polling_place_id,
    # carried over from last year's corrections / confirmed with the council.
    STATION_COORD_CORRECTIONS = {
        "22810": ("445258", "240545"),  # The Peoples Church, Horsefair, Banbury
        "22911": ("456935", "222867"),  # West Bicester Community Centre, Bowmont Square
        "23154": ("435614", "237845"),  # Sibford Gower Village Hall
        "23186": ("442908", "241900"),  # Drayton Village Hall
        "23075": ("458727", "231062"),  # St Mary the Virgin Church, Cottisford
        "22985": ("449651", "212578"),  # Church of St John the Baptist, Kidlington
        "23012": ("449488", "214376"),  # Kidlington Baptist Church, High Street
        "23115": ("451146", "225741"),  # Heyford Park Community Centre (confirmed with council)
    }

    def station_record_to_dict(self, record):
        """Apply per-station corrections before the base-class conversion."""
        # Kidlington Youth Football Club: use the UPRN and drop the
        # supplied coordinates.
        if record.polling_place_id == "23006":
            record = record._replace(
                polling_place_uprn="10011878924",
                polling_place_easting="",
                polling_place_northing="",
            )
        correction = self.STATION_COORD_CORRECTIONS.get(record.polling_place_id)
        if correction:
            easting, northing = correction
            record = record._replace(
                polling_place_easting=easting,
                polling_place_northing=northing,
            )
        # Pingle Field Pavilion, Pingle Field, Bicester: fix the postcode.
        if record.polling_place_id == "22904":
            record = record._replace(polling_place_postcode="OX26 6AU")
        return super().station_record_to_dict(record)

    def address_record_to_dict(self, record):
        """Drop known-bad address records, then defer to the base class."""
        # Postcodes split across polling districts.
        if record.addressline6 in {
            "OX16 9JU",
            "OX16 9QF",
            "OX16 5AW",
            "OX16 2BN",
            "OX16 2AS",
            "OX26 3EB",
            "OX26 3EZ",
            "OX26 6BP",
            "OX5 3DJ",
            "OX25 3QU",
            "OX5 1LZ",
            "OX5 1AJ",
            "OX27 7AE",
        }:
            return None  # split
        uprn = record.property_urn.strip().lstrip("0")
        # Long-distance UPRNs that cross another polling district.
        if uprn in {
            # Group 1
            "10011909033",
            "10011917099",
            # Group 2
            "10011924957",
            "10011924949",
            "10011924948",
            "10011924947",
            "10011924946",
            # Group 3
            "10011927441",
            "10011927442",
            "10011927443",
            "10011927445",
            "10011927446",
            "10011927448",
            "10011931230",
            # Group 4
            "10011889761",
        }:
            return None  # long distance; crosses another polling district
        return super().address_record_to_dict(record)
return super().address_record_to_dict(record) | polling_stations/apps/data_importers/management/commands/import_cherwell.py | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "CHR"
addresses_name = (
"2021-04-16T12:59:18.049121/Re-submission - Democracy_Club__06May2021.tsv"
)
stations_name = (
"2021-04-16T12:59:18.049121/Re-submission - Democracy_Club__06May2021.tsv"
)
elections = ["2021-05-06"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def station_record_to_dict(self, record):
# Kidlington Youth Football Club
if record.polling_place_id == "23006":
record = record._replace(
polling_place_uprn="10011878924",
polling_place_easting="",
polling_place_northing="",
)
# Following updates based on last year's corrections
# The Peoples Church, Horsefair, Banbury, Oxon
if record.polling_place_id == "22810":
record = record._replace(polling_place_easting="445258")
record = record._replace(polling_place_northing="240545")
# West Bicester Community Centre, Bowmont Square, Bicester, Oxon
if record.polling_place_id == "22911":
record = record._replace(polling_place_easting="456935")
record = record._replace(polling_place_northing="222867")
# Sibford Gower Village Hall
if record.polling_place_id == "23154":
record = record._replace(polling_place_easting="435614")
record = record._replace(polling_place_northing="237845")
# Drayton Village Hall
if record.polling_place_id == "23186":
record = record._replace(polling_place_easting="442908")
record = record._replace(polling_place_northing="241900")
# St Mary the Virgin Church, Cottisford, Brackley, Northants
if record.polling_place_id == "23075":
record = record._replace(polling_place_easting="458727")
record = record._replace(polling_place_northing="231062")
# The Church of St John the Baptist, Broadway, Kidlington, Oxon
if record.polling_place_id == "22985":
record = record._replace(polling_place_easting="449651")
record = record._replace(polling_place_northing="212578")
# Kidlington Baptist Church, High Street, Kidlington, Oxon
if record.polling_place_id == "23012":
record = record._replace(polling_place_easting="449488")
record = record._replace(polling_place_northing="214376")
# Confirmed with Council
# Heyford Park Community Centre, Brice Road, Upper Heyford
if record.polling_place_id == "23115":
record = record._replace(polling_place_easting="451146")
record = record._replace(polling_place_northing="225741")
# Pingle Field Pavilion, Pingle Field, Bicester
if record.polling_place_id == "22904":
record = record._replace(polling_place_postcode="OX26 6AU")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
if record.addressline6 in [
"OX16 9JU",
"OX16 9QF",
"OX16 5AW",
"OX16 2BN",
"OX16 2AS",
"OX26 3EB",
"OX26 3EZ",
"OX26 6BP",
"OX5 3DJ",
"OX25 3QU",
"OX5 1LZ",
"OX5 1AJ",
"OX27 7AE",
]:
return None # split
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
# Group 1
"10011909033",
"10011917099",
# Group 2
"10011924957",
"10011924949",
"10011924948",
"10011924947",
"10011924946",
# Group 3
"10011927441",
"10011927442",
"10011927443",
"10011927445",
"10011927446",
"10011927448",
"10011931230",
# Group 4
"10011889761",
]:
return None # long distance; crosses another polling district
return super().address_record_to_dict(record) | 0.477554 | 0.308594 |
import logging
import os
import pathlib
import pickle
import numpy as np
import pandas as pd
from datasketch import MinHashLSHForest, MinHash, MinHashLSH, LeanMinHash
import src.QueryDatabase as queryDatabase
# Number of permutation functions used for every MinHash
# (accuracy vs. speed/size trade-off).
NUM_PERM = 128
logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
def tokenize(values):
    """Tokenize each string in *values*.

    Lower-cases each non-None value, splits it on whitespace, and
    returns the set of all tokens across every value.
    """
    tokens = set()
    for value in values:
        if value is not None:
            tokens.update(value.lower().split())
    return tokens
def build_lsh_forest(columns, override=False):
    """Build (or load a cached) MinHash LSH forest for top-k Jaccard queries.

    Each column is keyed as "table.column". The forest is pickled to
    results/forest.obj under WORKING_DIRECTORY; set override=True to
    force a rebuild.
    """
    file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/forest.obj'
    if override or not os.path.isfile(file_path):
        forest = MinHashLSHForest(num_perm=NUM_PERM)
        for column in columns:
            forest.add(f'{column["table"]}.{column["column"]}', deserialize_minhash(column))
        # index() must be called before the forest can be queried.
        forest.index()
        with open(file_path, 'wb') as file:
            pickle.dump(forest, file)
        return forest
    with open(file_path, 'rb') as file:
        return pickle.load(file)
def build_minhash_lsh(columns, threshold=0.5, override=False):
    """Build (or load a cached) MinHash LSH for finding columns whose
    Jaccard similarity exceeds *threshold*.

    Cached per-threshold at results/lsh_{threshold}.obj; set
    override=True to force a rebuild.
    """
    file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/lsh_{threshold}.obj'
    if override or not os.path.isfile(file_path):
        minhash_lsh = MinHashLSH(threshold=threshold, num_perm=NUM_PERM)
        for column in columns:
            minhash_lsh.insert(f'{column["table"]}.{column["column"]}', deserialize_minhash(column))
        with open(file_path, 'wb') as file:
            pickle.dump(minhash_lsh, file)
        return minhash_lsh
    with open(file_path, 'rb') as file:
        return pickle.load(file)
def get_top_k(forest, column, k=5):
    """Return up to *k* column keys with the best Jaccard similarity to
    *column*, queried against a MinHashLSHForest."""
    minhash = deserialize_minhash(column)
    return forest.query(minhash, k)
def get_all_similar_columns(minhash_lsh, column):
    """Return all column keys whose Jaccard similarity to *column*
    exceeds the threshold the MinHashLSH was built with."""
    minhash = deserialize_minhash(column)
    return minhash_lsh.query(minhash)
def calculate_jaccard_similarity(override=False):
    """Calculate the Jaccard similarity between all pairs of STRING columns.

    Returns a square DataFrame indexed/labelled by "table.column".
    The result is pickled to results/jaccard.obj; set override=True to
    force recomputation.
    """
    columns = queryDatabase.get_columns('STRING')
    file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/jaccard.obj'
    if override or not os.path.isfile(file_path):
        minhash_list = [deserialize_minhash(column) for column in columns]
        n = len(columns)
        matrix = np.zeros((n, n))
        # The similarity matrix is symmetric, so only compute the upper
        # triangle and mirror it (halves the jaccard() calls).
        for i in range(n):
            for j in range(i, n):
                matrix[i][j] = matrix[j][i] = minhash_list[i].jaccard(minhash_list[j])
        df = pd.DataFrame(matrix)
        column_names = [f'{column["table"]}.{column["column"]}' for column in columns]
        df.columns = column_names
        df.index = column_names
        with open(file_path, 'wb') as file:
            pickle.dump(df, file)
        return df
    with open(file_path, 'rb') as file:
        return pickle.load(file)
def serialize_min_hash(columns, override=False):
    """Compute and write a LeanMinHash binary file for each column.

    Files go to results/minhashes/{table}.{column}.txt; existing files
    are skipped unless override=True.
    """
    for column in columns:
        output_file = f'{os.environ["WORKING_DIRECTORY"]}/results/minhashes/{column["table"]}.{column["column"]}.txt'
        if os.path.isfile(output_file) and not override:
            continue
        # NOTE(review): get_distnct_column_values is the (misspelled)
        # name exported by QueryDatabase; kept as-is for compatibility.
        values = queryDatabase.get_distnct_column_values(column['table'], column)
        tokens = tokenize(values)
        minhash = MinHash(num_perm=NUM_PERM)
        for token in tokens:
            minhash.update(token.encode('utf8'))
        # LeanMinHash is a compact, serializable form of the MinHash.
        lean_minhash = LeanMinHash(minhash)
        buf = bytearray(lean_minhash.bytesize())
        lean_minhash.serialize(buf)
        with open(output_file, 'wb') as file:
            file.write(buf)
        print(f'Serialization is complete for {column["table"]}.{column["column"]}.')
def deserialize_minhash(column):
    """Load the LeanMinHash for the given column, creating the binary
    file on demand via serialize_min_hash if it does not exist yet."""
    file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/minhashes/{column["table"]}.{column["column"]}.txt'
    if not os.path.isfile(file_path):
        serialize_min_hash([column])
    with open(file_path, 'rb') as file:
        return LeanMinHash.deserialize(bytearray(file.read()))
def main():
    """Demo driver: build the forest and LSH over a few STRING columns
    and print example queries for the first column."""
    string_columns = queryDatabase.get_columns('STRING', limit=10)
    forest = build_lsh_forest(string_columns, override=True)
    minhash_lsh = build_minhash_lsh(string_columns)
    print(forest.query(deserialize_minhash(string_columns[0]), 10))
    print(minhash_lsh.query(deserialize_minhash(string_columns[0])))
if __name__ == '__main__':
    # Project root = parent of this file's directory.
    os.environ["WORKING_DIRECTORY"] = f'{pathlib.Path(__file__).parent.parent}'
    # NOTE(review): hard-coded, user-specific credentials path — move to
    # configuration or an environment variable set outside the repo.
    os.environ[
        'GOOGLE_APPLICATION_CREDENTIALS'] = 'C:/Users/public.DESKTOP-5H03UEQ/Documents/IntroDB-35dbe741f4c7.json'
    main()
import os
import pathlib
import pickle
import numpy as np
import pandas as pd
from datasketch import MinHashLSHForest, MinHash, MinHashLSH, LeanMinHash
import src.QueryDatabase as queryDatabase
NUM_PERM = 128
logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
def tokenize(values):
"""
Tokenize each string in the values array. Return a flattened array of tokens.
@param values:
@return:
"""
tokens = set()
# nlp = spacy.load('en_core_web_sm', disable=['tagger', 'parser', 'ner'])
for value in values:
if value is not None:
# tokens.extend(list(nlp(value)))
tokens.update(value.lower().split())
return tokens
def build_lsh_forest(columns, override=False):
"""
Builds a minHash LSH forest which can be used to query top-k columns with maximum Jaccard similarity
@param override:
@param columns:
@return:
"""
file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/forest.obj'
if override or not os.path.isfile(file_path):
forest = MinHashLSHForest(num_perm=NUM_PERM)
for column in columns:
forest.add(f'{column["table"]}.{column["column"]}', deserialize_minhash(column))
forest.index()
with open(file_path, 'wb') as file:
pickle.dump(forest, file)
return forest
with open(file_path, 'rb') as file:
forest = pickle.load(file)
return forest
def build_minhash_lsh(columns, threshold=0.5, override=False):
"""
Builds a minhash LSH which can be used to find columns with Jaccard similarity greater than threshold.
@param override:
@param columns:
@param threshold:
@return:
"""
file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/lsh_{threshold}.obj'
if override or not os.path.isfile(file_path):
minhash_lsh = MinHashLSH(threshold=threshold, num_perm=NUM_PERM)
for column in columns:
minhash_lsh.insert(f'{column["table"]}.{column["column"]}', deserialize_minhash(column))
with open(file_path, 'wb') as file:
pickle.dump(minhash_lsh, file)
return minhash_lsh
with open(file_path, 'rb') as file:
minhash_lsh = pickle.load(file)
return minhash_lsh
def get_top_k(forest, column, k=5):
"""
Get top k columns with best Jaccard similarity
@param forest:
@param column:
@param k:
@return:
"""
minhash = deserialize_minhash(column)
return forest.query(minhash, k)
def get_all_similar_columns(minhash_lsh, column):
"""
Get all similar columns (with Jaccard similarity greater than a threshold value)
@param minhash_lsh:
@param column:
@return:
"""
minhash = deserialize_minhash(column)
return minhash_lsh.query(minhash)
def calculate_jaccard_similarity(override=False):
"""
Calculates Jaccrd similarity between all pairs of columns
@type override:
@return:
"""
columns = queryDatabase.get_columns('STRING')
file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/jaccard.obj'
if override or not os.path.isfile(file_path):
minhash_list = []
for column in columns:
minhash_list.append(deserialize_minhash(column))
n = len(columns)
matrix = np.zeros((n, n))
for i in range(n):
for j in range(n):
matrix[i][j] = minhash_list[i].jaccard(minhash_list[j])
df = pd.DataFrame(matrix)
column_names = list(map(lambda column: column['table'] + "." + column['column'], columns))
df.columns = column_names
df.index = column_names
with open(file_path, 'wb') as file:
pickle.dump(df, file)
return df
with open(file_path, 'rb') as file:
df = pickle.load(file)
return df
def serialize_min_hash(columns, override=False):
"""
Writes min hash values to local files
@param override:
@param columns:
@return:
"""
for column in columns:
output_file = f'{os.environ["WORKING_DIRECTORY"]}/results/minhashes/{column["table"]}.{column["column"]}.txt'
if os.path.isfile(output_file) and not override:
continue
values = queryDatabase.get_distnct_column_values(column['table'], column)
tokens = tokenize(values)
minhash = MinHash(num_perm=NUM_PERM)
for token in tokens:
minhash.update(token.encode('utf8'))
leanMinHash = LeanMinHash(minhash)
buf = bytearray(leanMinHash.bytesize())
leanMinHash.serialize(buf)
with open(output_file, 'wb') as file:
file.write(buf)
print(f'Serialization is complete for {column["table"]}.{column["column"]}.')
return
def deserialize_minhash(column):
"""
Deserializes minhash binary file for the given column and returns the minhash
@param column:
@return:
"""
file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/minhashes/{column["table"]}.{column["column"]}.txt'
if not os.path.isfile(file_path):
serialize_min_hash([column])
with open(file_path, 'rb') as file:
minhash = LeanMinHash.deserialize(bytearray(file.read()))
return minhash
def main():
string_columns = queryDatabase.get_columns('STRING', limit=10)
# forest = build_lsh_forest(string_columns)
# lsh = build_minhash_lsh(string_columns, 0.7)
# print(
# f'top 10 similar columns to bigquery-public-data.covid19_aha.hospital_beds.state_name: '
# f'{get_top_k(forest, "bigquery-public-data.covid19_aha.hospital_beds.state_name")}')
# print()
# print(calculate_jaccard_similarity(string_columns))
# serialize_min_hash(string_columns)
# minhash1 = deserialize_minhash(string_columns[0])
# minhash2 = deserialize_minhash(string_columns[1])
# print(minhash1.jaccard(minhash2))
forest = build_lsh_forest(string_columns, override=True)
minhash_lsh = build_minhash_lsh(string_columns)
print(forest.query(deserialize_minhash(string_columns[0]), 10))
print(minhash_lsh.query(deserialize_minhash(string_columns[0])))
if __name__ == '__main__':
os.environ["WORKING_DIRECTORY"] = f'{pathlib.Path(__file__).parent.parent}'
os.environ[
'GOOGLE_APPLICATION_CREDENTIALS'] = 'C:/Users/public.DESKTOP-5H03UEQ/Documents/IntroDB-35dbe741f4c7.json'
main() | 0.591133 | 0.264317 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AdvancedFilter(Base):
    """A user-managed advancedFilter node in the ixnetwork hierarchy.

    An instance is obtained via the AdvancedFilter property of a parent
    instance. The internal properties list is empty until populated
    from the server using the find method, and is managed by the user
    with the add and remove methods.
    """
    _SDM_NAME = 'advancedFilter'

    def __init__(self, parent):
        super(AdvancedFilter, self).__init__(parent)

    @property
    def Expression(self):
        """str: The filter body. Must follow a specific format; may be
        empty (no filter). The available operations and statistics can
        be obtained from availableAdvancedFilterOptions."""
        return self._get_attribute('expression')

    @Expression.setter
    def Expression(self, value):
        self._set_attribute('expression', value)

    @property
    def Name(self):
        """str: The filter name. Must be unique per view."""
        return self._get_attribute('name')

    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)

    @property
    def SortingStats(self):
        """str: The list of statistics by which the view will be sorted."""
        return self._get_attribute('sortingStats')

    @SortingStats.setter
    def SortingStats(self, value):
        self._set_attribute('sortingStats', value)

    @property
    def TrackingFilterId(self):
        """str: The id of the filter, used to add the filter to a view.

        Href form: None or
        /api/v1/sessions/1/ixnetwork/statistics?deepchild=availableAdvancedFilters
        """
        return self._get_attribute('trackingFilterId')

    @TrackingFilterId.setter
    def TrackingFilterId(self, value):
        self._set_attribute('trackingFilterId', value)

    def add(self, Expression=None, Name=None, SortingStats=None, TrackingFilterId=None):
        """Add a new advancedFilter node on the server and retrieve it
        into this instance.

        Returns:
            self: with all currently retrieved advancedFilter data plus
                the newly added data, available via iterator or index.
        Raises:
            ServerError: The server has encountered an uncategorized
                error condition.
        """
        return self._create(locals())

    def remove(self):
        """Delete all the advancedFilter data in this instance from the server.

        Raises:
            NotFoundError: The requested resource does not exist on the server.
            ServerError: The server has encountered an uncategorized error condition.
        """
        self._delete()

    def find(self, Expression=None, Name=None, SortingStats=None, TrackingFilterId=None):
        """Find and retrieve advancedFilter data from the server.

        All named parameters support regex and selectively filter the
        retrieved data; with no parameters, all advancedFilter data is
        retrieved.

        Returns:
            self: with matching data available via iterator or index.
        Raises:
            ServerError: The server has encountered an uncategorized
                error condition.
        """
        return self._select(locals())

    def read(self, href):
        """Retrieve a single instance of advancedFilter data by *href*.

        Raises:
            NotFoundError: The requested resource does not exist on the server.
            ServerError: The server has encountered an uncategorized error condition.
        """
        return self._read(href)
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AdvancedFilter(Base):
"""The AdvancedFilter class encapsulates a user managed advancedFilter node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the AdvancedFilter property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'advancedFilter'
def __init__(self, parent):
super(AdvancedFilter, self).__init__(parent)
@property
def Expression(self):
"""Specifies the filter body. This is a string that must have a specific format.This can be empty (no filter). The available operations and statistics can be obtained from availableAdvancedFilterOptions.
Returns:
str
"""
return self._get_attribute('expression')
@Expression.setter
def Expression(self, value):
self._set_attribute('expression', value)
@property
def Name(self):
"""Specifies the filter name. It must be unique per view.
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def SortingStats(self):
"""Specifies the list of statistics by which the view will be sorted.
Returns:
str
"""
return self._get_attribute('sortingStats')
@SortingStats.setter
def SortingStats(self, value):
self._set_attribute('sortingStats', value)
@property
def TrackingFilterId(self):
"""Gets the id of the filter, which is used to add the filter to a view.
Returns:
str(None|/api/v1/sessions/1/ixnetwork/statistics?deepchild=availableAdvancedFilters)
"""
return self._get_attribute('trackingFilterId')
@TrackingFilterId.setter
def TrackingFilterId(self, value):
self._set_attribute('trackingFilterId', value)
def add(self, Expression=None, Name=None, SortingStats=None, TrackingFilterId=None):
"""Adds a new advancedFilter node on the server and retrieves it in this instance.
Args:
Expression (str): Specifies the filter body. This is a string that must have a specific format.This can be empty (no filter). The available operations and statistics can be obtained from availableAdvancedFilterOptions.
Name (str): Specifies the filter name. It must be unique per view.
SortingStats (str): Specifies the list of statistics by which the view will be sorted.
TrackingFilterId (str(None|/api/v1/sessions/1/ixnetwork/statistics?deepchild=availableAdvancedFilters)): Gets the id of the filter, which is used to add the filter to a view.
Returns:
self: This instance with all currently retrieved advancedFilter data using find and the newly added advancedFilter data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the advancedFilter data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Expression=None, Name=None, SortingStats=None, TrackingFilterId=None):
"""Finds and retrieves advancedFilter data from the server.
All named parameters support regex and can be used to selectively retrieve advancedFilter data from the server.
By default the find method takes no parameters and will retrieve all advancedFilter data from the server.
Args:
Expression (str): Specifies the filter body. This is a string that must have a specific format.This can be empty (no filter). The available operations and statistics can be obtained from availableAdvancedFilterOptions.
Name (str): Specifies the filter name. It must be unique per view.
SortingStats (str): Specifies the list of statistics by which the view will be sorted.
TrackingFilterId (str(None|/api/v1/sessions/1/ixnetwork/statistics?deepchild=availableAdvancedFilters)): Gets the id of the filter, which is used to add the filter to a view.
Returns:
self: This instance with matching advancedFilter data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of advancedFilter data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the advancedFilter data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href) | 0.867036 | 0.321713 |
from collections import OrderedDict
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from models.psgn_fc import PSGN_CONV, PSGN_FC
from models.projection import Projector
from models.edge_detection import EdgeDetector
from losses.proj_losses import *
import utils.network_utils
class Pixel2Pointcloud_PSGN_FC(nn.Module):
def __init__(self, cfg, optimizer_conv=None, optimizer_fc=None, scheduler=None):
super().__init__()
self.cfg = cfg
# PSGN FC
self.psgn_conv = PSGN_CONV(self.cfg)
self.psgn_fc = PSGN_FC(self.cfg)
self.optimizer_conv = None if optimizer_conv is None else optimizer_conv(self.psgn_conv.parameters())
self.optimizer_fc = None if optimizer_fc is None else optimizer_fc(self.psgn_fc.parameters())
self.scheduler = None if scheduler or optimizer is None else scheduler(self.optimizer)
# 2D supervision part
self.projector = Projector(self.cfg)
# proj loss
self.proj_loss = ProjectLoss(self.cfg)
if torch.cuda.is_available():
self.psgn_conv = torch.nn.DataParallel(self.psgn_conv, device_ids=cfg.CONST.DEVICE).cuda()
self.psgn_fc = torch.nn.DataParallel(self.psgn_fc, device_ids=cfg.CONST.DEVICE).cuda()
self.projector = torch.nn.DataParallel(self.projector, device_ids=cfg.CONST.DEVICE).cuda()
self.proj_loss = torch.nn.DataParallel(self.proj_loss, device_ids=cfg.CONST.DEVICE).cuda()
self.cuda()
# edge detector
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
self.edge_detector = EdgeDetector(self.cfg)
self.edge_proj_loss = ProjectLoss(self.cfg)
if torch.cuda.is_available():
self.edge_detector = torch.nn.DataParallel(self.edge_detector, device_ids=cfg.CONST.DEVICE).cuda()
self.edge_proj_loss = torch.nn.DataParallel(self.edge_proj_loss, device_ids=cfg.CONST.DEVICE).cuda()
def forward(self, input):
conv_features = self.psgn_conv(input)
points = self.psgn_fc(conv_features)
return points
def loss(self, input, init_pc, view_az, view_el, proj_gt, edge_gt):
pred_pc = self(input)
grid_dist_np = grid_dist(grid_h=self.cfg.PROJECTION.GRID_H, grid_w=self.cfg.PROJECTION.GRID_W).astype(np.float32)
grid_dist_tensor = utils.network_utils.var_or_cuda(torch.from_numpy(grid_dist_np))
# Use 2D projection loss to train
proj_pred = {}
loss_bce = {}
fwd = {}
bwd = {}
loss_fwd = {}
loss_bwd = {}
loss = 0.
if not self.cfg.SUPERVISION_2D.USE_2D_LOSS:
loss_2d = torch.tensor(loss_2d)
# For edge loss
edge_proj_pred = {}
edge_loss_bce = {}
edge_fwd = {}
edge_bwd = {}
edge_loss_fwd = {}
edge_loss_bwd = {}
edge_loss = 0.
for idx in range(0, self.cfg.PROJECTION.NUM_VIEWS):
# Projection
proj_pred[idx] = self.projector(pred_pc, view_az[:,idx], view_el[:,idx])
# Projection loss
loss_bce[idx], fwd[idx], bwd[idx] = self.proj_loss(preds=proj_pred[idx], gts=proj_gt[:,idx], grid_dist_tensor=grid_dist_tensor)
loss_fwd[idx] = 1e-4 * torch.mean(fwd[idx])
loss_bwd[idx] = 1e-4 * torch.mean(bwd[idx])
# Loss = projection loss + edge projection loss
loss += self.cfg.PROJECTION.LAMDA_BCE * torch.mean(loss_bce[idx]) +\
self.cfg.PROJECTION.LAMDA_AFF_FWD * loss_fwd[idx] +\
self.cfg.PROJECTION.LAMDA_AFF_BWD * loss_bwd[idx]
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
# Edge prediction of projection
proj_pred[idx] = proj_pred[idx].unsqueeze(1) # (BS, 1, H, W)
edge_proj_pred[idx] = self.edge_detector(img=proj_pred[idx])
edge_proj_pred[idx] = edge_proj_pred[idx].squeeze(1) # (BS, H, W)
# Edge projection loss
edge_loss_bce[idx], edge_fwd[idx], edge_bwd[idx] = self.proj_loss(preds=edge_proj_pred[idx], gts=edge_gt[:,idx], grid_dist_tensor=grid_dist_tensor)
edge_loss_fwd[idx] = 1e-4 * torch.mean(edge_fwd[idx])
edge_loss_bwd[idx] = 1e-4 * torch.mean(edge_bwd[idx])
edge_loss += self.cfg.PROJECTION.LAMDA_BCE * torch.mean(edge_loss_bce[idx]) +\
self.cfg.PROJECTION.LAMDA_AFF_FWD * edge_loss_fwd[idx] +\
self.cfg.PROJECTION.LAMDA_AFF_BWD * edge_loss_bwd[idx]
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
total_loss = ((loss + edge_loss*self.cfg.EDGE_LOSS.LAMDA_EDGE_LOSS) / self.cfg.PROJECTION.NUM_VIEWS)
else:
total_loss = loss / self.cfg.PROJECTION.NUM_VIEWS
return total_loss, pred_pc
def learn(self, input, init_pc, view_az, view_el, proj_gt, edge_gt):
self.train(True)
self.psgn_conv.zero_grad()
self.psgn_fc.zero_grad()
total_loss, _ = self.loss(input, init_pc, view_az, view_el, proj_gt, edge_gt)
total_loss.backward()
self.optimizer_conv.step()
self.optimizer_fc.step()
total_loss_np = total_loss.detach().item()
del total_loss
return total_loss_np | models/networks_psgn.py |
from collections import OrderedDict
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from models.psgn_fc import PSGN_CONV, PSGN_FC
from models.projection import Projector
from models.edge_detection import EdgeDetector
from losses.proj_losses import *
import utils.network_utils
class Pixel2Pointcloud_PSGN_FC(nn.Module):
def __init__(self, cfg, optimizer_conv=None, optimizer_fc=None, scheduler=None):
super().__init__()
self.cfg = cfg
# PSGN FC
self.psgn_conv = PSGN_CONV(self.cfg)
self.psgn_fc = PSGN_FC(self.cfg)
self.optimizer_conv = None if optimizer_conv is None else optimizer_conv(self.psgn_conv.parameters())
self.optimizer_fc = None if optimizer_fc is None else optimizer_fc(self.psgn_fc.parameters())
self.scheduler = None if scheduler or optimizer is None else scheduler(self.optimizer)
# 2D supervision part
self.projector = Projector(self.cfg)
# proj loss
self.proj_loss = ProjectLoss(self.cfg)
if torch.cuda.is_available():
self.psgn_conv = torch.nn.DataParallel(self.psgn_conv, device_ids=cfg.CONST.DEVICE).cuda()
self.psgn_fc = torch.nn.DataParallel(self.psgn_fc, device_ids=cfg.CONST.DEVICE).cuda()
self.projector = torch.nn.DataParallel(self.projector, device_ids=cfg.CONST.DEVICE).cuda()
self.proj_loss = torch.nn.DataParallel(self.proj_loss, device_ids=cfg.CONST.DEVICE).cuda()
self.cuda()
# edge detector
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
self.edge_detector = EdgeDetector(self.cfg)
self.edge_proj_loss = ProjectLoss(self.cfg)
if torch.cuda.is_available():
self.edge_detector = torch.nn.DataParallel(self.edge_detector, device_ids=cfg.CONST.DEVICE).cuda()
self.edge_proj_loss = torch.nn.DataParallel(self.edge_proj_loss, device_ids=cfg.CONST.DEVICE).cuda()
def forward(self, input):
conv_features = self.psgn_conv(input)
points = self.psgn_fc(conv_features)
return points
def loss(self, input, init_pc, view_az, view_el, proj_gt, edge_gt):
pred_pc = self(input)
grid_dist_np = grid_dist(grid_h=self.cfg.PROJECTION.GRID_H, grid_w=self.cfg.PROJECTION.GRID_W).astype(np.float32)
grid_dist_tensor = utils.network_utils.var_or_cuda(torch.from_numpy(grid_dist_np))
# Use 2D projection loss to train
proj_pred = {}
loss_bce = {}
fwd = {}
bwd = {}
loss_fwd = {}
loss_bwd = {}
loss = 0.
if not self.cfg.SUPERVISION_2D.USE_2D_LOSS:
loss_2d = torch.tensor(loss_2d)
# For edge loss
edge_proj_pred = {}
edge_loss_bce = {}
edge_fwd = {}
edge_bwd = {}
edge_loss_fwd = {}
edge_loss_bwd = {}
edge_loss = 0.
for idx in range(0, self.cfg.PROJECTION.NUM_VIEWS):
# Projection
proj_pred[idx] = self.projector(pred_pc, view_az[:,idx], view_el[:,idx])
# Projection loss
loss_bce[idx], fwd[idx], bwd[idx] = self.proj_loss(preds=proj_pred[idx], gts=proj_gt[:,idx], grid_dist_tensor=grid_dist_tensor)
loss_fwd[idx] = 1e-4 * torch.mean(fwd[idx])
loss_bwd[idx] = 1e-4 * torch.mean(bwd[idx])
# Loss = projection loss + edge projection loss
loss += self.cfg.PROJECTION.LAMDA_BCE * torch.mean(loss_bce[idx]) +\
self.cfg.PROJECTION.LAMDA_AFF_FWD * loss_fwd[idx] +\
self.cfg.PROJECTION.LAMDA_AFF_BWD * loss_bwd[idx]
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
# Edge prediction of projection
proj_pred[idx] = proj_pred[idx].unsqueeze(1) # (BS, 1, H, W)
edge_proj_pred[idx] = self.edge_detector(img=proj_pred[idx])
edge_proj_pred[idx] = edge_proj_pred[idx].squeeze(1) # (BS, H, W)
# Edge projection loss
edge_loss_bce[idx], edge_fwd[idx], edge_bwd[idx] = self.proj_loss(preds=edge_proj_pred[idx], gts=edge_gt[:,idx], grid_dist_tensor=grid_dist_tensor)
edge_loss_fwd[idx] = 1e-4 * torch.mean(edge_fwd[idx])
edge_loss_bwd[idx] = 1e-4 * torch.mean(edge_bwd[idx])
edge_loss += self.cfg.PROJECTION.LAMDA_BCE * torch.mean(edge_loss_bce[idx]) +\
self.cfg.PROJECTION.LAMDA_AFF_FWD * edge_loss_fwd[idx] +\
self.cfg.PROJECTION.LAMDA_AFF_BWD * edge_loss_bwd[idx]
if self.cfg.EDGE_LOSS.USE_EDGE_LOSS:
total_loss = ((loss + edge_loss*self.cfg.EDGE_LOSS.LAMDA_EDGE_LOSS) / self.cfg.PROJECTION.NUM_VIEWS)
else:
total_loss = loss / self.cfg.PROJECTION.NUM_VIEWS
return total_loss, pred_pc
def learn(self, input, init_pc, view_az, view_el, proj_gt, edge_gt):
self.train(True)
self.psgn_conv.zero_grad()
self.psgn_fc.zero_grad()
total_loss, _ = self.loss(input, init_pc, view_az, view_el, proj_gt, edge_gt)
total_loss.backward()
self.optimizer_conv.step()
self.optimizer_fc.step()
total_loss_np = total_loss.detach().item()
del total_loss
return total_loss_np | 0.938491 | 0.308985 |
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionDataTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
DimensionData.connectionCls.conn_classes = (None, DimensionDataMockHttp)
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
try:
self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
except ValueError:
pass
def test_invalid_creds(self):
DimensionDataMockHttp.type = 'UNAUTHORIZED'
try:
self.driver.list_nodes()
self.assertTrue(
False) # Above command should have thrown an InvalidCredsException
except InvalidCredsError:
pass
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_node = ret[0]
self.assertEqual(first_node.id, 'NA3')
self.assertEqual(first_node.name, 'US - West')
self.assertEqual(first_node.country, 'US')
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 3)
def test_list_sizes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
size = ret[0]
self.assertEqual(size.name, 'default')
def test_reboot_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.reboot()
self.assertTrue(ret is True)
def test_reboot_node_response_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
node.reboot()
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_destroy_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.destroy()
self.assertTrue(ret is True)
def test_destroy_node_response_RESOURCE_BUSY(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
node.destroy()
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_create_node_response(self):
rootPw = NodeAuthPassword('<PASSWORD>')
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_response_network_domain(self):
rootPw = NodeAuthPassword('<PASSWORD>')
location = self.driver.ex_get_location_by_id('NA9')
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0]
vlan = self.driver.ex_list_vlans(location=location)[0]
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node',
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_no_network(self):
rootPw = NodeAuthPassword('<PASSWORD>')
image = self.driver.list_images()[0]
try:
self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node', ex_network=None,
ex_isStarted=False)
except ValueError:
pass
def test_ex_shutdown_graceful(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_shutdown_graceful(node)
self.assertTrue(ret is True)
def test_ex_shutdown_graceful_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_shutdown_graceful(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_start_node(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret is True)
def test_ex_start_node_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_start_node(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_power_off(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_power_off(node)
self.assertTrue(ret is True)
def test_ex_power_off_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_power_off(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_reset(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_reset(node)
self.assertTrue(ret is True)
def test_list_networks(self):
nets = self.driver.list_networks()
self.assertEqual(nets[0].name, 'test-net1')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_networks(self):
nets = self.driver.ex_list_networks()
self.assertEqual(nets[0].name, 'test-net1')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains(self):
nets = self.driver.ex_list_network_domains()
self.assertEqual(nets[0].name, 'Aurora')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_vlans(self):
vlans = self.driver.ex_list_vlans()
self.assertEqual(vlans[0].name, "Primary")
class DimensionDataMockHttp(MockHttp):
fixtures = ComputeFileFixtures('dimensiondata')
def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
def _oec_0_9_myaccount(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_image(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_base_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
body = None
action = url.split('?')[-1]
if action == 'restart':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml')
elif action == 'shutdown':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml')
elif action == 'delete':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml')
elif action == 'start':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml')
elif action == 'poweroff':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
body = None
action = url.split('?')[-1]
if action == 'restart':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml')
elif action == 'shutdown':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml')
elif action == 'delete':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml')
elif action == 'start':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml')
elif action == 'poweroff':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
body = self.fixtures.load(
'_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_RESOURCEBUSY.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_RESOURCEBUSY.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main()) | libcloud/test/compute/test_dimensiondata.py | import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionDataTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
DimensionData.connectionCls.conn_classes = (None, DimensionDataMockHttp)
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
try:
self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
except ValueError:
pass
def test_invalid_creds(self):
DimensionDataMockHttp.type = 'UNAUTHORIZED'
try:
self.driver.list_nodes()
self.assertTrue(
False) # Above command should have thrown an InvalidCredsException
except InvalidCredsError:
pass
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_node = ret[0]
self.assertEqual(first_node.id, 'NA3')
self.assertEqual(first_node.name, 'US - West')
self.assertEqual(first_node.country, 'US')
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 3)
def test_list_sizes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
size = ret[0]
self.assertEqual(size.name, 'default')
def test_reboot_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.reboot()
self.assertTrue(ret is True)
def test_reboot_node_response_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
node.reboot()
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_destroy_node_response(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = node.destroy()
self.assertTrue(ret is True)
def test_destroy_node_response_RESOURCE_BUSY(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
node.destroy()
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_create_node_response(self):
rootPw = NodeAuthPassword('<PASSWORD>')
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node', ex_network=network,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_response_network_domain(self):
rootPw = NodeAuthPassword('<PASSWORD>')
location = self.driver.ex_get_location_by_id('NA9')
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0]
vlan = self.driver.ex_list_vlans(location=location)[0]
node = self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node',
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False)
self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
def test_create_node_no_network(self):
rootPw = NodeAuthPassword('<PASSWORD>')
image = self.driver.list_images()[0]
try:
self.driver.create_node(name='test2', image=image, auth=rootPw,
ex_description='test2 node', ex_network=None,
ex_isStarted=False)
except ValueError:
pass
def test_ex_shutdown_graceful(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_shutdown_graceful(node)
self.assertTrue(ret is True)
def test_ex_shutdown_graceful_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_shutdown_graceful(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_start_node(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret is True)
def test_ex_start_node_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_start_node(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_power_off(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_power_off(node)
self.assertTrue(ret is True)
def test_ex_power_off_INPROGRESS(self):
DimensionDataMockHttp.type = 'INPROGRESS'
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
try:
self.driver.ex_power_off(node)
self.assertTrue(
False) # above command should have thrown DimensionDataAPIException
except DimensionDataAPIException:
pass
def test_ex_reset(self):
node = Node(id='11', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver)
ret = self.driver.ex_reset(node)
self.assertTrue(ret is True)
def test_list_networks(self):
nets = self.driver.list_networks()
self.assertEqual(nets[0].name, 'test-net1')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_networks(self):
nets = self.driver.ex_list_networks()
self.assertEqual(nets[0].name, 'test-net1')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains(self):
nets = self.driver.ex_list_network_domains()
self.assertEqual(nets[0].name, 'Aurora')
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_vlans(self):
vlans = self.driver.ex_list_vlans()
self.assertEqual(vlans[0].name, "Primary")
class DimensionDataMockHttp(MockHttp):
    """Mock HTTP transport for the DimensionData driver tests.

    Each method name encodes the URL path it serves (path separators
    replaced by underscores, as MockHttp requires); responses are loaded
    from the matching XML fixture.  ``*_INPROGRESS`` variants simulate a
    busy endpoint that answers 400.
    """

    fixtures = ComputeFileFixtures('dimensiondata')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        # Simulates bad credentials: 401 with an empty body.
        return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_base_image(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_base_image.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
        # Dispatch on the query-string "action" appended to the server URL.
        body = None
        action = url.split('?')[-1]
        if action == 'restart':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml')
        elif action == 'shutdown':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml')
        elif action == 'delete':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml')
        elif action == 'start':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml')
        elif action == 'poweroff':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
        # Same dispatch as above, but simulates an in-progress operation:
        # every action is answered with 400.
        body = None
        action = url.split('?')[-1]
        if action == 'restart':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml')
        elif action == 'shutdown':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml')
        elif action == 'delete':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml')
        elif action == 'start':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml')
        elif action == 'poweroff':
            body = self.fixtures.load(
                'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml')
        # NOTE(review): status is BAD_REQUEST but the reason phrase is the
        # one for OK — presumably deliberate in these mocks; confirm.
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
        # NOTE(review): this fixture name starts with an underscore, unlike
        # every other fixture — verify the file exists under that name.
        body = self.fixtures.load(
            '_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers):
        body = self.fixtures.load(
            'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_RESOURCEBUSY.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_RESOURCEBUSY.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
        body = self.fixtures.load(
            'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
import numpy as np
from numba import njit
# read binary raw image files for xviii camera
@njit
def load_xviii_bayer_from_binary(binary_data, image_height, image_width):
    """Read XVIII binary images into bayer array

    Every 12 consecutive raw bytes encode four pixels of three bytes each,
    in the camera's interleaved byte order (see the index pattern below).

    Parameters
    -----------
    binary_data : numpy.ndarray
        binary image data from XVIII (flat uint8 array)
    image_height : int
        image height
    image_width : int
        image width

    Returns
    --------
    numpy.ndarray
        Bayer image (float32, shape image_height x image_width)
    """
    img_h = image_height
    img_w = image_width
    bayer_img = np.zeros((img_h, img_w), dtype=np.uint32)
    # read raw data and put them into bayer pattern.
    count = 0
    for i in range(0, img_h, 1):
        for j in range(0, img_w, 4):
            # 12 bytes -> 4 pixels; byte order is camera-specific.
            chunk = binary_data[count : count + 12]  # noqa
            bayer_img[i, j] = (
                ((chunk[3] & 0xFF) << 16)
                | ((chunk[2] & 0xFF) << 8)
                | (chunk[1] & 0xFF)
            )
            bayer_img[i, j + 1] = (
                ((chunk[0] & 0xFF) << 16)
                | ((chunk[7] & 0xFF) << 8)
                | (chunk[6] & 0xFF)
            )
            bayer_img[i, j + 2] = (
                ((chunk[5] & 0xFF) << 16)
                | ((chunk[4] & 0xFF) << 8)
                | (chunk[11] & 0xFF)
            )
            bayer_img[i, j + 3] = (
                ((chunk[10] & 0xFF) << 16)
                | ((chunk[9] & 0xFF) << 8)
                | (chunk[8] & 0xFF)
            )
            count += 12
    # Convert to float32 so the caller can scale to [0, 1].
    bayer_img = bayer_img.astype(np.float32)
    return bayer_img
def loader(raw_filename, image_width=1280, image_height=1024, src_bit=18):
    """XVIII image loader

    Parameters
    ----------
    raw_filename : Path
        Image file path
    image_width : int
        Image width
    image_height : int
        Image height
    src_bit : int
        Source bit depth used to normalise the pixel values (default 18)

    Returns
    -------
    np.ndarray
        Loaded image in matrix form (numpy)
    """
    binary_data = np.fromfile(raw_filename, dtype=np.uint8)
    bayer_img = load_xviii_bayer_from_binary(
        binary_data[:], image_height, image_width
    )
    # Scale down from 18 bits to unitary to process with OpenCV debayer
    bayer_img *= 2 ** (-src_bit)
return bayer_img
import numpy as np
from numba import njit
# read binary raw image files for xviii camera
@njit
def load_xviii_bayer_from_binary(binary_data, image_height, image_width):
    """Read XVIII binary images into bayer array

    Every 12 consecutive raw bytes encode four pixels of three bytes each,
    in the camera's interleaved byte order (see the index pattern below).

    Parameters
    -----------
    binary_data : numpy.ndarray
        binary image data from XVIII (flat uint8 array)
    image_height : int
        image height
    image_width : int
        image width

    Returns
    --------
    numpy.ndarray
        Bayer image (float32, shape image_height x image_width)
    """
    img_h = image_height
    img_w = image_width
    bayer_img = np.zeros((img_h, img_w), dtype=np.uint32)
    # read raw data and put them into bayer pattern.
    count = 0
    for i in range(0, img_h, 1):
        for j in range(0, img_w, 4):
            # 12 bytes -> 4 pixels; byte order is camera-specific.
            chunk = binary_data[count : count + 12]  # noqa
            bayer_img[i, j] = (
                ((chunk[3] & 0xFF) << 16)
                | ((chunk[2] & 0xFF) << 8)
                | (chunk[1] & 0xFF)
            )
            bayer_img[i, j + 1] = (
                ((chunk[0] & 0xFF) << 16)
                | ((chunk[7] & 0xFF) << 8)
                | (chunk[6] & 0xFF)
            )
            bayer_img[i, j + 2] = (
                ((chunk[5] & 0xFF) << 16)
                | ((chunk[4] & 0xFF) << 8)
                | (chunk[11] & 0xFF)
            )
            bayer_img[i, j + 3] = (
                ((chunk[10] & 0xFF) << 16)
                | ((chunk[9] & 0xFF) << 8)
                | (chunk[8] & 0xFF)
            )
            count += 12
    # Convert to float32 so the caller can scale to [0, 1].
    bayer_img = bayer_img.astype(np.float32)
    return bayer_img
def loader(raw_filename, image_width=1280, image_height=1024, src_bit=18):
    """XVIII image loader

    Parameters
    ----------
    raw_filename : Path
        Image file path
    image_width : int
        Image width
    image_height : int
        Image height
    src_bit : int
        Source bit depth used to normalise the pixel values (default 18)

    Returns
    -------
    np.ndarray
        Loaded image in matrix form (numpy)
    """
    binary_data = np.fromfile(raw_filename, dtype=np.uint8)
    bayer_img = load_xviii_bayer_from_binary(
        binary_data[:], image_height, image_width
    )
    # Scale down from 18 bits to unitary to process with OpenCV debayer
    bayer_img *= 2 ** (-src_bit)
return bayer_img
class tree_node:
    """A single binary-tree node: a value plus two child links."""

    def __init__(self, value=None):
        self.value = value   # payload stored at this node
        self.left = None     # left child (tree_node or None)
        self.right = None    # right child (tree_node or None)

    def __str__(self):
        return str(self.value)
class Binary_tree(tree_node):
    """Binary tree with depth-first traversals, keyed off ``self.root``.

    NOTE(review): inheriting from tree_node looks unintentional (only
    ``self.root`` is ever used) but is kept for interface compatibility.
    """

    def __init__(self, root=None):
        self.root = root

    # code challeng 15 part 1
    def pre_order(self):
        """Return values in root -> left -> right order.

        Raises:
            Exception: if the tree is empty.
        """
        first_list = []
        if self.root is None:
            raise Exception("Empty Tree")

        def throgh_in(root):
            first_list.append(root.value)
            if root.left:
                throgh_in(root.left)
            if root.right:
                throgh_in(root.right)

        throgh_in(self.root)
        return first_list

    def in_order(self):
        """Return values in left -> root -> right order.

        Raises:
            Exception: if the tree is empty.
        """
        in_list = []
        if self.root is None:
            raise Exception("empty tree")

        def throgh_in(root):
            if root.left:
                throgh_in(root.left)
            in_list.append(root.value)
            if root.right:
                throgh_in(root.right)

        throgh_in(self.root)
        return in_list

    def post_order(self, root):
        """Return values in left -> right -> root order below ``root``."""
        thr = []
        if root:
            thr = thr + self.post_order(root.left)
            thr = thr + self.post_order(root.right)
            thr.append(root.value)
        return thr

    # code challeng 16
    def Tree_max(self):
        """Return the largest value stored in the tree.

        Bug fixes: the running maximum used to start at 0, which was wrong
        for all-negative trees; ``self.root.value`` raised AttributeError
        on an empty tree; and a root value of 0 was treated as empty.
        """
        if self.root is None:
            return "you have an empty list "
        return max(self.in_order())
#code challenge 17
def breadth_first(tree):
    """Return node values level by level (breadth-first).

    Bug fix: the left child was enqueued via the undefined name
    ``currentNode`` (NameError); it now uses the dequeued node.

    Parameters:
        tree: object with a ``root`` attribute (Binary_tree or compatible).

    Returns:
        list of values, or the string "empty" for an empty tree.
    """
    if tree.root is None:
        return "empty"
    node_list = []
    tree_list = []
    tree_list += [tree.root]
    while tree_list:
        our_Node = tree_list[0]
        if our_Node.left:
            tree_list += [our_Node.left]
        if our_Node.right:
            tree_list += [our_Node.right]
        node_list += [tree_list.pop(0).value]
    return node_list
#code challeng 18
def fizz_buzz_tree(kary):
    """Return a new tree mirroring ``kary`` with FizzBuzz string values.

    Bug fix: the original referenced the undefined names ``BinaryTree``
    and ``Node``; they are now this module's Binary_tree and tree_node.
    """
    my_fbuz = Binary_tree()

    def throgh_in(node):
        if node is not None:
            if node.value % 3 == 0 and node.value % 5 == 0:
                my_fbuz_node = tree_node("FizzBuzz")
            elif node.value % 3 == 0:
                my_fbuz_node = tree_node("Fizz")
            elif node.value % 5 == 0:
                my_fbuz_node = tree_node("Buzz")
            else:
                my_fbuz_node = tree_node(str(node.value))
            if node.left:
                my_fbuz_node.left = throgh_in(node.left)
            if node.right:
                my_fbuz_node.right = throgh_in(node.right)
            return my_fbuz_node

    my_fbuz.root = throgh_in(kary.root)
    return my_fbuz
#code challeng 15 part 2
class Binary_Search(Binary_tree):
    """Binary search tree built on top of Binary_tree."""

    def add(self, value):
        """Insert ``value`` keeping the BST ordering (duplicates ignored)."""
        if not self.root:
            self.root = tree_node(value)
            return

        def descend(node):
            if value < node.value:
                if node.left is None:
                    node.left = tree_node(value)
                else:
                    descend(node.left)
            elif value > node.value:
                if node.right is None:
                    node.right = tree_node(value)
                else:
                    descend(node.right)

        descend(self.root)

    def complete(self, value):
        """Return True when ``value`` exists anywhere in the tree."""
        if not self.root:
            return False

        def found(node):
            if node is None:
                return False
            if node.value == value:
                return True
            return found(node.left) or found(node.right)

        return found(self.root)
# Demo: build a small tree and run the challenge functions.
root = tree_node(2)
root.left = tree_node(7)
root.right = tree_node(5)
root.left.left = tree_node(2)
root.left.right = tree_node(6)
root.right.right = tree_node(9)
root.left.right.left = tree_node(5)
root.left.right.right = tree_node(11)
root.right.right.left = tree_node(4)
# Bug fixes: ``breadthfirst`` and ``kary`` were undefined names.  Wrap the
# root node in a Binary_tree and call the module's functions on it.
kary = Binary_tree(root)
breadth_first(kary)
my_fbuz = fizz_buzz_tree(kary)
print(my_fbuz.pre_order())
class tree_node :
    def __init__(self, value=None):
        # value: payload stored at this node; children start empty.
        self.value = value
        self.left = None
        self.right = None
    def __str__(self):
        # Display the node as its stored value.
        return str(self.value)
class Binary_tree(tree_node):
    """Binary tree with depth-first traversals, keyed off ``self.root``.

    NOTE(review): inheriting from tree_node looks unintentional (only
    ``self.root`` is ever used) but is kept for interface compatibility.
    """

    def __init__(self, root=None):
        self.root = root

    # code challeng 15 part 1
    def pre_order(self):
        """Return values in root -> left -> right order.

        Raises:
            Exception: if the tree is empty.
        """
        first_list = []
        if self.root is None:
            raise Exception("Empty Tree")

        def throgh_in(root):
            first_list.append(root.value)
            if root.left:
                throgh_in(root.left)
            if root.right:
                throgh_in(root.right)

        throgh_in(self.root)
        return first_list

    def in_order(self):
        """Return values in left -> root -> right order.

        Raises:
            Exception: if the tree is empty.
        """
        in_list = []
        if self.root is None:
            raise Exception("empty tree")

        def throgh_in(root):
            if root.left:
                throgh_in(root.left)
            in_list.append(root.value)
            if root.right:
                throgh_in(root.right)

        throgh_in(self.root)
        return in_list

    def post_order(self, root):
        """Return values in left -> right -> root order below ``root``."""
        thr = []
        if root:
            thr = thr + self.post_order(root.left)
            thr = thr + self.post_order(root.right)
            thr.append(root.value)
        return thr

    # code challeng 16
    def Tree_max(self):
        """Return the largest value stored in the tree.

        Bug fixes: the running maximum used to start at 0, which was wrong
        for all-negative trees; ``self.root.value`` raised AttributeError
        on an empty tree; and a root value of 0 was treated as empty.
        """
        if self.root is None:
            return "you have an empty list "
        return max(self.in_order())
#code challenge 17
def breadth_first(tree):
    """Return node values level by level (breadth-first).

    Bug fix: the left child was enqueued via the undefined name
    ``currentNode`` (NameError); it now uses the dequeued node.

    Parameters:
        tree: object with a ``root`` attribute (Binary_tree or compatible).

    Returns:
        list of values, or the string "empty" for an empty tree.
    """
    if tree.root is None:
        return "empty"
    node_list = []
    tree_list = []
    tree_list += [tree.root]
    while tree_list:
        our_Node = tree_list[0]
        if our_Node.left:
            tree_list += [our_Node.left]
        if our_Node.right:
            tree_list += [our_Node.right]
        node_list += [tree_list.pop(0).value]
    return node_list
#code challeng 18
def fizz_buzz_tree(kary):
    """Return a new tree mirroring ``kary`` with FizzBuzz string values.

    Bug fix: the original referenced the undefined names ``BinaryTree``
    and ``Node``; they are now this module's Binary_tree and tree_node.
    """
    my_fbuz = Binary_tree()

    def throgh_in(node):
        if node is not None:
            if node.value % 3 == 0 and node.value % 5 == 0:
                my_fbuz_node = tree_node("FizzBuzz")
            elif node.value % 3 == 0:
                my_fbuz_node = tree_node("Fizz")
            elif node.value % 5 == 0:
                my_fbuz_node = tree_node("Buzz")
            else:
                my_fbuz_node = tree_node(str(node.value))
            if node.left:
                my_fbuz_node.left = throgh_in(node.left)
            if node.right:
                my_fbuz_node.right = throgh_in(node.right)
            return my_fbuz_node

    my_fbuz.root = throgh_in(kary.root)
    return my_fbuz
#code challeng 15 part 2
class Binary_Search(Binary_tree):
    """Binary search tree built on top of Binary_tree."""

    def add(self, value):
        """Insert ``value`` keeping the BST ordering (duplicates ignored)."""
        if not self.root:
            self.root = tree_node(value)
            return

        def descend(node):
            if value < node.value:
                if node.left is None:
                    node.left = tree_node(value)
                else:
                    descend(node.left)
            elif value > node.value:
                if node.right is None:
                    node.right = tree_node(value)
                else:
                    descend(node.right)

        descend(self.root)

    def complete(self, value):
        """Return True when ``value`` exists anywhere in the tree."""
        if not self.root:
            return False

        def found(node):
            if node is None:
                return False
            if node.value == value:
                return True
            return found(node.left) or found(node.right)

        return found(self.root)
# Demo: build a small tree and run the challenge functions.
root = tree_node(2)
root.left = tree_node(7)
root.right = tree_node(5)
root.left.left = tree_node(2)
root.left.right = tree_node(6)
root.right.right = tree_node(9)
root.left.right.left = tree_node(5)
root.left.right.right = tree_node(11)
root.right.right.left = tree_node(4)
# Bug fixes: ``breadthfirst`` and ``kary`` were undefined names.  Wrap the
# root node in a Binary_tree and call the module's functions on it.
kary = Binary_tree(root)
breadth_first(kary)
my_fbuz = fizz_buzz_tree(kary)
print(my_fbuz.pre_order())
import time
import math
import json
import spidev
class PT100(object):
    """MAX31865-based PT100 temperature sensor read over SPI."""

    def __init__(self, number, average_number=5):
        """Open SPI device ``number`` on bus 0 and configure the MAX31865.

        Parameters
        ----------
        number : int
            SPI chip-select / device number on bus 0.
        average_number : int
            How many raw readings are averaged per ``read()`` call.
        """
        self.__average_number = average_number
        self.spi = spidev.SpiDev()
        self.spi.open(0, number)
        self.spi.max_speed_hz = 100000
        self.spi.mode = 1
        # Configuration bits (MSB first):
        #   Vbias            1 = on
        #   Conversion mode  1 = auto, 0 = normally off
        #   1-shot           1 = 1-shot (auto-clear)
        #   3-wire           1 = 3-wire, 0 = 2/4 wire
        #   Fault detection  (two bits)
        #   Fault Status     1 = clear
        #   Filter           1 = 50Hz (original comment said "2=60Hz";
        #                    presumably 0 = 60Hz -- confirm with datasheet)
        config = 0b11110011
        # Write Config, then zero the low fault thresholds (0x05/0x06) and
        # max out the high fault thresholds (0x03/0x04).
        self.spi.xfer([0x80, config])
        self.spi.xfer([0x05, 0x00])
        self.spi.xfer([0x06, 0x00])
        self.spi.xfer([0x03, 0xff])
        self.spi.xfer([0x04, 0xff])
        # ADC code -> temperature lookup table used for interpolation.
        with open('max31865_table.json', 'r') as table:
            self.table = json.load(table)
        # The first read after start is 0 0
        self.read()

    def _interpolation(self, rtdRaw):
        """Linearly interpolate the temperature for raw ADC code ``rtdRaw``.

        Finds the first table entry whose code is >= rtdRaw and
        interpolates between it and its predecessor.
        """
        for index, item in enumerate(self.table):
            if rtdRaw <= int(item['code_dec']):
                break
        a1 = self.table[index - 1]
        a1_code_dec = float(a1['code_dec'])
        a1_temperature = float(a1['temperature'])
        a2 = self.table[index]
        a2_code_dec = float(a2['code_dec'])
        a2_temperature = float(a2['temperature'])
        return ((rtdRaw - a1_code_dec) / (a2_code_dec - a1_code_dec)
                * (a2_temperature - a1_temperature)) + a1_temperature

    def _RawToTemp(self, msb_rtd, lsb_rtd):
        """Convert the RTD register byte pair to a temperature.

        Removed unused locals (a, b, rtdR, rtd0) left over from a
        Callendar-Van Dusen computation that the lookup table replaced.
        """
        # NOTE(review): code assembled as (MSB << 7) | (LSB >> 1) -- assumed
        # to match the generation of max31865_table.json; verify.
        rtdRaw = ((msb_rtd << 7) + ((lsb_rtd & 0xFE) >> 1))
        return self._interpolation(rtdRaw)

    def read(self):
        """Return the temperature averaged over ``average_number`` reads."""
        temp = 0
        for index in range(0, self.__average_number):
            MSB = self.spi.xfer([0x01, 0x00])[1]
            LSB = self.spi.xfer([0x02, 0x00])[1]
            temp = temp + self._RawToTemp(MSB, LSB)
        return (temp / self.__average_number)
import math
import json
import spidev
class PT100(object):
    """MAX31865-based PT100 temperature sensor read over SPI."""

    def __init__(self, number, average_number=5):
        """Open SPI device ``number`` on bus 0 and configure the MAX31865.

        Parameters
        ----------
        number : int
            SPI chip-select / device number on bus 0.
        average_number : int
            How many raw readings are averaged per ``read()`` call.
        """
        self.__average_number = average_number
        self.spi = spidev.SpiDev()
        self.spi.open(0, number)
        self.spi.max_speed_hz = 100000
        self.spi.mode = 1
        # Configuration bits (MSB first):
        #   Vbias            1 = on
        #   Conversion mode  1 = auto, 0 = normally off
        #   1-shot           1 = 1-shot (auto-clear)
        #   3-wire           1 = 3-wire, 0 = 2/4 wire
        #   Fault detection  (two bits)
        #   Fault Status     1 = clear
        #   Filter           1 = 50Hz (original comment said "2=60Hz";
        #                    presumably 0 = 60Hz -- confirm with datasheet)
        config = 0b11110011
        # Write Config, then zero the low fault thresholds (0x05/0x06) and
        # max out the high fault thresholds (0x03/0x04).
        self.spi.xfer([0x80, config])
        self.spi.xfer([0x05, 0x00])
        self.spi.xfer([0x06, 0x00])
        self.spi.xfer([0x03, 0xff])
        self.spi.xfer([0x04, 0xff])
        # ADC code -> temperature lookup table used for interpolation.
        with open('max31865_table.json', 'r') as table:
            self.table = json.load(table)
        # The first read after start is 0 0
        self.read()

    def _interpolation(self, rtdRaw):
        """Linearly interpolate the temperature for raw ADC code ``rtdRaw``.

        Finds the first table entry whose code is >= rtdRaw and
        interpolates between it and its predecessor.
        """
        for index, item in enumerate(self.table):
            if rtdRaw <= int(item['code_dec']):
                break
        a1 = self.table[index - 1]
        a1_code_dec = float(a1['code_dec'])
        a1_temperature = float(a1['temperature'])
        a2 = self.table[index]
        a2_code_dec = float(a2['code_dec'])
        a2_temperature = float(a2['temperature'])
        return ((rtdRaw - a1_code_dec) / (a2_code_dec - a1_code_dec)
                * (a2_temperature - a1_temperature)) + a1_temperature

    def _RawToTemp(self, msb_rtd, lsb_rtd):
        """Convert the RTD register byte pair to a temperature.

        Removed unused locals (a, b, rtdR, rtd0) left over from a
        Callendar-Van Dusen computation that the lookup table replaced.
        """
        # NOTE(review): code assembled as (MSB << 7) | (LSB >> 1) -- assumed
        # to match the generation of max31865_table.json; verify.
        rtdRaw = ((msb_rtd << 7) + ((lsb_rtd & 0xFE) >> 1))
        return self._interpolation(rtdRaw)

    def read(self):
        """Return the temperature averaged over ``average_number`` reads."""
        temp = 0
        for index in range(0, self.__average_number):
            MSB = self.spi.xfer([0x01, 0x00])[1]
            LSB = self.spi.xfer([0x02, 0x00])[1]
            temp = temp + self._RawToTemp(MSB, LSB)
        return (temp / self.__average_number)
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__)) # NOQA
sys.path.append(dir_path) # NOQA
import importlib
from flask import Flask, jsonify
from flask_cors import CORS
from flake8.api import legacy as flake8
from logging_config import logger
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to the API
TASK_NUM = 8  # number of graded tasks (task_1 .. task_8) per student
@app.route('/hi', methods=['GET'])
def hi():
    """Health-check endpoint returning a greeting message."""
    payload = {"message": "Hi! This is the server for Introduction to Computer."}
    return jsonify(payload)
def grade():
    '''
    Get test results of all students in src/students/

    For every ``<student_id>.py`` the function records whether the module
    imports, whether each ``task_1 .. task_TASK_NUM`` runs without
    raising, and whether the file passes flake8.

    Returns
    -------
    dict
        ``{"results": {...}, "task_num": int, "student_num": int}``
    '''
    # Save results to a dict
    results = {}
    student_ids = os.listdir(os.path.join(dir_path, 'students'))
    student_ids = [x[:-3] for x in student_ids if x[-3:] == '.py']
    for student_id in student_ids:
        student_result = {}
        student_module = None
        try:
            student_module = importlib.import_module(f'src.students.{student_id}')  # NOQA
        except Exception as err:
            logger.info(err, exc_info=True)
            student_result['import'] = "Failed"
        else:
            student_result['import'] = "Success"
        # Check each task.  getattr replaces the previous eval() call: it
        # avoids evaluating a constructed code string and behaves the same,
        # raising AttributeError (recorded as "WA") when a task is missing.
        for task_id in range(1, TASK_NUM + 1):
            logger.info(f"Testing {student_id} Task {task_id}")
            try:
                getattr(student_module, f"task_{task_id}")()
            except Exception as err:
                logger.error(err, exc_info=True)
                student_result[f"task_{task_id}"] = "WA"
            else:
                student_result[f"task_{task_id}"] = "AC"
        # Check flake8 style compliance (errors 'E' and warnings 'W')
        style_guide = flake8.get_style_guide()
        student_file = os.path.join(dir_path, 'students', student_id + '.py')
        report = style_guide.check_files([student_file])
        if (report.get_statistics('E') == [] and
                report.get_statistics('W') == []):
            logger.info(report.get_statistics('E'))
            logger.info(report.get_statistics('W'))
            student_result['flake8'] = "Pass"
        else:
            student_result['flake8'] = "Fail"
        results[student_id] = student_result
    return {
        "results": results,
        "task_num": TASK_NUM,
        "student_num": len(student_ids)
    }
@app.route('/get_results', methods=['GET'])
def get_results():
    """Serve the grading results computed once at import time."""
    response = jsonify(results)
    return response
# Grade every student once at import time; routes serve the cached result.
results = grade()
if __name__ == "__main__":
app.run()
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__)) # NOQA
sys.path.append(dir_path) # NOQA
import importlib
from flask import Flask, jsonify
from flask_cors import CORS
from flake8.api import legacy as flake8
from logging_config import logger
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to the API
TASK_NUM = 8  # number of graded tasks (task_1 .. task_8) per student
@app.route('/hi', methods=['GET'])
def hi():
    """Health-check endpoint returning a greeting message."""
    payload = {"message": "Hi! This is the server for Introduction to Computer."}
    return jsonify(payload)
def grade():
    '''
    Get test results of all students in src/students/

    For every ``<student_id>.py`` the function records whether the module
    imports, whether each ``task_1 .. task_TASK_NUM`` runs without
    raising, and whether the file passes flake8.

    Returns
    -------
    dict
        ``{"results": {...}, "task_num": int, "student_num": int}``
    '''
    # Save results to a dict
    results = {}
    student_ids = os.listdir(os.path.join(dir_path, 'students'))
    student_ids = [x[:-3] for x in student_ids if x[-3:] == '.py']
    for student_id in student_ids:
        student_result = {}
        student_module = None
        try:
            student_module = importlib.import_module(f'src.students.{student_id}')  # NOQA
        except Exception as err:
            logger.info(err, exc_info=True)
            student_result['import'] = "Failed"
        else:
            student_result['import'] = "Success"
        # Check each task.  getattr replaces the previous eval() call: it
        # avoids evaluating a constructed code string and behaves the same,
        # raising AttributeError (recorded as "WA") when a task is missing.
        for task_id in range(1, TASK_NUM + 1):
            logger.info(f"Testing {student_id} Task {task_id}")
            try:
                getattr(student_module, f"task_{task_id}")()
            except Exception as err:
                logger.error(err, exc_info=True)
                student_result[f"task_{task_id}"] = "WA"
            else:
                student_result[f"task_{task_id}"] = "AC"
        # Check flake8 style compliance (errors 'E' and warnings 'W')
        style_guide = flake8.get_style_guide()
        student_file = os.path.join(dir_path, 'students', student_id + '.py')
        report = style_guide.check_files([student_file])
        if (report.get_statistics('E') == [] and
                report.get_statistics('W') == []):
            logger.info(report.get_statistics('E'))
            logger.info(report.get_statistics('W'))
            student_result['flake8'] = "Pass"
        else:
            student_result['flake8'] = "Fail"
        results[student_id] = student_result
    return {
        "results": results,
        "task_num": TASK_NUM,
        "student_num": len(student_ids)
    }
@app.route('/get_results', methods=['GET'])
def get_results():
    """Serve the grading results computed once at import time."""
    response = jsonify(results)
    return response
# Grade every student once at import time; routes serve the cached result.
results = grade()
if __name__ == "__main__":
app.run()
import numpy as np
import os, sys
import copy
import torch
import torch.utils.data
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from common.geometry import Camera
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from mesh_dataset import MeshLoader
class LoaderSingle(torch.utils.data.Dataset):
    """Dataset pairing rendered image data with mesh/SDF ground truth."""

    def __init__(self, synthetic_data_dir, mesh_data_dir, experiment_directory, split_file, dataset_name='syn_shapenet', flag_multi=False, load_mesh=False, load_code=False):
        self.dataset_name = dataset_name
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        if self.dataset_name == 'syn_shapenet':
            from syn_dataset import SynShapenetLoader
            self.image_loader = SynShapenetLoader(synthetic_data_dir, split_file=split_file)
        else:
            raise ValueError('dataset name {0} not defined'.format(self.dataset_name))
        self.mesh_loader = MeshLoader(mesh_data_dir, experiment_directory, split_file, load_mesh=load_mesh, load_code=load_code)
        self.load_mesh = load_mesh
        self.load_code = load_code
        # special matrix to transform the point cloud
        self.transform_matrix = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])

    def __len__(self):
        return len(self.image_loader)

    def get_data_from_md5(self, shape_md5, idx):
        """Fetch (mesh_data, image_data) for one instance by md5 hash."""
        img_item = self.image_loader.get_data_from_md5(shape_md5, idx)
        mesh_item = self.mesh_loader.get_data_from_md5(shape_md5)
        return mesh_item, img_item

    def transform_camera(self, camera, norm_param):
        '''
        transform the extrinsic parameters to align with the sdf volume.
        '''
        intrinsic = camera.intrinsic
        rot = camera.extrinsic[:, :3]
        trans = camera.extrinsic[:, 3]
        offset, scale = norm_param['offset'], norm_param['scale']
        translated = (np.dot(np.dot(rot, self.transform_matrix), -offset) + trans) * scale
        extrinsic_new = np.concatenate([rot, translated[:, None]], 1)
        return Camera(intrinsic, extrinsic_new)

    def transform_depth(self, depth, norm_param):
        """Scale valid depth values (< 1e5) by the normalisation scale."""
        scaled = copy.deepcopy(depth)
        valid = depth < 1e5
        scaled[valid] = depth[valid] * norm_param['scale']
        return scaled

    def __getitem__(self, idx):
        image_data = self.image_loader[idx]
        instance_name = self.image_loader.get_instance_name(idx)
        mesh_data = self.mesh_loader.get_data_from_md5(instance_name)
        img, depth, normal, camera = image_data
        # mesh_data layout depends on the load_code / load_mesh flags
        if self.load_code and self.load_mesh:
            mesh_recon, latent_code, gt_samples, norm_param = mesh_data
        elif self.load_code:
            latent_code, gt_samples, norm_param = mesh_data
        else:
            gt_samples, norm_param = mesh_data
        # align camera and depth with the normalised sdf volume
        camera_transformed = self.transform_camera(camera, norm_param)
        depth_transformed = self.transform_depth(depth, norm_param)
        return instance_name, image_data, mesh_data, camera_transformed, depth_transformed
import os, sys
import copy
import torch
import torch.utils.data
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from common.geometry import Camera
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from mesh_dataset import MeshLoader
class LoaderSingle(torch.utils.data.Dataset):
    """Dataset pairing rendered image data with mesh/SDF ground truth."""

    def __init__(self, synthetic_data_dir, mesh_data_dir, experiment_directory, split_file, dataset_name='syn_shapenet', flag_multi=False, load_mesh=False, load_code=False):
        self.dataset_name = dataset_name
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        if self.dataset_name == 'syn_shapenet':
            from syn_dataset import SynShapenetLoader
            self.image_loader = SynShapenetLoader(synthetic_data_dir, split_file=split_file)
        else:
            raise ValueError('dataset name {0} not defined'.format(self.dataset_name))
        self.mesh_loader = MeshLoader(mesh_data_dir, experiment_directory, split_file, load_mesh=load_mesh, load_code=load_code)
        self.load_mesh = load_mesh
        self.load_code = load_code
        # special matrix to transform the point cloud
        self.transform_matrix = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])

    def __len__(self):
        return len(self.image_loader)

    def get_data_from_md5(self, shape_md5, idx):
        """Fetch (mesh_data, image_data) for one instance by md5 hash."""
        img_item = self.image_loader.get_data_from_md5(shape_md5, idx)
        mesh_item = self.mesh_loader.get_data_from_md5(shape_md5)
        return mesh_item, img_item

    def transform_camera(self, camera, norm_param):
        '''
        transform the extrinsic parameters to align with the sdf volume.
        '''
        intrinsic = camera.intrinsic
        rot = camera.extrinsic[:, :3]
        trans = camera.extrinsic[:, 3]
        offset, scale = norm_param['offset'], norm_param['scale']
        translated = (np.dot(np.dot(rot, self.transform_matrix), -offset) + trans) * scale
        extrinsic_new = np.concatenate([rot, translated[:, None]], 1)
        return Camera(intrinsic, extrinsic_new)

    def transform_depth(self, depth, norm_param):
        """Scale valid depth values (< 1e5) by the normalisation scale."""
        scaled = copy.deepcopy(depth)
        valid = depth < 1e5
        scaled[valid] = depth[valid] * norm_param['scale']
        return scaled

    def __getitem__(self, idx):
        image_data = self.image_loader[idx]
        instance_name = self.image_loader.get_instance_name(idx)
        mesh_data = self.mesh_loader.get_data_from_md5(instance_name)
        img, depth, normal, camera = image_data
        # mesh_data layout depends on the load_code / load_mesh flags
        if self.load_code and self.load_mesh:
            mesh_recon, latent_code, gt_samples, norm_param = mesh_data
        elif self.load_code:
            latent_code, gt_samples, norm_param = mesh_data
        else:
            gt_samples, norm_param = mesh_data
        # align camera and depth with the normalised sdf volume
        camera_transformed = self.transform_camera(camera, norm_param)
        depth_transformed = self.transform_depth(depth, norm_param)
        return instance_name, image_data, mesh_data, camera_transformed, depth_transformed
import struct
import json
# Table 5.3 Some of the type values for DOS partitions.
# More partition values can be found here:
# http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
# Maps the DOS partition-type byte to a human-readable description.
DOS_PARTITIONS = {
    0x00: "Empty",
    0x01: "FAT12, CHS",
    0x04: "FAT16, 16-32 MB, CHS",
    0x05: "Microsoft Extended, CHS",
    0x06: "FAT16, 32 MB-2GB, CHS",
    0x07: "NTFS",
    0x0b: "FAT32, CHS",
    0x0c: "FAT32, LBA",
    0x0e: "FAT16, 32 MB-2GB, LBA",
    0x0f: "Microsoft Extended, LBA",
    0x11: "Hidden Fat12, CHS",
    0x14: "Hidden FAT16, 16-32 MB, CHS",
    0x16: "Hidden FAT16, 32 MB-2GB, CHS",
    0x1b: "Hidden FAT32, CHS",
    0x1c: "Hidden FAT32, LBA",
    0x1e: "Hidden FAT16, 32 MB-2GB, LBA",
    0x42: "Microsoft MBR, Dynamic Disk",
    0x82: "Solaris x86 -or- Linux Swap",
    0x83: "Linux",
    0x84: "Hibernation",
    0x85: "Linux Extended",
    0x86: "NTFS Volume Set",
    0x87: "NTFS Volume SET",
    0xa0: "Hibernation",
    0xa1: "Hibernation",
    0xa5: "FreeBSD",
    0xa6: "OpenBSD",
    0xa8: "Mac OSX",
    0xa9: "NetBSD",
    0xab: "Mac OSX Boot",
    0xb7: "BSDI",
    0xb8: "BSDI swap",
    # FIXME: I'm pretty sure 0xdb is a recovery partition
    0xdb: "Recovery Partition",
    0xde: "Dell Diagnostic Partition",
    0xee: "EFI GPT Disk",
    0xef: "EFI System Partition",
    0xfb: "Vmware File System",
    0xfc: "Vmware swap",
    # FIXME Add flag for VirtualBox Partitions
}
# FIXME find way to determine sector size
SECTOR_SIZE = 512  # bytes per sector assumed throughout this module
class Partition(object):
    """
    Object for storing Partition Data
    """
    def __init__(self, data, parent=None):
        """
        Parse one 16-byte DOS partition-table entry.

        To get the correct lba value for extended partitions, we need to add
        the lba value from the extended partition. For example, if you read the
        first 4 partitions and the fourth is an extended partition with an lba
        of 1000, we seek to the 1000th sector. Then we read the next mbr,
        adding the 1000 from the extended partition to each lba.

        Py3 compatibility fix: ``struct.unpack`` was called on single
        indexed bytes (e.g. ``data[0]``), which only works on Python 2
        strings; ``struct.unpack_from`` with an offset works on both
        ``str`` and ``bytes``.
        """
        self.parent = parent
        self.bootable_flag = struct.unpack_from("<B", data, 0)[0]
        # NOTE(review): "<BH" decodes 3 CHS bytes but only the first (head)
        # byte is kept, exactly as in the original code -- verify intent.
        self.start_chs_address = struct.unpack_from("<BH", data, 1)[0]
        self.partition_type = struct.unpack_from("<B", data, 4)[0]
        self.end_chs_address = struct.unpack_from("<BH", data, 5)[0]
        # FIXME Check to see how the lba address bytes are used
        if self.partition_type == 0x00:  # "Empty" entry: no LBA to decode
            self.lba = 0
        else:
            self.lba = struct.unpack_from("<L", data, 8)[0]
        self.size = struct.unpack_from("<L", data, 12)[0]

    def get_type(self):
        """
        Returns the text value of the partition type
        """
        return DOS_PARTITIONS[self.partition_type]

    def __repr__(self):
        return self.get_type()

    def is_bootable(self):
        """
        Returns True if this partition is bootable
        """
        return self.bootable_flag == 0x80

    def is_extended(self):
        """
        Returns True if the partition is an extended partition
        """
        return 'Extended' in self.get_type()
class Mbr(object):
"""
Parses the Master Boot Record
"""
def __init__(self, data, parent=None):
self.boot_code = struct.unpack("<446B", data[0:446])
self.partitions = []
self.partitions.append(Partition(data[446:462], parent))
self.partitions.append(Partition(data[462:478], parent))
self.partitions.append(Partition(data[478:494], parent))
self.partitions.append(Partition(data[494:510], parent))
self.signature = struct.unpack("<H", data[510:])[0]
@property
def extended_partitions(self):
return [i for i in self.partitions if 'Extended' in i.get_type()]
def validate_signature(self):
"""
Returns True if signature = 0xAA55 (a valid MBR signature)
"""
return self.signature == 0xAA55
def add_partitions(self, disk):
"""
Adds partitions from extended partitions to the MBR class
"""
for partition in self.partitions:
if 'Extended' in partition.get_type():
with open(disk, 'rb') as hd:
hd.seek(partition.read_start)
new_mbr = Mbr(hd.read(512), lba_offset=partition.lba)
self.partitions.extend(new_mbr.partitions)
new_mbr.add_partitions(disk)
def json(self):
mbr_dict = {'Signature': self.signature}
mbr_dict['Partitions'] = []
for number, partition in enumerate(self.partitions):
part_name = "Partition%s" % (number + 1)
mbr_dict['Partitions'].append(
{part_name: {'Type': partition.get_type(),
'Bootable': partition.is_bootable(),
'CHS start': partition.start_chs_address,
'CHS end': partition.end_chs_address,
'Logical block address': partition.lba,
'Size': partition.size,}})
return json.dumps(['Master Boot Record', mbr_dict], indent=4)
"""
ABOUT EXTENDED PARTITIONS
The starting address for a secondary File System partition is relative to the
current partition table.
The starting address for a secondary extended partition entry is relative to
the primary extended partition.
"""
def get_extended_tables(primary_lba, extended_lba, disk):
disk.seek(0)
disk.seek((primary_lba + extended_lba) * SECTOR_SIZE)
mbr = Mbr(disk.read(512))
yield mbr
for partition in mbr.partitions:
if partition.is_extended():
for mbr in get_extended_tables(primary_lba, partition.lba, disk):
yield mbr
def get_partition_tables(open_disk):
with open(open_disk, 'rb') as disk:
mbr = Mbr(disk.read(512))
yield mbr
disk.seek(0)
for partition in mbr.partitions:
if partition.is_extended():
primary_lba = partition.lba
mbrs = get_extended_tables(primary_lba, 0, disk)
for mbr in mbrs:
yield mbr
if __name__=='__main__':
import sys
args = sys.argv
partition_tables = get_partition_tables(args[1])
for pt in partition_tables:
for partition in pt.partitions:
print partition | mbr.py | import struct
import json
# Table 5.3 Some of the type values for DOS partitions.
# More partition values can be found here:
# http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
DOS_PARTITIONS = {
0x00: "Empty",
0x01: "FAT12, CHS",
0x04: "FAT16, 16-32 MB, CHS",
0x05: "Microsoft Extended, CHS",
0x06: "FAT16, 32 MB-2GB, CHS",
0x07: "NTFS",
0x0b: "FAT32, CHS",
0x0c: "FAT32, LBA",
0x0e: "FAT16, 32 MB-2GB, LBA",
0x0f: "Microsoft Extended, LBA",
0x11: "Hidden Fat12, CHS",
0x14: "Hidden FAT16, 16-32 MB, CHS",
0x16: "Hidden FAT16, 32 MB-2GB, CHS",
0x1b: "Hidden FAT32, CHS",
0x1c: "Hidden FAT32, LBA",
0x1e: "Hidden FAT16, 32 MB-2GB, LBA",
0x42: "Microsoft MBR, Dynamic Disk",
0x82: "Solaris x86 -or- Linux Swap",
0x83: "Linux",
0x84: "Hibernation",
0x85: "Linux Extended",
0x86: "NTFS Volume Set",
0x87: "NTFS Volume SET",
0xa0: "Hibernation",
0xa1: "Hibernation",
0xa5: "FreeBSD",
0xa6: "OpenBSD",
0xa8: "Mac OSX",
0xa9: "NetBSD",
0xab: "Mac OSX Boot",
0xb7: "BSDI",
0xb8: "BSDI swap",
# FIXME: I'm pretty sure 0xdb is a recovery partition
0xdb: "Recovery Partition",
0xde: "Dell Diagnostic Partition",
0xee: "EFI GPT Disk",
0xef: "EFI System Partition",
0xfb: "Vmware File System",
0xfc: "Vmware swap",
# FIXME Add flag for VirtualBox Partitions
}
# FIXME find way to determine sector size
SECTOR_SIZE = 512
class Partition(object):
"""
Object for storing Partition Data
"""
def __init__(self, data, parent=None):
"""
To get the correct lba value for extended partitions, we need to add
the lba value from the extended partition. For example, if you read the
first 4 partitions and the fourth is an extended partition with an lba
of 1000, we seek to the 1000th sector. Then we read the next mbr,
adding the 1000 from the extended partition to each lba.
"""
self.parent = parent
self.bootable_flag = struct.unpack("<B", data[0])[0]
self.start_chs_address = struct.unpack("<BH", data[1:4])[0]
self.partition_type = struct.unpack("<B", data[4])[0]
self.end_chs_address = struct.unpack("<BH", data[5:8])[0]
# FIXME Check to see how the lba address bytes are used
if self.get_type() == 'Empty':
self.lba = 0
else:
self.lba = struct.unpack("<L", data[8:12])[0]
self.size = struct.unpack("<L", data[12:16])[0]
def get_type(self):
"""
Returns the text value of the partition type
"""
return DOS_PARTITIONS[self.partition_type]
def __repr__(self):
return self.get_type()
def is_bootable(self):
"""
Returns True if this partition is bootable
"""
return self.bootable_flag == 0x80
def is_extended(self):
"""
Returns True if the partition is an extended partition
"""
return 'Extended' in self.get_type()
class Mbr(object):
"""
Parses the Master Boot Record
"""
def __init__(self, data, parent=None):
self.boot_code = struct.unpack("<446B", data[0:446])
self.partitions = []
self.partitions.append(Partition(data[446:462], parent))
self.partitions.append(Partition(data[462:478], parent))
self.partitions.append(Partition(data[478:494], parent))
self.partitions.append(Partition(data[494:510], parent))
self.signature = struct.unpack("<H", data[510:])[0]
@property
def extended_partitions(self):
return [i for i in self.partitions if 'Extended' in i.get_type()]
def validate_signature(self):
"""
Returns True if signature = 0xAA55 (a valid MBR signature)
"""
return self.signature == 0xAA55
def add_partitions(self, disk):
"""
Adds partitions from extended partitions to the MBR class
"""
for partition in self.partitions:
if 'Extended' in partition.get_type():
with open(disk, 'rb') as hd:
hd.seek(partition.read_start)
new_mbr = Mbr(hd.read(512), lba_offset=partition.lba)
self.partitions.extend(new_mbr.partitions)
new_mbr.add_partitions(disk)
def json(self):
mbr_dict = {'Signature': self.signature}
mbr_dict['Partitions'] = []
for number, partition in enumerate(self.partitions):
part_name = "Partition%s" % (number + 1)
mbr_dict['Partitions'].append(
{part_name: {'Type': partition.get_type(),
'Bootable': partition.is_bootable(),
'CHS start': partition.start_chs_address,
'CHS end': partition.end_chs_address,
'Logical block address': partition.lba,
'Size': partition.size,}})
return json.dumps(['Master Boot Record', mbr_dict], indent=4)
"""
ABOUT EXTENDED PARTITIONS
The starting address for a secondary File System partition is relative to the
current partition table.
The starting address for a secondary extended partition entry is relative to
the primary extended partition.
"""
def get_extended_tables(primary_lba, extended_lba, disk):
disk.seek(0)
disk.seek((primary_lba + extended_lba) * SECTOR_SIZE)
mbr = Mbr(disk.read(512))
yield mbr
for partition in mbr.partitions:
if partition.is_extended():
for mbr in get_extended_tables(primary_lba, partition.lba, disk):
yield mbr
def get_partition_tables(open_disk):
with open(open_disk, 'rb') as disk:
mbr = Mbr(disk.read(512))
yield mbr
disk.seek(0)
for partition in mbr.partitions:
if partition.is_extended():
primary_lba = partition.lba
mbrs = get_extended_tables(primary_lba, 0, disk)
for mbr in mbrs:
yield mbr
if __name__=='__main__':
import sys
args = sys.argv
partition_tables = get_partition_tables(args[1])
for pt in partition_tables:
for partition in pt.partitions:
print partition | 0.335677 | 0.365853 |
import GPy
import numpy as np
from GPy.kern.src.rbf import RBF
from paramz.caching import Cache_this
from scipy.special import logsumexp
import timeit
import os
class TauRBF(RBF):
def __init__(self, input_dim, features, demonstrations, art_demonstrations, discount, lengthscale=None):
# print("Using custom kernel")
self.input_dim = input_dim
self.features = features
self.demos = demonstrations
self.art_demos = art_demonstrations
self.discount = discount
self.temperature = 1
super(TauRBF, self).__init__(input_dim, lengthscale=lengthscale)
def get_reward(self, X):
X = np.append(X, -20. * np.zeros((len(X), 1)), axis=1)
R = np.dot(X, np.transpose(self.features))
return R
def _get_proxy(self, X, X2=None):
n_art_trajectories, n_trajectories, l_trajectory, _ = np.shape(self.art_demos)
discounts = [self.discount ** t for t in range(l_trajectory)]
discounts_x = np.repeat(np.expand_dims(discounts, axis=0), len(X), 0)
discounts_art_x = np.repeat(
np.expand_dims(np.repeat(np.expand_dims(discounts, axis=0), n_art_trajectories, 0), axis=0), len(X), 0)
Rx = self.get_reward(X)
if X2 is not None:
discounts_x2 = np.repeat(np.expand_dims(discounts, axis=0), len(X2), 0)
discounts_art_x2 = np.repeat(
np.expand_dims(np.repeat(np.expand_dims(discounts, axis=0), n_art_trajectories, 0), axis=0), len(X2), 0)
Rx2 = self.get_reward(X2)
proxy_x = []
proxy_x2 = []
demo_states = self.demos[:, :, 0].astype(int)
art_demo_states = self.art_demos[:, :, :, 0].astype(int)
for t in range(n_trajectories):
Rtau_x = Rx[:, demo_states[t, :]]
Rarttau_x = Rx[:, art_demo_states[:, t, :]]
discounted_Rtau_x = np.multiply(Rtau_x, discounts_x)
discounted_Rtau_x = self.temperature*np.expand_dims(np.sum(discounted_Rtau_x, axis=1), axis=1)
discounted_Rarttau_x = np.multiply(Rarttau_x, discounts_art_x)
discounted_Rarttau_x = np.sum(discounted_Rarttau_x, axis=2)
denominator = self.temperature*np.append(discounted_Rtau_x, discounted_Rarttau_x, axis=1)
#deb_dir = "deb_projections"
#os.makedirs(deb_dir,exist_ok=True)
#np.savetxt(os.path.join(deb_dir,"disc_rart_%d.txt"%t), denominator, fmt="%3.5f")
tosub = 0#np.max(denominator, axis=1, keepdims=True)
den = logsumexp(denominator - tosub, axis=1)
current_proxy_x = np.squeeze(discounted_Rtau_x - tosub) - den
####UNCOMMENT THIS IF YOU WANNA USE THE EXP OF REWARDS#########
current_proxy_x = np.exp(current_proxy_x)
#temp_denominator1 = np.exp(denominator)
#temp_denominator2 = np.sum(temp_denominator1, axis=1)
#temp_numerator1 = np.exp(discounted_Rtau_x)
#temp_proxy = np.divide(temp_numerator1.squeeze(), temp_denominator2)
# current_proxy_x = np.exp(
# np.sum(np.multiply(Rtau_x, discounts_x) - np.multiply(Rtau_x, discounts_x), axis=1))
proxy_x.append(current_proxy_x)
if X2 is not None:
Rtau_x2 = Rx2[:, demo_states[t, :]]
Rarttau_x2 = Rx2[:, art_demo_states[:, t, :]]
discounted_Rtau_x2 = np.multiply(Rtau_x2, discounts_x2)
discounted_Rtau_x2 = np.expand_dims(np.sum(discounted_Rtau_x2, axis=1), axis=1)
discounted_Rarttau_x2 = np.multiply(Rarttau_x2, discounts_art_x2)
discounted_Rarttau_x2 = np.sum(discounted_Rarttau_x2, axis=2)
denominator2 = np.append(discounted_Rtau_x2, discounted_Rarttau_x2, axis=1)
tosub2 = 0#np.max(denominator2, axis=1, keepdims=True)
den2 = logsumexp(denominator2 - tosub2, axis=1)
current_proxy_x2 = np.squeeze(discounted_Rtau_x2 - tosub2) - den2
current_proxy_x2 = np.exp(current_proxy_x2)
# current_proxy_x2 = np.exp(
# np.sum(np.multiply(Rtau_x2, discounts_x2) - np.multiply(Rsumtau_x2, discounts_x2), axis=1))
proxy_x2.append(current_proxy_x2)
proxy_x = np.array(proxy_x).T
if X2 is not None:
proxy_x2 = np.array(proxy_x2).T
else:
proxy_x2 = None
return proxy_x, proxy_x2
@Cache_this(limit=3, ignore_args=())
def _scaled_dist(self, X, X2=None):
#Not debug
#X = np.expand_dims(np.arange(-2, 1, 0.1),axis=1)
#X = np.hstack((X,-1*np.ones((X.shape[0],1))))
#X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
#X = np.vstack((X,np.array([[0., 0., 0.], [2., 1., 1.]])))
#X = np.array([[-2., -1., -1.]])
"""
Efficiently compute the scaled distance, r.
..math::
r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
Note that if thre is only one lengthscale, l comes outside the sum. In
this case we compute the unscaled distance first (in a separate
function for caching) and divide by lengthscale afterwards
"""
proxy_x, proxy_x2 = self._get_proxy(X, X2)
if self.ARD:
if proxy_x2 is not None:
proxy_x2 = proxy_x2 / self.lengthscale
return self._unscaled_dist(proxy_x / self.lengthscale, proxy_x2)
else:
return self._unscaled_dist(proxy_x, proxy_x2) / self.lengthscale
def deb(self, X, X2):
X = np.expand_dims(np.arange(-2, 1, 0.1), axis=1)
X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
X = np.vstack((X, np.array([[0., 0., 0.], [2., 1., 1.]])))
proxy_x, proxy_x2 = self._get_proxy(X, X2)
usc = self._unscaled_dist(proxy_x, proxy_x2)
ssc = self._unscaled_dist(proxy_x, proxy_x2) / self.lengthscale
ssc2 = ssc ** 2
essc2 = np.exp(ssc2)
v = self.variance
k = self.K(X, X2)
return proxy_x, proxy_x2, usc, ssc, ssc2, essc2, v, k
def gradients_X(self, dL_dK, X, X2=None):
super(TauRBF, self).gradients_X(dL_dK, X, X2)
def Kdiag(self, X):
proxy_x, _ = self._get_proxy(X, None)
ret = np.empty(proxy_x.shape[0])
ret[:] = self.variance
return ret
def update_gradients_full(self, dL_dK, X, X2=None, reset=True):
"""
Given the derivative of the objective wrt the covariance matrix
(dL_dK), compute the gradient wrt the parameters of this kernel,
and store in the parameters object as e.g. self.variance.gradient
"""
assert (not self.ARD)
self.variance.gradient = np.sum(self.K(X, X2) * dL_dK) / self.variance
# now the lengthscale gradient(s)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
if self.ARD:
tmp = dL_dr * self._inv_dist(X, X2)
if X2 is None: X2 = X
if use_stationary_cython:
self.lengthscale.gradient = self._lengthscale_grads_cython(tmp, X, X2)
else:
self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2)
else:
r = self._scaled_dist(X, X2)
self.lengthscale.gradient = -np.sum(dL_dr * r) / self.lengthscale
if self.use_invLengthscale: self.inv_l.gradient = self.lengthscale.gradient * (self.lengthscale ** 3 / -2.)
"""
G = Gridworld(5,0,0.9)
trajectories, start_states = G.generate_trajectory(random_start=True)
artificial_trajectories = G.artificial_trajectories(trajectories,start_states)
X = np.random.uniform(-3.,3.,(20,1))
Y = np.sin(X) + np.random.randn(20,1)*0.05
#kernel = TauRBF(3,G.features,trajectories,artificial_trajectories,G.discount)
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
print("Before:")
print(kernel.lengthscale.values)
m = GPy.models.GPRegression(X,Y,kernel)
m.optimize(messages=True)
print("After:")
print(kernel.lengthscale.values)
print("ssss")
"""
"""
X = np.random.uniform(-3.,3.,(20,3))
Y = np.sin(X) + np.random.randn(20,1)*0.05
kernel = TauRBF(input_dim=3,features=G.features,demonstrations=trajectories,art_demonstrations=artificial_trajectories,discount=G.discount)
#kernel = GPy.kern.RBF(input_dim=3, variance=1., lengthscale=1.)
m = GPy.models.GPRegression(X,Y,kernel)
m.optimize(messages=True)
""" | kernel/rhorbfborlange.py | import GPy
import numpy as np
from GPy.kern.src.rbf import RBF
from paramz.caching import Cache_this
from scipy.special import logsumexp
import timeit
import os
class TauRBF(RBF):
def __init__(self, input_dim, features, demonstrations, art_demonstrations, discount, lengthscale=None):
# print("Using custom kernel")
self.input_dim = input_dim
self.features = features
self.demos = demonstrations
self.art_demos = art_demonstrations
self.discount = discount
self.temperature = 1
super(TauRBF, self).__init__(input_dim, lengthscale=lengthscale)
def get_reward(self, X):
X = np.append(X, -20. * np.zeros((len(X), 1)), axis=1)
R = np.dot(X, np.transpose(self.features))
return R
def _get_proxy(self, X, X2=None):
n_art_trajectories, n_trajectories, l_trajectory, _ = np.shape(self.art_demos)
discounts = [self.discount ** t for t in range(l_trajectory)]
discounts_x = np.repeat(np.expand_dims(discounts, axis=0), len(X), 0)
discounts_art_x = np.repeat(
np.expand_dims(np.repeat(np.expand_dims(discounts, axis=0), n_art_trajectories, 0), axis=0), len(X), 0)
Rx = self.get_reward(X)
if X2 is not None:
discounts_x2 = np.repeat(np.expand_dims(discounts, axis=0), len(X2), 0)
discounts_art_x2 = np.repeat(
np.expand_dims(np.repeat(np.expand_dims(discounts, axis=0), n_art_trajectories, 0), axis=0), len(X2), 0)
Rx2 = self.get_reward(X2)
proxy_x = []
proxy_x2 = []
demo_states = self.demos[:, :, 0].astype(int)
art_demo_states = self.art_demos[:, :, :, 0].astype(int)
for t in range(n_trajectories):
Rtau_x = Rx[:, demo_states[t, :]]
Rarttau_x = Rx[:, art_demo_states[:, t, :]]
discounted_Rtau_x = np.multiply(Rtau_x, discounts_x)
discounted_Rtau_x = self.temperature*np.expand_dims(np.sum(discounted_Rtau_x, axis=1), axis=1)
discounted_Rarttau_x = np.multiply(Rarttau_x, discounts_art_x)
discounted_Rarttau_x = np.sum(discounted_Rarttau_x, axis=2)
denominator = self.temperature*np.append(discounted_Rtau_x, discounted_Rarttau_x, axis=1)
#deb_dir = "deb_projections"
#os.makedirs(deb_dir,exist_ok=True)
#np.savetxt(os.path.join(deb_dir,"disc_rart_%d.txt"%t), denominator, fmt="%3.5f")
tosub = 0#np.max(denominator, axis=1, keepdims=True)
den = logsumexp(denominator - tosub, axis=1)
current_proxy_x = np.squeeze(discounted_Rtau_x - tosub) - den
####UNCOMMENT THIS IF YOU WANNA USE THE EXP OF REWARDS#########
current_proxy_x = np.exp(current_proxy_x)
#temp_denominator1 = np.exp(denominator)
#temp_denominator2 = np.sum(temp_denominator1, axis=1)
#temp_numerator1 = np.exp(discounted_Rtau_x)
#temp_proxy = np.divide(temp_numerator1.squeeze(), temp_denominator2)
# current_proxy_x = np.exp(
# np.sum(np.multiply(Rtau_x, discounts_x) - np.multiply(Rtau_x, discounts_x), axis=1))
proxy_x.append(current_proxy_x)
if X2 is not None:
Rtau_x2 = Rx2[:, demo_states[t, :]]
Rarttau_x2 = Rx2[:, art_demo_states[:, t, :]]
discounted_Rtau_x2 = np.multiply(Rtau_x2, discounts_x2)
discounted_Rtau_x2 = np.expand_dims(np.sum(discounted_Rtau_x2, axis=1), axis=1)
discounted_Rarttau_x2 = np.multiply(Rarttau_x2, discounts_art_x2)
discounted_Rarttau_x2 = np.sum(discounted_Rarttau_x2, axis=2)
denominator2 = np.append(discounted_Rtau_x2, discounted_Rarttau_x2, axis=1)
tosub2 = 0#np.max(denominator2, axis=1, keepdims=True)
den2 = logsumexp(denominator2 - tosub2, axis=1)
current_proxy_x2 = np.squeeze(discounted_Rtau_x2 - tosub2) - den2
current_proxy_x2 = np.exp(current_proxy_x2)
# current_proxy_x2 = np.exp(
# np.sum(np.multiply(Rtau_x2, discounts_x2) - np.multiply(Rsumtau_x2, discounts_x2), axis=1))
proxy_x2.append(current_proxy_x2)
proxy_x = np.array(proxy_x).T
if X2 is not None:
proxy_x2 = np.array(proxy_x2).T
else:
proxy_x2 = None
return proxy_x, proxy_x2
@Cache_this(limit=3, ignore_args=())
def _scaled_dist(self, X, X2=None):
#Not debug
#X = np.expand_dims(np.arange(-2, 1, 0.1),axis=1)
#X = np.hstack((X,-1*np.ones((X.shape[0],1))))
#X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
#X = np.vstack((X,np.array([[0., 0., 0.], [2., 1., 1.]])))
#X = np.array([[-2., -1., -1.]])
"""
Efficiently compute the scaled distance, r.
..math::
r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
Note that if thre is only one lengthscale, l comes outside the sum. In
this case we compute the unscaled distance first (in a separate
function for caching) and divide by lengthscale afterwards
"""
proxy_x, proxy_x2 = self._get_proxy(X, X2)
if self.ARD:
if proxy_x2 is not None:
proxy_x2 = proxy_x2 / self.lengthscale
return self._unscaled_dist(proxy_x / self.lengthscale, proxy_x2)
else:
return self._unscaled_dist(proxy_x, proxy_x2) / self.lengthscale
def deb(self, X, X2):
X = np.expand_dims(np.arange(-2, 1, 0.1), axis=1)
X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
X = np.hstack((X, -1 * np.ones((X.shape[0], 1))))
X = np.vstack((X, np.array([[0., 0., 0.], [2., 1., 1.]])))
proxy_x, proxy_x2 = self._get_proxy(X, X2)
usc = self._unscaled_dist(proxy_x, proxy_x2)
ssc = self._unscaled_dist(proxy_x, proxy_x2) / self.lengthscale
ssc2 = ssc ** 2
essc2 = np.exp(ssc2)
v = self.variance
k = self.K(X, X2)
return proxy_x, proxy_x2, usc, ssc, ssc2, essc2, v, k
def gradients_X(self, dL_dK, X, X2=None):
super(TauRBF, self).gradients_X(dL_dK, X, X2)
def Kdiag(self, X):
proxy_x, _ = self._get_proxy(X, None)
ret = np.empty(proxy_x.shape[0])
ret[:] = self.variance
return ret
def update_gradients_full(self, dL_dK, X, X2=None, reset=True):
"""
Given the derivative of the objective wrt the covariance matrix
(dL_dK), compute the gradient wrt the parameters of this kernel,
and store in the parameters object as e.g. self.variance.gradient
"""
assert (not self.ARD)
self.variance.gradient = np.sum(self.K(X, X2) * dL_dK) / self.variance
# now the lengthscale gradient(s)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
if self.ARD:
tmp = dL_dr * self._inv_dist(X, X2)
if X2 is None: X2 = X
if use_stationary_cython:
self.lengthscale.gradient = self._lengthscale_grads_cython(tmp, X, X2)
else:
self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2)
else:
r = self._scaled_dist(X, X2)
self.lengthscale.gradient = -np.sum(dL_dr * r) / self.lengthscale
if self.use_invLengthscale: self.inv_l.gradient = self.lengthscale.gradient * (self.lengthscale ** 3 / -2.)
"""
G = Gridworld(5,0,0.9)
trajectories, start_states = G.generate_trajectory(random_start=True)
artificial_trajectories = G.artificial_trajectories(trajectories,start_states)
X = np.random.uniform(-3.,3.,(20,1))
Y = np.sin(X) + np.random.randn(20,1)*0.05
#kernel = TauRBF(3,G.features,trajectories,artificial_trajectories,G.discount)
kernel = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
print("Before:")
print(kernel.lengthscale.values)
m = GPy.models.GPRegression(X,Y,kernel)
m.optimize(messages=True)
print("After:")
print(kernel.lengthscale.values)
print("ssss")
"""
"""
X = np.random.uniform(-3.,3.,(20,3))
Y = np.sin(X) + np.random.randn(20,1)*0.05
kernel = TauRBF(input_dim=3,features=G.features,demonstrations=trajectories,art_demonstrations=artificial_trajectories,discount=G.discount)
#kernel = GPy.kern.RBF(input_dim=3, variance=1., lengthscale=1.)
m = GPy.models.GPRegression(X,Y,kernel)
m.optimize(messages=True)
""" | 0.343892 | 0.33181 |
from odoo import models,fields,api,_
class VoucherHistory(models.Model):
_inherit = "voucher.history"
marketplace_seller_id = fields.Many2one("res.partner", string="Seller")
@api.model
def create(self, vals):
res = super(VoucherHistory, self).create(vals)
res.marketplace_seller_id = res.voucher_id.marketplace_seller_id.id
return res
class VoucherVoucher(models.Model):
_inherit = "voucher.voucher"
@api.model
def _set_seller_id(self):
user_obj = self.env['res.users'].sudo().browse(self._uid)
if user_obj.partner_id and user_obj.partner_id.seller:
return user_obj.partner_id.id
return self.env['res.partner']
marketplace_seller_id = fields.Many2one("res.partner", string="Seller", default=_set_seller_id, copy=False)
product_ids = fields.Many2many('product.template', 'voucher_id', 'product_id', 'voucher_product_rel',
string='Products',
help="Add products on which this voucher will be valid",
domain = lambda self: [('marketplace_seller_id','in',self.env['voucher.voucher'].compute_login_userid()),('status','=','approved')] if self._context.get('mp_gift_voucher') else [],
)
def compute_login_userid(self):
login_ids = []
seller_group = self.env['ir.model.data'].get_object_reference(
'odoo_marketplace', 'marketplace_seller_group')[1]
officer_group = self.env['ir.model.data'].get_object_reference(
'odoo_marketplace', 'marketplace_officer_group')[1]
groups_ids = self.env.user.sudo().groups_id.ids
if seller_group in groups_ids and officer_group not in groups_ids:
login_ids.append(self.env.user.sudo().partner_id.id)
return login_ids
elif seller_group in groups_ids and officer_group in groups_ids:
obj = self.env['res.partner'].search([('seller','=',True)])
for rec in obj:
login_ids.append(rec.id)
return login_ids | addons/marketplace_voucher/models/mp_voucher_voucher.py |
from odoo import models,fields,api,_
class VoucherHistory(models.Model):
_inherit = "voucher.history"
marketplace_seller_id = fields.Many2one("res.partner", string="Seller")
@api.model
def create(self, vals):
res = super(VoucherHistory, self).create(vals)
res.marketplace_seller_id = res.voucher_id.marketplace_seller_id.id
return res
class VoucherVoucher(models.Model):
_inherit = "voucher.voucher"
@api.model
def _set_seller_id(self):
user_obj = self.env['res.users'].sudo().browse(self._uid)
if user_obj.partner_id and user_obj.partner_id.seller:
return user_obj.partner_id.id
return self.env['res.partner']
marketplace_seller_id = fields.Many2one("res.partner", string="Seller", default=_set_seller_id, copy=False)
product_ids = fields.Many2many('product.template', 'voucher_id', 'product_id', 'voucher_product_rel',
string='Products',
help="Add products on which this voucher will be valid",
domain = lambda self: [('marketplace_seller_id','in',self.env['voucher.voucher'].compute_login_userid()),('status','=','approved')] if self._context.get('mp_gift_voucher') else [],
)
def compute_login_userid(self):
login_ids = []
seller_group = self.env['ir.model.data'].get_object_reference(
'odoo_marketplace', 'marketplace_seller_group')[1]
officer_group = self.env['ir.model.data'].get_object_reference(
'odoo_marketplace', 'marketplace_officer_group')[1]
groups_ids = self.env.user.sudo().groups_id.ids
if seller_group in groups_ids and officer_group not in groups_ids:
login_ids.append(self.env.user.sudo().partner_id.id)
return login_ids
elif seller_group in groups_ids and officer_group in groups_ids:
obj = self.env['res.partner'].search([('seller','=',True)])
for rec in obj:
login_ids.append(rec.id)
return login_ids | 0.30549 | 0.100216 |
import datetime
import re
from collections import OrderedDict
from importlib import import_module
import iso8601
_IMPLEMENTATIONS = {}
class Implementation:
module_name = None
def __init__(self):
self._module = None
if self.module_name:
try:
self._module = import_module(self.module_name)
except ImportError:
pass
def is_available(self):
return self._module is not None
def serialize(self, value, pretty=False):
raise NotImplementedError()
def deserialize(self, value, native_datetimes=True):
raise NotImplementedError()
class ImplementationRegistry:
def __init__(self):
self.implementations = OrderedDict()
@property
def registered_packages(self):
return tuple(self.implementations.keys())
@property
def available_packages(self):
return tuple(
k
for k, v in self.implementations.items()
if v.is_available()
)
def register(self, package, clazz):
self.implementations[package] = clazz()
def get(self, package=None):
if package:
impl = self.implementations.get(package)
if not impl:
raise ValueError(
'"%s" is not a supported package' % (package,)
)
if not impl.is_available():
raise ValueError(
'The "%s" package is not currently available' % (package,)
)
return impl
for impl in self.implementations.values():
if impl.is_available():
return impl
raise NotImplementedError(
'No supported package available'
)
RE_DATE = re.compile(
r'^\d{4}-\d{2}-\d{2}$',
)
RE_TIME = re.compile(
r'^\d{2}:\d{2}:\d{2}(?P<fs>\.\d+)?$',
)
RE_DATETIME = re.compile(
r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[-+](\d{2}:\d{2}))?$',
)
def get_date_or_string(value): # noqa: complex
if RE_DATETIME.match(value):
try:
return iso8601.parse_date(value, default_timezone=None)
except iso8601.ParseError:
pass
if RE_DATE.match(value):
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
pass
match = RE_TIME.match(value)
if match:
if match.groupdict()['fs']:
fmt = '%H:%M:%S.%f'
else:
fmt = '%H:%M:%S'
try:
return datetime.datetime.strptime(value, fmt).time()
except ValueError:
pass
return value
def convert_datetimes(value):
if isinstance(value, list):
pairs = enumerate(value)
elif isinstance(value, dict):
pairs = value.items()
results = []
for key, val in pairs:
if isinstance(val, str):
val = get_date_or_string(val)
elif isinstance(val, (dict, list)):
val = convert_datetimes(val)
results.append((key, val))
if isinstance(value, list):
return [result[1] for result in results]
return dict(results) | src/basicserial/util.py |
import datetime
import re
from collections import OrderedDict
from importlib import import_module
import iso8601
_IMPLEMENTATIONS = {}
class Implementation:
module_name = None
def __init__(self):
self._module = None
if self.module_name:
try:
self._module = import_module(self.module_name)
except ImportError:
pass
def is_available(self):
return self._module is not None
def serialize(self, value, pretty=False):
raise NotImplementedError()
def deserialize(self, value, native_datetimes=True):
raise NotImplementedError()
class ImplementationRegistry:
def __init__(self):
self.implementations = OrderedDict()
@property
def registered_packages(self):
return tuple(self.implementations.keys())
@property
def available_packages(self):
return tuple(
k
for k, v in self.implementations.items()
if v.is_available()
)
def register(self, package, clazz):
self.implementations[package] = clazz()
def get(self, package=None):
if package:
impl = self.implementations.get(package)
if not impl:
raise ValueError(
'"%s" is not a supported package' % (package,)
)
if not impl.is_available():
raise ValueError(
'The "%s" package is not currently available' % (package,)
)
return impl
for impl in self.implementations.values():
if impl.is_available():
return impl
raise NotImplementedError(
'No supported package available'
)
RE_DATE = re.compile(
r'^\d{4}-\d{2}-\d{2}$',
)
RE_TIME = re.compile(
r'^\d{2}:\d{2}:\d{2}(?P<fs>\.\d+)?$',
)
RE_DATETIME = re.compile(
r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[-+](\d{2}:\d{2}))?$',
)
def get_date_or_string(value): # noqa: complex
if RE_DATETIME.match(value):
try:
return iso8601.parse_date(value, default_timezone=None)
except iso8601.ParseError:
pass
if RE_DATE.match(value):
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
pass
match = RE_TIME.match(value)
if match:
if match.groupdict()['fs']:
fmt = '%H:%M:%S.%f'
else:
fmt = '%H:%M:%S'
try:
return datetime.datetime.strptime(value, fmt).time()
except ValueError:
pass
return value
def convert_datetimes(value):
if isinstance(value, list):
pairs = enumerate(value)
elif isinstance(value, dict):
pairs = value.items()
results = []
for key, val in pairs:
if isinstance(val, str):
val = get_date_or_string(val)
elif isinstance(val, (dict, list)):
val = convert_datetimes(val)
results.append((key, val))
if isinstance(value, list):
return [result[1] for result in results]
return dict(results) | 0.527317 | 0.16896 |
from __future__ import absolute_import
import uuid
from .core import _validate_bool
from .number import _validate_int
from .common import make_optional_argument_default
_undefined = make_optional_argument_default()
def _variant_to_string(variant):
return {
uuid.RESERVED_NCS: "RESERVED_NCS",
uuid.RFC_4122: "RFC_4122",
uuid.RESERVED_MICROSOFT: "RESERVED_MICROSOFT",
uuid.RESERVED_FUTURE: "RESERVED_FUTURE",
}.get(variant, "unknown")
def _validate_uuid(
value,
variant,
version,
required
):
if value is None:
if required:
raise TypeError("required value is None")
return
if not isinstance(value, uuid.UUID):
raise TypeError((
"expected uuid, but value is of type {cls!r}"
).format(cls=value.__class__.__name__))
if variant is not None and value.variant != variant:
raise ValueError((
"expected {expected} variant, but uuid variant is {actual}"
).format(
expected=_variant_to_string(variant),
actual=_variant_to_string(value.variant)
))
if version is not None and value.version != version:
raise ValueError((
"expected UUID{expected}, but received UUID{actual}"
).format(expected=version, actual=version))
class _uuid_validator(object):
    """Callable validator configured with UUID expectations.

    All configuration is checked once at construction time; each call then
    delegates to :func:`_validate_uuid` with the stored settings.
    """

    def __init__(
        self,
        variant,
        version,
        required
    ):
        known_variants = (
            uuid.RESERVED_NCS,
            uuid.RFC_4122,
            uuid.RESERVED_MICROSOFT,
            uuid.RESERVED_FUTURE,
        )
        if variant is not None and variant not in known_variants:
            raise ValueError("unknown variant")
        self.__variant = variant

        _validate_int(version, required=False)
        if version is not None:
            if version not in (1, 3, 4, 5):
                raise ValueError(
                    "unknown UUID version: {version}".format(version=version)
                )
            # A version number only makes sense for RFC 4122 UUIDs; an
            # unspecified variant is treated as RFC 4122 for this check.
            effective_variant = uuid.RFC_4122 if variant is None else variant
            if effective_variant != uuid.RFC_4122:
                raise ValueError((
                    "version is specified, but variant is {variant}"
                ).format(variant=_variant_to_string(effective_variant)))
        self.__version = version

        _validate_bool(required)
        self.__required = required

    def __call__(self, value):
        """Validate *value* against the configured constraints."""
        _validate_uuid(
            value,
            variant=self.__variant,
            version=self.__version,
            required=self.__required,
        )

    def __repr__(self):
        """Return a ``validate_uuid(...)``-style representation."""
        pieces = []
        if self.__variant is not None:
            pieces.append(
                'variant=uuid.' + _variant_to_string(self.__variant)
            )
        if self.__version is not None:
            pieces.append('version={0!r}'.format(self.__version))
        if not self.__required:
            pieces.append('required={0!r}'.format(self.__required))
        return 'validate_uuid({0})'.format(', '.join(pieces))
def validate_uuid(
value=_undefined,
variant=None,
version=None,
required=True,
):
"""
Checks that the target value is a valid UUID.
Parameters can be used to narrow down exactly what sort of UUID is
expected.
.. code:: python
def do_the_thing(identifier):
validate_uuid(
identifier,
variant=uuid.RFC_4122,
version=3,
)
# Do something
...
:param unicode value:
The uuid to be validated.
:param int variant:
The UUID variant determines the internal layout of the UUID. This must
be one of `RESERVED_NCS`, `RFC_4122`, `RESERVED_MICROSOFT`, or
`RESERVED_FUTURE` from the `uuid` module.
:param int version:
Can be 1, 3, 4, or 5.
:param bool required:
Whether the value can be `None`. Defaults to `True`.
:raises TypeError:
If the value is not a unicode string , or if it was marked as
`required` but `None` was passed in.
:raises ValueError:
If the value was longer or shorter than expected, or did not match
the pattern.
"""
validate = _uuid_validator(
variant=variant,
version=version,
required=required,
)
if value is not _undefined:
validate(value)
else:
return validate | venv/lib/python3.8/site-packages/validation/uuid.py | from __future__ import absolute_import
import uuid
from .core import _validate_bool
from .number import _validate_int
from .common import make_optional_argument_default
_undefined = make_optional_argument_default()
def _variant_to_string(variant):
return {
uuid.RESERVED_NCS: "RESERVED_NCS",
uuid.RFC_4122: "RFC_4122",
uuid.RESERVED_MICROSOFT: "RESERVED_MICROSOFT",
uuid.RESERVED_FUTURE: "RESERVED_FUTURE",
}.get(variant, "unknown")
def _validate_uuid(
value,
variant,
version,
required
):
if value is None:
if required:
raise TypeError("required value is None")
return
if not isinstance(value, uuid.UUID):
raise TypeError((
"expected uuid, but value is of type {cls!r}"
).format(cls=value.__class__.__name__))
if variant is not None and value.variant != variant:
raise ValueError((
"expected {expected} variant, but uuid variant is {actual}"
).format(
expected=_variant_to_string(variant),
actual=_variant_to_string(value.variant)
))
if version is not None and value.version != version:
raise ValueError((
"expected UUID{expected}, but received UUID{actual}"
).format(expected=version, actual=version))
class _uuid_validator(object):
def __init__(
self,
variant,
version,
required
):
if variant is not None and variant not in (
uuid.RESERVED_NCS,
uuid.RFC_4122,
uuid.RESERVED_MICROSOFT,
uuid.RESERVED_FUTURE,
):
raise ValueError("unknown variant")
self.__variant = variant
_validate_int(version, required=False)
if version is not None:
if version not in (1, 3, 4, 5):
raise ValueError(
"unknown UUID version: {version}".format(version=version)
)
if variant is None:
variant = uuid.RFC_4122
if variant != uuid.RFC_4122:
raise ValueError((
"version is specified, but variant is {variant}"
).format(variant=_variant_to_string(variant)))
self.__version = version
_validate_bool(required)
self.__required = required
def __call__(self, value):
_validate_uuid(
value,
variant=self.__variant,
version=self.__version,
required=self.__required
)
def __repr__(self):
args = []
if self.__variant is not None:
args.append('variant=uuid.{variant}'.format(
variant=_variant_to_string(self.__variant),
))
if self.__version is not None:
args.append('version={version!r}'.format(
version=self.__version,
))
if not self.__required:
args.append('required={required!r}'.format(
required=self.__required,
))
return 'validate_uuid({args})'.format(args=', '.join(args))
def validate_uuid(
value=_undefined,
variant=None,
version=None,
required=True,
):
"""
Checks that the target value is a valid UUID.
Parameters can be used to narrow down exactly what sort of UUID is
expected.
.. code:: python
def do_the_thing(identifier):
validate_uuid(
identifier,
variant=uuid.RFC_4122,
version=3,
)
# Do something
...
:param unicode value:
The uuid to be validated.
:param int variant:
The UUID variant determines the internal layout of the UUID. This must
be one of `RESERVED_NCS`, `RFC_4122`, `RESERVED_MICROSOFT`, or
`RESERVED_FUTURE` from the `uuid` module.
:param int version:
Can be 1, 3, 4, or 5.
:param bool required:
Whether the value can be `None`. Defaults to `True`.
:raises TypeError:
If the value is not a unicode string , or if it was marked as
`required` but `None` was passed in.
:raises ValueError:
If the value was longer or shorter than expected, or did not match
the pattern.
"""
validate = _uuid_validator(
variant=variant,
version=version,
required=required,
)
if value is not _undefined:
validate(value)
else:
return validate | 0.864496 | 0.134548 |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from skimage.io import imread
import keras
from keras import Sequential
from tensorflow.keras.applications.mobilenet import MobileNet
from keras.layers import Dense
from keras.preprocessing import image
import tensorflow as tf
import tensorflow.keras.layers as layers
import warnings
from random import shuffle
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import cv2
from sklearn.metrics import classification_report
def set_seed(seed=42):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
set_seed()
brain_df = pd.read_csv('../Dataset/BrainTumor.csv', usecols=[0,1])
brain_df.head()
brain_df.isnull().sum()
brain_df['Class'].value_counts()
sns.countplot(brain_df['Class'])
path_list = []
base_path = '..\Images'
for entry in os.listdir(base_path):
path_list.append(os.path.join(base_path, entry))
paths_dict = {os.path.splitext(os.path.basename(x))[0]: x for x in path_list}
brain_df['Path'] = brain_df['Image'].map(paths_dict.get)
brain_df.head()
for x in range(0,9):
plt.subplot(3,3,x+1)
plt.xticks([])
plt.yticks([])
img = imread(brain_df['Path'][x])
plt.imshow(img)
plt.xlabel(brain_df['Class'][x])
brain_df['split'] = np.random.randn(brain_df.shape[0], 1)
msk = np.random.rand(len(brain_df)) <= 0.8
train_df = brain_df[msk]
test_df = brain_df[~msk]
train_df.to_csv('brain_tumor_train.csv', index=False)
test_df.to_csv('brain_tumor_test.csv', index=False)
train_list = train_df.values.tolist()
test_list = test_df.values.tolist()
def generator(samples, batch_size=32,shuffle_data=False):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True:
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
X_train = []
y_train = []
for batch_sample in batch_samples:
label = batch_sample[1]
img_path = batch_sample[2]
img = cv2.imread(img_path)
# img = cv2.resize(img,(224,224))
img = img.astype(np.float32)
X_train.append(keras.applications.nasnet.preprocess_input(img))
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
yield X_train, y_train
train_generator = generator(train_list)
test_generator = generator(test_list)
model = Sequential([
MobileNet(input_shape=(240, 240, 3),include_top=False, weights='imagenet'),
layers.GlobalAveragePooling2D(),
layers.Dropout(0.2),
layers.Dense(units=1, activation='sigmoid',name='preds'),
])
model.layers[0].trainable= False
model.summary()
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(epsilon=0.01),
metrics=['binary_accuracy']
)
batch_size = 32
train_size = len(train_list)
test_size = len(test_list)
steps_per_epoch = train_size//batch_size
validation_steps = test_size//batch_size
early_stopping = keras.callbacks.EarlyStopping(
patience=10,
min_delta=0.001,
restore_best_weights=True,
)
# history = model.fit_generator(
# train_generator,
# steps_per_epoch = steps_per_epoch,
# epochs=10,
# validation_data=test_generator,
# validation_steps = validation_steps,
# verbose=1,
# callbacks = [early_stopping]
# )
# model.save("model_brain_adam.h5")
# print("Saved model to disk")
# history_df = pd.DataFrame(history.history)
# history_df.loc[5:, ['loss', 'val_loss']].plot()
# history_df.loc[5:, ['binary_accuracy', 'val_binary_accuracy']].plot()
# print(("Best Validation Loss: {:0.4f}" +\
# "\nBest Validation Accuracy: {:0.4f}")\
# .format(history_df['val_loss'].min(),
# history_df['val_binary_accuracy'].max()))
pretrained_cnn = keras.models.load_model('model_brain_adam.h5')
# eval_score = pretrained_cnn.evaluate(test_generator, steps = validation_steps)
# print('Eval loss:',eval_score[0])
# print('Eval accuracy:',eval_score[1])
y_pred = np.rint(pretrained_cnn.predict(test_generator, steps = validation_steps)).astype(int)
y_test = [i[1] for i in test_list[0:-2]]
target_classes = ['No Tumor','Tumor']
print(classification_report(y_test , y_pred))
ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
# print(classification_report(y_test, y_pred))
# cm = confusion_matrix(y_test, y_pred)
# f = sns.heatmap(cm, annot=True, fmt='d')
plt.show() | Classifiers/CNN.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from skimage.io import imread
import keras
from keras import Sequential
from tensorflow.keras.applications.mobilenet import MobileNet
from keras.layers import Dense
from keras.preprocessing import image
import tensorflow as tf
import tensorflow.keras.layers as layers
import warnings
from random import shuffle
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import cv2
from sklearn.metrics import classification_report
def set_seed(seed=42):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
set_seed()
brain_df = pd.read_csv('../Dataset/BrainTumor.csv', usecols=[0,1])
brain_df.head()
brain_df.isnull().sum()
brain_df['Class'].value_counts()
sns.countplot(brain_df['Class'])
path_list = []
base_path = '..\Images'
for entry in os.listdir(base_path):
path_list.append(os.path.join(base_path, entry))
paths_dict = {os.path.splitext(os.path.basename(x))[0]: x for x in path_list}
brain_df['Path'] = brain_df['Image'].map(paths_dict.get)
brain_df.head()
for x in range(0,9):
plt.subplot(3,3,x+1)
plt.xticks([])
plt.yticks([])
img = imread(brain_df['Path'][x])
plt.imshow(img)
plt.xlabel(brain_df['Class'][x])
brain_df['split'] = np.random.randn(brain_df.shape[0], 1)
msk = np.random.rand(len(brain_df)) <= 0.8
train_df = brain_df[msk]
test_df = brain_df[~msk]
train_df.to_csv('brain_tumor_train.csv', index=False)
test_df.to_csv('brain_tumor_test.csv', index=False)
train_list = train_df.values.tolist()
test_list = test_df.values.tolist()
def generator(samples, batch_size=32,shuffle_data=False):
"""
Yields the next training batch.
Suppose `samples` is an array [[image1_filename,label1], [image2_filename,label2],...].
"""
num_samples = len(samples)
while True:
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
X_train = []
y_train = []
for batch_sample in batch_samples:
label = batch_sample[1]
img_path = batch_sample[2]
img = cv2.imread(img_path)
# img = cv2.resize(img,(224,224))
img = img.astype(np.float32)
X_train.append(keras.applications.nasnet.preprocess_input(img))
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
yield X_train, y_train
train_generator = generator(train_list)
test_generator = generator(test_list)
model = Sequential([
MobileNet(input_shape=(240, 240, 3),include_top=False, weights='imagenet'),
layers.GlobalAveragePooling2D(),
layers.Dropout(0.2),
layers.Dense(units=1, activation='sigmoid',name='preds'),
])
model.layers[0].trainable= False
model.summary()
model.compile(
loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(epsilon=0.01),
metrics=['binary_accuracy']
)
batch_size = 32
train_size = len(train_list)
test_size = len(test_list)
steps_per_epoch = train_size//batch_size
validation_steps = test_size//batch_size
early_stopping = keras.callbacks.EarlyStopping(
patience=10,
min_delta=0.001,
restore_best_weights=True,
)
# history = model.fit_generator(
# train_generator,
# steps_per_epoch = steps_per_epoch,
# epochs=10,
# validation_data=test_generator,
# validation_steps = validation_steps,
# verbose=1,
# callbacks = [early_stopping]
# )
# model.save("model_brain_adam.h5")
# print("Saved model to disk")
# history_df = pd.DataFrame(history.history)
# history_df.loc[5:, ['loss', 'val_loss']].plot()
# history_df.loc[5:, ['binary_accuracy', 'val_binary_accuracy']].plot()
# print(("Best Validation Loss: {:0.4f}" +\
# "\nBest Validation Accuracy: {:0.4f}")\
# .format(history_df['val_loss'].min(),
# history_df['val_binary_accuracy'].max()))
pretrained_cnn = keras.models.load_model('model_brain_adam.h5')
# eval_score = pretrained_cnn.evaluate(test_generator, steps = validation_steps)
# print('Eval loss:',eval_score[0])
# print('Eval accuracy:',eval_score[1])
y_pred = np.rint(pretrained_cnn.predict(test_generator, steps = validation_steps)).astype(int)
y_test = [i[1] for i in test_list[0:-2]]
target_classes = ['No Tumor','Tumor']
print(classification_report(y_test , y_pred))
ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
# print(classification_report(y_test, y_pred))
# cm = confusion_matrix(y_test, y_pred)
# f = sns.heatmap(cm, annot=True, fmt='d')
plt.show() | 0.381565 | 0.302146 |
import discord
import datetime
import warnings
from discord.ext import commands, tasks
from discord import app_commands
import aiohttp
import aiohttp
class example(commands.Cog):
def __init__(self, bot: commands.Bot,) -> None:
self.bot = bot
@app_commands.command(name="clean", description="clear messages")
@app_commands.checks.has_permissions(manage_messages = True)
@app_commands.checks.cooldown(1,5, key=lambda j:(j.guild_id,j.user.id))
async def clearcommand(self, interaction: discord.Interaction,amount : int = 5):
await interaction.channel.purge(limit=amount)
await interaction.response.defer( ephemeral= True)
await interaction.followup.send(f"{amount} message deleted from channel succesfully",)
@clearcommand.error
async def clearcommand_error(self, interaction : discord.Interaction, error: app_commands.AppCommandError):
if isinstance(error ,app_commands.MissingPermissions):
await interaction.response.send_message("your mom, go and get some permissions first", ephemeral= True)
elif isinstance(error ,app_commands.CommandOnCooldown):
await interaction.response.send_message(f"{error}", ephemeral= True)
else:
await interaction.response.send_message(f"something wen wrong do $help for help $bugs for reporting a bug", ephemeral=True)
@commands.command()
@commands.has_permissions(kick_members=True)
async def clear(self,ctx, amount=5):
await ctx.channel.purge(limit = amount)
@commands.command(description="kick a random user")
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.kick(member)
await ctx.channel.send(f'User {member.mention} has been ejected for {reason}')
@commands.command(description="ban a random user")
@commands.has_permissions(ban_members=True)
async def ban(self,ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.ban(member)
await ctx.channel.send(f'User {member.mention} has been banned for {reason}')
@commands.command(description="unban a random user")
@commands.has_permissions(administrator=True)
async def unban(self, ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split("#")
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.channel.send(f'Unbanned {user.mention}')
return
@commands.command(aliases = ["gl"])
@commands.is_owner()
async def guild_leave(self, ctx ,guildid):
guild = await self.bot.fetch_guild(int(guildid))
await guild.leave()
embed = discord.Embed(title=f"i left the guild {guild.name} ")
await ctx.send(embed = embed)
#==========================================================================
'''@commands.command(description="ban a random user")
@commands.has_permissions(ban_members=True)
async def timeout(self, ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.time(member)
await ctx.channel.send(f'User {member.mention} has been banned for {reason}')'''
'''@commands.command(description="kick any random user")
@commands.has_permissions(kick_members=True)
async def mute(self,ctx, member: discord.Member):
if ctx.guild.id == 946345220625277030:
muted_role = 957305814996107294
await ctx.member.add_roles(muted_role)
await ctx.channel.send(member.mention + "has been muted")'''
'''@commands.command(pass_context=True)
async def mute(self, ctx, member: discord.Member):
if ctx.message.author.server_permissions.administrator or ctx.message.author.id == '1<PASSWORD>':
role = discord.utils.get(member.server.roles, name='Muted')
await ctx.add_roles(member, role)
embed = discord.Embed(title="User Muted!",
description="**{0}** was muted by **{1}**!".format(member, ctx.message.author),
color=0xff00f6)
await ctx.say(embed=embed)
else:
embed = discord.Embed(title="Permission Denied.",
description="You don't have permission to use this command.", color=0xff00f6)
await ctx.say(embed=embed)'''
'''@commands.command(description="mutes the specified user.")
@commands.has_permissions(manage_messages=True)
async def mute(self, ctx, member: discord.member, *, reason=None):
guild = ctx.guild
muterole = discord.utils.get(guild.roles, name="Muted")
if not muterole:
muterole = await guild.create_role(name="Muted")
for channel in guild.channels:
await channel.set_permission(muterole, speak=False, send_messages=False, read_message_history=True, read_messages=False)
await member.add_roles(muterole, reason=reason)
await ctx.send(f"Muted {member.mention} for reason {reason}")
await member.send(f'you were muted server {guild.name} for {reason}')'''
# ==========================================================================
'''@commands.command(description="unban a random user")
@commands.has_permissions(administrator=True)
async def unban(self,ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split("#")
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.channel.send(f'Unbanned {user.mention}')
return'''
async def setup(bot: commands.Bot ) -> None:
await bot.add_cog(
example(bot)) | cogs/example.py | import discord
import datetime
import warnings
from discord.ext import commands, tasks
from discord import app_commands
import aiohttp
import aiohttp
class example(commands.Cog):
def __init__(self, bot: commands.Bot,) -> None:
self.bot = bot
@app_commands.command(name="clean", description="clear messages")
@app_commands.checks.has_permissions(manage_messages = True)
@app_commands.checks.cooldown(1,5, key=lambda j:(j.guild_id,j.user.id))
async def clearcommand(self, interaction: discord.Interaction,amount : int = 5):
await interaction.channel.purge(limit=amount)
await interaction.response.defer( ephemeral= True)
await interaction.followup.send(f"{amount} message deleted from channel succesfully",)
@clearcommand.error
async def clearcommand_error(self, interaction : discord.Interaction, error: app_commands.AppCommandError):
if isinstance(error ,app_commands.MissingPermissions):
await interaction.response.send_message("your mom, go and get some permissions first", ephemeral= True)
elif isinstance(error ,app_commands.CommandOnCooldown):
await interaction.response.send_message(f"{error}", ephemeral= True)
else:
await interaction.response.send_message(f"something wen wrong do $help for help $bugs for reporting a bug", ephemeral=True)
@commands.command()
@commands.has_permissions(kick_members=True)
async def clear(self,ctx, amount=5):
await ctx.channel.purge(limit = amount)
@commands.command(description="kick a random user")
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.kick(member)
await ctx.channel.send(f'User {member.mention} has been ejected for {reason}')
@commands.command(description="ban a random user")
@commands.has_permissions(ban_members=True)
async def ban(self,ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.ban(member)
await ctx.channel.send(f'User {member.mention} has been banned for {reason}')
@commands.command(description="unban a random user")
@commands.has_permissions(administrator=True)
async def unban(self, ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split("#")
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.channel.send(f'Unbanned {user.mention}')
return
@commands.command(aliases = ["gl"])
@commands.is_owner()
async def guild_leave(self, ctx ,guildid):
guild = await self.bot.fetch_guild(int(guildid))
await guild.leave()
embed = discord.Embed(title=f"i left the guild {guild.name} ")
await ctx.send(embed = embed)
#==========================================================================
'''@commands.command(description="ban a random user")
@commands.has_permissions(ban_members=True)
async def timeout(self, ctx, member: discord.Member, *, reason=None):
if reason == None:
reason = " no reason provided"
await ctx.guild.time(member)
await ctx.channel.send(f'User {member.mention} has been banned for {reason}')'''
'''@commands.command(description="kick any random user")
@commands.has_permissions(kick_members=True)
async def mute(self,ctx, member: discord.Member):
if ctx.guild.id == 946345220625277030:
muted_role = 957305814996107294
await ctx.member.add_roles(muted_role)
await ctx.channel.send(member.mention + "has been muted")'''
'''@commands.command(pass_context=True)
async def mute(self, ctx, member: discord.Member):
if ctx.message.author.server_permissions.administrator or ctx.message.author.id == '1<PASSWORD>':
role = discord.utils.get(member.server.roles, name='Muted')
await ctx.add_roles(member, role)
embed = discord.Embed(title="User Muted!",
description="**{0}** was muted by **{1}**!".format(member, ctx.message.author),
color=0xff00f6)
await ctx.say(embed=embed)
else:
embed = discord.Embed(title="Permission Denied.",
description="You don't have permission to use this command.", color=0xff00f6)
await ctx.say(embed=embed)'''
'''@commands.command(description="mutes the specified user.")
@commands.has_permissions(manage_messages=True)
async def mute(self, ctx, member: discord.member, *, reason=None):
guild = ctx.guild
muterole = discord.utils.get(guild.roles, name="Muted")
if not muterole:
muterole = await guild.create_role(name="Muted")
for channel in guild.channels:
await channel.set_permission(muterole, speak=False, send_messages=False, read_message_history=True, read_messages=False)
await member.add_roles(muterole, reason=reason)
await ctx.send(f"Muted {member.mention} for reason {reason}")
await member.send(f'you were muted server {guild.name} for {reason}')'''
# ==========================================================================
'''@commands.command(description="unban a random user")
@commands.has_permissions(administrator=True)
async def unban(self,ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split("#")
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
await ctx.channel.send(f'Unbanned {user.mention}')
return'''
async def setup(bot: commands.Bot ) -> None:
await bot.add_cog(
example(bot)) | 0.298287 | 0.081703 |
from layeredGraphLayouter.containers.constants import PortType, PortSide
from layeredGraphLayouter.containers.lNode import LNode
from layeredGraphLayouter.crossing.abstractBarycenterPortDistributor import AbstractBarycenterPortDistributor
class NodeRelativePortDistributor(AbstractBarycenterPortDistributor):
def calculatePortRanks(self, node: LNode, rankSum: float, typ: PortType):
portRanks = self.portRanks
if typ == PortType.INPUT:
# Count the number of input ports, and additionally the north-side
# input ports
inputCount = 0
northInputCount = 0
for port in node.iterPorts():
if port.incomingEdges:
inputCount += 1
if port.side == PortSide.NORTH:
northInputCount += 1
# Assign port ranks in the order north - west - south - east
incr = 1.0 / (inputCount + 1)
northPos = rankSum + northInputCount * incr
restPos = rankSum + 1 - incr
for port in node.getPortsByType(PortType.INPUT):
if port.side == PortSide.NORTH:
portRanks[port] = northPos
northPos -= incr
else:
portRanks[port] = restPos
restPos -= incr
elif typ == PortType.OUTPUT:
# Count the number of output ports
outputCount = 0
for port in node.iterPorts():
if port.outgoingEdges:
outputCount += 1
# Iterate output ports in their natural order, that is north - east
# - south - west
incr = 1.0 / (outputCount + 1)
pos = rankSum + incr
for port in node.getPortsByType(PortType.OUTPUT):
portRanks[port] = pos
pos += incr
else:
# this means illegal input to the method
raise ValueError("Port type is undefined", typ)
# the consumed rank is always 1
return 1 | layeredGraphLayouter/crossing/nodeRelativePortDistributor.py | from layeredGraphLayouter.containers.constants import PortType, PortSide
from layeredGraphLayouter.containers.lNode import LNode
from layeredGraphLayouter.crossing.abstractBarycenterPortDistributor import AbstractBarycenterPortDistributor
class NodeRelativePortDistributor(AbstractBarycenterPortDistributor):
def calculatePortRanks(self, node: LNode, rankSum: float, typ: PortType):
portRanks = self.portRanks
if typ == PortType.INPUT:
# Count the number of input ports, and additionally the north-side
# input ports
inputCount = 0
northInputCount = 0
for port in node.iterPorts():
if port.incomingEdges:
inputCount += 1
if port.side == PortSide.NORTH:
northInputCount += 1
# Assign port ranks in the order north - west - south - east
incr = 1.0 / (inputCount + 1)
northPos = rankSum + northInputCount * incr
restPos = rankSum + 1 - incr
for port in node.getPortsByType(PortType.INPUT):
if port.side == PortSide.NORTH:
portRanks[port] = northPos
northPos -= incr
else:
portRanks[port] = restPos
restPos -= incr
elif typ == PortType.OUTPUT:
# Count the number of output ports
outputCount = 0
for port in node.iterPorts():
if port.outgoingEdges:
outputCount += 1
# Iterate output ports in their natural order, that is north - east
# - south - west
incr = 1.0 / (outputCount + 1)
pos = rankSum + incr
for port in node.getPortsByType(PortType.OUTPUT):
portRanks[port] = pos
pos += incr
else:
# this means illegal input to the method
raise ValueError("Port type is undefined", typ)
# the consumed rank is always 1
return 1 | 0.492432 | 0.340471 |
import matplotlib
matplotlib.use('Agg')
import numpy as np
from scipy.stats import pearsonr
import pylab as pl
import sys
import json
import yaml
sys.path.append("code/striatal_model")
import params
import colors
from plot_tools2 import *
import matplotlib.pyplot as plt
import pandas
import seaborn as sbn
import matplotlib.collections as collections
import scalebars
# --- Configuration and command-line parsing ---------------------------------
# Flat positional argv interface (no argparse). Expected layout:
#   argv[1 : 1+num_trials]                  per-trial spike data files
#   argv[1+num_trials : 1+2*num_trials]     per-trial channel-definition JSON files
#   argv[1+2*num_trials]                    experiment YAML config
#   argv[2+2*num_trials]                    hemisphere identifier
#   argv[3+2*num_trials .. 5+2*num_trials]  output paths: traces figure,
#                                           correlation figure, correlation JSON
grid_dimensions = [6, 6]  # NOTE(review): unused in this script -- presumably the channel grid shape
num_trials = 5  # number of spike/channel file pairs expected on the command line
print len(sys.argv), sys.argv
all_spikes_fn = sys.argv[1:1 + num_trials]
all_channels_fn = sys.argv[1 + 1 * num_trials:1 + 2 * num_trials]
experiment_fn = sys.argv[1 + 2 * num_trials]
hemisphere = sys.argv[2 + 2 * num_trials]
traces_out_fn = sys.argv[3 + 2 * num_trials]
corr_out_fn = sys.argv[4 + 2 * num_trials]
corr_data_out = sys.argv[5 + 2 * num_trials]
# Accumulates one correlation coefficient per (trial, channel pair, cell-type pair).
df = pandas.DataFrame({"type": [], "channel": [], "CC": [], "trial": []})
# --- Per-trial processing ----------------------------------------------------
# For each trial: load spikes, filter the spike trains of the two competing
# channels (D1 and D2 populations of "go left" / "go right"), restrict to
# stimulation periods, mean-centre, and record pairwise correlations in df.
for trial in range(num_trials):
    print "trial", trial
    spikes_fn = all_spikes_fn[trial]
    channels_fn = all_channels_fn[trial]
    # spike data of the channels: column 0 = sender GID, column 1 = spike time
    data = np.loadtxt(spikes_fn)
    senders = data[:, 0]
    unique_senders = np.unique(senders) # all active senders
    times = data[:, 1]
    with open(channels_fn, "r+") as f:
        channels = json.load(f)
        channels = channels['channels']
    with open(experiment_fn, "r+") as f:
        # SECURITY(review): yaml.load without an explicit Loader can execute
        # arbitrary Python from the config file; yaml.safe_load is preferable.
        cfg = yaml.load(f)
    # Boolean mask (as ints) of time bins during which stimulation was on.
    stim_times = get_stim_times(cfg, hemisphere, params, mask=True)[
        0].astype('int')
    # Indices of the two competing channels in the channel list.
    chan_go_left = 21
    chan_go_right = 22
    # Exponential kernel for smoothing spike trains; 1 ms resolution,
    # decay constant 0.3 s, 5 s support.
    exp_filter = np.exp(np.arange(0, 5, 0.001) / -0.3)
    # Per-neuron binary spike masks for each population of each channel.
    spike_masks_go_left_d1 = get_spikes_mask(
        senders, times, channels[chan_go_left]['d1'])
    spike_masks_go_left_d2 = get_spikes_mask(
        senders, times, channels[chan_go_left]['d2'])
    spike_masks_go_right_d1 = get_spikes_mask(
        senders, times, channels[chan_go_right]['d1'])
    spike_masks_go_right_d2 = get_spikes_mask(
        senders, times, channels[chan_go_right]['d2'])
    print "mask done"
    # Convolve each population's spike masks with the exponential kernel.
    filtered_spikes_go_left_d1 = filter_spikes(
        spike_masks_go_left_d1, exp_filter)
    filtered_spikes_go_left_d2 = filter_spikes(
        spike_masks_go_left_d2, exp_filter)
    filtered_spikes_go_right_d1 = filter_spikes(
        spike_masks_go_right_d1, exp_filter)
    filtered_spikes_go_right_d2 = filter_spikes(
        spike_masks_go_right_d2, exp_filter)
    # Population average over neurons, restricted to stimulation bins.
    filtered_spikes_go_left_d1 = np.mean(filtered_spikes_go_left_d1, axis=0)[
        np.where(stim_times)]
    filtered_spikes_go_left_d2 = np.mean(filtered_spikes_go_left_d2, axis=0)[
        np.where(stim_times)]
    filtered_spikes_go_right_d1 = np.mean(filtered_spikes_go_right_d1, axis=0)[
        np.where(stim_times)]
    filtered_spikes_go_right_d2 = np.mean(filtered_spikes_go_right_d2, axis=0)[
        np.where(stim_times)]
    # Mean-centre each trace so correlations reflect fluctuations only.
    filtered_spikes_go_left_d1 -= np.mean(filtered_spikes_go_left_d1)
    filtered_spikes_go_left_d2 -= np.mean(filtered_spikes_go_left_d2)
    filtered_spikes_go_right_d1 -= np.mean(filtered_spikes_go_right_d1)
    filtered_spikes_go_right_d2 -= np.mean(filtered_spikes_go_right_d2)
    print "filter done"
    # Within-channel D1-D2 correlations and between-channel same-type
    # correlations; correlate2(...)[0, 1] is presumably the off-diagonal
    # correlation coefficient (helper defined in plot_tools2 -- verify).
    df = df.append({"channel": 'go left', 'type': 'd1d2', 'trial': trial, 'CC': correlate2(
        filtered_spikes_go_left_d1, filtered_spikes_go_left_d2)[0, 1]}, ignore_index=True)
    df = df.append({"channel": 'go right', 'type': 'd1d2', 'trial': trial, 'CC': correlate2(
        filtered_spikes_go_right_d1, filtered_spikes_go_right_d2)[0, 1]}, ignore_index=True)
    df = df.append({"channel": 'between', 'type': 'd1d1', 'trial': trial, 'CC': correlate2(
        filtered_spikes_go_left_d1, filtered_spikes_go_right_d1)[0, 1]}, ignore_index=True)
    df = df.append({"channel": 'between', 'type': 'd2d2', 'trial': trial, 'CC': correlate2(
        filtered_spikes_go_left_d2, filtered_spikes_go_right_d2)[0, 1]}, ignore_index=True)
# --- Winning-channel detection -----------------------------------------------
# Slide a window over the mean-centred D2 traces of the two competing channels
# (from the LAST trial iterated above). A channel "wins" at time t only if its
# activity exceeds the other's at every sample inside the window:
#   +1 = "go left" wins, -1 = "go right" wins, 0 = undecided.
window_size = 250  # window width (ms; 1 ms per sample)
step_size = 10  # stride between tested time points (ms)
# Floor division keeps the slice bounds integral: under true division
# (Python 3, or `from __future__ import division`) 250 / 2 is a float and
# would raise a TypeError when used as a slice index.
half_window = window_size // 2
switching_times = np.arange(20000, 50000, step_size).astype('int')
switching = np.zeros(len(switching_times))
for i, t in enumerate(switching_times):
    window_go_left = filtered_spikes_go_left_d2[t - half_window:t + half_window]
    window_go_right = filtered_spikes_go_right_d2[t - half_window:t + half_window]
    # Single difference array; strictly positive everywhere => left wins,
    # strictly negative everywhere => right wins.
    diff = window_go_left - window_go_right
    if np.all(diff > 0.0):
        switching[i] = 1
    elif np.all(diff < 0.0):
        switching[i] = -1
# --- Figure 1: filtered activity traces with winner band ----------------------
# Styling via the project's `colors` module (wraps seaborn and defines the
# shared palette `colors.colors`).
colors.seaborn.set_context('paper', font_scale=3.0,
                           rc={"lines.linewidth": 1.5})
colors.seaborn.set_style('whitegrid', {"axes.linewidth": 1.5})
lw = 1.5  # NOTE(review): defined but not used below
fig = pl.figure(figsize=[16, 10])
fig.set_tight_layout(True)
ax0 = fig.add_subplot(1, 1, 1)
# Vertical scale bar marking 3 Hz on the rate axis (project helper module).
scalebars.add_scalebar(ax0, matchx=False, matchy=False, hidex=False,
                       hidey=False, size=3, label="3 Hz", horizontal=False)
# Time axis for the winner band, in seconds (one point per step_size ms).
x = np.arange(0, 30, 0.001 * step_size)
# Coloured band at the top of the plot: palette[0] where "go left" wins,
# palette[2] where "go right" wins, last palette colour where undecided.
collection = collections.BrokenBarHCollection.span_where(
    x, ymin=26.5, ymax=29.5, where=switching > 0, facecolor=colors.colors[0], alpha=1.0)
ax0.add_collection(collection)
collection = collections.BrokenBarHCollection.span_where(
    x, ymin=26.5, ymax=29.5, where=switching < 0, facecolor=colors.colors[2], alpha=1.0)
ax0.add_collection(collection)
collection = collections.BrokenBarHCollection.span_where(
    x, ymin=26.5, ymax=29.5, where=switching == 0, facecolor=colors.colors[-1], alpha=1.0)
ax0.add_collection(collection)
# Four traces (last trial), vertically offset by 7 units each, over the
# 20-50 s window at 1 ms resolution.
ax0.plot(np.arange(0, 30, 0.001),
         filtered_spikes_go_left_d1[20000:50000] + 21, label="D1 go left")
ax0.plot(np.arange(0, 30, 0.001),
         filtered_spikes_go_left_d2[20000:50000] + 14, label="D2 go left")
ax0.plot(np.arange(0, 30, 0.001),
         filtered_spikes_go_right_d1[20000:50000] + 7, label="D1 go right")
ax0.plot(np.arange(0, 30, 0.001),
         filtered_spikes_go_right_d2[20000:50000] + 0, label="D2 go right")
ax0.set_xlabel("Time (s)", fontweight='bold')
# Label each offset level; tick 28 marks the winner band.
pl.yticks([0, 7, 14, 21, 28], ["turn right D2", "turn right D1",
          "turn left D2", "turn left D1", "winning channel"], rotation=40)
ax0.set_ylim([-4, 29.5])
fig.savefig(traces_out_fn)
# --- Figure 2: correlation coefficients across trials -------------------------
# Strip plot (individual trial points) overlaid with violins, grouped by
# channel pair and coloured by cell-type pair.
fig = pl.figure(figsize=[16, 10])
fig.set_tight_layout(True)
ax2 = fig.add_subplot(1, 1, 1)
sbn.stripplot(x='channel', y='CC', hue='type',
              data=df, size=10., alpha=0.5, ax=ax2)
sbn.violinplot(x='channel', y='CC', hue='type',
               data=df, size=15., scale='width', ax=ax2)
# Drop the duplicate legend created by drawing two seaborn plots on one axes.
ax2.legend_.remove()
ax2.set_xlabel("Channel", fontweight='bold')
ax2.set_ylabel("CC", fontweight='bold')
ax2.set_ylim([-1.2, 1.2])  # correlation coefficients lie in [-1, 1]
fig.savefig(corr_out_fn)
# Persist the raw correlation table for downstream analysis.
df.to_json(corr_data_out)
matplotlib.use('Agg')
import numpy as np
from scipy.stats import pearsonr
import pylab as pl
import sys
import json
import yaml
sys.path.append("code/striatal_model")
import params
import colors
from plot_tools2 import *
import matplotlib.pyplot as plt
import pandas
import seaborn as sbn
import matplotlib.collections as collections
import scalebars
grid_dimensions = [6, 6]
num_trials = 5
print len(sys.argv), sys.argv
all_spikes_fn = sys.argv[1:1 + num_trials]
all_channels_fn = sys.argv[1 + 1 * num_trials:1 + 2 * num_trials]
experiment_fn = sys.argv[1 + 2 * num_trials]
hemisphere = sys.argv[2 + 2 * num_trials]
traces_out_fn = sys.argv[3 + 2 * num_trials]
corr_out_fn = sys.argv[4 + 2 * num_trials]
corr_data_out = sys.argv[5 + 2 * num_trials]
df = pandas.DataFrame({"type": [], "channel": [], "CC": [], "trial": []})
for trial in range(num_trials):
print "trial", trial
spikes_fn = all_spikes_fn[trial]
channels_fn = all_channels_fn[trial]
# spike data of the channels
data = np.loadtxt(spikes_fn)
senders = data[:, 0]
unique_senders = np.unique(senders) # all active senders
times = data[:, 1]
with open(channels_fn, "r+") as f:
channels = json.load(f)
channels = channels['channels']
with open(experiment_fn, "r+") as f:
cfg = yaml.load(f)
stim_times = get_stim_times(cfg, hemisphere, params, mask=True)[
0].astype('int')
chan_go_left = 21
chan_go_right = 22
exp_filter = np.exp(np.arange(0, 5, 0.001) / -0.3)
spike_masks_go_left_d1 = get_spikes_mask(
senders, times, channels[chan_go_left]['d1'])
spike_masks_go_left_d2 = get_spikes_mask(
senders, times, channels[chan_go_left]['d2'])
spike_masks_go_right_d1 = get_spikes_mask(
senders, times, channels[chan_go_right]['d1'])
spike_masks_go_right_d2 = get_spikes_mask(
senders, times, channels[chan_go_right]['d2'])
print "mask done"
filtered_spikes_go_left_d1 = filter_spikes(
spike_masks_go_left_d1, exp_filter)
filtered_spikes_go_left_d2 = filter_spikes(
spike_masks_go_left_d2, exp_filter)
filtered_spikes_go_right_d1 = filter_spikes(
spike_masks_go_right_d1, exp_filter)
filtered_spikes_go_right_d2 = filter_spikes(
spike_masks_go_right_d2, exp_filter)
filtered_spikes_go_left_d1 = np.mean(filtered_spikes_go_left_d1, axis=0)[
np.where(stim_times)]
filtered_spikes_go_left_d2 = np.mean(filtered_spikes_go_left_d2, axis=0)[
np.where(stim_times)]
filtered_spikes_go_right_d1 = np.mean(filtered_spikes_go_right_d1, axis=0)[
np.where(stim_times)]
filtered_spikes_go_right_d2 = np.mean(filtered_spikes_go_right_d2, axis=0)[
np.where(stim_times)]
filtered_spikes_go_left_d1 -= np.mean(filtered_spikes_go_left_d1)
filtered_spikes_go_left_d2 -= np.mean(filtered_spikes_go_left_d2)
filtered_spikes_go_right_d1 -= np.mean(filtered_spikes_go_right_d1)
filtered_spikes_go_right_d2 -= np.mean(filtered_spikes_go_right_d2)
print "filter done"
df = df.append({"channel": 'go left', 'type': 'd1d2', 'trial': trial, 'CC': correlate2(
filtered_spikes_go_left_d1, filtered_spikes_go_left_d2)[0, 1]}, ignore_index=True)
df = df.append({"channel": 'go right', 'type': 'd1d2', 'trial': trial, 'CC': correlate2(
filtered_spikes_go_right_d1, filtered_spikes_go_right_d2)[0, 1]}, ignore_index=True)
df = df.append({"channel": 'between', 'type': 'd1d1', 'trial': trial, 'CC': correlate2(
filtered_spikes_go_left_d1, filtered_spikes_go_right_d1)[0, 1]}, ignore_index=True)
df = df.append({"channel": 'between', 'type': 'd2d2', 'trial': trial, 'CC': correlate2(
filtered_spikes_go_left_d2, filtered_spikes_go_right_d2)[0, 1]}, ignore_index=True)
window_size = 250 # (ms)
step_size = 10 # (ms)
switching_times = np.arange(20000, 50000, step_size).astype('int')
switching = np.zeros(len(switching_times))
for i, t in enumerate(switching_times):
window_go_left = filtered_spikes_go_left_d2[t -
window_size / 2:t + window_size / 2]
window_go_right = filtered_spikes_go_right_d2[t -
window_size / 2:t + window_size / 2]
if all(window_go_left - window_go_right > 0.0):
switching[i] = 1
elif all(window_go_right - window_go_left > 0.0):
switching[i] = -1
colors.seaborn.set_context('paper', font_scale=3.0,
rc={"lines.linewidth": 1.5})
colors.seaborn.set_style('whitegrid', {"axes.linewidth": 1.5})
lw = 1.5
fig = pl.figure(figsize=[16, 10])
fig.set_tight_layout(True)
ax0 = fig.add_subplot(1, 1, 1)
scalebars.add_scalebar(ax0, matchx=False, matchy=False, hidex=False,
hidey=False, size=3, label="3 Hz", horizontal=False)
x = np.arange(0, 30, 0.001 * step_size)
collection = collections.BrokenBarHCollection.span_where(
x, ymin=26.5, ymax=29.5, where=switching > 0, facecolor=colors.colors[0], alpha=1.0)
ax0.add_collection(collection)
collection = collections.BrokenBarHCollection.span_where(
x, ymin=26.5, ymax=29.5, where=switching < 0, facecolor=colors.colors[2], alpha=1.0)
ax0.add_collection(collection)
collection = collections.BrokenBarHCollection.span_where(
x, ymin=26.5, ymax=29.5, where=switching == 0, facecolor=colors.colors[-1], alpha=1.0)
ax0.add_collection(collection)
ax0.plot(np.arange(0, 30, 0.001),
filtered_spikes_go_left_d1[20000:50000] + 21, label="D1 go left")
ax0.plot(np.arange(0, 30, 0.001),
filtered_spikes_go_left_d2[20000:50000] + 14, label="D2 go left")
ax0.plot(np.arange(0, 30, 0.001),
filtered_spikes_go_right_d1[20000:50000] + 7, label="D1 go right")
ax0.plot(np.arange(0, 30, 0.001),
filtered_spikes_go_right_d2[20000:50000] + 0, label="D2 go right")
ax0.set_xlabel("Time (s)", fontweight='bold')
pl.yticks([0, 7, 14, 21, 28], ["turn right D2", "turn right D1",
"turn left D2", "turn left D1", "winning channel"], rotation=40)
ax0.set_ylim([-4, 29.5])
fig.savefig(traces_out_fn)
fig = pl.figure(figsize=[16, 10])
fig.set_tight_layout(True)
ax2 = fig.add_subplot(1, 1, 1)
sbn.stripplot(x='channel', y='CC', hue='type',
data=df, size=10., alpha=0.5, ax=ax2)
sbn.violinplot(x='channel', y='CC', hue='type',
data=df, size=15., scale='width', ax=ax2)
ax2.legend_.remove()
ax2.set_xlabel("Channel", fontweight='bold')
ax2.set_ylabel("CC", fontweight='bold')
ax2.set_ylim([-1.2, 1.2])
fig.savefig(corr_out_fn)
df.to_json(corr_data_out) | 0.279828 | 0.416559 |
from django.conf.urls.defaults import patterns, url
from django.test import TestCase
from djangorestframework import status
from djangorestframework.views import View
from djangorestframework.compat import View as DjangoView
from djangorestframework.renderers import BaseRenderer, JSONRenderer, YAMLRenderer, \
XMLRenderer, JSONPRenderer
from djangorestframework.parsers import JSONParser, YAMLParser
from djangorestframework.mixins import ResponseMixin
from djangorestframework.response import Response
from StringIO import StringIO
import datetime
from decimal import Decimal
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
RENDERER_A_SERIALIZER = lambda x: 'Renderer A: %s' % x
RENDERER_B_SERIALIZER = lambda x: 'Renderer B: %s' % x
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, obj=None, media_type=None):
return RENDERER_A_SERIALIZER(obj)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, obj=None, media_type=None):
return RENDERER_B_SERIALIZER(obj)
class MockView(ResponseMixin, DjangoView):
renderers = (RendererA, RendererB)
def get(self, request, **kwargs):
response = Response(DUMMYSTATUS, DUMMYCONTENT)
return self.render(response)
class MockGETView(View):
def get(self, request, **kwargs):
return {'foo': ['bar', 'baz']}
urlpatterns = patterns('',
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderers=[RendererA, RendererB])),
url(r'^$', MockView.as_view(renderers=[RendererA, RendererB])),
url(r'^jsonp/jsonrenderer$', MockGETView.as_view(renderers=[JSONRenderer, JSONPRenderer])),
url(r'^jsonp/nojsonrenderer$', MockGETView.as_view(renderers=[JSONPRenderer])),
)
class RendererIntegrationTests(TestCase):
"""
End-to-end testing of renderers using an RendererMixin on a generic view.
"""
urls = 'djangorestframework.tests.renderers'
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEquals(resp.status_code, DUMMYSTATUS)
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, '')
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_accept_query(self):
"""The '_accept' query string should behave in the same way as the Accept header."""
resp = self.client.get('/?_accept=%s' % RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
"""If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
resp = self.client.get('/', HTTP_ACCEPT='foo/bar')
self.assertEquals(resp.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_conflicting_format_query_and_accept_ignores_accept(self):
"""If a 'format' query is specified that does not match the Accept
header, we should only honor the 'format' query string."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT='dummy')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_bla(self):
resp = self.client.get('/?format=formatb',
HTTP_ACCEPT='text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
_flat_repr = '{"foo": ["bar", "baz"]}'
_indented_repr = '{\n "foo": [\n "bar", \n "baz"\n ]\n}'
class JSONRendererTests(TestCase):
"""
Tests specific to the JSON Renderer
"""
def test_without_content_type_args(self):
"""
Test basic JSON rendering.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
content = renderer.render(obj, 'application/json')
self.assertEquals(content, _flat_repr)
def test_with_content_type_args(self):
"""
Test JSON rendering with additional content type arguments supplied.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
content = renderer.render(obj, 'application/json; indent=2')
self.assertEquals(content, _indented_repr)
def test_render_and_parse(self):
"""
Test rendering and then parsing returns the original object.
IE obj -> render -> parse -> obj.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
parser = JSONParser(None)
content = renderer.render(obj, 'application/json')
(data, files) = parser.parse(StringIO(content))
self.assertEquals(obj, data)
class JSONPRendererTests(TestCase):
"""
Tests specific to the JSONP Renderer
"""
urls = 'djangorestframework.tests.renderers'
def test_without_callback_with_json_renderer(self):
"""
Test JSONP rendering with View JSON Renderer.
"""
resp = self.client.get('/jsonp/jsonrenderer',
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, 'callback(%s);' % _flat_repr)
def test_without_callback_without_json_renderer(self):
"""
Test JSONP rendering without View JSON Renderer.
"""
resp = self.client.get('/jsonp/nojsonrenderer',
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, 'callback(%s);' % _flat_repr)
def test_with_callback(self):
"""
Test JSONP rendering with callback function name.
"""
callback_func = 'myjsonpcallback'
resp = self.client.get('/jsonp/nojsonrenderer?callback=' + callback_func,
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, '%s(%s);' % (callback_func, _flat_repr))
if YAMLRenderer:
_yaml_repr = 'foo: [bar, baz]\n'
class YAMLRendererTests(TestCase):
"""
Tests specific to the JSON Renderer
"""
def test_render(self):
"""
Test basic YAML rendering.
"""
obj = {'foo': ['bar', 'baz']}
renderer = YAMLRenderer(None)
content = renderer.render(obj, 'application/yaml')
self.assertEquals(content, _yaml_repr)
def test_render_and_parse(self):
"""
Test rendering and then parsing returns the original object.
IE obj -> render -> parse -> obj.
"""
obj = {'foo': ['bar', 'baz']}
renderer = YAMLRenderer(None)
parser = YAMLParser(None)
content = renderer.render(obj, 'application/yaml')
(data, files) = parser.parse(StringIO(content))
self.assertEquals(obj, data)
class XMLRendererTestCase(TestCase):
"""
Tests specific to the XML Renderer
"""
def test_render_string(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 'astring'}, 'application/xml')
self.assertXMLContains(content, '<field>astring</field>')
def test_render_integer(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 111}, 'application/xml')
self.assertXMLContains(content, '<field>111</field>')
def test_render_datetime(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({
'field': datetime.datetime(2011, 12, 25, 12, 45, 00)
}, 'application/xml')
self.assertXMLContains(content, '<field>2011-12-25 12:45:00</field>')
def test_render_float(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 123.4}, 'application/xml')
self.assertXMLContains(content, '<field>123.4</field>')
def test_render_decimal(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': Decimal('111.2')}, 'application/xml')
self.assertXMLContains(content, '<field>111.2</field>')
def test_render_none(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': None}, 'application/xml')
self.assertXMLContains(content, '<field></field>')
def assertXMLContains(self, xml, string):
self.assertTrue(xml.startswith('<?xml version="1.0" encoding="utf-8"?>\n<root>'))
self.assertTrue(xml.endswith('</root>'))
self.assertTrue(string in xml, '%r not in %r' % (string, xml)) | djangorestframework/tests/renderers.py | from django.conf.urls.defaults import patterns, url
from django.test import TestCase
from djangorestframework import status
from djangorestframework.views import View
from djangorestframework.compat import View as DjangoView
from djangorestframework.renderers import BaseRenderer, JSONRenderer, YAMLRenderer, \
XMLRenderer, JSONPRenderer
from djangorestframework.parsers import JSONParser, YAMLParser
from djangorestframework.mixins import ResponseMixin
from djangorestframework.response import Response
from StringIO import StringIO
import datetime
from decimal import Decimal
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
RENDERER_A_SERIALIZER = lambda x: 'Renderer A: %s' % x
RENDERER_B_SERIALIZER = lambda x: 'Renderer B: %s' % x
class RendererA(BaseRenderer):
media_type = 'mock/renderera'
format = "formata"
def render(self, obj=None, media_type=None):
return RENDERER_A_SERIALIZER(obj)
class RendererB(BaseRenderer):
media_type = 'mock/rendererb'
format = "formatb"
def render(self, obj=None, media_type=None):
return RENDERER_B_SERIALIZER(obj)
class MockView(ResponseMixin, DjangoView):
renderers = (RendererA, RendererB)
def get(self, request, **kwargs):
response = Response(DUMMYSTATUS, DUMMYCONTENT)
return self.render(response)
class MockGETView(View):
def get(self, request, **kwargs):
return {'foo': ['bar', 'baz']}
urlpatterns = patterns('',
url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderers=[RendererA, RendererB])),
url(r'^$', MockView.as_view(renderers=[RendererA, RendererB])),
url(r'^jsonp/jsonrenderer$', MockGETView.as_view(renderers=[JSONRenderer, JSONPRenderer])),
url(r'^jsonp/nojsonrenderer$', MockGETView.as_view(renderers=[JSONPRenderer])),
)
class RendererIntegrationTests(TestCase):
"""
End-to-end testing of renderers using an RendererMixin on a generic view.
"""
urls = 'djangorestframework.tests.renderers'
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEquals(resp.status_code, DUMMYSTATUS)
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, '')
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEquals(resp['Content-Type'], RendererA.media_type)
self.assertEquals(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_accept_query(self):
"""The '_accept' query string should behave in the same way as the Accept header."""
resp = self.client.get('/?_accept=%s' % RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
"""If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
resp = self.client.get('/', HTTP_ACCEPT='foo/bar')
self.assertEquals(resp.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT=RendererB.media_type)
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_conflicting_format_query_and_accept_ignores_accept(self):
"""If a 'format' query is specified that does not match the Accept
header, we should only honor the 'format' query string."""
resp = self.client.get('/?format=%s' % RendererB.format,
HTTP_ACCEPT='dummy')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
def test_bla(self):
resp = self.client.get('/?format=formatb',
HTTP_ACCEPT='text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
self.assertEquals(resp['Content-Type'], RendererB.media_type)
self.assertEquals(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEquals(resp.status_code, DUMMYSTATUS)
_flat_repr = '{"foo": ["bar", "baz"]}'
_indented_repr = '{\n "foo": [\n "bar", \n "baz"\n ]\n}'
class JSONRendererTests(TestCase):
"""
Tests specific to the JSON Renderer
"""
def test_without_content_type_args(self):
"""
Test basic JSON rendering.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
content = renderer.render(obj, 'application/json')
self.assertEquals(content, _flat_repr)
def test_with_content_type_args(self):
"""
Test JSON rendering with additional content type arguments supplied.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
content = renderer.render(obj, 'application/json; indent=2')
self.assertEquals(content, _indented_repr)
def test_render_and_parse(self):
"""
Test rendering and then parsing returns the original object.
IE obj -> render -> parse -> obj.
"""
obj = {'foo': ['bar', 'baz']}
renderer = JSONRenderer(None)
parser = JSONParser(None)
content = renderer.render(obj, 'application/json')
(data, files) = parser.parse(StringIO(content))
self.assertEquals(obj, data)
class JSONPRendererTests(TestCase):
"""
Tests specific to the JSONP Renderer
"""
urls = 'djangorestframework.tests.renderers'
def test_without_callback_with_json_renderer(self):
"""
Test JSONP rendering with View JSON Renderer.
"""
resp = self.client.get('/jsonp/jsonrenderer',
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, 'callback(%s);' % _flat_repr)
def test_without_callback_without_json_renderer(self):
"""
Test JSONP rendering without View JSON Renderer.
"""
resp = self.client.get('/jsonp/nojsonrenderer',
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, 'callback(%s);' % _flat_repr)
def test_with_callback(self):
"""
Test JSONP rendering with callback function name.
"""
callback_func = 'myjsonpcallback'
resp = self.client.get('/jsonp/nojsonrenderer?callback=' + callback_func,
HTTP_ACCEPT='application/json-p')
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json-p')
self.assertEquals(resp.content, '%s(%s);' % (callback_func, _flat_repr))
if YAMLRenderer:
_yaml_repr = 'foo: [bar, baz]\n'
class YAMLRendererTests(TestCase):
"""
Tests specific to the JSON Renderer
"""
def test_render(self):
"""
Test basic YAML rendering.
"""
obj = {'foo': ['bar', 'baz']}
renderer = YAMLRenderer(None)
content = renderer.render(obj, 'application/yaml')
self.assertEquals(content, _yaml_repr)
def test_render_and_parse(self):
"""
Test rendering and then parsing returns the original object.
IE obj -> render -> parse -> obj.
"""
obj = {'foo': ['bar', 'baz']}
renderer = YAMLRenderer(None)
parser = YAMLParser(None)
content = renderer.render(obj, 'application/yaml')
(data, files) = parser.parse(StringIO(content))
self.assertEquals(obj, data)
class XMLRendererTestCase(TestCase):
"""
Tests specific to the XML Renderer
"""
def test_render_string(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 'astring'}, 'application/xml')
self.assertXMLContains(content, '<field>astring</field>')
def test_render_integer(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 111}, 'application/xml')
self.assertXMLContains(content, '<field>111</field>')
def test_render_datetime(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({
'field': datetime.datetime(2011, 12, 25, 12, 45, 00)
}, 'application/xml')
self.assertXMLContains(content, '<field>2011-12-25 12:45:00</field>')
def test_render_float(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': 123.4}, 'application/xml')
self.assertXMLContains(content, '<field>123.4</field>')
def test_render_decimal(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': Decimal('111.2')}, 'application/xml')
self.assertXMLContains(content, '<field>111.2</field>')
def test_render_none(self):
"""
Test XML rendering.
"""
renderer = XMLRenderer(None)
content = renderer.render({'field': None}, 'application/xml')
self.assertXMLContains(content, '<field></field>')
def assertXMLContains(self, xml, string):
self.assertTrue(xml.startswith('<?xml version="1.0" encoding="utf-8"?>\n<root>'))
self.assertTrue(xml.endswith('</root>'))
self.assertTrue(string in xml, '%r not in %r' % (string, xml)) | 0.755366 | 0.15034 |
import tweepy
import urllib.request
import pandas as pd
# Authenticate to Twitter
from os import environ
CONSUMER_KEY = environ['CONSUMER_KEY']
CONSUMER_SECRET = environ['CONSUMER_SECRET']
ACCESS_KEY = environ['ACCESS_KEY']
ACCESS_SECRET = environ['ACCESS_SECRET']
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
deukae = {
"jiu": "minjihourly",
"sua": "suahours",
"siyeon": "siyeonhours",
"handong": "hourlydong",
"yoohyeon": "hourlyoohyoen",
"dami": "hourly_dami",
"gahyeon": "hourlygahyeon"
}
loona = {
'heejin': 'hourlyheejin',
'hyunjin': 'hourlyaeong',
'haseul': 'seulhour',
'yeojin': 'hourlyyeojin',
'vivi': 'vivihourly',
'kimlip': 'kimliphourly',
'jinsoul': 'hourlyjinsoul',
'choerry': 'hourlychoerry',
'yves': 'yveshourly',
'chuu': 'chuuhour',
'gowon': 'hourlywon',
'oliviahye': 'hourlyolivia'
}
image_count = pd.read_csv('image_count.csv', index_col='Unnamed: 0').to_dict()['0']
first_id = {
"jiu": 0,
"sua": 0,
"siyeon": 0,
"handong": 0,
"yoohyeon": 0,
"dami": 0,
"gahyeon": 0,
'heejin': 0,
'hyunjin': 0,
'haseul': 0,
'yeojin': 0,
'vivi': 0,
'kimlip': 0,
'jinsoul': 0,
'choerry': 0,
'yves': 0,
'chuu': 0,
'gowon': 0,
'oliviahye': 0
}
last_id = pd.read_csv('last_id.csv', index_col='Unnamed: 0').to_dict()['0']
def fetch(dict, folder):
for key,value in dict.items():
public_tweets = api.user_timeline(value, count = 200, include_rts = 'false', exclude_replies = 'true')
url = []
for tweet in public_tweets:
if "media" in tweet.entities:
if "thumb" not in tweet.entities["media"][0]['media_url']:
first_id[key] = tweet.id
break
for tweet in public_tweets:
print(tweet.id)
print(str(key)+str(last_id[key]))
if (tweet.id == last_id[key]):
break
if "media" in tweet.entities:
if "thumb" not in tweet.entities["media"][0]['media_url']:
url.append(tweet.entities["media"][0]['media_url'])
for image in url:
image_count[key] += 1
name = "images" + "/" + folder + "/" + key + str(image_count[key])+".jpg"
print(name)
urllib.request.urlretrieve(image, name)
fetch(deukae, 'deukae')
fetch(loona, 'loona')
pd.DataFrame.from_dict(first_id, orient='index').to_csv('last_id.csv')
pd.DataFrame.from_dict(image_count, orient='index').to_csv('image_count.csv') | fetch.py | import tweepy
import urllib.request
import pandas as pd
# Authenticate to Twitter
from os import environ
CONSUMER_KEY = environ['CONSUMER_KEY']
CONSUMER_SECRET = environ['CONSUMER_SECRET']
ACCESS_KEY = environ['ACCESS_KEY']
ACCESS_SECRET = environ['ACCESS_SECRET']
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
deukae = {
"jiu": "minjihourly",
"sua": "suahours",
"siyeon": "siyeonhours",
"handong": "hourlydong",
"yoohyeon": "hourlyoohyoen",
"dami": "hourly_dami",
"gahyeon": "hourlygahyeon"
}
loona = {
'heejin': 'hourlyheejin',
'hyunjin': 'hourlyaeong',
'haseul': 'seulhour',
'yeojin': 'hourlyyeojin',
'vivi': 'vivihourly',
'kimlip': 'kimliphourly',
'jinsoul': 'hourlyjinsoul',
'choerry': 'hourlychoerry',
'yves': 'yveshourly',
'chuu': 'chuuhour',
'gowon': 'hourlywon',
'oliviahye': 'hourlyolivia'
}
image_count = pd.read_csv('image_count.csv', index_col='Unnamed: 0').to_dict()['0']
first_id = {
"jiu": 0,
"sua": 0,
"siyeon": 0,
"handong": 0,
"yoohyeon": 0,
"dami": 0,
"gahyeon": 0,
'heejin': 0,
'hyunjin': 0,
'haseul': 0,
'yeojin': 0,
'vivi': 0,
'kimlip': 0,
'jinsoul': 0,
'choerry': 0,
'yves': 0,
'chuu': 0,
'gowon': 0,
'oliviahye': 0
}
last_id = pd.read_csv('last_id.csv', index_col='Unnamed: 0').to_dict()['0']
def fetch(dict, folder):
    """Download every image tweeted by each account since the last run.

    Args:
        dict: mapping of member name -> twitter handle to scrape.
              NOTE(review): the parameter shadows the ``dict`` builtin;
              the name is kept for interface compatibility and aliased
              to ``accounts`` below.
        folder: sub-directory of ``images/`` the files are written into
                (must already exist — TODO confirm, nothing creates it).

    Side effects: updates the module-level ``first_id`` and
    ``image_count`` dicts, prints progress, and writes .jpg files.
    """
    accounts = dict  # local alias so the builtin name is not used below
    for key, value in accounts.items():
        # Most recent ~200 original tweets from this account.
        # NOTE(review): 'false'/'true' are passed as strings, as in the
        # original; tweepy forwards them to the API unchanged.
        public_tweets = api.user_timeline(
            value, count=200, include_rts='false', exclude_replies='true'
        )
        urls = []
        # Pass 1: remember the newest tweet carrying a full-size image
        # ("thumb" URLs are video thumbnails and are skipped) so the next
        # run can stop there once first_id is persisted as last_id.csv.
        for tweet in public_tweets:
            if "media" in tweet.entities:
                if "thumb" not in tweet.entities["media"][0]['media_url']:
                    first_id[key] = tweet.id
                    break
        # Pass 2: collect image URLs until we hit the tweet the previous
        # run already processed.
        for tweet in public_tweets:
            print(tweet.id)
            print(str(key) + str(last_id[key]))
            if tweet.id == last_id[key]:
                break
            if "media" in tweet.entities:
                if "thumb" not in tweet.entities["media"][0]['media_url']:
                    urls.append(tweet.entities["media"][0]['media_url'])
        # Download the new images, numbering them per member, e.g.
        # images/deukae/jiu42.jpg.
        for image in urls:
            image_count[key] += 1
            name = f"images/{folder}/{key}{image_count[key]}.jpg"
            print(name)
            urllib.request.urlretrieve(image, name)
# Scrape both groups into their respective images/ sub-directories.
fetch(deukae, 'deukae')
fetch(loona, 'loona')
# Persist state for the next run: the newest ids seen this run become
# next run's last_id, and the per-member image counters are saved.
pd.DataFrame.from_dict(first_id, orient='index').to_csv('last_id.csv')
pd.DataFrame.from_dict(image_count, orient='index').to_csv('image_count.csv')
loadouts_const = [
{
"id": "1MobOPbsFZoVpLWDUUgmeg",
"name": "dp27",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7LoT7yAe0LK7bDOeq6MZZM/33995bc704667674af1b73fe962d4c7c/Primary_gun_DP27.png"
},
{
"id": "3iisbOg3JC9epuJDdrMcAk",
"name": "9x19vsn",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/42gH96xTTYaTZsfXI3c0wL/a7edbf11af97091ee884b68e59fe6a4f/9x19VSN.png"
},
{
"id": "4EmVfbHbYqwRNnK02lU79C",
"name": "pmm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3y4LIwwm8YNQHAv8oOkWCK/a2375901cee34e68fa39c976d85de8aa/PMM.png"
},
{
"id": "3Ch5Pac0IKVBJe5oYZzIol",
"name": "gsh-18",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5s5Q33j3MNcXf9lwfxfd7m/4eb3a6af1d431481b6ddcec44fbc7602/GSh-18.png"
},
{
"id": "3WoO6qQpm6SkD2ceFlpIVq",
"name": "barbed-wire",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7igaibxuCcSpWDkZensEJS/bfa2cef52f3d860b7a06c2b4d7a6340e/Barbed_wire.png"
},
{
"id": "5QtTa00eoscVRzAfGy44y6",
"name": "proximity-alarm",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2TsFLmb2O6LRZpbxzkZDck/c9146913388a9567500b704c95600621/Proximity_alarm.png"
},
{
"id": "4rJKd9S4S3Edu84n3jaWbq",
"name": "shumikha-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/37wX75QnY7XA6KbjM4aF5n/0ab116d398cf71463e11d43913818ec1/Shumikha-Launcher.png"
},
{
"id": "1LVSwzrXIEAd1O3vntSQMs",
"name": "p10-roni",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7K86OBjL3zmYWt0ZvUcCLj/16a947334e39f27da177d787773593e4/r6-operator-weapon-smg-p10roni.png"
},
{
"id": "6xDz1HSwIn3ZcV9nKIeKUN",
"name": "mk-14-ebr",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6KIMqp5dA95z1RI3PrG9jv/eb939638169811a3fa858a44e6e5d97e/Mk_14_EBR.png"
},
{
"id": "5mI0sCcUxKW3Imv5ZMBBeL",
"name": "prb92",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/dl28J1HsE7mzhj66pmd5D/b8d8fc48d2dde13154047de94abbd8ca/PRB92.png"
},
{
"id": "7eb4vAG3ycZGuIRAoRl58a",
"name": "surya-gate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4hLJAAVKrf50wosG0471od/cde1867daf863c03754969f159ac00de/r6s-operator-ability-aruni.png"
},
{
"id": "2Mh1URS57n4Yuc2vHOojl7",
"name": "super-90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1TLWSu0xHJlAsfEfafeC9X/f9647e70a18962bf1627095c8b46832e/Super_90.png"
},
{
"id": "4pZ8kx4SSqhhLJ1iaSyEAU",
"name": "9mm-c1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60sbThKtOpNOwKu3OP0oGV/672fd9263f7786402a0d855273473a6f/9mm_C1.png"
},
{
"id": "2cDP1BjKw2UkKkDJhYLZAU",
"name": "mk1-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3tWoNeF3jQYs3w4EOydQYs/434409c96693df1fd3e969d778e70795/Mk1_9mm_BI.png"
},
{
"id": "6Urz2FjkmefuoCPGDoVZCm",
"name": "ita12s",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5G4DroaSdqHzJWCe7qqbHZ/5dd2e03f853182c78a1e7fcbc642f0cf/ITA12S.png"
},
{
"id": "7pAPyONkaR3xGR47gvXwSz",
"name": "bulletproof-camera",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/gZuOXvuTu2i8hQX0B6auy/259f379a6283bae618443d722a896f1a/Bulletproof_camera.png"
},
{
"id": "6ZPm8q8dyQXt1my5OHZWic",
"name": "deployable-shield",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/W0WE0X2VQlxwWIAFCJ6Jm/523650a39de5a23dd9520d7299c9e25a/Deployable_Shield.png"
},
{
"id": "2yKP1QdTJfIMQN9d7ZeTmU",
"name": "welcome-mate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/xsIzH7XCAqvn7F3tEfAPe/c41e59a9d7f2ed7ee38b16ed0a882351/Welcome-Mate.png"
},
{
"id": "zs4Rebj67KAk06ASjJPxO",
"name": "spas-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Hp6Fbss6uI59OT4nZNB6e/a4d09954803cb2580353cfa03e8c778b/SPAS-12.png"
},
{
"id": "41pnpfTTAjzKYvpEBmNKdD",
"name": "t-5-smg",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1Ne8bvX8BdCALevWKMllQN/4baa3e79d323de134dd182e0272b9c3b/T-5_SMG.png"
},
{
"id": "1fj1XX5YxggVcr5mU1OPy3",
"name": "bailiff-410",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/N8FLbo4fsNyBe8msKgRhT/8f403dc0b58087bcafab786dd95ba33f/Bailiff_410.png"
},
{
"id": "4HiVAhAUQe5BEXgBNg2ECe",
"name": "usp40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7FxemzWRtlpAhK9MyKp1Gp/817cc25b6b7c3575dc1ba53a6a8170a9/USP40.png"
},
{
"id": "6kF8p8NlbGPvjRz42YxYYE",
"name": "remah-dash",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3dM2B3qCdU0woydIbiy2xn/55aa99443002ad794d3f78dada26d035/r6s-operator-ability-oryx.png"
},
{
"id": "eR3JkIxE5GyWvNpybHCRr",
"name": "mp5",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60YbOvSBQt6ZUlu8YDXoZm/51ef3857b2986de700262432e8433714/MP5.png"
},
{
"id": "5QeGmJGqn3gZxACxzF4kbR",
"name": "rg15",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2LNSsp7B7wUnnPUweir7Jm/9f66d53be7a63a17a55253a0bea6eec1/RG15.png"
},
{
"id": "7zuAWr4kVRJFYVj33Ltfex",
"name": "impact-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7iJK9B1Vr3PDO3rGftU00l/c3d8edc5564a80580e4ac2f9a4fc3937/Impact_Grenade.png"
},
{
"id": "4Lnu4kaDPzUIxgCStqfrbR",
"name": "nitro-cell",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4pBSTw9U6l9GRnT12G6Xln/e0991bc03b48d217f510af8b611c8828/Nitro_Cell.png"
},
{
"id": "5CkPFHPPJB3909Fff9BYBs",
"name": "banshee",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/49ixqWhGgjvHu0Ay8JzeSH/c6a3fe584847850186e15c7fb4244385/r6s-operator-ability-melusi.png"
},
{
"id": "wfzQPegCiVkDRgsx6MOjZ",
"name": "c8-sfw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1itXpz2GnvdwwRyhX1SYa2/b58ff71048fa3bb5ed09d5d935dc90f4/C8-SFW.png"
},
{
"id": "5b6dGdkffoVKAyDycG5hjg",
"name": "camrs",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4dBzqVVmnpv1DZi91LAnEN/e374b4ea289fc992280b943cdbb94d60/CAMRS.png"
},
{
"id": "myf6Hy39exE9Cot5zDEis",
"name": "claymore",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4T4H5EJgUxorucGVtU2pkm/74fef324b89c220ce6426e8097f915b9/Claymore.png"
},
{
"id": "2NNtCVZhqQykqVEtze4fxJ",
"name": "stun-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3XnK8s1iQJQu5cfr6UyQfK/429480b96d6d6d6b830c32c75d2608f5/Stun_Grenade.png"
},
{
"id": "XNjuIs9nL1RQNnWOMNfC9",
"name": "skeleton-key",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2w8EQtN4FFtEMa9lBYyWGg/36bbc6d819761c11418c868d2e483991/Skeleton-Key.png"
},
{
"id": "65tbXPRuQxAV8RUaMoCYJh",
"name": "lmg-e",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7JVJIew6t3iKwgByvrFXyi/7ba44dfda28b525506633e453104a604/LMG-E.png"
},
{
"id": "2dvLoMLwWSRwyK70gARboS",
"name": "m762",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4oWAgi7tgQP1Tq0HooRtye/9109a74921ee17610d4bd85a61582823/M762.png"
},
{
"id": "75HMflo54bNBGSUyX2je5s",
"name": "breach-charge",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1OgTMhyF1FBsSAo4njk26m/9881001e9db03a4806b2eea6007e4a1a/Breach_Charge.png"
},
{
"id": "5tTXUrm4TLdSBHtsJ1p9d8",
"name": "ks79-lifeline",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1elqIEWJ6XsXKAbMNd0Cai/0b4c0591bad284d957e652cdae0b706b/KS79-Lifeline.png"
},
{
"id": "HCADlLBkfNlDRRvlq3iPo",
"name": "mp5sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HaMldwFltBwiiyDDfkPpD/6de3aa9aaa17458e7f6186ba59b8deff/MP5SD.png"
},
{
"id": "67fxpqXp4gjOQixPHPaQMB",
"name": "supernova",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2tpjCRFLcc3hogjJGbKDsi/5ad0ab63b7245022aca5c1c1fb42d473/SuperNova.png"
},
{
"id": "4mbLbnjsLEQ27BEXQ1vqGs",
"name": "p229",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/76ja0RxqzHW9PpvWgpG7Sk/cb753b50b20fe67deaef54d8b2a46b54/P229.png"
},
{
"id": "7sblClEPf57IKm77UCqFSj",
"name": "bearing-9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4mdftEOh5Vu9KhhpgKLKrT/abedcc75868774018295ec2a08a7b3de/Bearing_9.png"
},
{
"id": "5FUiujmYYXsvq1zQ0lZlVx",
"name": "yokai",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/TdDZyrKpjt9EQo8tHpIJk/d987db4da22046a0663be8be82dcda88/Yokai.png"
},
{
"id": "5cZ1wkLzuOHnnYjma40PwQ",
"name": "bosg-12-2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2ZjVndetsX8WEn5ZfyUQa0/e3a781be7eab22876d25f748e8fd0f5a/BOSG.12.2.png"
},
{
"id": "2zEjl6sxdsxgVBmAsDZxcq",
"name": "c75-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3wUuefwPjU705mZkTdJ9UH/8ccb11884cfa34c176ac5500af139177/C75_Auto.png"
},
{
"id": "4W61sh5pt9Ghkw4g7Muvee",
"name": "smg-12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/EwJgB7KdgOb6dDm7ro33u/b73f0890f992c1a365210f08efcc6db5/SMG-12.png"
},
{
"id": "7aslgBcBTFi4XKqlAkvvrc",
"name": "smoke-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3LaxoSLC49T5vgKnUAlTLT/c47c4636845a04478432c48be8c29aee/Smoke_Grenade.png"
},
{
"id": "5o5qaxqMxosu04407U4sBL",
"name": "logic-bomb",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5ej2g1iCMHdfjn8h8qgfmU/bf07fef4b063a46389ca650ed02b292a/Logic-Bomb.png"
},
{
"id": "kzR6vfRLXm9f1EvoK9dBP",
"name": "m12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4FxqA5pa8JY9QQ7FEcjwPw/ffc779fcde5b970e7b95db6653637dab/M12.png"
},
{
"id": "5gcX8x7LiBHg2LA1JIdEHp",
"name": "spas-15",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/CyofBgipHq4RTafvPFWd4/bc3d0ecc871b70e57735855f852efacf/SPAS-15.png"
},
{
"id": "1Y7hJmAXWWqh1MIkXqUbKw",
"name": "luison",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5cSDFUWb8P1BAQUgnTozeM/fd3a3348f42c95d6afa9f105ae23f2e5/Luison.png"
},
{
"id": "1ojdoiQ8AbqFX3FB7Neqmk",
"name": "silent-step",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6PTsBBBGTT5oixxzvYv1Y4/18e31c74ba1ca73ed2694134acd9c078/Silent-Step.png"
},
{
"id": "6Il345pPRhv4Xx4qzTFpmA",
"name": "aug-a2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1eO39zRe8XxJXH1KZiIWhM/02049ced0fbfa630833e8b0d3c03de07/AUG_A2.png"
},
{
"id": "3Xq4lwAY8Sc1Z687gD9mnD",
"name": "mp5k",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1pk8nOI7ybQjYOSI4fuzOm/fcd78df0f729be545e75c09aae85c360/MP5K.png"
},
{
"id": "4PHq1TcVzAqQp11Ve7CFFC",
"name": "d-40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4niSMDCeiryoMBXJZq60Vv/48339331d05e289868cf4050c49b1b2b/D-40.png"
},
{
"id": "5Y36nPWZ6lXp37GDupoLRV",
"name": "p12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2mpM7rah7rwEW0bViIirUC/ed9caa4db58421519fa4db390b1aa164/P12.png"
},
{
"id": "6L5PL3qOQjjpNUdA9l0WLD",
"name": "mag-net-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1IKNZzLv63AJd9vlbXj3Bo/883371432ffb22e5bf35bc82dd706384/Mag-net_System.png"
},
{
"id": "3ePDML7HMucggZaNG2nL0a",
"name": "t-95-lsw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/23HCxaNTRUHBlFAvCTMZQm/fe319cc164fac034a29e9b114ae7d5cb/T-95_LSW.png"
},
{
"id": "6pPXSrzgAKEyTiiyrs1Qbn",
"name": "six12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2v6MwsHwjOZ5Muid53lyfN/e5f1c4997db93abfe3ac356fce23376c/SIX12.png"
},
{
"id": "3ECycrhAlLH7str0T4F2hp",
"name": "q-929",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2fRVszR5yGDHbV0AL8muso/0838dac90b66aa810daa49d36382fb64/Q-929.png"
},
{
"id": "31sOhkze6zBhWkkM8HR44n",
"name": "secondary-breacher",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3OvnVPWY1UyrDE913kU0a1/eae4b2a1584234ea2ff4ad6481239f3b/SecondaryBreacher.png"
},
{
"id": "168akpqtP52LsTYlMIqeHX",
"name": "candela",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4vpN9vu5wD9dyb2knMosTy/430796de3c0c2a5c2eb2ac6f4217eba0/Candela.png"
},
{
"id": "5hAVF2eVv7NyeJPAJL07sg",
"name": "ak-74m",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1j5HiQP8aFphTe65fqDdg0/23eecb5c603c5ba9f59fc6cbc5e4a531/AK-74M.png"
},
{
"id": "Q4Q9LtkAztMdeUT53C39j",
"name": "arx200",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6VgkPBsr1WApI3rWc9kcM0/b18b8e25f3e951e8e722213f2ee59eb0/ARX200.png"
},
{
"id": "2NcOcqzfy4HnaHNUITnUYN",
"name": "-44-mag-semi-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6W3Jz0YcQzbZ6BOPr7VVel/4c67f342964132a652f7d5821e887050/.44_Mag_Semi-Auto.png"
},
{
"id": "6dV0styTnHMeqh4effTNF8",
"name": "airjab-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6d0LN1QWzviEkcYu3mTn6v/e49511a479756f71224f14225ad9cbd8/Airjab-Launcher.png"
},
{
"id": "4jhzD37iXaCsWyBAo1PQ5J",
"name": "p90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4nGrNspOvII2oS3lEMkg5x/2398a493c298bc654f97c58767aa40f3/P90.png"
},
{
"id": "43jUNG843Bn0knjA3tXwXo",
"name": "sg-cqb",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5JoL3b36Fsztt9Q2XYmrbJ/dacec96948d3f8fe92914a69b9aac593/SG-CQB.png"
},
{
"id": "ElQvUTqCd5JbW2PIJ0lTS",
"name": "p9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6Fd1cl17KA0CtgodEiiY6v/d0f145ea72f2aacbd04260ba7d8f1c74/P9.png"
},
{
"id": "55BZj1JeqRvuczMpa04gRU",
"name": "lfp586",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1zc7UtdBfCZakwbiYqBvSz/1fd3f1584de38ca7c9315d498f094276/LFP586.png"
},
{
"id": "6XCPWiyRqIM6rfCYnSRFKg",
"name": "armor-pack",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/MeoKw7iPY6EFYvjS07CRg/b2d7eba623f3c63d6b7097a8f2253954/Armor-Pack.png"
},
{
"id": "3ATrltpsW24BFhZMHNmhfI",
"name": "fmg-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/0oneJNsBR06QjuowxwtHG/bd3b391c6eec2bd615f2ed83197a13ac/FMG-9.png"
},
{
"id": "4ggSgqX4ixVHJZwhnenHC1",
"name": "six12-sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1GTua079Xbtkpjhx96sRsW/079ed1a71a0d12b5e48e1b0d40b87110/SIX12_SD.png"
},
{
"id": "01zcYOKDgxP24MPkEaswD7",
"name": "5.7-usg",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/tkYcSAJSe5yGkeUhzZqBO/e81feb86df4a7eb6951052bec26b6ed7/5.7_USG.png"
},
{
"id": "7GKyGyCXQ9vVZ0kCdSPJl4",
"name": "d-50",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6mMQRDsrComRFa7bC6cNkG/8cd17e545e3d28dcc11a040d000cfa16/D-50.png"
},
{
"id": "1p5ZdYWvISi4qDV0S2fDP4",
"name": "frag-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4GZsPHbm9H0k5EWz7TMkwO/33b9007bc6ee03dab15cfa15eb69e096/Frag_Grenade.png"
},
{
"id": "6GQ8on95B9PMLjMDrZjXgD",
"name": "hel-presence-reduction",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/57miqbOn8xWBh7ne7za3CV/35364108d49380a0ed33998f970e104f/HEL-Presence-Reduction.png"
},
{
"id": "56o4y5mOsXlFhnzWlq9xMJ",
"name": "commando-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4P9dpUph5w3MSsLNnW6be/04baba24990fcb75a9c0bcfd01b7d190/Commando_9.png"
},
{
"id": "J5YsiIB8uvpeZgrWXrhlA",
"name": "sdp-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/Tgsdyz3XEqmgUYi9aZZgb/6755f4da7af7a7179ffab92acf8d477e/SDP_9mm.png"
},
{
"id": "6m6vEqsps3Mhn6cOHo9yKS",
"name": "pest-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5L0fFKVOwozKMcmJoenfef/56e4efdf77363556b35a76fd4e0e60f6/Pest-Launcher.png"
},
{
"id": "5AVE3Ok87dbmTwuI5K5fZg",
"name": "le-rock-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "7J9icaPnaxguoiBWfdqomb",
"name": "le-rock-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "64NDkY7SFav037M3uh6KRD",
"name": "vector-45-acp",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7D1cDf13FqUhoLihzvuPln/068aa7e507155598449c58c0a49a90d6/Vector_.45_ACP.png"
},
{
"id": "1M88HlyLX6jD774vkptDLV",
"name": "ita12l",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4Y6ziRzm9RiPii83fm8BV1/1f472744d2c2dec8d9206f4d8733d92c/ITA12L.png"
},
{
"id": "6X2RibCre3jpetmCoFZaUu",
"name": "black-mirror",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1a1w8epOhWE8VtzvvCJG9d/b20cbb221f7d45e5838f839ce042f409/Black-mirror.png"
},
{
"id": "2xDl4cDXX48FuUiMPApZHo",
"name": "ar-15-50",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4lGGEGZLkbldz114Wl5hCo/78a04c46654f80fae03e730bd79f3563/AR-15.50.png"
},
{
"id": "4zn5v7GdQhRyojYT6qAwwM",
"name": "m4",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3jhi90ycmuc8mAiuSXFoCi/bcf354459e7becd6ede52ee97917c832/M4.png"
},
{
"id": "4GfGPq4g6TDwHvQEJII9ee",
"name": "1911-tacops",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/189UukZ6fVnvQR6LJtLYry/6eec29603d5b7b0ca8cab6ac0ef083ac/1911_TACOPS.png"
},
{
"id": "3ECK2BieW8MOShqE0XJVwd",
"name": "breaching-torch",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4rPBvxDKsKiQCMjt7GxJMw/09e45c68bbc41c1721acbbe0257e2465/Breaching-Torch.png"
},
{
"id": "4F64StqLivWX4lHm7iNgqG",
"name": "v308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5YBZe76NUDO32eF66wW90g/488c315743d59230962a4d67618223d6/V308.png"
},
{
"id": "7MC9QIlZkFL8AAqkAfGIbV",
"name": "417",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5djkS4YtAtOF0vBmg0T60x/ea2b1ff7e5367e66c99bc7ad7e95bfe3/417.png"
},
{
"id": "zRjInzDWpoahREdiE2RDM",
"name": "ee-one-d",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7fRknnWl2K2qjKle1t79j/0506d25798aeb0691c8a576665050f7d/EE-ONE-D.png"
},
{
"id": "7tItrCBHMWLbtvlBxYDWfS",
"name": "g36c",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2SZoqSXKoNPvZFIJsFsDE5/cb109885bf19c8697abf832f10cfd9a6/G36C.png"
},
{
"id": "4FYdLQfxYBnKCfY2cZ9flD",
"name": "gemini-replicator",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/K8E4EHWbD8wTjVqro6wVl/62339b2fbe1d3a2319dcd320f7a0b070/r6s-operator-ability-iana.png"
},
{
"id": "E0pKBweJkY0ok4BvfDSxv",
"name": "csrx-300",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7tUB9ZNXJhdN6ejAkCEeFQ/99691bcc19f641cf872925905d08a539/CSRX_300.png"
},
{
"id": "5EraRZbq9P8VR8Sd0Sarh9",
"name": "spsmg9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5EtwSgylXckBNg4n6gDR9J/bc6fc6c5c12ae11da59aee95828ebd76/SPSMG9.png"
},
{
"id": "2Eik88OMmWOse0qBVegpjG",
"name": "lv-explosive-lance",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/75eebt48ELO4eGGdIMVMpY/9533c7dc8f36651f5b5ad50c8ccb6c5a/LV_Explosive_Lance.png"
},
{
"id": "3rjbxjDZx9mwvN5xHkZDWp",
"name": "aug-a3",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3W9XJdMOgpHSw55HfwRSAv/cf8f220678d503e6c3e535c00b2e636a/AUG_A3.png"
},
{
"id": "3vCxcPpLsOovwCKEuXLJrN",
"name": "tcsg12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2NDbY7BTBJ9R09LUilTlRf/3728337cd3ba14ed6ab9de0c22e879af/TCSG12.png"
},
{
"id": "4hAJAIXdGAU0uCmTcwLoha",
"name": "rtila-electroclaw",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7rUOk2LhYIUjvYLot7GT8Y/94b72bfbbfdf50c2c807cdbf9f5b276e/Rtila-Electroclaw.png"
},
{
"id": "7gAppJYmlz1A8xXgPt0a5m",
"name": "m870",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2rkU6g4Rlg0e0U4rczWGTV/a51589a54c43f476d8eb984c0ea881e9/M870.png"
},
{
"id": "2Hre4GaBWs92I37LII1O8M",
"name": "416-c-carbine",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2I86r2a2QD8EHTZVZnxcxy/2913450ba952a16c29fac1f5ce58ba1a/416-C_Carbine.png"
},
{
"id": "17DSq6qMxwSmARSjIDwDKE",
"name": "active-defense-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1YCujceutAcJ7F10yhHC41/c5f870e7789b6396c9997ed45ccd3beb/Active-Defense-System.png"
},
{
"id": "2BpqLwwDeSr7QpNZqsLvBt",
"name": "f90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/62tE3th2ThcGHlrcqWkmEX/d69c9de199542e25fa55f6d293f15671/r6-operator-weapon-ar-f90.png"
},
{
"id": "3zbsuyeTh78X5KHV3M7Ctt",
"name": "m249-saw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3p0oG7GsLIoHaRullf7xsF/e2a9e135af63e8897355023cd34538c4/M249_SAW.png"
},
{
"id": "WliOiho6hjQFZ7BiJT7uV",
"name": "super-shorty",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Dq8LDmIxAveRqXM17orUW/cbd96b47cd8ca74a7827b16ef73fe7cf/r6-operator-weapon-sa-supershorty.png"
},
{
"id": "2IZvSVScGT9SAKL7oedtlN",
"name": "trax-stingers",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/QGVvmZeZ91FC2X4mvMzgn/601fa45e635872aea31f15ffebb9c366/Trax-Stingers.png"
},
{
"id": "5nXwwDj4qtPaGvCKrTqdpC",
"name": "ots-03",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4fXznwDtLt61VCF8QIF4N3/34e2e6d6c33d4c504c945bdd13c322f6/OTs-03.png"
},
{
"id": "5JiIaIiidLpM5wZGRmbZxO",
"name": "flip-sight",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/73bNPGhlIuhlWvi497sYqE/b68414436088f62f9da44cd42f702df7/Flip-Sight.png"
},
{
"id": "3seBqopkUJQZwKAodylxXj",
"name": "volcan-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1JqlRdbaVA73jDq8y46vX4/82e89f39c479526ace294ba246d0b085/Volcan-Shield.png"
},
{
"id": "bz7Z7LsOpGGFaLxmNl5nY",
"name": "ak-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7KAZZgnpqD07y47jVVXEuh/e0d7e67101f8f966aa6e1c59e835454f/AK-12.png"
},
{
"id": "4t1fOF2T7bKerBD9VJA5HH",
"name": "6p41",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1wxS2HOCvoPAfnJEDFWjfw/7feddb98582ec37b500243d3f3e19eca/6P41.png"
},
{
"id": "2dRlzAkeuYgN8yAw3538qs",
"name": "ballistic-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2C21gwsjOka5Rwp8qSM5hA/a38937032260bce4f690fb9bb8adf4c0/Ballistic_Shield.png"
},
{
"id": "5mHxExG3OPZkUmuXk4bzD6",
"name": "cluster-charge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3YaoPPUbFYeVSCemdj57EL/a4a4a8c0a935640f7d9a1d1ea82bc48c/Cluster-Charge.png"
},
{
"id": "5wGib1JAMhp1o32ZKqXmm6",
"name": "spear-308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/29LjYuJ4s6yA8k9Uv2u28C/89ec812559e7d74b7c269279f4c46d92/Spear_.308.png"
},
{
"id": "5drcQCH9GYIQ02G2qL1lUJ",
"name": "sasg-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2Q6mL4CbifmIgifV2yV3Hi/2bb2b323f055b03a2c1ba516c262c24e/SASG-12.png"
},
{
"id": "Gef0UGqp5PwYOIHnYuMqM",
"name": "adrenal-surge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/9xGRNPNznBKssvgQAtQNQ/9352fc88f2911ab40789412856b3e20e/Adrenal-Surge.png"
},
{
"id": "3ix2ui28VAIlHII80zcM5w",
"name": "ump45",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6X2EZPq2s8UKrP67uxz5FI/f0df4c57d5890c79311e4eb62d4470e7/UMP45.png"
},
{
"id": "1DQ0Gw0othORiig1DeyG9p",
"name": "m1014",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2pUiVbwNnQnDTesmWXktqW/f27c1fab9a354bb89cbe309a688f5e02/M1014.png"
},
{
"id": "3z0HQKCIxJGY6oyDt03sKb",
"name": "armor-panel",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/29N9nMqB8ZZxGCPz128ccD/439cb1fcb2f6d5385378cf073a5fbc30/Armor-Panel.png"
},
{
"id": "1LHrJG8fIAJhMI6aNtPkAK",
"name": "para-308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6ub8y2Cs5EYhVPfDWuVVkW/82ca131a41ee4ba2e0b75f2dc52ed9e3/PARA-308.png"
},
{
"id": "2u4Ha4SV5Gc18jlkABp5m5",
"name": "m249",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7z8UpVPS3P14OC1oL9dDIn/39c0c657f154218003fd4b2a9250b92f/M249.png"
},
{
"id": "48ucSL0frcAy6enBiqoCT7",
"name": "tactical-crossbow",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5ur3NZUGos3i2HR8f0HIzj/46cf23c97453ebfedeaa42a1088ff32f/Tactical-Crossbow.png"
},
{
"id": "2K9MC1d7AwBgBsELz8LqGt",
"name": "selma-aqua-breacher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2sjKOnwHeOX2xn3iIpja2A/e265f675c905ac25c23ed11fc85589bb/r6s-operator-ability-ace.png"
},
{
"id": "6zo1HGo261dNdW2J7dBKNF",
"name": "g8a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4TIb7oeJesaROOOfTlCBaZ/ffd6a802f9a779a0d39b2122c49b3254/G8A1.png"
},
{
"id": "6RefWdx10DL4hO0egguU6k",
"name": "smg-11",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3WExw7Kepz9uAiWAbWW457/875fc631a3cf9fcc2849d9db2989cbcd/SMG-11.png"
},
{
"id": "2EyyQjA0RqjezxwBQRjs9i",
"name": "garra-hook",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3WejtMAtiITfpjDMuq6j4t/b52e58da6b2625839aa23f940c8e6639/Garra-Hook.png"
},
{
"id": "3VKlSROsHKgSvtDPoTEkqA",
"name": "sc3000k",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7x7eDTm2NNpfGiFMrfQqEX/9898e74c780537be3ca6d88db32ea21e/F2000.png"
},
{
"id": "01BtNAaccSZAwYXHWPvftF",
"name": "mp7",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3a4dgTWGdiJqALhtRp4pKy/f2568d3de3cfe7e4b53179e8653cd2a2/MP7.png"
},
{
"id": "2JJFeZIJQhdNGXTnlihNlC",
"name": "argus-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6h4hyVSzG8IwAmEl1Objrd/6e51e64eeffcc68746b8ff59445fb103/r6s-operator-ability-zero.png"
},
{
"id": "3EA7ghdrMfBGGjUNcE4MBE",
"name": "r4-c",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/dQbqK9VxczuiscwBDSkT8/777a062f6095dde0371eab5200dcb451/R4-C.png"
},
{
"id": "4yuTxBnHbo06UOT2lN9aH7",
"name": "m45-meusoc",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3u5cecgWYl3WuJK50mKEGd/a4a0eb15c710edfc0d29e98c2ee7ea33/M45_MEUSOC.png"
},
{
"id": "52qdmZ4OCXOiyJY1ZKOaCS",
"name": "breaching-rounds",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/0114WqhzsMsnvaKc4FypkN/5ebb9b86e216a2d9e6b2ea01eb3346e8/Breaching-Rounds.png"
},
{
"id": "61aQ9zUboTRqi1enZxK9ly",
"name": "f2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HTvw1cJInVAGxOLXR0war/2f142437f5c0944fdcfcce8a03c37676/F2.png"
},
{
"id": "4tazZObB7cojVKPOSe7ECB",
"name": "shock-drones",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5dZ9kaUfUSF3piuFIUKf2t/7ebfc51caee42a776492b56251d45d92/Shock-Drones.png"
},
{
"id": "76NdyGdH2niECMq7R1mmcc",
"name": "cce-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5mmGgrYdJJHw2moBIEW9An/64e9727d959d7afdbb4fb06e2f75574a/CCE_Shield.png"
},
{
"id": "H9nxKMekxjjC0VY4dPkFl",
"name": "p-10c",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2l4qwB50zSFhFZVYRLNwqg/20df8114f69f96f2adc54779ccc5bbaa/P-10C.png"
},
{
"id": "6uUbnIyQCMCeOMHYOMY6U5",
"name": "cce-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1jck6fnzAMbMQrUMVsnA0M/d04a60eab0132e6bcc202a4f99186cdd/CCE-Shield.png"
},
{
"id": "4NYCY16B7qUBs0HYRIc7vB",
"name": "alda-5.56",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/39yB6TFl9ph6Rb4bDV4lqK/7f9b3abf8dff19bacc026a7212849ca4/ALDA_5.56.png"
},
{
"id": "52fFOJXcNhgjPzKJTXY7pM",
"name": "acs12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/13z63kT1NLzn1U99o7WC4T/8655d3200f24b87246c36f2622603457/ACS12_PB.png"
},
{
"id": "1syKrLJCDUI7WxAhuesUGJ",
"name": "keratos-357",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/15caVAsSCr8Rsb5Hid36uc/59632c4f90727931041ced62a620018b/Keratos_.357.png"
},
{
"id": "6YdURVQbxwxmZRjKY1ZwBP",
"name": "evil-eye",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/n2rfPidCv630jQEfnEWwb/42d454d0771218eb8f27f6d17d8a073e/Evil-Eye.png"
},
{
"id": "3tnLGjoWpUTeSpfzXNZcyr",
"name": "gu-mines",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6PJv86R8CtQCWA7a24sJE2/24f3751b2ed941ce80a4c1ef394ab7d5/Gu-mines.png"
},
{
"id": "1v4iBk8OSQ5FCg3RDPeIAN",
"name": "mk17-cqb",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4LytczDQmu0M63gO2WtCCm/331ef3b1938352ae71d7c0bd23de3596/Mk17_CQB.png"
},
{
"id": "6mq7Ochfrvvq8qX52OOR70",
"name": "sr-25",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3H3sICdj6BK8LhtQPRd2aJ/26826ebba73e0e5fd503256d069f3256/SR-25.png"
},
{
"id": "7xWTsMnS6KCAogW4wJAG8o",
"name": "rifle-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2dZeBTlDDdFQKb4PYb8F5v/162d60178a75cde9f65be362eacc880a/Rifle-Shield.png"
},
{
"id": "2o6tAempnPqYVtMSkBAiN7",
"name": "type-89",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7wLf325q9amF8bnVP1QGr0/2faff1a197f90dcded4472852a317d6b/Type-89.png"
},
{
"id": "5p6Nw5U3jQGLOn3u9VjkbI",
"name": "x-kairos",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1QSzVxpGhswXix3vn8XGKj/c4f64fa0895bdaf164448e3ae49950a0/X-Kairos.png"
},
{
"id": "eFmXKWVc4sXT4dOujQ75d",
"name": "l85a2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5vYQpoyk36foDzDq49jBd0/1479a2d7189e545555ceccecf6bd7cc3/L85A2.png"
},
{
"id": "3Vx4zT0vcf5CkxoQL1xJQi",
"name": "m590a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2zRHmgqENNiZqXQxC9Rsbj/e6542407c642f9b7c5a4546afb6db30a/M590A1.png"
},
{
"id": "5hjFvX6r4GLEqYNQu7p2yi",
"name": "p226-mk-25",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/RTQvPQcywlRwUS1FjIKCX/6fc72fee2191c2e723276bc10ae4114e/P226_Mk_25.png"
},
{
"id": "23YMIzsQbz1cld6LVhH9gL",
"name": "tactical-breaching-hammer",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2Vyo9CrQ1J7IZe43XpT4pV/4bc02e829d1b1745b9a527ff34f8fafb/Tactical-Breaching-Hammer.png"
},
{
"id": "6bqK3TkPT2RsEya3tCYiyS",
"name": "entry-denial-device",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/FLgwGbMiZTrWcK62KxPq8/d4e584420f85fa61c09e5e57e12d9dd9/Entry-Denial-Device.png"
},
{
"id": "7xrnNbilGusIwlgOWGgPci",
"name": "c7e",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/63vTDjkXeKq7rOoSBhoJD4/08603e6603d564e0fa38af9ec86b7c1f/C7E.png"
},
{
"id": "5Hum7CZF4TohcA6nWHd9pO",
"name": "pdw9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4yYCuRnduMq35CTHfq6wwU/b7d49cdbcb05917e014c99efeaadd33b/PDW9.png"
},
{
"id": "1Yd2g2vX5zLxInwgvLdkoS",
"name": "eyenox-model-iii",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2gexf5zLDsa74J7urCoDxk/50da09626395cbe1bf2a58e00a57a514/Eyenox-Model-III.png"
},
{
"id": "213tBMKn095fROa9B1cV6n",
"name": "552-commando",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1LT0N89YaOHvRwn3Pphr8K/02d4a3da9cda132d8201fd134f24fede/552_Commando.png"
},
{
"id": "6s1kidUloe8vrTrRtaj8On",
"name": "electronics-detector",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/23Nk2ie06rb3DcZnStryIY/e06226196dd582c905c33fad87dfdd63/Electronics-Detector.png"
},
{
"id": "633M8tPgKNHXSTWB7czGD7",
"name": "shock-wire",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/129HTNU2A5kIcMj0KZ5UjU/858b60dd0e9b8692e2dc693eded50e14/Shock-Wire.png"
},
{
"id": "3V9mmNyrWdClHpvgajW7Vb",
"name": "remote-gas-grenade",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3ZbADU6FxBqdvcA8vCpYhn/6c69d61202364fa420e2a319d817c6f3/Remote-Gas-Grenade.png"
},
{
"id": "1jYxUQekCIJRQJLsZeRU2u",
"name": "stim-pistol",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7njaeUjJj27iYH27HnH6jn/c5533d2d7191b879c313013f278f5f59/Stim-Pistol.png"
},
{
"id": "3NPuy5qu8ubwkr9kkWUqdz",
"name": "mpx",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HFewpAJ8npDDCKFnEadhL/d398bb477d6b56fe41bfdb5862ed31c0/MPX.png"
},
{
"id": "3y2d4dOdhOGKDc01uC9igS",
"name": "glance-smart-glasses",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/40RkJUEmmBCf7bmfTL8ao1/1d973adfe4d002c94655d9818776fb41/Glance-Smart-Glasses.png"
},
{
"id": "90Ex0AIJDFcQzHkKhyprN",
"name": "scorpion-evo-3-a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6OdwaLWxcnFvhlVwWbP2Du/4f7e94bdb6d34d5c0aa7b7b147b4092e/Scorpion_EVO_3_A1.png"
},
{
"id": "291z9C5QiWXQ5CzvQBSNe0",
"name": "f0-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4TDWnhbgvLkc6HBWDJp2ST/f50cbd83d6d295ab59f17f7e21d713bc/FO-12.png"
},
{
"id": "7DXXdxSwrCVMgBPNrOE8Lv",
"name": "grzmot-mine",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/10Md7ccaUO0pE0nCWimeoZ/35dddc67a4141e844d7904051a0314dc/Grzmot-Mine.png"
},
{
"id": "6KgccwgjLPqJjluHgRiryh",
"name": "556xi",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2dgpAeAWb3SkZV7rxDbVdQ/fa32323256b7c6f8a1977d3f71e7d4b2/556xi.png"
},
{
"id": "1FOQiOCNC7d7ZnLTBN8CuK",
"name": "exothermic-charge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/R5giHT90R2XOMMuUENZeK/840a5a391ed57a0c62208e72258407a7/Exothermic-Charge.png"
},
{
"id": "1JYfCA53I45QLVbP4b66Ir",
"name": "heartbeat-sensor",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7dPXIadD3D2a3uEqrCPvj2/103ad9d0d3b71adee3b92a5db96fe24d/Heartbeat-Sensor.png"
},
{
"id": "43JYQ0Gmjgy8D1QMkyThcg",
"name": "black-eye",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1EPfd4xeuMpt5nItOYm2Eb/b59223248a508d205264ece3c3553d36/Black-Eye.png"
},
{
"id": "47OwyR2cjFodSNwL8dlejO",
"name": "signal-disruptor",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1M5fsUELbaAzImzMte2ESa/9de588693ec317c87ef1a2021bd43b86/Signal-Disruptor.png"
},
{
"id": "3af3vtoUw8qRsUtSyDVJVt",
"name": "ar33",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/16U6xEvX8I5xQd9duveBLN/45d22960872cfa3fb6be9eb47fa0be4e/AR33.png"
},
{
"id": "1WAHBiTy7O9LYcNcPWIZB9",
"name": "emp-grenade",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4p4srpOH4sq55OHryHhn5t/d31728d1432ed28c429ea566caf0e083/EMP-Grenade.png"
},
{
"id": "5q7YklKN5fREPQk7jcSWiR",
"name": "g52-tactical-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7qmWjGZayvK4t6E80Gvu7g/8b789d6d639744dce100c2cfb9709e6a/G52-Tactical_Shield.png"
},
{
"id": "5xUtwLz2ADRyf0cfAjgCOj",
"name": "flash-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7EXDIOjPFMhPKZWY5OcEQC/f2df48ebe5673dca7773d81efd940b66/Flash-Shield.png"
},
{
"id": "2ejX9LEWZ8bnfTRfiYjuAc",
"name": "k1a",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5mUa2p8WXbiyD71qUI8sGk/ed753b6f0ae30ab5737486dfcf32ee9f/K1A.png"
},
{
"id": "5gU6SEAl07LepHSaNypGFy",
"name": "erc-7",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6WbhiNk0evsKWChPneCES6/af08476e2f917878e0326727d2d5fb8a/ERC-7.png"
},
{
"id": "4rdWW7rPhzJUlVnP5dGs8f",
"name": "mx4-storm",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4qRh1frGkQZxNyeKA4D6n1/20f89cd1d9953f06207b7340ea77fb17/Mx4_Storm.png"
},
{
"id": "Rdsy0lubw5hfwmW00FbR9",
"name": "prisma",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7sJYir66zAPq2omSvYeT2u/8fbe3370d32fb5433fb6d3a86d46a1b9/Prisma.png"
}
] | r6sapi/definitions/loadouts.py | loadouts_const = [
{
"id": "1MobOPbsFZoVpLWDUUgmeg",
"name": "dp27",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7LoT7yAe0LK7bDOeq6MZZM/33995bc704667674af1b73fe962d4c7c/Primary_gun_DP27.png"
},
{
"id": "3iisbOg3JC9epuJDdrMcAk",
"name": "9x19vsn",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/42gH96xTTYaTZsfXI3c0wL/a7edbf11af97091ee884b68e59fe6a4f/9x19VSN.png"
},
{
"id": "4EmVfbHbYqwRNnK02lU79C",
"name": "pmm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3y4LIwwm8YNQHAv8oOkWCK/a2375901cee34e68fa39c976d85de8aa/PMM.png"
},
{
"id": "3Ch5Pac0IKVBJe5oYZzIol",
"name": "gsh-18",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5s5Q33j3MNcXf9lwfxfd7m/4eb3a6af1d431481b6ddcec44fbc7602/GSh-18.png"
},
{
"id": "3WoO6qQpm6SkD2ceFlpIVq",
"name": "barbed-wire",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7igaibxuCcSpWDkZensEJS/bfa2cef52f3d860b7a06c2b4d7a6340e/Barbed_wire.png"
},
{
"id": "5QtTa00eoscVRzAfGy44y6",
"name": "proximity-alarm",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2TsFLmb2O6LRZpbxzkZDck/c9146913388a9567500b704c95600621/Proximity_alarm.png"
},
{
"id": "4rJKd9S4S3Edu84n3jaWbq",
"name": "shumikha-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/37wX75QnY7XA6KbjM4aF5n/0ab116d398cf71463e11d43913818ec1/Shumikha-Launcher.png"
},
{
"id": "1LVSwzrXIEAd1O3vntSQMs",
"name": "p10-roni",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7K86OBjL3zmYWt0ZvUcCLj/16a947334e39f27da177d787773593e4/r6-operator-weapon-smg-p10roni.png"
},
{
"id": "6xDz1HSwIn3ZcV9nKIeKUN",
"name": "mk-14-ebr",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6KIMqp5dA95z1RI3PrG9jv/eb939638169811a3fa858a44e6e5d97e/Mk_14_EBR.png"
},
{
"id": "5mI0sCcUxKW3Imv5ZMBBeL",
"name": "prb92",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/dl28J1HsE7mzhj66pmd5D/b8d8fc48d2dde13154047de94abbd8ca/PRB92.png"
},
{
"id": "7eb4vAG3ycZGuIRAoRl58a",
"name": "surya-gate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4hLJAAVKrf50wosG0471od/cde1867daf863c03754969f159ac00de/r6s-operator-ability-aruni.png"
},
{
"id": "2Mh1URS57n4Yuc2vHOojl7",
"name": "super-90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1TLWSu0xHJlAsfEfafeC9X/f9647e70a18962bf1627095c8b46832e/Super_90.png"
},
{
"id": "4pZ8kx4SSqhhLJ1iaSyEAU",
"name": "9mm-c1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60sbThKtOpNOwKu3OP0oGV/672fd9263f7786402a0d855273473a6f/9mm_C1.png"
},
{
"id": "2cDP1BjKw2UkKkDJhYLZAU",
"name": "mk1-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3tWoNeF3jQYs3w4EOydQYs/434409c96693df1fd3e969d778e70795/Mk1_9mm_BI.png"
},
{
"id": "6Urz2FjkmefuoCPGDoVZCm",
"name": "ita12s",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5G4DroaSdqHzJWCe7qqbHZ/5dd2e03f853182c78a1e7fcbc642f0cf/ITA12S.png"
},
{
"id": "7pAPyONkaR3xGR47gvXwSz",
"name": "bulletproof-camera",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/gZuOXvuTu2i8hQX0B6auy/259f379a6283bae618443d722a896f1a/Bulletproof_camera.png"
},
{
"id": "6ZPm8q8dyQXt1my5OHZWic",
"name": "deployable-shield",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/W0WE0X2VQlxwWIAFCJ6Jm/523650a39de5a23dd9520d7299c9e25a/Deployable_Shield.png"
},
{
"id": "2yKP1QdTJfIMQN9d7ZeTmU",
"name": "welcome-mate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/xsIzH7XCAqvn7F3tEfAPe/c41e59a9d7f2ed7ee38b16ed0a882351/Welcome-Mate.png"
},
{
"id": "zs4Rebj67KAk06ASjJPxO",
"name": "spas-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Hp6Fbss6uI59OT4nZNB6e/a4d09954803cb2580353cfa03e8c778b/SPAS-12.png"
},
{
"id": "41pnpfTTAjzKYvpEBmNKdD",
"name": "t-5-smg",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1Ne8bvX8BdCALevWKMllQN/4baa3e79d323de134dd182e0272b9c3b/T-5_SMG.png"
},
{
"id": "1fj1XX5YxggVcr5mU1OPy3",
"name": "bailiff-410",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/N8FLbo4fsNyBe8msKgRhT/8f403dc0b58087bcafab786dd95ba33f/Bailiff_410.png"
},
{
"id": "4HiVAhAUQe5BEXgBNg2ECe",
"name": "usp40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7FxemzWRtlpAhK9MyKp1Gp/817cc25b6b7c3575dc1ba53a6a8170a9/USP40.png"
},
{
"id": "6kF8p8NlbGPvjRz42YxYYE",
"name": "remah-dash",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3dM2B3qCdU0woydIbiy2xn/55aa99443002ad794d3f78dada26d035/r6s-operator-ability-oryx.png"
},
{
"id": "eR3JkIxE5GyWvNpybHCRr",
"name": "mp5",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60YbOvSBQt6ZUlu8YDXoZm/51ef3857b2986de700262432e8433714/MP5.png"
},
{
"id": "5QeGmJGqn3gZxACxzF4kbR",
"name": "rg15",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2LNSsp7B7wUnnPUweir7Jm/9f66d53be7a63a17a55253a0bea6eec1/RG15.png"
},
{
"id": "7zuAWr4kVRJFYVj33Ltfex",
"name": "impact-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7iJK9B1Vr3PDO3rGftU00l/c3d8edc5564a80580e4ac2f9a4fc3937/Impact_Grenade.png"
},
{
"id": "4Lnu4kaDPzUIxgCStqfrbR",
"name": "nitro-cell",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4pBSTw9U6l9GRnT12G6Xln/e0991bc03b48d217f510af8b611c8828/Nitro_Cell.png"
},
{
"id": "5CkPFHPPJB3909Fff9BYBs",
"name": "banshee",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/49ixqWhGgjvHu0Ay8JzeSH/c6a3fe584847850186e15c7fb4244385/r6s-operator-ability-melusi.png"
},
{
"id": "wfzQPegCiVkDRgsx6MOjZ",
"name": "c8-sfw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1itXpz2GnvdwwRyhX1SYa2/b58ff71048fa3bb5ed09d5d935dc90f4/C8-SFW.png"
},
{
"id": "5b6dGdkffoVKAyDycG5hjg",
"name": "camrs",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4dBzqVVmnpv1DZi91LAnEN/e374b4ea289fc992280b943cdbb94d60/CAMRS.png"
},
{
"id": "myf6Hy39exE9Cot5zDEis",
"name": "claymore",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4T4H5EJgUxorucGVtU2pkm/74fef324b89c220ce6426e8097f915b9/Claymore.png"
},
{
"id": "2NNtCVZhqQykqVEtze4fxJ",
"name": "stun-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3XnK8s1iQJQu5cfr6UyQfK/429480b96d6d6d6b830c32c75d2608f5/Stun_Grenade.png"
},
{
"id": "XNjuIs9nL1RQNnWOMNfC9",
"name": "skeleton-key",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2w8EQtN4FFtEMa9lBYyWGg/36bbc6d819761c11418c868d2e483991/Skeleton-Key.png"
},
{
"id": "65tbXPRuQxAV8RUaMoCYJh",
"name": "lmg-e",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7JVJIew6t3iKwgByvrFXyi/7ba44dfda28b525506633e453104a604/LMG-E.png"
},
{
"id": "2dvLoMLwWSRwyK70gARboS",
"name": "m762",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4oWAgi7tgQP1Tq0HooRtye/9109a74921ee17610d4bd85a61582823/M762.png"
},
{
"id": "75HMflo54bNBGSUyX2je5s",
"name": "breach-charge",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1OgTMhyF1FBsSAo4njk26m/9881001e9db03a4806b2eea6007e4a1a/Breach_Charge.png"
},
{
"id": "5tTXUrm4TLdSBHtsJ1p9d8",
"name": "ks79-lifeline",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1elqIEWJ6XsXKAbMNd0Cai/0b4c0591bad284d957e652cdae0b706b/KS79-Lifeline.png"
},
{
"id": "HCADlLBkfNlDRRvlq3iPo",
"name": "mp5sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HaMldwFltBwiiyDDfkPpD/6de3aa9aaa17458e7f6186ba59b8deff/MP5SD.png"
},
{
"id": "67fxpqXp4gjOQixPHPaQMB",
"name": "supernova",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2tpjCRFLcc3hogjJGbKDsi/5ad0ab63b7245022aca5c1c1fb42d473/SuperNova.png"
},
{
"id": "4mbLbnjsLEQ27BEXQ1vqGs",
"name": "p229",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/76ja0RxqzHW9PpvWgpG7Sk/cb753b50b20fe67deaef54d8b2a46b54/P229.png"
},
{
"id": "7sblClEPf57IKm77UCqFSj",
"name": "bearing-9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4mdftEOh5Vu9KhhpgKLKrT/abedcc75868774018295ec2a08a7b3de/Bearing_9.png"
},
{
"id": "5FUiujmYYXsvq1zQ0lZlVx",
"name": "yokai",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/TdDZyrKpjt9EQo8tHpIJk/d987db4da22046a0663be8be82dcda88/Yokai.png"
},
{
"id": "5cZ1wkLzuOHnnYjma40PwQ",
"name": "bosg-12-2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2ZjVndetsX8WEn5ZfyUQa0/e3a781be7eab22876d25f748e8fd0f5a/BOSG.12.2.png"
},
{
"id": "2zEjl6sxdsxgVBmAsDZxcq",
"name": "c75-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3wUuefwPjU705mZkTdJ9UH/8ccb11884cfa34c176ac5500af139177/C75_Auto.png"
},
{
"id": "4W61sh5pt9Ghkw4g7Muvee",
"name": "smg-12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/EwJgB7KdgOb6dDm7ro33u/b73f0890f992c1a365210f08efcc6db5/SMG-12.png"
},
{
"id": "7aslgBcBTFi4XKqlAkvvrc",
"name": "smoke-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3LaxoSLC49T5vgKnUAlTLT/c47c4636845a04478432c48be8c29aee/Smoke_Grenade.png"
},
{
"id": "5o5qaxqMxosu04407U4sBL",
"name": "logic-bomb",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5ej2g1iCMHdfjn8h8qgfmU/bf07fef4b063a46389ca650ed02b292a/Logic-Bomb.png"
},
{
"id": "kzR6vfRLXm9f1EvoK9dBP",
"name": "m12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4FxqA5pa8JY9QQ7FEcjwPw/ffc779fcde5b970e7b95db6653637dab/M12.png"
},
{
"id": "5gcX8x7LiBHg2LA1JIdEHp",
"name": "spas-15",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/CyofBgipHq4RTafvPFWd4/bc3d0ecc871b70e57735855f852efacf/SPAS-15.png"
},
{
"id": "1Y7hJmAXWWqh1MIkXqUbKw",
"name": "luison",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5cSDFUWb8P1BAQUgnTozeM/fd3a3348f42c95d6afa9f105ae23f2e5/Luison.png"
},
{
"id": "1ojdoiQ8AbqFX3FB7Neqmk",
"name": "silent-step",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6PTsBBBGTT5oixxzvYv1Y4/18e31c74ba1ca73ed2694134acd9c078/Silent-Step.png"
},
{
"id": "6Il345pPRhv4Xx4qzTFpmA",
"name": "aug-a2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1eO39zRe8XxJXH1KZiIWhM/02049ced0fbfa630833e8b0d3c03de07/AUG_A2.png"
},
{
"id": "3Xq4lwAY8Sc1Z687gD9mnD",
"name": "mp5k",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1pk8nOI7ybQjYOSI4fuzOm/fcd78df0f729be545e75c09aae85c360/MP5K.png"
},
{
"id": "4PHq1TcVzAqQp11Ve7CFFC",
"name": "d-40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4niSMDCeiryoMBXJZq60Vv/48339331d05e289868cf4050c49b1b2b/D-40.png"
},
{
"id": "5Y36nPWZ6lXp37GDupoLRV",
"name": "p12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2mpM7rah7rwEW0bViIirUC/ed9caa4db58421519fa4db390b1aa164/P12.png"
},
{
"id": "6L5PL3qOQjjpNUdA9l0WLD",
"name": "mag-net-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1IKNZzLv63AJd9vlbXj3Bo/883371432ffb22e5bf35bc82dd706384/Mag-net_System.png"
},
{
"id": "3ePDML7HMucggZaNG2nL0a",
"name": "t-95-lsw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/23HCxaNTRUHBlFAvCTMZQm/fe319cc164fac034a29e9b114ae7d5cb/T-95_LSW.png"
},
{
"id": "6pPXSrzgAKEyTiiyrs1Qbn",
"name": "six12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2v6MwsHwjOZ5Muid53lyfN/e5f1c4997db93abfe3ac356fce23376c/SIX12.png"
},
{
"id": "3ECycrhAlLH7str0T4F2hp",
"name": "q-929",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2fRVszR5yGDHbV0AL8muso/0838dac90b66aa810daa49d36382fb64/Q-929.png"
},
{
"id": "31sOhkze6zBhWkkM8HR44n",
"name": "secondary-breacher",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3OvnVPWY1UyrDE913kU0a1/eae4b2a1584234ea2ff4ad6481239f3b/SecondaryBreacher.png"
},
{
"id": "168akpqtP52LsTYlMIqeHX",
"name": "candela",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4vpN9vu5wD9dyb2knMosTy/430796de3c0c2a5c2eb2ac6f4217eba0/Candela.png"
},
{
"id": "5hAVF2eVv7NyeJPAJL07sg",
"name": "ak-74m",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1j5HiQP8aFphTe65fqDdg0/23eecb5c603c5ba9f59fc6cbc5e4a531/AK-74M.png"
},
{
"id": "Q4Q9LtkAztMdeUT53C39j",
"name": "arx200",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6VgkPBsr1WApI3rWc9kcM0/b18b8e25f3e951e8e722213f2ee59eb0/ARX200.png"
},
{
"id": "2NcOcqzfy4HnaHNUITnUYN",
"name": "-44-mag-semi-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6W3Jz0YcQzbZ6BOPr7VVel/4c67f342964132a652f7d5821e887050/.44_Mag_Semi-Auto.png"
},
{
"id": "6dV0styTnHMeqh4effTNF8",
"name": "airjab-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6d0LN1QWzviEkcYu3mTn6v/e49511a479756f71224f14225ad9cbd8/Airjab-Launcher.png"
},
{
"id": "4jhzD37iXaCsWyBAo1PQ5J",
"name": "p90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4nGrNspOvII2oS3lEMkg5x/2398a493c298bc654f97c58767aa40f3/P90.png"
},
{
"id": "43jUNG843Bn0knjA3tXwXo",
"name": "sg-cqb",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5JoL3b36Fsztt9Q2XYmrbJ/dacec96948d3f8fe92914a69b9aac593/SG-CQB.png"
},
{
"id": "ElQvUTqCd5JbW2PIJ0lTS",
"name": "p9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6Fd1cl17KA0CtgodEiiY6v/d0f145ea72f2aacbd04260ba7d8f1c74/P9.png"
},
{
"id": "55BZj1JeqRvuczMpa04gRU",
"name": "lfp586",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1zc7UtdBfCZakwbiYqBvSz/1fd3f1584de38ca7c9315d498f094276/LFP586.png"
},
{
"id": "6XCPWiyRqIM6rfCYnSRFKg",
"name": "armor-pack",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/MeoKw7iPY6EFYvjS07CRg/b2d7eba623f3c63d6b7097a8f2253954/Armor-Pack.png"
},
{
"id": "3ATrltpsW24BFhZMHNmhfI",
"name": "fmg-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/0oneJNsBR06QjuowxwtHG/bd3b391c6eec2bd615f2ed83197a13ac/FMG-9.png"
},
{
"id": "4ggSgqX4ixVHJZwhnenHC1",
"name": "six12-sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1GTua079Xbtkpjhx96sRsW/079ed1a71a0d12b5e48e1b0d40b87110/SIX12_SD.png"
},
{
"id": "01zcYOKDgxP24MPkEaswD7",
"name": "5.7-usg",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/tkYcSAJSe5yGkeUhzZqBO/e81feb86df4a7eb6951052bec26b6ed7/5.7_USG.png"
},
{
"id": "7GKyGyCXQ9vVZ0kCdSPJl4",
"name": "d-50",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6mMQRDsrComRFa7bC6cNkG/8cd17e545e3d28dcc11a040d000cfa16/D-50.png"
},
{
"id": "1p5ZdYWvISi4qDV0S2fDP4",
"name": "frag-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4GZsPHbm9H0k5EWz7TMkwO/33b9007bc6ee03dab15cfa15eb69e096/Frag_Grenade.png"
},
{
"id": "6GQ8on95B9PMLjMDrZjXgD",
"name": "hel-presence-reduction",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/57miqbOn8xWBh7ne7za3CV/35364108d49380a0ed33998f970e104f/HEL-Presence-Reduction.png"
},
{
"id": "56o4y5mOsXlFhnzWlq9xMJ",
"name": "commando-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4P9dpUph5w3MSsLNnW6be/04baba24990fcb75a9c0bcfd01b7d190/Commando_9.png"
},
{
"id": "J5YsiIB8uvpeZgrWXrhlA",
"name": "sdp-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/Tgsdyz3XEqmgUYi9aZZgb/6755f4da7af7a7179ffab92acf8d477e/SDP_9mm.png"
},
{
"id": "6m6vEqsps3Mhn6cOHo9yKS",
"name": "pest-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5L0fFKVOwozKMcmJoenfef/56e4efdf77363556b35a76fd4e0e60f6/Pest-Launcher.png"
},
{
"id": "5AVE3Ok87dbmTwuI5K5fZg",
"name": "le-rock-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "7J9icaPnaxguoiBWfdqomb",
"name": "le-rock-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "64NDkY7SFav037M3uh6KRD",
"name": "vector-45-acp",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7D1cDf13FqUhoLihzvuPln/068aa7e507155598449c58c0a49a90d6/Vector_.45_ACP.png"
},
{
"id": "1M88HlyLX6jD774vkptDLV",
"name": "ita12l",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4Y6ziRzm9RiPii83fm8BV1/1f472744d2c2dec8d9206f4d8733d92c/ITA12L.png"
},
{
"id": "6X2RibCre3jpetmCoFZaUu",
"name": "black-mirror",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1a1w8epOhWE8VtzvvCJG9d/b20cbb221f7d45e5838f839ce042f409/Black-mirror.png"
},
{
"id": "2xDl4cDXX48FuUiMPApZHo",
"name": "ar-15-50",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4lGGEGZLkbldz114Wl5hCo/78a04c46654f80fae03e730bd79f3563/AR-15.50.png"
},
{
"id": "4zn5v7GdQhRyojYT6qAwwM",
"name": "m4",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3jhi90ycmuc8mAiuSXFoCi/bcf354459e7becd6ede52ee97917c832/M4.png"
},
{
"id": "4GfGPq4g6TDwHvQEJII9ee",
"name": "1911-tacops",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/189UukZ6fVnvQR6LJtLYry/6eec29603d5b7b0ca8cab6ac0ef083ac/1911_TACOPS.png"
},
{
"id": "3ECK2BieW8MOShqE0XJVwd",
"name": "breaching-torch",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4rPBvxDKsKiQCMjt7GxJMw/09e45c68bbc41c1721acbbe0257e2465/Breaching-Torch.png"
},
{
"id": "4F64StqLivWX4lHm7iNgqG",
"name": "v308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5YBZe76NUDO32eF66wW90g/488c315743d59230962a4d67618223d6/V308.png"
},
{
"id": "7MC9QIlZkFL8AAqkAfGIbV",
"name": "417",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5djkS4YtAtOF0vBmg0T60x/ea2b1ff7e5367e66c99bc7ad7e95bfe3/417.png"
},
{
"id": "zRjInzDWpoahREdiE2RDM",
"name": "ee-one-d",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7fRknnWl2K2qjKle1t79j/0506d25798aeb0691c8a576665050f7d/EE-ONE-D.png"
},
{
"id": "7tItrCBHMWLbtvlBxYDWfS",
"name": "g36c",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2SZoqSXKoNPvZFIJsFsDE5/cb109885bf19c8697abf832f10cfd9a6/G36C.png"
},
{
"id": "4FYdLQfxYBnKCfY2cZ9flD",
"name": "gemini-replicator",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/K8E4EHWbD8wTjVqro6wVl/62339b2fbe1d3a2319dcd320f7a0b070/r6s-operator-ability-iana.png"
},
{
"id": "E0pKBweJkY0ok4BvfDSxv",
"name": "csrx-300",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7tUB9ZNXJhdN6ejAkCEeFQ/99691bcc19f641cf872925905d08a539/CSRX_300.png"
},
{
"id": "5EraRZbq9P8VR8Sd0Sarh9",
"name": "spsmg9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5EtwSgylXckBNg4n6gDR9J/bc6fc6c5c12ae11da59aee95828ebd76/SPSMG9.png"
},
{
"id": "2Eik88OMmWOse0qBVegpjG",
"name": "lv-explosive-lance",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/75eebt48ELO4eGGdIMVMpY/9533c7dc8f36651f5b5ad50c8ccb6c5a/LV_Explosive_Lance.png"
},
{
"id": "3rjbxjDZx9mwvN5xHkZDWp",
"name": "aug-a3",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3W9XJdMOgpHSw55HfwRSAv/cf8f220678d503e6c3e535c00b2e636a/AUG_A3.png"
},
{
"id": "3vCxcPpLsOovwCKEuXLJrN",
"name": "tcsg12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2NDbY7BTBJ9R09LUilTlRf/3728337cd3ba14ed6ab9de0c22e879af/TCSG12.png"
},
{
"id": "4hAJAIXdGAU0uCmTcwLoha",
"name": "rtila-electroclaw",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7rUOk2LhYIUjvYLot7GT8Y/94b72bfbbfdf50c2c807cdbf9f5b276e/Rtila-Electroclaw.png"
},
{
"id": "7gAppJYmlz1A8xXgPt0a5m",
"name": "m870",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2rkU6g4Rlg0e0U4rczWGTV/a51589a54c43f476d8eb984c0ea881e9/M870.png"
},
{
"id": "2Hre4GaBWs92I37LII1O8M",
"name": "416-c-carbine",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2I86r2a2QD8EHTZVZnxcxy/2913450ba952a16c29fac1f5ce58ba1a/416-C_Carbine.png"
},
{
"id": "17DSq6qMxwSmARSjIDwDKE",
"name": "active-defense-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1YCujceutAcJ7F10yhHC41/c5f870e7789b6396c9997ed45ccd3beb/Active-Defense-System.png"
},
{
"id": "2BpqLwwDeSr7QpNZqsLvBt",
"name": "f90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/62tE3th2ThcGHlrcqWkmEX/d69c9de199542e25fa55f6d293f15671/r6-operator-weapon-ar-f90.png"
},
{
"id": "3zbsuyeTh78X5KHV3M7Ctt",
"name": "m249-saw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3p0oG7GsLIoHaRullf7xsF/e2a9e135af63e8897355023cd34538c4/M249_SAW.png"
},
{
"id": "WliOiho6hjQFZ7BiJT7uV",
"name": "super-shorty",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Dq8LDmIxAveRqXM17orUW/cbd96b47cd8ca74a7827b16ef73fe7cf/r6-operator-weapon-sa-supershorty.png"
},
{
"id": "2IZvSVScGT9SAKL7oedtlN",
"name": "trax-stingers",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/QGVvmZeZ91FC2X4mvMzgn/601fa45e635872aea31f15ffebb9c366/Trax-Stingers.png"
},
{
"id": "5nXwwDj4qtPaGvCKrTqdpC",
"name": "ots-03",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4fXznwDtLt61VCF8QIF4N3/34e2e6d6c33d4c504c945bdd13c322f6/OTs-03.png"
},
{
"id": "5JiIaIiidLpM5wZGRmbZxO",
"name": "flip-sight",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/73bNPGhlIuhlWvi497sYqE/b68414436088f62f9da44cd42f702df7/Flip-Sight.png"
},
{
"id": "3seBqopkUJQZwKAodylxXj",
"name": "volcan-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1JqlRdbaVA73jDq8y46vX4/82e89f39c479526ace294ba246d0b085/Volcan-Shield.png"
},
{
"id": "bz7Z7LsOpGGFaLxmNl5nY",
"name": "ak-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7KAZZgnpqD07y47jVVXEuh/e0d7e67101f8f966aa6e1c59e835454f/AK-12.png"
},
{
"id": "4t1fOF2T7bKerBD9VJA5HH",
"name": "6p41",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1wxS2HOCvoPAfnJEDFWjfw/7feddb98582ec37b500243d3f3e19eca/6P41.png"
},
{
"id": "2dRlzAkeuYgN8yAw3538qs",
"name": "ballistic-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2C21gwsjOka5Rwp8qSM5hA/a38937032260bce4f690fb9bb8adf4c0/Ballistic_Shield.png"
},
{
"id": "5mHxExG3OPZkUmuXk4bzD6",
"name": "cluster-charge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3YaoPPUbFYeVSCemdj57EL/a4a4a8c0a935640f7d9a1d1ea82bc48c/Cluster-Charge.png"
},
{
"id": "5wGib1JAMhp1o32ZKqXmm6",
"name": "spear-308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/29LjYuJ4s6yA8k9Uv2u28C/89ec812559e7d74b7c269279f4c46d92/Spear_.308.png"
},
{
"id": "5drcQCH9GYIQ02G2qL1lUJ",
"name": "sasg-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2Q6mL4CbifmIgifV2yV3Hi/2bb2b323f055b03a2c1ba516c262c24e/SASG-12.png"
},
{
"id": "Gef0UGqp5PwYOIHnYuMqM",
"name": "adrenal-surge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/9xGRNPNznBKssvgQAtQNQ/9352fc88f2911ab40789412856b3e20e/Adrenal-Surge.png"
},
{
"id": "3ix2ui28VAIlHII80zcM5w",
"name": "ump45",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6X2EZPq2s8UKrP67uxz5FI/f0df4c57d5890c79311e4eb62d4470e7/UMP45.png"
},
{
"id": "1DQ0Gw0othORiig1DeyG9p",
"name": "m1014",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2pUiVbwNnQnDTesmWXktqW/f27c1fab9a354bb89cbe309a688f5e02/M1014.png"
},
{
"id": "3z0HQKCIxJGY6oyDt03sKb",
"name": "armor-panel",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/29N9nMqB8ZZxGCPz128ccD/439cb1fcb2f6d5385378cf073a5fbc30/Armor-Panel.png"
},
{
"id": "1LHrJG8fIAJhMI6aNtPkAK",
"name": "para-308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6ub8y2Cs5EYhVPfDWuVVkW/82ca131a41ee4ba2e0b75f2dc52ed9e3/PARA-308.png"
},
{
"id": "2u4Ha4SV5Gc18jlkABp5m5",
"name": "m249",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7z8UpVPS3P14OC1oL9dDIn/39c0c657f154218003fd4b2a9250b92f/M249.png"
},
{
"id": "48ucSL0frcAy6enBiqoCT7",
"name": "tactical-crossbow",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5ur3NZUGos3i2HR8f0HIzj/46cf23c97453ebfedeaa42a1088ff32f/Tactical-Crossbow.png"
},
{
"id": "2K9MC1d7AwBgBsELz8LqGt",
"name": "selma-aqua-breacher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2sjKOnwHeOX2xn3iIpja2A/e265f675c905ac25c23ed11fc85589bb/r6s-operator-ability-ace.png"
},
{
"id": "6zo1HGo261dNdW2J7dBKNF",
"name": "g8a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4TIb7oeJesaROOOfTlCBaZ/ffd6a802f9a779a0d39b2122c49b3254/G8A1.png"
},
{
"id": "6RefWdx10DL4hO0egguU6k",
"name": "smg-11",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3WExw7Kepz9uAiWAbWW457/875fc631a3cf9fcc2849d9db2989cbcd/SMG-11.png"
},
{
"id": "2EyyQjA0RqjezxwBQRjs9i",
"name": "garra-hook",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3WejtMAtiITfpjDMuq6j4t/b52e58da6b2625839aa23f940c8e6639/Garra-Hook.png"
},
{
"id": "3VKlSROsHKgSvtDPoTEkqA",
"name": "sc3000k",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7x7eDTm2NNpfGiFMrfQqEX/9898e74c780537be3ca6d88db32ea21e/F2000.png"
},
{
"id": "01BtNAaccSZAwYXHWPvftF",
"name": "mp7",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3a4dgTWGdiJqALhtRp4pKy/f2568d3de3cfe7e4b53179e8653cd2a2/MP7.png"
},
{
"id": "2JJFeZIJQhdNGXTnlihNlC",
"name": "argus-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6h4hyVSzG8IwAmEl1Objrd/6e51e64eeffcc68746b8ff59445fb103/r6s-operator-ability-zero.png"
},
{
"id": "3EA7ghdrMfBGGjUNcE4MBE",
"name": "r4-c",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/dQbqK9VxczuiscwBDSkT8/777a062f6095dde0371eab5200dcb451/R4-C.png"
},
{
"id": "4yuTxBnHbo06UOT2lN9aH7",
"name": "m45-meusoc",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3u5cecgWYl3WuJK50mKEGd/a4a0eb15c710edfc0d29e98c2ee7ea33/M45_MEUSOC.png"
},
{
"id": "52qdmZ4OCXOiyJY1ZKOaCS",
"name": "breaching-rounds",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/0114WqhzsMsnvaKc4FypkN/5ebb9b86e216a2d9e6b2ea01eb3346e8/Breaching-Rounds.png"
},
{
"id": "61aQ9zUboTRqi1enZxK9ly",
"name": "f2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HTvw1cJInVAGxOLXR0war/2f142437f5c0944fdcfcce8a03c37676/F2.png"
},
{
"id": "4tazZObB7cojVKPOSe7ECB",
"name": "shock-drones",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5dZ9kaUfUSF3piuFIUKf2t/7ebfc51caee42a776492b56251d45d92/Shock-Drones.png"
},
{
"id": "76NdyGdH2niECMq7R1mmcc",
"name": "cce-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5mmGgrYdJJHw2moBIEW9An/64e9727d959d7afdbb4fb06e2f75574a/CCE_Shield.png"
},
{
"id": "H9nxKMekxjjC0VY4dPkFl",
"name": "p-10c",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2l4qwB50zSFhFZVYRLNwqg/20df8114f69f96f2adc54779ccc5bbaa/P-10C.png"
},
{
"id": "6uUbnIyQCMCeOMHYOMY6U5",
"name": "cce-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1jck6fnzAMbMQrUMVsnA0M/d04a60eab0132e6bcc202a4f99186cdd/CCE-Shield.png"
},
{
"id": "4NYCY16B7qUBs0HYRIc7vB",
"name": "alda-5.56",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/39yB6TFl9ph6Rb4bDV4lqK/7f9b3abf8dff19bacc026a7212849ca4/ALDA_5.56.png"
},
{
"id": "52fFOJXcNhgjPzKJTXY7pM",
"name": "acs12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/13z63kT1NLzn1U99o7WC4T/8655d3200f24b87246c36f2622603457/ACS12_PB.png"
},
{
"id": "1syKrLJCDUI7WxAhuesUGJ",
"name": "keratos-357",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/15caVAsSCr8Rsb5Hid36uc/59632c4f90727931041ced62a620018b/Keratos_.357.png"
},
{
"id": "6YdURVQbxwxmZRjKY1ZwBP",
"name": "evil-eye",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/n2rfPidCv630jQEfnEWwb/42d454d0771218eb8f27f6d17d8a073e/Evil-Eye.png"
},
{
"id": "3tnLGjoWpUTeSpfzXNZcyr",
"name": "gu-mines",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6PJv86R8CtQCWA7a24sJE2/24f3751b2ed941ce80a4c1ef394ab7d5/Gu-mines.png"
},
{
"id": "1v4iBk8OSQ5FCg3RDPeIAN",
"name": "mk17-cqb",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4LytczDQmu0M63gO2WtCCm/331ef3b1938352ae71d7c0bd23de3596/Mk17_CQB.png"
},
{
"id": "6mq7Ochfrvvq8qX52OOR70",
"name": "sr-25",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3H3sICdj6BK8LhtQPRd2aJ/26826ebba73e0e5fd503256d069f3256/SR-25.png"
},
{
"id": "7xWTsMnS6KCAogW4wJAG8o",
"name": "rifle-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2dZeBTlDDdFQKb4PYb8F5v/162d60178a75cde9f65be362eacc880a/Rifle-Shield.png"
},
{
"id": "2o6tAempnPqYVtMSkBAiN7",
"name": "type-89",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7wLf325q9amF8bnVP1QGr0/2faff1a197f90dcded4472852a317d6b/Type-89.png"
},
{
"id": "5p6Nw5U3jQGLOn3u9VjkbI",
"name": "x-kairos",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1QSzVxpGhswXix3vn8XGKj/c4f64fa0895bdaf164448e3ae49950a0/X-Kairos.png"
},
{
"id": "eFmXKWVc4sXT4dOujQ75d",
"name": "l85a2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5vYQpoyk36foDzDq49jBd0/1479a2d7189e545555ceccecf6bd7cc3/L85A2.png"
},
{
"id": "3Vx4zT0vcf5CkxoQL1xJQi",
"name": "m590a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2zRHmgqENNiZqXQxC9Rsbj/e6542407c642f9b7c5a4546afb6db30a/M590A1.png"
},
{
"id": "5hjFvX6r4GLEqYNQu7p2yi",
"name": "p226-mk-25",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/RTQvPQcywlRwUS1FjIKCX/6fc72fee2191c2e723276bc10ae4114e/P226_Mk_25.png"
},
{
"id": "23YMIzsQbz1cld6LVhH9gL",
"name": "tactical-breaching-hammer",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2Vyo9CrQ1J7IZe43XpT4pV/4bc02e829d1b1745b9a527ff34f8fafb/Tactical-Breaching-Hammer.png"
},
{
"id": "6bqK3TkPT2RsEya3tCYiyS",
"name": "entry-denial-device",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/FLgwGbMiZTrWcK62KxPq8/d4e584420f85fa61c09e5e57e12d9dd9/Entry-Denial-Device.png"
},
{
"id": "7xrnNbilGusIwlgOWGgPci",
"name": "c7e",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/63vTDjkXeKq7rOoSBhoJD4/08603e6603d564e0fa38af9ec86b7c1f/C7E.png"
},
{
"id": "5Hum7CZF4TohcA6nWHd9pO",
"name": "pdw9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4yYCuRnduMq35CTHfq6wwU/b7d49cdbcb05917e014c99efeaadd33b/PDW9.png"
},
{
"id": "1Yd2g2vX5zLxInwgvLdkoS",
"name": "eyenox-model-iii",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2gexf5zLDsa74J7urCoDxk/50da09626395cbe1bf2a58e00a57a514/Eyenox-Model-III.png"
},
{
"id": "213tBMKn095fROa9B1cV6n",
"name": "552-commando",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1LT0N89YaOHvRwn3Pphr8K/02d4a3da9cda132d8201fd134f24fede/552_Commando.png"
},
{
"id": "6s1kidUloe8vrTrRtaj8On",
"name": "electronics-detector",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/23Nk2ie06rb3DcZnStryIY/e06226196dd582c905c33fad87dfdd63/Electronics-Detector.png"
},
{
"id": "633M8tPgKNHXSTWB7czGD7",
"name": "shock-wire",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/129HTNU2A5kIcMj0KZ5UjU/858b60dd0e9b8692e2dc693eded50e14/Shock-Wire.png"
},
{
"id": "3V9mmNyrWdClHpvgajW7Vb",
"name": "remote-gas-grenade",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3ZbADU6FxBqdvcA8vCpYhn/6c69d61202364fa420e2a319d817c6f3/Remote-Gas-Grenade.png"
},
{
"id": "1jYxUQekCIJRQJLsZeRU2u",
"name": "stim-pistol",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7njaeUjJj27iYH27HnH6jn/c5533d2d7191b879c313013f278f5f59/Stim-Pistol.png"
},
{
"id": "3NPuy5qu8ubwkr9kkWUqdz",
"name": "mpx",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HFewpAJ8npDDCKFnEadhL/d398bb477d6b56fe41bfdb5862ed31c0/MPX.png"
},
{
"id": "3y2d4dOdhOGKDc01uC9igS",
"name": "glance-smart-glasses",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/40RkJUEmmBCf7bmfTL8ao1/1d973adfe4d002c94655d9818776fb41/Glance-Smart-Glasses.png"
},
{
"id": "90Ex0AIJDFcQzHkKhyprN",
"name": "scorpion-evo-3-a1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6OdwaLWxcnFvhlVwWbP2Du/4f7e94bdb6d34d5c0aa7b7b147b4092e/Scorpion_EVO_3_A1.png"
},
{
"id": "291z9C5QiWXQ5CzvQBSNe0",
"name": "f0-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4TDWnhbgvLkc6HBWDJp2ST/f50cbd83d6d295ab59f17f7e21d713bc/FO-12.png"
},
{
"id": "7DXXdxSwrCVMgBPNrOE8Lv",
"name": "grzmot-mine",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/10Md7ccaUO0pE0nCWimeoZ/35dddc67a4141e844d7904051a0314dc/Grzmot-Mine.png"
},
{
"id": "6KgccwgjLPqJjluHgRiryh",
"name": "556xi",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2dgpAeAWb3SkZV7rxDbVdQ/fa32323256b7c6f8a1977d3f71e7d4b2/556xi.png"
},
{
"id": "1FOQiOCNC7d7ZnLTBN8CuK",
"name": "exothermic-charge",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/R5giHT90R2XOMMuUENZeK/840a5a391ed57a0c62208e72258407a7/Exothermic-Charge.png"
},
{
"id": "1JYfCA53I45QLVbP4b66Ir",
"name": "heartbeat-sensor",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7dPXIadD3D2a3uEqrCPvj2/103ad9d0d3b71adee3b92a5db96fe24d/Heartbeat-Sensor.png"
},
{
"id": "43JYQ0Gmjgy8D1QMkyThcg",
"name": "black-eye",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1EPfd4xeuMpt5nItOYm2Eb/b59223248a508d205264ece3c3553d36/Black-Eye.png"
},
{
"id": "47OwyR2cjFodSNwL8dlejO",
"name": "signal-disruptor",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1M5fsUELbaAzImzMte2ESa/9de588693ec317c87ef1a2021bd43b86/Signal-Disruptor.png"
},
{
"id": "3af3vtoUw8qRsUtSyDVJVt",
"name": "ar33",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/16U6xEvX8I5xQd9duveBLN/45d22960872cfa3fb6be9eb47fa0be4e/AR33.png"
},
{
"id": "1WAHBiTy7O9LYcNcPWIZB9",
"name": "emp-grenade",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4p4srpOH4sq55OHryHhn5t/d31728d1432ed28c429ea566caf0e083/EMP-Grenade.png"
},
{
"id": "5q7YklKN5fREPQk7jcSWiR",
"name": "g52-tactical-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7qmWjGZayvK4t6E80Gvu7g/8b789d6d639744dce100c2cfb9709e6a/G52-Tactical_Shield.png"
},
{
"id": "5xUtwLz2ADRyf0cfAjgCOj",
"name": "flash-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7EXDIOjPFMhPKZWY5OcEQC/f2df48ebe5673dca7773d81efd940b66/Flash-Shield.png"
},
{
"id": "2ejX9LEWZ8bnfTRfiYjuAc",
"name": "k1a",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5mUa2p8WXbiyD71qUI8sGk/ed753b6f0ae30ab5737486dfcf32ee9f/K1A.png"
},
{
"id": "5gU6SEAl07LepHSaNypGFy",
"name": "erc-7",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6WbhiNk0evsKWChPneCES6/af08476e2f917878e0326727d2d5fb8a/ERC-7.png"
},
{
"id": "4rdWW7rPhzJUlVnP5dGs8f",
"name": "mx4-storm",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4qRh1frGkQZxNyeKA4D6n1/20f89cd1d9953f06207b7340ea77fb17/Mx4_Storm.png"
},
{
"id": "Rdsy0lubw5hfwmW00FbR9",
"name": "prisma",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7sJYir66zAPq2omSvYeT2u/8fbe3370d32fb5433fb6d3a86d46a1b9/Prisma.png"
}
] | 0.259638 | 0.276211 |
import io
import json
import os
import re
import time
from ast import literal_eval
from collections import OrderedDict
from datetime import datetime
from tempfile import NamedTemporaryFile
from unittest import TestCase
import pandas as pd
import pytest
from hypothesis import (
given,
settings,
)
from hypothesis.strategies import (
text,
)
from oedtools.values import (
generate_values_profile,
get_column_range_by_value_group,
get_column_sampling_method,
get_column_validation_method,
get_values_profile,
SCHEMA_DIR,
)
class TestValues(TestCase):
def setUp(self):
self.SCHEMA_DIR = SCHEMA_DIR
self.values_csv_fp = os.path.join(self.SCHEMA_DIR, 'values.csv')
self.master_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'master_def.csv')
self.loc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'loc_def.csv')
self.acc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'acc_def.csv')
self.reinsinfo_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsinfo_def.csv')
self.reinsscope_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsscope_def.csv')
def test_generate_values_profile__write_to_target_file(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
with NamedTemporaryFile('w') as target_file:
target_fp = generate_values_profile(target_file.name)
self.assertEqual(target_fp, target_file.name)
with io.open(target_file.name, 'r', encoding='utf-8') as f:
values_profile = json.load(f)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_generate_values_profile__no_target_file__return_as_dict(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile = generate_values_profile()
self.assertIsInstance(values_profile, dict)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_get_values_profile(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile = get_values_profile()
self.assertIsInstance(values_profile, dict)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_get_column_range_by_value_group(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile_cols = []
area_codes = sorted(set(values_df[values_df['group'] == 'area codes']['id'].sort_values().tolist()))
def to_int(s):
try:
return int(s)
except ValueError:
return s
area_codes = [to_int(c) for c in area_codes]
self.assertEqual(set(area_codes), set(get_column_range_by_value_group('AreaCode')))
values_profile_cols += ['AreaCode']
construction_codes = sorted(set([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [
it['id'] for _, it in values_df[values_df['group'] == 'construction codes'].iterrows()
]
] for _v in v if _v
]))
self.assertEqual(construction_codes, get_column_range_by_value_group('ConstructionCode'))
values_profile_cols = ['ConstructionCode']
country_codes = sorted(set(values_df[values_df['group'] == 'country codes']['id'].sort_values().tolist()))
self.assertEqual(country_codes, get_column_range_by_value_group('CountryCode'))
values_profile_cols += ['CountryCode']
currencies = sorted(set([v for v in values_df[values_df['group'] == 'currencies']['id'].sort_values().fillna('').tolist() if v]))
cols = master_df[master_df['field_name'].str.lower().str.match(r'(acc|loc|reins)currency?')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(currencies, get_column_range_by_value_group(col))
values_profile_cols += cols
is_primary = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'location properties') & (values_df['key'] == 'is primary')].iloc[0]['id'].split(':')
]
self.assertEqual(is_primary, get_column_range_by_value_group('IsPrimary'))
values_profile_cols += ['IsPrimary']
is_tenant = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'location properties') & (values_df['key'] == 'is tenant')].iloc[0]['id'].split(':')
]
self.assertEqual(is_primary, get_column_range_by_value_group('IsTenant'))
values_profile_cols += ['IsTenant']
tivs = [
literal_eval(s) for s in
values_df[values_df['group'] == 'tivs'].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(building|contents|other|bi)tiv$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(tivs, get_column_range_by_value_group(col))
values_profile_cols += cols
deductibles = [
literal_eval(s) for s in
values_df[values_df['group'] == 'deductibles'].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(acc|cond|loc|pol)(min|max)?ded([1-6])(building|other|contents|bi|pd|all)$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductibles, get_column_range_by_value_group(col))
values_profile_cols += cols
deductible_codes = sorted(set([
int(c) for c in values_df[values_df['group']=='deductible codes']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('dedcode')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductible_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
deductible_types = sorted(set([
int(c) for c in values_df[values_df['group']=='deductible types']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('dedtype')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductible_types, get_column_range_by_value_group(col))
values_profile_cols += cols
sublayer_limits = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'limits') & (values_df['key'] == 'sublayer')].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(acc|cond|loc|pol)limit([1-6])(building|other|contents|bi|pd|all)$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(sublayer_limits, get_column_range_by_value_group(col))
values_profile_cols += cols
layer_attachment = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'attachments') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_attachment, get_column_range_by_value_group('LayerAttachment'))
values_profile_cols += ['LayerAttachment']
layer_limit = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'limits') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_limit, get_column_range_by_value_group('LayerLimit'))
values_profile_cols += ['LayerLimit']
location_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'shares') & (values_df['key'] == 'location')].iloc[0]['id'].split(':')
]
self.assertEqual(location_share, get_column_range_by_value_group('LocParticipation'))
values_profile_cols += ['LocParticipation']
layer_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'shares') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_share, get_column_range_by_value_group('LayerParticipation'))
values_profile_cols += ['LayerParticipation']
limit_codes = sorted(set([
int(c) for c in values_df[values_df['group']=='limit codes']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('limitcode')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(limit_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
limit_types = sorted(set([
int(c) for c in values_df[values_df['group']=='limit types']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('limittype')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(limit_types, get_column_range_by_value_group(col))
values_profile_cols += cols
longitude = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'geocoding') & (values_df['key'] == 'longitude')].iloc[0]['id'].split(':')
]
self.assertEqual(longitude, get_column_range_by_value_group('Longitude'))
values_profile_cols += ['Longitude']
latitude = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'geocoding') & (values_df['key'] == 'latitude')].iloc[0]['id'].split(':')
]
self.assertEqual(latitude, get_column_range_by_value_group('Latitude'))
values_profile_cols += cols
occupancy_types = sorted(set([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [it['id'] for _, it in values_df[values_df['group'] == 'occupancy types'].iterrows()]
] for _v in v if _v
]))
self.assertEqual(occupancy_types, get_column_range_by_value_group('OccupancyCode'))
values_profile_cols += ['OccupancyCode']
peril_codes = sorted(set(values_df[values_df['group'] == 'peril codes']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'(acc|loc|pol|reins)peril(scovered)?$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(peril_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
years = sorted([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [it['id'] for _, it in values_df[values_df['group'] == 'years'].iterrows()]
] for _v in v if _v
])
cols = master_df[master_df['field_name'].str.lower().str.match(r'(roofyearbuilt|yearbuilt|yearupgraded)$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(years, get_column_range_by_value_group(col))
values_profile_cols += cols
reins_types = sorted(set(values_df[values_df['group'] == 'reins types']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'reinstype$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(reins_types, get_column_range_by_value_group(col))
values_profile_cols += cols
reins_placed_percent = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'placed')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_placed_percent, get_column_range_by_value_group('PlacedPercent'))
values_profile_cols += ['PlacedPercent']
reins_ceded_percent = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'ceded')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_ceded_percent, get_column_range_by_value_group('CededPercent'))
values_profile_cols += ['CededPercent']
reins_treaty_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'treatyshare')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_treaty_share, get_column_range_by_value_group('TreatyShare'))
values_profile_cols += ['TreatyShare']
reins_risk_levels = sorted(set(values_df[values_df['group'] == 'reins risk levels']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'risklevel$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(reins_risk_levels, get_column_range_by_value_group(col))
values_profile_cols += cols
def test_get_column_sampling_method(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
all_values_profile_cols = sorted([
col for column_name_regex in values_df['column_name_regex'].unique().tolist()
for col in master_df[master_df['field_name'].str.lower().str.match(r'{}'.format(column_name_regex))]['field_name'].tolist()
])
for col in master_df['field_name'].tolist():
expected = values_df.loc[:, ['column_name_regex', 'sampling']].apply(
lambda it: it['sampling'] if re.match(r'{}'.format(it['column_name_regex']), col.lower()) else None,
axis=1
).dropna().unique().tolist() or None
self.assertEqual(expected[0] if expected else None, get_column_sampling_method(col))
def test_get_column_validation_method(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
all_values_profile_cols = sorted([
col for column_name_regex in values_df['column_name_regex'].unique().tolist()
for col in master_df[master_df['field_name'].str.lower().str.match(r'{}'.format(column_name_regex))]['field_name'].tolist()
])
for col in master_df['field_name'].tolist():
expected = values_df.loc[:, ['column_name_regex', 'validation']].apply(
lambda it: it['validation'] if re.match(r'{}'.format(it['column_name_regex']), col.lower()) else None,
axis=1
).dropna().unique().tolist() or None
self.assertEqual(expected[0] if expected else None, get_column_validation_method(col)) | tests/test_values.py | import io
import json
import os
import re
import time
from ast import literal_eval
from collections import OrderedDict
from datetime import datetime
from tempfile import NamedTemporaryFile
from unittest import TestCase
import pandas as pd
import pytest
from hypothesis import (
given,
settings,
)
from hypothesis.strategies import (
text,
)
from oedtools.values import (
generate_values_profile,
get_column_range_by_value_group,
get_column_sampling_method,
get_column_validation_method,
get_values_profile,
SCHEMA_DIR,
)
class TestValues(TestCase):
def setUp(self):
self.SCHEMA_DIR = SCHEMA_DIR
self.values_csv_fp = os.path.join(self.SCHEMA_DIR, 'values.csv')
self.master_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'master_def.csv')
self.loc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'loc_def.csv')
self.acc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'acc_def.csv')
self.reinsinfo_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsinfo_def.csv')
self.reinsscope_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsscope_def.csv')
def test_generate_values_profile__write_to_target_file(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
with NamedTemporaryFile('w') as target_file:
target_fp = generate_values_profile(target_file.name)
self.assertEqual(target_fp, target_file.name)
with io.open(target_file.name, 'r', encoding='utf-8') as f:
values_profile = json.load(f)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_generate_values_profile__no_target_file__return_as_dict(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile = generate_values_profile()
self.assertIsInstance(values_profile, dict)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_get_values_profile(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile = get_values_profile()
self.assertIsInstance(values_profile, dict)
groups = list(values_profile.keys())
self.assertEqual(groups, sorted(values_df['group'].unique().tolist()))
for g in groups:
keys = list(values_profile[g].keys())
group_df = values_df[values_df['group'] == g]
group_df = group_df.fillna('')
self.assertEqual(keys, sorted(group_df['key'].tolist()))
for k in keys:
it = group_df[group_df['key'] == k].iloc[0]
self.assertEqual(values_profile[g][k]['id'], it['id'])
self.assertEqual(values_profile[g][k]['desc'], it['desc'])
it_columns = sorted(master_df[
master_df['field_name'].str.lower().str.match(r'{}'.format(it['column_name_regex']))
]['field_name'].unique().tolist()) if it['column_name_regex'] else []
self.assertEqual(values_profile[g][k]['columns'], it_columns)
self.assertEqual(values_profile[g][k]['sampling'], it['sampling'])
self.assertEqual(values_profile[g][k]['validation'], it['validation'])
def test_get_column_range_by_value_group(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
values_profile_cols = []
area_codes = sorted(set(values_df[values_df['group'] == 'area codes']['id'].sort_values().tolist()))
def to_int(s):
try:
return int(s)
except ValueError:
return s
area_codes = [to_int(c) for c in area_codes]
self.assertEqual(set(area_codes), set(get_column_range_by_value_group('AreaCode')))
values_profile_cols += ['AreaCode']
construction_codes = sorted(set([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [
it['id'] for _, it in values_df[values_df['group'] == 'construction codes'].iterrows()
]
] for _v in v if _v
]))
self.assertEqual(construction_codes, get_column_range_by_value_group('ConstructionCode'))
values_profile_cols = ['ConstructionCode']
country_codes = sorted(set(values_df[values_df['group'] == 'country codes']['id'].sort_values().tolist()))
self.assertEqual(country_codes, get_column_range_by_value_group('CountryCode'))
values_profile_cols += ['CountryCode']
currencies = sorted(set([v for v in values_df[values_df['group'] == 'currencies']['id'].sort_values().fillna('').tolist() if v]))
cols = master_df[master_df['field_name'].str.lower().str.match(r'(acc|loc|reins)currency?')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(currencies, get_column_range_by_value_group(col))
values_profile_cols += cols
is_primary = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'location properties') & (values_df['key'] == 'is primary')].iloc[0]['id'].split(':')
]
self.assertEqual(is_primary, get_column_range_by_value_group('IsPrimary'))
values_profile_cols += ['IsPrimary']
is_tenant = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'location properties') & (values_df['key'] == 'is tenant')].iloc[0]['id'].split(':')
]
self.assertEqual(is_primary, get_column_range_by_value_group('IsTenant'))
values_profile_cols += ['IsTenant']
tivs = [
literal_eval(s) for s in
values_df[values_df['group'] == 'tivs'].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(building|contents|other|bi)tiv$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(tivs, get_column_range_by_value_group(col))
values_profile_cols += cols
deductibles = [
literal_eval(s) for s in
values_df[values_df['group'] == 'deductibles'].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(acc|cond|loc|pol)(min|max)?ded([1-6])(building|other|contents|bi|pd|all)$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductibles, get_column_range_by_value_group(col))
values_profile_cols += cols
deductible_codes = sorted(set([
int(c) for c in values_df[values_df['group']=='deductible codes']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('dedcode')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductible_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
deductible_types = sorted(set([
int(c) for c in values_df[values_df['group']=='deductible types']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('dedtype')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(deductible_types, get_column_range_by_value_group(col))
values_profile_cols += cols
sublayer_limits = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'limits') & (values_df['key'] == 'sublayer')].iloc[0]['id'].split(':')
]
cols = master_df[
master_df['field_name'].str.lower().str.match(r'(acc|cond|loc|pol)limit([1-6])(building|other|contents|bi|pd|all)$')
]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(sublayer_limits, get_column_range_by_value_group(col))
values_profile_cols += cols
layer_attachment = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'attachments') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_attachment, get_column_range_by_value_group('LayerAttachment'))
values_profile_cols += ['LayerAttachment']
layer_limit = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'limits') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_limit, get_column_range_by_value_group('LayerLimit'))
values_profile_cols += ['LayerLimit']
location_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'shares') & (values_df['key'] == 'location')].iloc[0]['id'].split(':')
]
self.assertEqual(location_share, get_column_range_by_value_group('LocParticipation'))
values_profile_cols += ['LocParticipation']
layer_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'shares') & (values_df['key'] == 'layer')].iloc[0]['id'].split(':')
]
self.assertEqual(layer_share, get_column_range_by_value_group('LayerParticipation'))
values_profile_cols += ['LayerParticipation']
limit_codes = sorted(set([
int(c) for c in values_df[values_df['group']=='limit codes']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('limitcode')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(limit_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
limit_types = sorted(set([
int(c) for c in values_df[values_df['group']=='limit types']['id'].sort_values().tolist()
]))
cols = master_df[master_df['field_name'].str.lower().str.contains('limittype')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(limit_types, get_column_range_by_value_group(col))
values_profile_cols += cols
longitude = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'geocoding') & (values_df['key'] == 'longitude')].iloc[0]['id'].split(':')
]
self.assertEqual(longitude, get_column_range_by_value_group('Longitude'))
values_profile_cols += ['Longitude']
latitude = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'geocoding') & (values_df['key'] == 'latitude')].iloc[0]['id'].split(':')
]
self.assertEqual(latitude, get_column_range_by_value_group('Latitude'))
values_profile_cols += cols
occupancy_types = sorted(set([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [it['id'] for _, it in values_df[values_df['group'] == 'occupancy types'].iterrows()]
] for _v in v if _v
]))
self.assertEqual(occupancy_types, get_column_range_by_value_group('OccupancyCode'))
values_profile_cols += ['OccupancyCode']
peril_codes = sorted(set(values_df[values_df['group'] == 'peril codes']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'(acc|loc|pol|reins)peril(scovered)?$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(peril_codes, get_column_range_by_value_group(col))
values_profile_cols += cols
years = sorted([
_v for v in [
range(int(s.split(':')[0]), int(s.split(':')[1]) + 1) if re.match(r'(\d+):(\d+)$', s)
else ([int(s)] if re.match(r'\d+$', s) else None)
for s in [it['id'] for _, it in values_df[values_df['group'] == 'years'].iterrows()]
] for _v in v if _v
])
cols = master_df[master_df['field_name'].str.lower().str.match(r'(roofyearbuilt|yearbuilt|yearupgraded)$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(years, get_column_range_by_value_group(col))
values_profile_cols += cols
reins_types = sorted(set(values_df[values_df['group'] == 'reins types']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'reinstype$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(reins_types, get_column_range_by_value_group(col))
values_profile_cols += cols
reins_placed_percent = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'placed')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_placed_percent, get_column_range_by_value_group('PlacedPercent'))
values_profile_cols += ['PlacedPercent']
reins_ceded_percent = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'ceded')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_ceded_percent, get_column_range_by_value_group('CededPercent'))
values_profile_cols += ['CededPercent']
reins_treaty_share = [
literal_eval(s) for s in
values_df[(values_df['group'] == 'reins percentages') & (values_df['key'] == 'treatyshare')].iloc[0]['id'].split(':')
]
self.assertEqual(reins_treaty_share, get_column_range_by_value_group('TreatyShare'))
values_profile_cols += ['TreatyShare']
reins_risk_levels = sorted(set(values_df[values_df['group'] == 'reins risk levels']['id'].sort_values().tolist()))
cols = master_df[master_df['field_name'].str.lower().str.match(r'risklevel$')]['field_name'].sort_values().tolist()
for col in cols:
self.assertEqual(reins_risk_levels, get_column_range_by_value_group(col))
values_profile_cols += cols
def test_get_column_sampling_method(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
all_values_profile_cols = sorted([
col for column_name_regex in values_df['column_name_regex'].unique().tolist()
for col in master_df[master_df['field_name'].str.lower().str.match(r'{}'.format(column_name_regex))]['field_name'].tolist()
])
for col in master_df['field_name'].tolist():
expected = values_df.loc[:, ['column_name_regex', 'sampling']].apply(
lambda it: it['sampling'] if re.match(r'{}'.format(it['column_name_regex']), col.lower()) else None,
axis=1
).dropna().unique().tolist() or None
self.assertEqual(expected[0] if expected else None, get_column_sampling_method(col))
def test_get_column_validation_method(self):
values_df = pd.read_csv(self.values_csv_fp)
values_df.columns = values_df.columns.str.lower()
master_df = pd.read_csv(self.master_csv_schema_fp)
master_df.columns = master_df.columns.str.lower()
all_values_profile_cols = sorted([
col for column_name_regex in values_df['column_name_regex'].unique().tolist()
for col in master_df[master_df['field_name'].str.lower().str.match(r'{}'.format(column_name_regex))]['field_name'].tolist()
])
for col in master_df['field_name'].tolist():
expected = values_df.loc[:, ['column_name_regex', 'validation']].apply(
lambda it: it['validation'] if re.match(r'{}'.format(it['column_name_regex']), col.lower()) else None,
axis=1
).dropna().unique().tolist() or None
self.assertEqual(expected[0] if expected else None, get_column_validation_method(col)) | 0.418459 | 0.328987 |
from .pya import Asig
import numpy as np
from scipy import signal
from .helpers import get_length, normalize
class Ugen(Asig):
    """Unit Generator to create Asig objects with predefined signals.

    Every factory method synthesises a mono waveform of ``dur`` seconds at
    sampling rate ``sr`` and, when ``channels > 1``, duplicates the same
    samples into each channel before wrapping the array in an Asig.
    """

    def __init__(self):
        # Stateless factory; no signal data is stored on the instance.
        # NOTE(review): Asig.__init__ is intentionally not called here -
        # confirm Asig tolerates being subclassed as a pure factory.
        pass

    @staticmethod
    def _spread(sig, length, channels):
        """Duplicate a mono signal into a (length, channels) array.

        Returns ``sig`` unchanged when ``channels <= 1``, matching the
        original per-method behaviour.
        """
        if channels > 1:
            sig = np.repeat(sig, channels).reshape((length, channels))
        return sig

    def sine(self, freq=440, amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="sine"):
        """Generate Sine signal Asig object.

        Parameters
        ----------
        freq : int, float
            signal frequency (Default value = 440)
        amp : int, float
            signal amplitude (Default value = 1.0)
        dur : int, float
            duration in second (Default value = 1.0)
        sr : int
            sampling rate (Default value = 44100)
        channels : int
            number of channels (Default value = 1)
        cn : list of string
            channel names as a list. The size needs to match the number of channels (Default value = None)
        label : string
            identifier of the object (Default value = "sine")

        Returns
        -------
        Asig object
        """
        length = get_length(dur, sr)
        # NOTE(review): linspace includes the endpoint here while square()
        # and sawtooth() use endpoint=False; kept as-is for compatibility.
        sig = amp * np.sin(2 * np.pi * freq * np.linspace(0, dur, length))
        return Asig(self._spread(sig, length, channels),
                    sr=sr, label=label, channels=channels, cn=cn)

    def cos(self, freq=440, amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="cosine"):
        """Generate Cosine signal Asig object.

        Parameters
        ----------
        freq : int, float
            signal frequency (Default value = 440)
        amp : int, float
            signal amplitude (Default value = 1.0)
        dur : int, float
            duration in second (Default value = 1.0)
        sr : int
            sampling rate (Default value = 44100)
        channels : int
            number of channels (Default value = 1)
        cn : list of string
            channel names as a list. The size needs to match the number of channels (Default value = None)
        label : string
            identifier of the object (Default value = "cosine")

        Returns
        -------
        Asig object
        """
        length = get_length(dur, sr)
        # Same endpoint-inclusive sampling grid as sine() above.
        sig = amp * np.cos(2 * np.pi * freq * np.linspace(0, dur, length))
        return Asig(self._spread(sig, length, channels),
                    sr=sr, label=label, channels=channels, cn=cn)

    def square(self, freq=440, amp=1.0, dur=1.0, duty=0.4, sr=44100, channels=1, cn=None, label="square"):
        """Generate square wave signal Asig object.

        Parameters
        ----------
        freq : int, float
            signal frequency (Default value = 440)
        amp : int, float
            signal amplitude (Default value = 1.0)
        dur : int, float
            duration in second (Default value = 1.0)
        duty : float
            duty cycle (Default value = 0.4)
        sr : int
            sampling rate (Default value = 44100)
        channels : int
            number of channels (Default value = 1)
        cn : list of string
            channel names as a list. The size needs to match the number of channels (Default value = None)
        label : string
            identifier of the object (Default value = "square")

        Returns
        -------
        Asig object
        """
        length = get_length(dur, sr)
        t = np.linspace(0, dur, length, endpoint=False)
        sig = amp * signal.square(2 * np.pi * freq * t, duty=duty)
        return Asig(self._spread(sig, length, channels),
                    sr=sr, label=label, channels=channels, cn=cn)

    def sawtooth(self, freq=440, amp=1.0, dur=1.0, width=1., sr=44100, channels=1, cn=None, label="sawtooth"):
        """Generate sawtooth wave signal Asig object.

        Parameters
        ----------
        freq : int, float
            signal frequency (Default value = 440)
        amp : int, float
            signal amplitude (Default value = 1.0)
        dur : int, float
            duration in second (Default value = 1.0)
        width : float
            tooth width (Default value = 1.0)
        sr : int
            sampling rate (Default value = 44100)
        channels : int
            number of channels (Default value = 1)
        cn : list of string
            channel names as a list. The size needs to match the number of channels (Default value = None)
        label : string
            identifier of the object (Default value = "sawtooth")

        Returns
        -------
        Asig object
        """
        length = get_length(dur, sr)
        t = np.linspace(0, dur, length, endpoint=False)
        sig = amp * signal.sawtooth(2 * np.pi * freq * t, width=width)
        return Asig(self._spread(sig, length, channels),
                    sr=sr, label=label, channels=channels, cn=cn)

    def noise(self, type="white", amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="noise"):
        """Generate noise signal Asig object.

        Parameters
        ----------
        type : string
            type of noise: 'white'/'white_noise' or 'pink'/'pink_noise'
            (Default value = 'white')
        amp : int, float
            signal amplitude (Default value = 1.0)
        dur : int, float
            duration in second (Default value = 1.0)
        sr : int
            sampling rate (Default value = 44100)
        channels : int
            number of channels (Default value = 1)
        cn : list of string
            channel names as a list. The size needs to match the number of channels (Default value = None)
        label : string
            identifier of the object (Default value = "noise")

        Returns
        -------
        Asig object

        Raises
        ------
        ValueError
            if ``type`` is not a recognised noise type
        """
        length = get_length(dur, sr)
        # BUGFIX: the original used `type == "white" or "white_noise"`,
        # which is always truthy, so the pink branch was unreachable and
        # unknown types silently produced white noise.
        if type in ("white", "white_noise"):
            # Uniform samples in [0, amp); not zero-centred - kept for
            # backward compatibility with the original implementation.
            sig = np.random.rand(length) * amp
        elif type in ("pink", "pink_noise"):
            # Paul Kellet's economy filter: a bank of one-pole filters
            # applied to uniform white noise approximates a 1/f spectrum.
            b0 = b1 = b2 = b3 = b4 = b5 = b6 = 0.0
            samples = []
            for _ in range(length):
                white = np.random.random() * 1.98 - 0.99
                b0 = 0.99886 * b0 + white * 0.0555179
                b1 = 0.99332 * b1 + white * 0.0750759
                b2 = 0.96900 * b2 + white * 0.1538520
                b3 = 0.86650 * b3 + white * 0.3104856
                b4 = 0.55000 * b4 + white * 0.5329522
                b5 = -0.7616 * b5 - white * 0.0168980
                samples.append(b0 + b1 + b2 + b3 + b4 + b5 + b6 + white * 0.5362)
                b6 = white * 0.115926  # b6 lags one sample behind by design
            sig = normalize(samples) * amp
        else:
            raise ValueError(
                "Unknown noise type {!r}; use 'white' or 'pink'.".format(type))
        return Asig(self._spread(sig, length, channels),
                    sr=sr, channels=channels, cn=cn, label=label)
import numpy as np
from scipy import signal
from .helpers import get_length, normalize
class Ugen(Asig):
    """Unit generator: factory methods that build Asig objects with
    predefined signals (sine, cosine, square, sawtooth, noise)."""
    def __init__(self):
        # Stateless factory — every generator method returns a new Asig.
        pass
def sine(self, freq=440, amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="sine"):
    """Generate a sine-wave Asig object.

    Parameters
    ----------
    freq : int, float
        signal frequency in Hz (Default value = 440)
    amp : int, float
        signal amplitude (Default value = 1.0)
    dur : int, float
        duration in seconds (Default value = 1.0)
    sr : int
        sampling rate (Default value = 44100)
    channels : int
        number of channels (Default value = 1)
    cn : list of string
        channel names; length must match `channels` (Default value = None)
    label : string
        identifier of the object (Default value = "sine")

    Returns
    -------
    Asig object
    """
    num_samples = get_length(dur, sr)
    t = np.linspace(0, dur, num_samples)
    sig = amp * np.sin(2 * np.pi * freq * t)
    if channels > 1:
        # Duplicate the mono signal into every channel, one row per frame.
        sig = np.repeat(sig, channels).reshape((num_samples, channels))
    return Asig(sig, sr=sr, label=label, channels=channels, cn=cn)
def cos(self, freq=440, amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="cosine"):
    """Generate a cosine-wave Asig object.

    Parameters
    ----------
    freq : int, float
        signal frequency in Hz (Default value = 440)
    amp : int, float
        signal amplitude (Default value = 1.0)
    dur : int, float
        duration in seconds (Default value = 1.0)
    sr : int
        sampling rate (Default value = 44100)
    channels : int
        number of channels (Default value = 1)
    cn : list of string
        channel names; length must match `channels` (Default value = None)
    label : string
        identifier of the object (Default value = "cosine")

    Returns
    -------
    Asig object
    """
    num_samples = get_length(dur, sr)
    t = np.linspace(0, dur, num_samples)
    sig = amp * np.cos(2 * np.pi * freq * t)
    if channels > 1:
        # Duplicate the mono signal into every channel, one row per frame.
        sig = np.repeat(sig, channels).reshape((num_samples, channels))
    return Asig(sig, sr=sr, label=label, channels=channels, cn=cn)
def square(self, freq=440, amp=1.0, dur=1.0, duty=0.4, sr=44100, channels=1, cn=None, label="square"):
    """Generate a square-wave Asig object.

    Parameters
    ----------
    freq : int, float
        signal frequency in Hz (Default value = 440)
    amp : int, float
        signal amplitude (Default value = 1.0)
    dur : int, float
        duration in seconds (Default value = 1.0)
    duty : float
        duty cycle, fraction of the period spent high (Default value = 0.4)
    sr : int
        sampling rate (Default value = 44100)
    channels : int
        number of channels (Default value = 1)
    cn : list of string
        channel names; length must match `channels` (Default value = None)
    label : string
        identifier of the object (Default value = "square")

    Returns
    -------
    Asig object
    """
    num_samples = get_length(dur, sr)
    # endpoint=False keeps the final sample from duplicating the period start.
    t = np.linspace(0, dur, num_samples, endpoint=False)
    sig = amp * signal.square(2 * np.pi * freq * t, duty=duty)
    if channels > 1:
        sig = np.repeat(sig, channels).reshape((num_samples, channels))
    return Asig(sig, sr=sr, label=label, channels=channels, cn=cn)
def sawtooth(self, freq=440, amp=1.0, dur=1.0, width=1., sr=44100, channels=1, cn=None, label="sawtooth"):
    """Generate a sawtooth-wave Asig object.

    Parameters
    ----------
    freq : int, float
        signal frequency in Hz (Default value = 440)
    amp : int, float
        signal amplitude (Default value = 1.0)
    dur : int, float
        duration in seconds (Default value = 1.0)
    width : float
        rising-ramp fraction of the period (Default value = 1.0)
    sr : int
        sampling rate (Default value = 44100)
    channels : int
        number of channels (Default value = 1)
    cn : list of string
        channel names; length must match `channels` (Default value = None)
    label : string
        identifier of the object (Default value = "sawtooth")

    Returns
    -------
    Asig object
    """
    num_samples = get_length(dur, sr)
    # endpoint=False keeps the final sample from duplicating the period start.
    t = np.linspace(0, dur, num_samples, endpoint=False)
    sig = amp * signal.sawtooth(2 * np.pi * freq * t, width=width)
    if channels > 1:
        sig = np.repeat(sig, channels).reshape((num_samples, channels))
    return Asig(sig, sr=sr, label=label, channels=channels, cn=cn)
def noise(self, type="white", amp=1.0, dur=1.0, sr=44100, channels=1, cn=None, label="noise"):
    """Generate a noise signal Asig object.

    Parameters
    ----------
    type : string
        type of noise, currently available: 'white' and 'pink' (Default value = 'white')
    amp : int, float
        signal amplitude (Default value = 1.0)
    dur : int, float
        duration in second (Default value = 1.0)
    sr : int
        sampling rate (Default value = 44100)
    channels : int
        number of channels (Default value = 1)
    cn : list of string
        channel names as a list. The size needs to match the number of channels (Default value = None)
    label : string
        identifier of the object (Default value = "noise")

    Returns
    -------
    Asig object

    Raises
    ------
    ValueError
        if *type* is not a recognized noise type
    """
    length = get_length(dur, sr)
    # BUG FIX: the original tested `type == "white" or "white_noise"`, which is
    # always true because a non-empty string literal is truthy — the pink
    # branch was unreachable. Use membership tests instead.
    if type in ("white", "white_noise"):
        sig = np.random.rand(length) * amp  # uniform noise; may switch to normal
    elif type in ("pink", "pink_noise"):
        # Based on Paul Kellet's economy pink-noise filter method.
        b0, b1, b2, b3, b4, b5, b6 = 0, 0, 0, 0, 0, 0, 0
        sig = []
        for i in range(length):
            white = np.random.random() * 1.98 - 0.99
            b0 = 0.99886 * b0 + white * 0.0555179
            b1 = 0.99332 * b1 + white * 0.0750759
            b2 = 0.96900 * b2 + white * 0.1538520
            b3 = 0.86650 * b3 + white * 0.3104856
            b4 = 0.55000 * b4 + white * 0.5329522
            b5 = -0.7616 * b5 - white * 0.0168980
            sig.append(b0 + b1 + b2 + b3 + b4 + b5 + b6 + white * 0.5362)
            b6 = white * 0.115926
        sig = normalize(sig) * amp
    else:
        # Previously any unknown type silently produced white noise.
        raise ValueError("Unknown noise type: {}".format(type))
    if channels > 1:
        # Duplicate mono samples into every channel, one row per frame.
        sig = np.repeat(sig, channels)
        sig = sig.reshape((length, channels))
return Asig(sig, sr=sr, channels=channels, cn=cn, label=label) | 0.941271 | 0.392541 |
import logging
from datetime import datetime
from typing import Optional, Mapping, List
import paho.mqtt.client as mqtt
from p2k16.core import P2k16UserException, membership_management
from p2k16.core import account_management, event_management, badge_management
from p2k16.core.models import db, Account, Circle, Event, Company, ToolDescription, ToolCheckout
logger = logging.getLogger(__name__)
class DummyClient(object):
    """Null-object stand-in returned by create_client() when MQTT is not
    configured. It defines no methods, so any tool operation attempted on
    it raises AttributeError."""
    pass
@event_management.converter_for("tool", "checkout")
class ToolCheckoutEvent(object):
    """Event payload recording that an account checked out a tool."""

    def __init__(self, tool_name: str, created_at: Optional[datetime] = None, created_by: Optional[Account] = None):
        self.tool_name = tool_name
        self.created_at = created_at
        self.created_by = created_by

    def to_event(self):
        # Only the tool name is persisted in the event row's text1 column.
        return {"text1": self.tool_name}

    @staticmethod
    def from_event(event: Event) -> "ToolCheckoutEvent":
        return ToolCheckoutEvent(event.text1, event.created_at, event.created_by)

    def to_dict(self):
        payload = dict(event_management.base_dict(self))
        payload.update(
            created_at=self.created_at,
            created_by=self.created_by,
            created_by_username=self.created_by.username,
            tool_name=self.tool_name,
        )
        return payload
@event_management.converter_for("tool", "checkin")
class ToolCheckinEvent(object):
    """Event payload recording that an account checked in a tool."""

    def __init__(self, tool_name: str, created_at: Optional[datetime] = None, created_by: Optional[Account] = None):
        self.tool_name = tool_name
        self.created_at = created_at
        self.created_by = created_by

    def to_event(self):
        # Only the tool name is persisted in the event row's text1 column.
        return {"text1": self.tool_name}

    @staticmethod
    def from_event(event: Event) -> "ToolCheckinEvent":
        return ToolCheckinEvent(event.text1, event.created_at, event.created_by)

    def to_dict(self):
        payload = dict(event_management.base_dict(self))
        payload.update(
            created_at=self.created_at,
            created_by=self.created_by,
            created_by_username=self.created_by.username,
            tool_name=self.tool_name,
        )
        return payload
class ToolClient(object):
    """MQTT-backed client that grants/revokes tool access.

    checkout_tool() verifies circle membership and paying status, records
    the checkout in the database, then publishes an 'unlock' MQTT message;
    checkin_tool() removes the checkout row and publishes 'lock'.
    """

    def __init__(self, cfg: Mapping[str, str]):
        # All keys are required; a missing key raises KeyError here,
        # which surfaces misconfiguration at startup.
        host = cfg["MQTT_HOST"]
        port = cfg["MQTT_PORT"]
        username = cfg["MQTT_USERNAME"]
        password = cfg["MQTT_PASSWORD"]
        self.prefix = cfg["MQTT_PREFIX_TOOL"]
        logger.info("Connecting to {}:{}".format(host, port))
        logger.info("config: username={}, prefix={}".format(username, self.prefix))
        keep_alive = 60
        c = mqtt.Client()
        if username:
            c.username_pw_set(username=username, password=password)
        # Non-blocking connect; the background network loop started below
        # performs the actual connection (and reconnects).
        c.connect_async(host, port, keep_alive)
        c.enable_logger()
        c.loop_start()
        self._client = c

    def _mqtt_topic(self, tool, action):
        # Topic layout: <prefix>/<tool name>/<action>
        return '/'.join([self.prefix, tool, action])

    def checkout_tool(self, account: Account, tool: ToolDescription):
        """Check out *tool* for *account* and publish an MQTT unlock.

        Raises P2k16UserException if the account lacks the tool's circle,
        has no active membership/company, or already holds this tool.
        """
        # Check that user has correct circle and is paying member
        if not account_management.is_account_in_circle(account, tool.circle):
            raise P2k16UserException('{} is not in the {} circle'.format(account.display_name(), tool.circle.name))
        if not membership_management.active_member(account) \
                and len(Company.find_active_companies_with_account(account.id)) == 0:
            raise P2k16UserException('{} does not have an active membership and is not employed in an active company'.
                                     format(account.display_name()))
        logger.info('Checking out tool. username={}, tool={}'.format(account.username, tool.name))
        # Verify that tool is not checked out by someone else. Check in first if it is.
        checkout = ToolCheckout.find_by_tool(tool)
        if checkout:
            # NOTE(review): identity comparison ('is') relies on the ORM
            # session returning the same Account instance — confirm, or
            # compare primary keys instead.
            if checkout.account is account:
                raise P2k16UserException('Tools can only be checked out once.')
            # NOTE(review): 'checkout.account.name' — elsewhere this module
            # uses '.username'; verify Account actually has a 'name' attribute.
            logger.info('Tool checked out by someone else. Assuming control: username={}, tool={}, old_username={}'
                        .format(account.username, tool.name, checkout.account.name))
            self.checkin_tool(checkout.account, checkout.tool_description)
        # Make a new checkout reservation
        event_management.save_event(ToolCheckoutEvent(tool.name, datetime.now(), account))
        checkout = ToolCheckout(tool, account, datetime.now())
        db.session.add(checkout)
        # Make sure everything has been written to the database before actually opening the door.
        db.session.flush()
        # TODO: move this to a handler that runs after the transaction is done
        # TODO: we can look at the responses and see if they where successfully sent/received.
        # for topic, open_time in publishes:
        #     logger.info("Sending message: {}: {}".format(topic, open_time))
        #     self._client.publish(topic, open_time)
        topic = self._mqtt_topic(tool=tool.name, action='unlock')
        payload = 'true'
        logger.info("Sending message: {}: {}".format(topic, payload))
        self._client.publish(topic, payload)

    def checkin_tool(self, account: Account, tool: ToolDescription):
        """Check in *tool*: record the event, delete the checkout row and
        publish an MQTT lock message."""
        logger.info('Checking in tool. username={}, tool={}'.format(account.username, tool.name))
        event_management.save_event(ToolCheckinEvent(tool.name, datetime.now(), account))
        # NOTE(review): assumes a checkout row exists; find_by_tool returning
        # None would make session.delete fail — confirm callers guarantee it.
        checkout = ToolCheckout.find_by_tool(tool)
        db.session.delete(checkout)
        db.session.flush()
        topic = self._mqtt_topic(tool=tool.name, action='lock')
        payload = 'true'
        logger.info("Sending message: {}: {}".format(topic, payload))
        self._client.publish(topic, payload)
def create_client(cfg: Mapping[str, str]) -> ToolClient:
    """Factory: build a ToolClient, or a DummyClient when MQTT is unconfigured."""
    if "MQTT_HOST" not in cfg:
        # NOTE(review): message says "door" — likely copy-pasted from the door
        # client; also DummyClient does not satisfy the ToolClient return
        # annotation. Confirm intent before changing either.
        logger.info("No MQTT host configured for door, not starting door mqtt client")
        return DummyClient()
return ToolClient(cfg) | web/src/p2k16/core/tool.py | import logging
from datetime import datetime
from typing import Optional, Mapping, List
import paho.mqtt.client as mqtt
from p2k16.core import P2k16UserException, membership_management
from p2k16.core import account_management, event_management, badge_management
from p2k16.core.models import db, Account, Circle, Event, Company, ToolDescription, ToolCheckout
logger = logging.getLogger(__name__)
class DummyClient(object):
pass
@event_management.converter_for("tool", "checkout")
class ToolCheckoutEvent(object):
def __init__(self, tool_name: str, created_at: Optional[datetime] = None, created_by: Optional[Account] = None):
self.tool_name = tool_name
self.created_at = created_at
self.created_by = created_by
def to_event(self):
return {"text1": self.tool_name}
@staticmethod
def from_event(event: Event) -> "ToolCheckoutEvent":
return ToolCheckoutEvent(event.text1, event.created_at, event.created_by)
def to_dict(self):
return {**event_management.base_dict(self), **{
"created_at": self.created_at,
"created_by": self.created_by,
"created_by_username": self.created_by.username,
"tool_name": self.tool_name
}}
@event_management.converter_for("tool", "checkin")
class ToolCheckinEvent(object):
def __init__(self, tool_name: str, created_at: Optional[datetime] = None, created_by: Optional[Account] = None):
self.tool_name = tool_name
self.created_at = created_at
self.created_by = created_by
def to_event(self):
return {"text1": self.tool_name}
@staticmethod
def from_event(event: Event) -> "ToolCheckinEvent":
return ToolCheckinEvent(event.text1, event.created_at, event.created_by)
def to_dict(self):
return {**event_management.base_dict(self), **{
"created_at": self.created_at,
"created_by": self.created_by,
"created_by_username": self.created_by.username,
"tool_name": self.tool_name
}}
class ToolClient(object):
def __init__(self, cfg: Mapping[str, str]):
host = cfg["MQTT_HOST"]
port = cfg["MQTT_PORT"]
username = cfg["MQTT_USERNAME"]
password = cfg["MQTT_PASSWORD"]
self.prefix = cfg["MQTT_PREFIX_TOOL"]
logger.info("Connecting to {}:{}".format(host, port))
logger.info("config: username={}, prefix={}".format(username, self.prefix))
keep_alive = 60
c = mqtt.Client()
if username:
c.username_pw_set(username=username, password=password)
c.connect_async(host, port, keep_alive)
c.enable_logger()
c.loop_start()
self._client = c
def _mqtt_topic(self, tool, action):
return '/'.join([self.prefix, tool, action])
def checkout_tool(self, account: Account, tool: ToolDescription):
# Check that user has correct circle and is paying member
if not account_management.is_account_in_circle(account, tool.circle):
raise P2k16UserException('{} is not in the {} circle'.format(account.display_name(), tool.circle.name))
if not membership_management.active_member(account) \
and len(Company.find_active_companies_with_account(account.id)) == 0:
raise P2k16UserException('{} does not have an active membership and is not employed in an active company'.
format(account.display_name()))
logger.info('Checking out tool. username={}, tool={}'.format(account.username, tool.name))
# Verify that tool is not checked out by someone else. Check in first if it is.
checkout = ToolCheckout.find_by_tool(tool)
if checkout:
if checkout.account is account:
raise P2k16UserException('Tools can only be checked out once.')
logger.info('Tool checked out by someone else. Assuming control: username={}, tool={}, old_username={}'
.format(account.username, tool.name, checkout.account.name))
self.checkin_tool(checkout.account, checkout.tool_description)
# Make a new checkout reservation
event_management.save_event(ToolCheckoutEvent(tool.name, datetime.now(), account))
checkout = ToolCheckout(tool, account, datetime.now())
db.session.add(checkout)
# Make sure everything has been written to the database before actually opening the door.
db.session.flush()
# TODO: move this to a handler that runs after the transaction is done
# TODO: we can look at the responses and see if they where successfully sent/received.
# for topic, open_time in publishes:
# logger.info("Sending message: {}: {}".format(topic, open_time))
# self._client.publish(topic, open_time)
topic = self._mqtt_topic(tool=tool.name, action='unlock')
payload = 'true'
logger.info("Sending message: {}: {}".format(topic, payload))
self._client.publish(topic, payload)
def checkin_tool(self, account: Account, tool: ToolDescription):
logger.info('Checking in tool. username={}, tool={}'.format(account.username, tool.name))
event_management.save_event(ToolCheckinEvent(tool.name, datetime.now(), account))
checkout = ToolCheckout.find_by_tool(tool)
db.session.delete(checkout)
db.session.flush()
topic = self._mqtt_topic(tool=tool.name, action='lock')
payload = 'true'
logger.info("Sending message: {}: {}".format(topic, payload))
self._client.publish(topic, payload)
def create_client(cfg: Mapping[str, str]) -> ToolClient:
if "MQTT_HOST" not in cfg:
logger.info("No MQTT host configured for door, not starting door mqtt client")
return DummyClient()
return ToolClient(cfg) | 0.725162 | 0.090454 |
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import RoiCalc
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
def default_inputs(key=False):
    """Return the calculator's default inputs.

    Parameters
    ----------
    key : str or False
        When falsy, return the whole defaults dict; otherwise return only
        the default value stored under ``key`` (KeyError if unknown).
    """
    # Named 'defaults' to avoid shadowing the function's own name.
    defaults = {
        "user_count": 250,
        "user_cost": 20,
        "it_cost": 500,
        "frequency": 25,
        "duration": 15,
        "critical_services": 1,
    }
    if key:
        return defaults[key]
    return defaults
# Module-level layout: one labelled text input per default parameter,
# plus a plot row that the update_figure callback re-populates.
inputs = default_inputs()
header = html.H1(children="PingPlotter ROI Calculator")
form = dbc.Form(
    dbc.Row(
        [
            dbc.Col(
                [
                    # Input id == parameter name, so the callback can wire
                    # dash.Input(i, "value") directly from the same keys.
                    dbc.Label(i.replace("_", " ").title(), html_for=i),
                    dcc.Input(id=i, value=default_inputs(key=i), type="text"),
                ]
            )
            for i in inputs.keys()
        ]
    )
)
# Placeholder graphs; replaced via the "plots" container's children.
plots = dbc.Row(
    [
        dbc.Col(dcc.Graph()),
        dbc.Col(dcc.Graph()),
    ],
    id="plots",
)
content = [
    header,
    form,
    plots,
]
app.layout = dbc.Container(content)
def makePlots(roi):
    """Build the two plot columns (ROI and break-even) from a ROI report."""
    figures = roi.report()["plots"]
    return [
        dbc.Col(dcc.Graph(figure=figures["roi"])),
        dbc.Col(dcc.Graph(figure=figures["breakeven"])),
    ]
@app.callback(
    dash.Output("plots", "children"),
    [dash.Input(i, "value") for i in inputs.keys()],
)
def update_figure(*args):
    """Dash callback: rebuild both ROI plots whenever any input changes.

    *args arrive in the same order as the callback's declared inputs,
    i.e. the key order of default_inputs().
    """
    pingplotter_impact = 0.5  # fixed impact factor — presumably tunable later; TODO confirm
    arg_names = list(default_inputs().keys())
    try:
        # Coerce the free-text inputs to ints; any non-numeric field raises
        # ValueError and aborts this update.
        inputs = {arg_names[i]: int(args[i]) for i in range(0, len(args))}
        downtime_cost = RoiCalc.calc_downtime_cost(
            inputs["user_cost"],
            inputs["it_cost"],
            inputs["frequency"],
            inputs["duration"],
        )
        roi = RoiCalc.PingPlotterRoi(
            inputs["user_count"],
            inputs["critical_services"],
            downtime_cost,
            pingplotter_impact,
        )
        plots = makePlots(roi)
        return plots
    except ValueError:
        # Invalid/partial input: fall through and implicitly return None.
        # NOTE(review): consider raising dash.exceptions.PreventUpdate instead.
        pass
if __name__ == "__main__":
app.run_server(debug=True) | routes.py | import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import RoiCalc
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
def default_inputs(key=False):
default_inputs = {
"user_count": 250,
"user_cost": 20,
"it_cost": 500,
"frequency": 25,
"duration": 15,
"critical_services": 1,
}
if key:
return default_inputs[key]
else:
return default_inputs
inputs = default_inputs()
header = html.H1(children="PingPlotter ROI Calculator")
form = dbc.Form(
dbc.Row(
[
dbc.Col(
[
dbc.Label(i.replace("_", " ").title(), html_for=i),
dcc.Input(id=i, value=default_inputs(key=i), type="text"),
]
)
for i in inputs.keys()
]
)
)
plots = dbc.Row(
[
dbc.Col(dcc.Graph()),
dbc.Col(dcc.Graph()),
],
id="plots",
)
content = [
header,
form,
plots,
]
app.layout = dbc.Container(content)
def makePlots(roi):
report = roi.report()
content = [
dbc.Col(dcc.Graph(figure=report["plots"]["roi"])),
dbc.Col(dcc.Graph(figure=report["plots"]["breakeven"])),
]
return content
@app.callback(
dash.Output("plots", "children"),
[dash.Input(i, "value") for i in inputs.keys()],
)
def update_figure(*args):
pingplotter_impact = 0.5
arg_names = list(default_inputs().keys())
try:
inputs = {arg_names[i]: int(args[i]) for i in range(0, len(args))}
downtime_cost = RoiCalc.calc_downtime_cost(
inputs["user_cost"],
inputs["it_cost"],
inputs["frequency"],
inputs["duration"],
)
roi = RoiCalc.PingPlotterRoi(
inputs["user_count"],
inputs["critical_services"],
downtime_cost,
pingplotter_impact,
)
plots = makePlots(roi)
return plots
except ValueError:
pass
if __name__ == "__main__":
app.run_server(debug=True) | 0.378574 | 0.135032 |
import numpy
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as T
import torchvision
import numpy as np
import cv2
import random
class ObjectDescriptor:
    """A detected object: class name, the source image it was found in,
    and its bounding borders as a (y1, y2, x1, x2) tuple."""

    def __init__(self, name, image, borders):
        self.borders = borders
        self.image = image
        self.name = name
# ade20k
class ObjectFetcher:
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def __init__(self):
self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
self.model.eval()
def _get_prediction(self, img, threshold):
    """Run Mask R-CNN on *img* and return (masks, boxes, class names)
    for detections scoring above *threshold*."""
    transform = T.Compose([T.ToTensor()])
    img = transform(img)
    pred = self.model([img])
    pred_score = list(pred[0]['scores'].detach().numpy())
    # Indices of detections above the threshold; assumes scores are sorted
    # descending (torchvision detection output order) — TODO confirm.
    pred_list = [pred_score.index(x) for x in pred_score if x > threshold]
    pred_t = 0
    if len(pred_list) != 0:
        pred_t = pred_list[-1]
    # Binarize the soft instance masks at 0.5.
    masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
    pred_class = [self.COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
    pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
    # Truncate everything to the detections above the score cut-off.
    masks = masks[:pred_t + 1]
    pred_boxes = pred_boxes[:pred_t + 1]
    pred_class = pred_class[:pred_t + 1]
    return masks, pred_boxes, pred_class
def _instance_segmentation_api(self, img_, object_name, threshold=0.5):
    """Detect instances of *object_name* in *img_*.

    Returns a list of ObjectDescriptor with the last-detected matching
    instance moved to the front (the "main" object), or None when no
    instance of *object_name* is found.
    """
    masks, boxes, pred_cls = self._get_prediction(img_, threshold)
    img = numpy.array(img_)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    objects = []
    if object_name in pred_cls:
        main_object_idx = -1
        for i in range(len(masks)):
            if pred_cls[i] == object_name:
                # BUG FIX: the original stored the detection index `i`, but
                # later used it to index `objects`, which contains only the
                # matching detections — wrong element (or IndexError) when
                # non-matching classes precede the match. Store the index
                # within `objects` instead.
                main_object_idx = len(objects)
                x1, y1, x2, y2 = (int(boxes[i][0][0]), int(boxes[i][0][1]),
                                  int(boxes[i][1][0]), int(boxes[i][1][1]))
                # borders tuple is (y1, y2, x1, x2), as in the original.
                objects.append(ObjectDescriptor(pred_cls[i], img, (y1, y2, x1, x2)))
        if len(objects) > 1:
            objects[0], objects[main_object_idx] = objects[main_object_idx], objects[0]
        return objects
    else:
        return None
def fetch_from_video(self, video, object_name):
    """Scan *video* frame by frame until *object_name* is detected; write
    each detected object's image to ../output/ and return the descriptors
    (or None when the stream ends without a detection).
    """
    cap = cv2.VideoCapture(video)
    objects = None
    while True:
        ret, frame = cap.read()
        # BUG FIX: the original ignored `ret`; at end-of-stream frame is
        # None and the detector call crashed. Stop cleanly instead.
        if not ret:
            break
        objects = self._instance_segmentation_api(frame, object_name)
        if objects is not None:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    # Guard against no detection (objects still None) before writing files.
    for o in (objects or []):
        cv2.imwrite('../output/' + o.name + '.png', o.image)
return objects | src/ImageUtils/ObjectFetcher.py | import numpy
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as T
import torchvision
import numpy as np
import cv2
import random
class ObjectDescriptor:
def __init__(self, name, image, borders):
self.name = name
self.image = image
self.borders = borders
# ade20k
class ObjectFetcher:
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def __init__(self):
self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
self.model.eval()
def _get_prediction(self, img, threshold):
transform = T.Compose([T.ToTensor()])
img = transform(img)
pred = self.model([img])
pred_score = list(pred[0]['scores'].detach().numpy())
pred_list = [pred_score.index(x) for x in pred_score if x > threshold]
pred_t = 0
if len(pred_list) != 0:
pred_t = pred_list[-1]
masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
pred_class = [self.COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
masks = masks[:pred_t + 1]
pred_boxes = pred_boxes[:pred_t + 1]
pred_class = pred_class[:pred_t + 1]
return masks, pred_boxes, pred_class
def _instance_segmentation_api(self, img_, object_name, threshold=0.5):
    """Detect instances of *object_name* in *img_*.

    Returns a list of ObjectDescriptor with the last-detected matching
    instance moved to the front (the "main" object), or None when no
    instance of *object_name* is found.
    """
    masks, boxes, pred_cls = self._get_prediction(img_, threshold)
    img = numpy.array(img_)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    objects = []
    if object_name in pred_cls:
        main_object_idx = -1
        for i in range(len(masks)):
            if pred_cls[i] == object_name:
                # BUG FIX: the original stored the detection index `i`, but
                # later used it to index `objects`, which contains only the
                # matching detections — wrong element (or IndexError) when
                # non-matching classes precede the match. Store the index
                # within `objects` instead.
                main_object_idx = len(objects)
                x1, y1, x2, y2 = (int(boxes[i][0][0]), int(boxes[i][0][1]),
                                  int(boxes[i][1][0]), int(boxes[i][1][1]))
                # borders tuple is (y1, y2, x1, x2), as in the original.
                objects.append(ObjectDescriptor(pred_cls[i], img, (y1, y2, x1, x2)))
        if len(objects) > 1:
            objects[0], objects[main_object_idx] = objects[main_object_idx], objects[0]
        return objects
    else:
        return None
def fetch_from_video(self, video, object_name):
    """Scan *video* frame by frame until *object_name* is detected; write
    each detected object's image to ../output/ and return the descriptors
    (or None when the stream ends without a detection).
    """
    cap = cv2.VideoCapture(video)
    objects = None
    while True:
        ret, frame = cap.read()
        # BUG FIX: the original ignored `ret`; at end-of-stream frame is
        # None and the detector call crashed. Stop cleanly instead.
        if not ret:
            break
        objects = self._instance_segmentation_api(frame, object_name)
        if objects is not None:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    # Guard against no detection (objects still None) before writing files.
    for o in (objects or []):
        cv2.imwrite('../output/' + o.name + '.png', o.image)
return objects | 0.446736 | 0.381076 |
import os
import io
import time
import logging
# 8 data types
import collections
# 13 data compression and archiving
import tarfile
import humanfriendly
from backup.reporter import Reporter, ReporterCheck, ReporterCheckResult
from backup.utils import formatkv, timestamp4now, timestamp2date
class ArchiveResult(collections.namedtuple('Result', ['size'])):
    """Size result of an archive operation; str() renders the size in a
    human-readable form."""
    __slots__ = ()

    def __str__(self):
        pretty = humanfriendly.format_size(self.size)
        return "Result(size={})".format(pretty)
class ArchiveFile(object):
    """An in-memory file destined for an Archive, backed by io.BytesIO.

    In text mode (binmode=False) str payloads are encoded before writing;
    in binary mode payloads are written as-is.
    """

    def __init__(self, name, binmode=False):
        super(ArchiveFile, self).__init__()
        self.name = name
        self.binmode = binmode
        self.ctime = time.time()
        self.mtime = self.ctime
        self.handle = io.BytesIO()

    def _as_bytes(self, data):
        # Encode str payloads in text mode; binary payloads pass through.
        return data if self.binmode else data.encode()

    def write(self, data):
        self.handle.write(self._as_bytes(data))

    def writeline(self, data):
        self.write(data)
        self.handle.write(b'\n')

    def size(self):
        # Current total size of the buffer in bytes.
        self.handle.seek(0, os.SEEK_END)
        return self.handle.tell()

    def fileobject(self):
        # Rewind so a consumer (e.g. tarfile.addfile) reads from the start.
        self.handle.seek(0)
        return self.handle
class Archive(Reporter, object):
    """A '<label>-<timestamp>.tgz' backup archive.

    Use as a context manager while writing: entering opens the tarball,
    exiting closes it and records the resulting size via the Reporter base.
    """

    def __init__(self, label, timestamp=None):
        super(Archive, self).__init__()
        self.timestamp = timestamp or timestamp4now()
        self.name = "{}-{}".format(label, self.timestamp)
        self.path = '.'  # directory the tarball lives in; see rename()
        self.ctime = timestamp2date(self.timestamp)
        self.filename = "{}.tgz".format(self.name)
        self.tar = None  # tarfile handle, set only inside the 'with' block

    @classmethod
    def fromfilename(cls, filename, check_label=None):
        """Alternate constructor: parse '<label>-<timestamp>.tgz'.

        Raises ValueError on bad format or (if given) a label mismatch.
        """
        import re
        m = re.match(r"^(.*)-([^-]+)\.tgz$", filename)
        if not m:
            raise ValueError("filename '{}' invalid format".format(filename))
        label, timestamp = m.groups()
        if check_label and label != check_label:
            raise ValueError("filename '{}' not matching label '{}'".format(filename, check_label))
        return cls(label, timestamp)

    def __repr__(self):
        return "Archive[name={}, timestamp={}]".format(self.name, self.timestamp)

    def __str__(self):
        return formatkv([("Name", self.name)], title="ARCHIVE")

    def tarname(self, path=None):
        # Full path of the tarball; defaults to the archive's current path.
        if not path:
            path = self.path
        return os.path.join(path, self.filename)

    def __enter__(self):
        # gzip-compressed tar; verbose tarfile debugging only at DEBUG level.
        self.tar = tarfile.open(
            self.tarname(), 'w:gz',
            debug=1 if logging.getLogger().getEffectiveLevel() == logging.DEBUG else 0
        )
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.tar.close()
        # Record the final on-disk size for reporting.
        self.storeResult("createArchive", ArchiveResult(os.path.getsize(self.tarname())))

    def createArchiveFile(self, name, binmode=False):
        """Create an in-memory member file to be added with addArchiveFile()."""
        return ArchiveFile(name, binmode=binmode)

    @ReporterCheckResult
    def addArchiveFile(self, archivefile):
        """Add an in-memory ArchiveFile to the open tarball."""
        tarinfo = tarfile.TarInfo(archivefile.name)
        tarinfo.mtime = archivefile.mtime
        tarinfo.size = archivefile.size()
        self.tar.addfile(tarinfo, archivefile.fileobject())
        return archivefile.name

    @ReporterCheckResult
    def addPath(self, path, name=None):
        """Add a filesystem path (recursively) to the open tarball."""
        self.tar.add(path, arcname=name)
        return path

    @ReporterCheck
    def addManifest(self, timestamp):
        """Add a MANIFEST member recording the backup timestamp."""
        f = self.createArchiveFile("MANIFEST")
        f.writeline("Timestamp: {}".format(timestamp))
        self.addArchiveFile(f)

    @ReporterCheckResult
    def rename(self, path):
        """Move the tarball into *path* and update self.path accordingly."""
        tarname = self.tarname()
        if not path == self.path:
            self.path = path
            destination_tarname = self.tarname()
            os.rename(tarname, destination_tarname)
            tarname = destination_tarname
        return tarname

    @ReporterCheckResult
    def remove(self):
        """Delete the tarball from disk if it exists."""
        tarname = self.tarname()
        if os.path.isfile(tarname):
            os.remove(tarname)
return tarname | backup/archive.py | import os
import io
import time
import logging
# 8 data types
import collections
# 13 data compression and archiving
import tarfile
import humanfriendly
from backup.reporter import Reporter, ReporterCheck, ReporterCheckResult
from backup.utils import formatkv, timestamp4now, timestamp2date
class ArchiveResult(collections.namedtuple('Result', ['size'])):
""" Class for results of archive operations with proper formatting. """
__slots__ = ()
def __str__(self):
return "Result(size={})".format(humanfriendly.format_size(self.size))
class ArchiveFile(object):
def __init__(self, name, binmode=False):
super(ArchiveFile, self).__init__()
self.name = name
self.binmode = binmode
self.ctime = time.time()
self.mtime = self.ctime
self.handle = io.BytesIO()
def write(self, data):
self.handle.write(data if self.binmode else data.encode())
def writeline(self, data):
self.handle.write(data if self.binmode else data.encode())
self.handle.write(b'\n')
def size(self):
self.handle.seek(0, os.SEEK_END)
return self.handle.tell()
def fileobject(self):
self.handle.seek(0)
return self.handle
class Archive(Reporter, object):
def __init__(self, label, timestamp=None):
super(Archive, self).__init__()
self.timestamp = timestamp or timestamp4now()
self.name = "{}-{}".format(label, self.timestamp)
self.path = '.'
self.ctime = timestamp2date(self.timestamp)
self.filename = "{}.tgz".format(self.name)
self.tar = None
@classmethod
def fromfilename(cls, filename, check_label=None):
import re
m = re.match(r"^(.*)-([^-]+)\.tgz$", filename)
if not m:
raise ValueError("filename '{}' invalid format".format(filename))
label, timestamp = m.groups()
if check_label and label != check_label:
raise ValueError("filename '{}' not matching label '{}'".format(filename, check_label))
return cls(label, timestamp)
def __repr__(self):
return "Archive[name={}, timestamp={}]".format(self.name, self.timestamp)
def __str__(self):
return formatkv([("Name", self.name)], title="ARCHIVE")
def tarname(self, path=None):
if not path:
path = self.path
return os.path.join(path, self.filename)
def __enter__(self):
self.tar = tarfile.open(
self.tarname(), 'w:gz',
debug=1 if logging.getLogger().getEffectiveLevel() == logging.DEBUG else 0
)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.tar.close()
self.storeResult("createArchive", ArchiveResult(os.path.getsize(self.tarname())))
def createArchiveFile(self, name, binmode=False):
return ArchiveFile(name, binmode=binmode)
@ReporterCheckResult
def addArchiveFile(self, archivefile):
tarinfo = tarfile.TarInfo(archivefile.name)
tarinfo.mtime = archivefile.mtime
tarinfo.size = archivefile.size()
self.tar.addfile(tarinfo, archivefile.fileobject())
return archivefile.name
@ReporterCheckResult
def addPath(self, path, name=None):
self.tar.add(path, arcname=name)
return path
@ReporterCheck
def addManifest(self, timestamp):
f = self.createArchiveFile("MANIFEST")
f.writeline("Timestamp: {}".format(timestamp))
self.addArchiveFile(f)
@ReporterCheckResult
def rename(self, path):
tarname = self.tarname()
if not path == self.path:
self.path = path
destination_tarname = self.tarname()
os.rename(tarname, destination_tarname)
tarname = destination_tarname
return tarname
@ReporterCheckResult
def remove(self):
tarname = self.tarname()
if os.path.isfile(tarname):
os.remove(tarname)
return tarname | 0.442155 | 0.147065 |
__all__ = [
'bind_label',
'define_binder',
'define_binder_for',
'define_maker',
'depend_parameter_for',
'get_annotations',
]
import functools
import inspect
from startup import startup
from . import parameters
def get_annotations(func):
    """Collect the explicit parameter and return annotations of *func*."""
    sig = inspect.signature(func)
    collected = {}
    for param in sig.parameters.values():
        if param.annotation is not param.empty:
            collected[param.name] = param.annotation
    # The return annotation is stored under the conventional 'return' key.
    if sig.return_annotation is not sig.empty:
        collected['return'] = sig.return_annotation
    return collected
def bind_label(source_label, target_label):
    """Alias one startup label to another via an identity function."""
    startup.add_func(lambda x: x, {'x': source_label, 'return': target_label})
def _prepare(defaults, kwargs):
    """Merge *kwargs* over *defaults* and resolve parameter objects in place.

    ``startup`` invokes each registered function only once, so mutating the
    shared *defaults* dict directly is safe here.
    """
    merged = {} if defaults is None else defaults
    merged.update(kwargs)
    for name, value in list(merged.items()):
        if isinstance(value, parameters.ParameterBase):
            merged[name] = value.get()
    return merged
def define_binder(func, func_label, annotations=None, defaults=None):
    """Define a binder function and add it to ``startup``.

    This is a helper for this common pattern:

    .. code-block:: python

        def f(x: 'x') -> 'y':
            return x * x

        @startup
        def bind_f(x: 'x') -> 'f':
            if isinstance(x, Parameter):
                x = x.get()
            return functools.partial(f, x=x)

    It is shortened to:

    >>> bind_f = define_binder(f, 'f')
    """
    # Since ``startup`` only calls ``bind`` once, it should be fine to
    # update ``defaults`` directly.
    def bind(**kwargs):
        return functools.partial(func, **_prepare(defaults, kwargs))
    bind.__name__ = bind.__qualname__ = 'bind_%s' % func.__name__
    # The binder inherits func's annotations, overridden by *annotations*,
    # and publishes its result under *func_label*.
    bind_annotations = get_annotations(func)
    bind_annotations.update(annotations or ())
    bind_annotations['return'] = func_label
    return startup.add_func(bind, bind_annotations)
def define_binder_for(func_label, annotations=None, defaults=None):
    """Return a decorator for ``define_binder``.

    Examples:
    >>> @define_binder_for('f')
    ... def f(x: 'x') -> 'y':
    ...     return x * x
    """
    def wrap(func):
        # Register the binder as a side effect; hand the original function
        # back unchanged so it can still be called directly.
        define_binder(func, func_label, annotations, defaults)
        return func
    return wrap
def define_maker(func, annotations=None, defaults=None):
    """Define a maker function and add it to ``startup``.

    This is slightly more versatile than ``startup.add_func``.
    """
    # Since ``startup`` only calls ``make`` once, it should be fine to
    # update ``defaults`` directly.
    def make(**kwargs):
        return func(**_prepare(defaults, kwargs))
    make.__name__ = make.__qualname__ = 'make_%s' % func.__name__
    # Unlike define_binder, the maker keeps func's own return annotation.
    make_annotations = get_annotations(func)
    make_annotations.update(annotations or ())
    return startup.add_func(make, make_annotations)
def depend_parameter_for(label, value):
    """Add a dependency on parameter initialization for ``value``.

    You need this when you want to use parameter value during starting
    up, where you need to sequence the access to be after parameter
    initialization.
    """
    # The lambda ignores the sentinel dependency and simply republishes
    # *value* under *label* once parameters have been initialised.
    startup.add_func(
        lambda _: value,
        {
            '_': parameters.LABELS.parameters,
            'return': label,
        },
    )
    return label
__all__ = [
'bind_label',
'define_binder',
'define_binder_for',
'define_maker',
'depend_parameter_for',
'get_annotations',
]
import functools
import inspect
from startup import startup
from . import parameters
def get_annotations(func):
    """Collect the explicit parameter and return annotations of *func*."""
    sig = inspect.signature(func)
    collected = {}
    for param in sig.parameters.values():
        if param.annotation is not param.empty:
            collected[param.name] = param.annotation
    # The return annotation is stored under the conventional 'return' key.
    if sig.return_annotation is not sig.empty:
        collected['return'] = sig.return_annotation
    return collected
def bind_label(source_label, target_label):
startup.add_func(lambda x: x, {'x': source_label, 'return': target_label})
def _prepare(defaults, kwargs):
# Since ``startup`` only calls functions once, it should be fine to
# update ``defaults`` directly.
if defaults is None:
defaults = {}
defaults.update(kwargs)
for name in defaults:
value = defaults[name]
if isinstance(value, parameters.ParameterBase):
defaults[name] = value.get()
return defaults
def define_binder(func, func_label, annotations=None, defaults=None):
"""Define a binder function and add it to ``startup``.
This is a helper for this common pattern:
.. code-block:: python
def f(x: 'x') -> 'y':
return x * x
@startup
def bind_f(x: 'x') -> 'f':
if isinstance(x, Parameter):
x = x.get()
return functools.partial(f, x=x)
It is shortened to:
>>> bind_f = define_binder(f, 'f')
"""
# Since ``startup`` only calls ``bind`` once, it should be fine to
# update ``defaults`` directly.
def bind(**kwargs):
return functools.partial(func, **_prepare(defaults, kwargs))
bind.__name__ = bind.__qualname__ = 'bind_%s' % func.__name__
bind_annotations = get_annotations(func)
bind_annotations.update(annotations or ())
bind_annotations['return'] = func_label
return startup.add_func(bind, bind_annotations)
def define_binder_for(func_label, annotations=None, defaults=None):
"""Return a decorator for ``define_binder``.
Examples:
>>> @define_binder_for('f')
... def f(x: 'x') -> 'y':
... return x * x
"""
def decorate(func):
define_binder(func, func_label, annotations, defaults)
return func
return decorate
def define_maker(func, annotations=None, defaults=None):
"""Define a maker function and add it to ``startup``.
This is slightly more versatile than ``startup.add_func``.
"""
# Since ``startup`` only calls ``make`` once, it should be fine to
# update ``defaults`` directly.
def make(**kwargs):
return func(**_prepare(defaults, kwargs))
make.__name__ = make.__qualname__ = 'make_%s' % func.__name__
make_annotations = get_annotations(func)
make_annotations.update(annotations or ())
return startup.add_func(make, make_annotations)
def depend_parameter_for(label, value):
"""Add a dependency on parameter initialization for ``value``.
You need this when you want to use parameter value during starting
up, where you need to sequence the access to be after parameter
initialization.
"""
startup.add_func(
lambda _: value,
{
'_': parameters.LABELS.parameters,
'return': label,
},
)
return label | 0.83762 | 0.191479 |
import unittest
from inclusion_analysis.graph import FileGraph
from inclusion_analysis.graph import Graph
from inclusion_analysis.graph import cycle_detect
def list_rotate(l):
    """Rotate *l* in place by one position: the last element moves to the front."""
    l.insert(0, l.pop())
def assertSameList(l1, l2):
    """Return True if *l1* equals *l2* up to rotation.

    *l1* is rotated in place while searching.

    Fixes over the original: two empty lists now compare equal (the old
    loop returned False for them), and a cheap length check rejects
    lists that can never match.
    """
    if len(l1) != len(l2):
        return False
    if not l1:
        return True
    for _ in range(len(l1)):
        # Rotate right by one, then compare; after len(l1) steps every
        # rotation (including the original order) has been tried.
        l1.insert(0, l1.pop())
        if l1 == l2:
            return True
    return False
class GraphTest(unittest.TestCase):
    """Cycle detection on a small directed integer graph."""

    def test_cycle_detect_explicit_cycle(self):
        target = Graph()
        # edges from 0
        target.connect(0, 1)
        target.connect(0, 7)
        target.connect(0, 4)
        # edges from 1
        target.connect(1, 7)
        target.connect(1, 2)
        target.connect(1, 3)
        # edges from 2
        target.connect(2, 3)
        target.connect(2, 5)
        # edges from 4
        target.connect(4, 7)
        target.connect(4, 5)
        target.connect(4, 6)
        # edges from 5
        target.connect(5, 2)
        target.connect(5, 6)
        # edges from 7
        target.connect(7, 2)
        target.connect(7, 5)
        # this edge closes the 7 -> 2 -> 3 -> 7 cycle
        target.connect(3, 7)
        cycles = cycle_detect(target, 0)
        expected = [3, 2, 7]
        self.assertEqual(len(cycles), 1)
        # Bug fix: the helper's boolean result was previously discarded,
        # so a wrong cycle could never fail this test.
        self.assertTrue(assertSameList(cycles[0], expected))
class FileGraphTest(unittest.TestCase):
    """Cycle detection on a graph keyed by file names."""

    def test_cycle_detect_explicit_cycle(self):
        target = FileGraph()
        target.connect("header0", "header1")
        target.connect("header1", "header2")
        target.connect("header2", "header0")
        target.connect("header2", "header3")
        cycles = target.cycle_detect("header0")
        expected = ["header2", "header1", "header0"]
        self.assertEqual(len(cycles), 1)
        # Bug fix: assert the helper's result instead of discarding it.
        self.assertTrue(assertSameList(cycles[0], expected))
"""
def test_reverse(self):
target = Graph()
target.connect("header0", "header1")
target.connect("header1", "header2")
target.connect("header2", "header0")
reversed_graph = target.reversed()
reversed_graph.is_adjacent("header1", "header0")
reversed_graph.is_adjacent("header2", "header1")
reversed_graph.is_adjacent("header0", "header2")
def test_strong_connection(self):
target = Graph()
target.connect(0, 1)
target.connect(0, 5)
target.connect(2, 0)
target.connect(2, 3)
target.connect(3, 2)
target.connect(3, 5)
target.connect(4, 2)
target.connect(4, 3)
target.connect(5, 4)
target.connect(6, 8)
target.connect(6, 7)
target.connect(6, 0)
target.connect(6, 4)
target.connect(8, 6)
target.connect(7, 6)
target.connect(7, 9)
target.connect(9, 10)
target.connect(9, 11)
target.connect(10, 12)
target.connect(11, 4)
target.connect(11, 12)
target.connect(12, 9)
groups = target.strong_connection()
expected = [[1], [0, 2, 3, 4, 5], [6, 8], [7], [9, 10, 11, 12]]
def list_comparator(list1, list2):
return reduce(lambda v1, v2: v1 or v2, map(lambda v: v in list2, list1))
for expected_group in expected:
if any(map(built_group)):
"""
if __name__ == '__main__':
unittest.main() | tests/graph_test.py | import unittest
from inclusion_analysis.graph import FileGraph
from inclusion_analysis.graph import Graph
from inclusion_analysis.graph import cycle_detect
def list_rotate(l):
item = l.pop()
l.insert(0, item)
def assertSameList(l1, l2):
    """Return True if *l1* equals *l2* up to rotation.

    *l1* is rotated in place while searching.

    Fixes over the original: two empty lists now compare equal (the old
    loop returned False for them), and a cheap length check rejects
    lists that can never match.
    """
    if len(l1) != len(l2):
        return False
    if not l1:
        return True
    for _ in range(len(l1)):
        # Rotate right by one, then compare; after len(l1) steps every
        # rotation (including the original order) has been tried.
        l1.insert(0, l1.pop())
        if l1 == l2:
            return True
    return False
class GraphTest(unittest.TestCase):
    """Cycle detection on a small directed integer graph."""

    def test_cycle_detect_explicit_cycle(self):
        target = Graph()
        # edges from 0
        target.connect(0, 1)
        target.connect(0, 7)
        target.connect(0, 4)
        # edges from 1
        target.connect(1, 7)
        target.connect(1, 2)
        target.connect(1, 3)
        # edges from 2
        target.connect(2, 3)
        target.connect(2, 5)
        # edges from 4
        target.connect(4, 7)
        target.connect(4, 5)
        target.connect(4, 6)
        # edges from 5
        target.connect(5, 2)
        target.connect(5, 6)
        # edges from 7
        target.connect(7, 2)
        target.connect(7, 5)
        # this edge closes the 7 -> 2 -> 3 -> 7 cycle
        target.connect(3, 7)
        cycles = cycle_detect(target, 0)
        expected = [3, 2, 7]
        self.assertEqual(len(cycles), 1)
        # Bug fix: the helper's boolean result was previously discarded,
        # so a wrong cycle could never fail this test.
        self.assertTrue(assertSameList(cycles[0], expected))
class FileGraphTest(unittest.TestCase):
    """Cycle detection on a graph keyed by file names."""

    def test_cycle_detect_explicit_cycle(self):
        target = FileGraph()
        target.connect("header0", "header1")
        target.connect("header1", "header2")
        target.connect("header2", "header0")
        target.connect("header2", "header3")
        cycles = target.cycle_detect("header0")
        expected = ["header2", "header1", "header0"]
        self.assertEqual(len(cycles), 1)
        # Bug fix: assert the helper's result instead of discarding it.
        self.assertTrue(assertSameList(cycles[0], expected))
"""
def test_reverse(self):
target = Graph()
target.connect("header0", "header1")
target.connect("header1", "header2")
target.connect("header2", "header0")
reversed_graph = target.reversed()
reversed_graph.is_adjacent("header1", "header0")
reversed_graph.is_adjacent("header2", "header1")
reversed_graph.is_adjacent("header0", "header2")
def test_strong_connection(self):
target = Graph()
target.connect(0, 1)
target.connect(0, 5)
target.connect(2, 0)
target.connect(2, 3)
target.connect(3, 2)
target.connect(3, 5)
target.connect(4, 2)
target.connect(4, 3)
target.connect(5, 4)
target.connect(6, 8)
target.connect(6, 7)
target.connect(6, 0)
target.connect(6, 4)
target.connect(8, 6)
target.connect(7, 6)
target.connect(7, 9)
target.connect(9, 10)
target.connect(9, 11)
target.connect(10, 12)
target.connect(11, 4)
target.connect(11, 12)
target.connect(12, 9)
groups = target.strong_connection()
expected = [[1], [0, 2, 3, 4, 5], [6, 8], [7], [9, 10, 11, 12]]
def list_comparator(list1, list2):
return reduce(lambda v1, v2: v1 or v2, map(lambda v: v in list2, list1))
for expected_group in expected:
if any(map(built_group)):
"""
if __name__ == '__main__':
unittest.main() | 0.292696 | 0.450662 |
import os
import re
import sys
import subprocess
import json
from collections import namedtuple, OrderedDict
import requests
from requests.auth import HTTPBasicAuth
def _str(s):
if sys.version_info >= (3, 0):
return s.decode('utf-8')
return s
# Cache directory for fetched PR metadata (survives across runs).
os.makedirs(".git/changes", exist_ok=True)
# The changelog covers merges since the given revision; default to the
# most recent tag when none is supplied on the command line.
if len(sys.argv) > 1:
    since = sys.argv[1]
else:
    tag_rev = _str(subprocess.check_output(
        ['git', 'rev-list', '--tags', '--max-count=1']).strip())
    since = _str(subprocess.check_output(
        ['git', 'describe', '--tags', tag_rev]).strip())
# Subjects of first-parent merge commits since *since*, oldest first.
logs = _str(subprocess.check_output(
    ['git', 'log', '--reverse', '--merges', '--first-parent', '--pretty=tformat:%s', '{}...HEAD'.format(since)]))
# "Merge pull request #123 from ..." -> capture the PR number.
PR_NUMBER_RE = re.compile(r'\s*Merge pull request #(\d+) from')
# Conventional-commit style title: optional "[tag]" prefixes, then an
# optional "scope(module):" before the message proper.
PR_TITLE_RE = re.compile(
    r'(?:\[[^]]+\]\s*)*(?:(\w+)(?:\(([^\)]+)\))?:\s*)?(.*)')
Change = namedtuple('Change', ['scope', 'module', 'title', 'text'])
# Pre-seed the ordering so Features and Bug Fixes always render first.
changes = OrderedDict()
for scope in ['feat', 'fix']:
    changes[scope] = []
# Normalise commit scopes; a value of False means "exclude from changelog".
SCOPE_MAPPING = {
    'bug': 'fix',
    'bugs': 'fix',
    'chore': False,
    'docs': False,
    'feature': 'feat',
    'perf': 'refactor',
    'test': False,
}
SCOPE_TITLE = {
    'feat': 'Features',
    'fix': 'Bug Fixes',
    'refactor': 'Improvements',
}
# Authenticated requests get a much higher GitHub API rate limit.
auth = HTTPBasicAuth('', os.environ['GITHUB_ACCESS_TOKEN'])
# Collect one Change per merged PR, grouped by normalised scope.
for line in logs.splitlines():
    pr_number_match = PR_NUMBER_RE.match(line)
    if not pr_number_match:
        continue
    pr_number = pr_number_match.group(1)
    cache_file = ".git/changes/{}.json".format(pr_number)
    if os.path.exists(cache_file):
        # PR metadata already fetched on a previous run; reuse it.
        print("read pr #" + pr_number, file=sys.stderr)
        with open(cache_file) as fd:
            pr = json.load(fd)
    else:
        print("get pr #" + pr_number, file=sys.stderr)
        pr = requests.get('https://api.github.com/repos/nervosnetwork/ckb/pulls/' +
                          pr_number, auth=auth).json()
        if 'message' in pr:
            # The API returned an error payload instead of a PR object.
            print(pr['message'], file=sys.stderr)
            sys.exit(1)
        with open(cache_file, 'w') as fd:
            json.dump(pr, fd)
    scope, module, message = PR_TITLE_RE.match(pr['title']).groups()
    if not scope:
        scope = 'misc'
    scope = SCOPE_MAPPING.get(scope, scope)
    if not scope:
        # Mapped to False: this change category is excluded from the log.
        continue
    user = pr['user']['login']
    message = message.strip()
    if message:
        # Guard added: an empty title previously crashed on message[0].
        message = message[0].upper() + message[1:]
    if module:
        title = '* #{0} **{1}:** {2} (@{3})'.format(pr_number,
                                                    module, message, user)
    else:
        title = '* #{0}: {1} (@{2})'.format(pr_number, message, user)
    # (Removed: a dead `change = Change(...)` construction and a redundant
    # re-definition of the Change namedtuple that sat here in the original.)
    if scope not in changes:
        changes[scope] = []
    body = pr['body'] or ""
    labels = [label['name'] for label in pr['labels']]
    is_breaking = "breaking change" in labels or any(
        l.startswith('b:') for l in labels)
    if is_breaking:
        # Prepend a breaking-change banner unless the body already
        # mentions one and no explicit 'b:' label adds detail.
        breaking_banner = ", ".join(
            l for l in labels if l.startswith('b:'))
        if breaking_banner != "" or "breaking change" not in body.lower():
            if breaking_banner == "":
                breaking_banner = "This is a breaking change"
            else:
                breaking_banner = "This is a breaking change: " + breaking_banner
        if body == "":
            body = breaking_banner
        elif breaking_banner != "":
            body = breaking_banner + "\n\n" + body
    changes[scope].append(Change(scope, module, title, body))
# Merge hand-written extra entries (scope -> [{module, title, text}, ...]).
if os.path.exists(".git/changes/extra.json"):
    with open(".git/changes/extra.json") as fin:
        extra = json.load(fin)
    for (scope, extra_changes) in extra.items():
        if scope not in changes:
            changes[scope] = []
        for change in extra_changes:
            changes[scope].append(
                Change(scope, change.get('module'), change['title'], change.get('text', '')))
# Render the grouped changes as Markdown sections.
# Fix: the output file was previously opened without ever being closed;
# a context manager guarantees the buffer is flushed to disk.
with open(".git/changes/out.md", "w") as out:
    for scope, scope_changes in changes.items():
        if len(scope_changes) == 0:
            continue
        scope_title = SCOPE_TITLE.get(scope, scope.title())
        print('### {}'.format(scope_title), file=out)
        print('', file=out)
        for change in scope_changes:
            print(change.title, file=out)
            if change.text != '':
                print('', file=out)
                for line in change.text.splitlines():
                    print('    ' + line, file=out)
                print('', file=out)
        print('', file=out)
import os
import re
import sys
import subprocess
import json
from collections import namedtuple, OrderedDict
import requests
from requests.auth import HTTPBasicAuth
def _str(s):
if sys.version_info >= (3, 0):
return s.decode('utf-8')
return s
os.makedirs(".git/changes", exist_ok=True)
if len(sys.argv) > 1:
since = sys.argv[1]
else:
tag_rev = _str(subprocess.check_output(
['git', 'rev-list', '--tags', '--max-count=1']).strip())
since = _str(subprocess.check_output(
['git', 'describe', '--tags', tag_rev]).strip())
logs = _str(subprocess.check_output(
['git', 'log', '--reverse', '--merges', '--first-parent', '--pretty=tformat:%s', '{}...HEAD'.format(since)]))
PR_NUMBER_RE = re.compile(r'\s*Merge pull request #(\d+) from')
PR_TITLE_RE = re.compile(
r'(?:\[[^]]+\]\s*)*(?:(\w+)(?:\(([^\)]+)\))?:\s*)?(.*)')
Change = namedtuple('Change', ['scope', 'module', 'title', 'text'])
changes = OrderedDict()
for scope in ['feat', 'fix']:
changes[scope] = []
SCOPE_MAPPING = {
'bug': 'fix',
'bugs': 'fix',
'chore': False,
'docs': False,
'feature': 'feat',
'perf': 'refactor',
'test': False,
}
SCOPE_TITLE = {
'feat': 'Features',
'fix': 'Bug Fixes',
'refactor': 'Improvements',
}
auth = HTTPBasicAuth('', os.environ['GITHUB_ACCESS_TOKEN'])
# Collect one Change per merged PR, grouped by normalised scope.
for line in logs.splitlines():
    pr_number_match = PR_NUMBER_RE.match(line)
    if not pr_number_match:
        continue
    pr_number = pr_number_match.group(1)
    cache_file = ".git/changes/{}.json".format(pr_number)
    if os.path.exists(cache_file):
        # PR metadata already fetched on a previous run; reuse it.
        print("read pr #" + pr_number, file=sys.stderr)
        with open(cache_file) as fd:
            pr = json.load(fd)
    else:
        print("get pr #" + pr_number, file=sys.stderr)
        pr = requests.get('https://api.github.com/repos/nervosnetwork/ckb/pulls/' +
                          pr_number, auth=auth).json()
        if 'message' in pr:
            # The API returned an error payload instead of a PR object.
            print(pr['message'], file=sys.stderr)
            sys.exit(1)
        with open(cache_file, 'w') as fd:
            json.dump(pr, fd)
    scope, module, message = PR_TITLE_RE.match(pr['title']).groups()
    if not scope:
        scope = 'misc'
    scope = SCOPE_MAPPING.get(scope, scope)
    if not scope:
        # Mapped to False: this change category is excluded from the log.
        continue
    user = pr['user']['login']
    message = message.strip()
    if message:
        # Guard added: an empty title previously crashed on message[0].
        message = message[0].upper() + message[1:]
    if module:
        title = '* #{0} **{1}:** {2} (@{3})'.format(pr_number,
                                                    module, message, user)
    else:
        title = '* #{0}: {1} (@{2})'.format(pr_number, message, user)
    # (Removed: a dead `change = Change(...)` construction and a redundant
    # re-definition of the Change namedtuple that sat here in the original.)
    if scope not in changes:
        changes[scope] = []
    body = pr['body'] or ""
    labels = [label['name'] for label in pr['labels']]
    is_breaking = "breaking change" in labels or any(
        l.startswith('b:') for l in labels)
    if is_breaking:
        # Prepend a breaking-change banner unless the body already
        # mentions one and no explicit 'b:' label adds detail.
        breaking_banner = ", ".join(
            l for l in labels if l.startswith('b:'))
        if breaking_banner != "" or "breaking change" not in body.lower():
            if breaking_banner == "":
                breaking_banner = "This is a breaking change"
            else:
                breaking_banner = "This is a breaking change: " + breaking_banner
        if body == "":
            body = breaking_banner
        elif breaking_banner != "":
            body = breaking_banner + "\n\n" + body
    changes[scope].append(Change(scope, module, title, body))
if os.path.exists(".git/changes/extra.json"):
with open(".git/changes/extra.json") as fin:
extra = json.load(fin)
for (scope, extra_changes) in extra.items():
if scope not in changes:
changes[scope] = []
for change in extra_changes:
changes[scope].append(
Change(scope, change.get('module'), change['title'], change.get('text', '')))
# Render the grouped changes as Markdown sections.
# Fix: the output file was previously opened without ever being closed;
# a context manager guarantees the buffer is flushed to disk.
with open(".git/changes/out.md", "w") as out:
    for scope, scope_changes in changes.items():
        if len(scope_changes) == 0:
            continue
        scope_title = SCOPE_TITLE.get(scope, scope.title())
        print('### {}'.format(scope_title), file=out)
        print('', file=out)
        for change in scope_changes:
            print(change.title, file=out)
            if change.text != '':
                print('', file=out)
                for line in change.text.splitlines():
                    print('    ' + line, file=out)
                print('', file=out)
        print('', file=out)
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
class TestMechanismDriver(api.MechanismDriver):
    """Test mechanism driver for testing mechanism driver api."""

    def initialize(self):
        # No setup required for the test driver.
        pass

    def _check_network_context(self, context, original_expected):
        # Validate the NetworkContext contract: current state and segments
        # are always present; the original state only for update operations.
        assert(isinstance(context, driver_context.NetworkContext))
        assert(context.current())
        if original_expected:
            assert(context.original())
        else:
            assert(not context.original())
        assert(context.network_segments())

    def create_network_precommit(self, context):
        self._check_network_context(context, False)

    def create_network_postcommit(self, context):
        self._check_network_context(context, False)

    def update_network_precommit(self, context):
        # Updates carry both the new and the original network state.
        self._check_network_context(context, True)

    def update_network_postcommit(self, context):
        self._check_network_context(context, True)

    def delete_network_precommit(self, context):
        self._check_network_context(context, False)

    def delete_network_postcommit(self, context):
        self._check_network_context(context, False)

    def _check_port_context(self, context, original_expected):
        # Validate the PortContext contract, including its parent network
        # context (which never carries an "original" on port operations).
        assert(isinstance(context, driver_context.PortContext))
        assert(context.current())
        if original_expected:
            assert(context.original())
        else:
            assert(not context.original())
        network_context = context.network()
        assert(network_context)
        self._check_network_context(network_context, False)

    def create_port_precommit(self, context):
        self._check_port_context(context, False)

    def create_port_postcommit(self, context):
        self._check_port_context(context, False)

    def update_port_precommit(self, context):
        # Updates carry both the new and the original port state.
        self._check_port_context(context, True)

    def update_port_postcommit(self, context):
        self._check_port_context(context, True)

    def delete_port_precommit(self, context):
        self._check_port_context(context, False)

    def delete_port_postcommit(self, context):
        self._check_port_context(context, False)
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
class TestMechanismDriver(api.MechanismDriver):
"""Test mechanism driver for testing mechanism driver api."""
def initialize(self):
pass
def _check_network_context(self, context, original_expected):
assert(isinstance(context, driver_context.NetworkContext))
assert(context.current())
if original_expected:
assert(context.original())
else:
assert(not context.original())
assert(context.network_segments())
def create_network_precommit(self, context):
self._check_network_context(context, False)
def create_network_postcommit(self, context):
self._check_network_context(context, False)
def update_network_precommit(self, context):
self._check_network_context(context, True)
def update_network_postcommit(self, context):
self._check_network_context(context, True)
def delete_network_precommit(self, context):
self._check_network_context(context, False)
def delete_network_postcommit(self, context):
self._check_network_context(context, False)
def _check_port_context(self, context, original_expected):
assert(isinstance(context, driver_context.PortContext))
assert(context.current())
if original_expected:
assert(context.original())
else:
assert(not context.original())
network_context = context.network()
assert(network_context)
self._check_network_context(network_context, False)
def create_port_precommit(self, context):
self._check_port_context(context, False)
def create_port_postcommit(self, context):
self._check_port_context(context, False)
def update_port_precommit(self, context):
self._check_port_context(context, True)
def update_port_postcommit(self, context):
self._check_port_context(context, True)
def delete_port_precommit(self, context):
self._check_port_context(context, False)
def delete_port_postcommit(self, context):
self._check_port_context(context, False) | 0.754734 | 0.327064 |
from coppertop.core import PP, Missing
from coppertop.pipe import *
from bones.core.types import N, num, aliased, anon, pytuple, pylist, tv, named
from bones.metatypes import BTAtom, cacheAndUpdate, fitsWithin as _fitsWithin
from coppertop.std import tvarray, check, equal
# Abstract matrix/vector types built from the bones type algebra.
matrix = N**N**num
colvec = matrix['colvec']
rowvec = matrix['rowvec']
square = BTAtom.ensure('square')
colmaj = BTAtom.ensure('colmaj').setImplicit
rowmaj = BTAtom.ensure('rowmaj').setOrthogonal
lol = BTAtom.ensure('lol').setConstructor(tv)
# Concrete value types: tvarray-backed, list-of-lists, and flat-list matrices.
_matrix = matrix & tvarray
_colvec = _matrix & colvec
_rowvec = _matrix & rowvec
_llmatrix = (matrix & lol).setConstructor(tv)  # lol is a tvlist of tvlist, i.e. ragged array that we are making regular
_llcolvec = _llmatrix & colvec
_llrowvec = _llmatrix & rowvec
_lmatrix = matrix & pylist  # store the matrix in a linear fashion starting with n, m (colmaj and possibly transposed are in type)
_lcolvec = matrix & pylist
_lrowvec = matrix & pylist
# square and transposed is more of a dynamic / contingent type than a static type on the value but is static in
# terms of function sig and binding and sometimes static in terms of dispatch
@coppertop(style=unary1)
def T(A: _matrix) -> _matrix:
    # tvarray-backed matrices delegate to the underlying array's .T view.
    return A.T
@coppertop(style=unary1)
def T(A: _llmatrix & aliased & colmaj) -> _llmatrix & anon & colmaj:
    # NOTE(review): unfinished stub -- the bare `answer` expression in the
    # loop is a no-op, so the returned matrix is never populated.
    answer = lol(A >> shape, colmaj)
    for i, col in enumerate(A):
        answer
    return answer
@coppertop(style=unary1)
def T(A: _llmatrix & anon & colmaj) -> _llmatrix & anon & colmaj:
    # Re-dispatch on squareness (a square anon matrix can transpose in place).
    sh = A >> shape
    if sh[0] == sh[1]:
        A | +square >> T  # dispatch to T(A:_lmatrix & anon & colmaj & square)
    else:
        A | -anon >> T  # dispatch to T(A:_lmatrix & aliased & colmaj)
    # NOTE(review): the dispatched result is discarded and nothing is
    # returned -- callers receive None despite the declared return type.
@coppertop(style=unary1)
def T(A: _llmatrix & anon & colmaj & square) -> _llmatrix & anon & colmaj & square:
    # NOTE(review): unfinished stub -- the bare `answer` expression in the
    # loop is a no-op, so the returned matrix is never populated.
    answer = lol(A >> shape, colmaj)
    for i, col in enumerate(A):
        answer
    return answer
@coppertop(style=unary1)
def T(A: _llmatrix & aliased & rowmaj) -> _llmatrix & anon & rowmaj:
    # NOTE(review): broken stub -- `x` is undefined here, so calling this
    # overload raises NameError; the loop body is also a no-op.
    answer = [[]]
    for i in x:
        answer
    return answer
@coppertop(style=unary1)
def T(A: _llmatrix & anon & rowmaj) -> _llmatrix & anon & rowmaj:
    # NOTE(review): broken stub -- `x` is undefined here, so calling this
    # overload raises NameError; the loop body is also a no-op.
    for i in x:
        A
    return A
@coppertop(style=unary1)
def T(A: _llmatrix & named & colmaj) -> _llmatrix & anon & colmaj:
    """Transpose a named list-of-lists matrix, returning an anonymous one."""
    source_cols = A._v
    # One output column per element of the first input column.
    transposed = [[] for _ in source_cols[0]]
    for src_col in source_cols:
        for row_idx, value in enumerate(src_col):
            transposed[row_idx].append(value)
    return tv(_llmatrix & anon, transposed)
@coppertop(style=unary1)
def shape(A: _lmatrix) -> pytuple:
    # Flat-list matrices store (n, m) in their first two slots.
    return tuple(A[0:2])
@coppertop(style=unary1)
def shape(A: _llmatrix) -> pytuple:
    # (rows, cols): column length of the first column, and column count.
    lol = A._v
    return (len(lol[0]) if len(lol) > 0 else 0, len(lol))
def main():
    # Build a named 3x2 list-of-lists matrix, transpose it, and verify the
    # resulting shape is (2, 3).
    A = _llmatrix([[1, 2, 3], [3, 4, 5]]) | +named
    A >> shape >> PP
    A >> T >> PP >> shape >> check >> equal >> (2, 3)
# facts
# B: A causes both A and B to change type to aliased
# A's type is now aliased+named+anon as before it was named or anon
# AST Node results can be anon but A (upon assignment can only be named)
# any value addressable in a container is "named", e.g. A: (1,2,3) - each element is "named"
# can tranpose as a type be static? no - consider rand(1,6) timesRepeat: [A: A T]?
if __name__ == '__main__':
main()
print('pass') | src/std/coppertop/std/examples/linalg.py |
from coppertop.core import PP, Missing
from coppertop.pipe import *
from bones.core.types import N, num, aliased, anon, pytuple, pylist, tv, named
from bones.metatypes import BTAtom, cacheAndUpdate, fitsWithin as _fitsWithin
from coppertop.std import tvarray, check, equal
matrix = N**N**num
colvec = matrix['colvec']
rowvec = matrix['rowvec']
square = BTAtom.ensure('square')
colmaj = BTAtom.ensure('colmaj').setImplicit
rowmaj = BTAtom.ensure('rowmaj').setOrthogonal
lol = BTAtom.ensure('lol').setConstructor(tv)
_matrix = matrix & tvarray
_colvec = _matrix & colvec
_rowvec = _matrix & rowvec
_llmatrix = (matrix & lol).setConstructor(tv) # lol is a tvlist of tvlist, i.e. ragged array that we are making regular
_llcolvec = _llmatrix & colvec
_llrowvec = _llmatrix & rowvec
_lmatrix = matrix & pylist # store the matrix in a linear fashion starting with n, m (colmaj and possibly transposed are in type)
_lcolvec = matrix & pylist
_lrowvec = matrix & pylist
# square and transposed is more of a dynamic / contingent type than a static type on the value but is static in
# terms of function sig and binding and sometimes static in terms of dispatch
@coppertop(style=unary1)
def T(A: _matrix) -> _matrix:
return A.T
@coppertop(style=unary1)
def T(A: _llmatrix & aliased & colmaj) -> _llmatrix & anon & colmaj:
answer = lol(A >> shape, colmaj)
for i, col in enumerate(A):
answer
return answer
@coppertop(style=unary1)
def T(A: _llmatrix & anon & colmaj) -> _llmatrix & anon & colmaj:
sh = A >> shape
if sh[0] == sh[1]:
A | +square >> T # dispatch to T(A:_lmatrix & anon & colmaj & square)
else:
A | -anon >> T # dispatch to T(A:_lmatrix & aliased & colmaj)
@coppertop(style=unary1)
def T(A: _llmatrix & anon & colmaj & square) -> _llmatrix & anon & colmaj & square:
answer = lol(A >> shape, colmaj)
for i, col in enumerate(A):
answer
return answer
@coppertop(style=unary1)
def T(A: _llmatrix & aliased & rowmaj) -> _llmatrix & anon & rowmaj:
answer = [[]]
for i in x:
answer
return answer
@coppertop(style=unary1)
def T(A: _llmatrix & anon & rowmaj) -> _llmatrix & anon & rowmaj:
for i in x:
A
return A
@coppertop(style=unary1)
def T(A: _llmatrix & named & colmaj) -> _llmatrix & anon & colmaj:
old = A._v
new = [[] for r in old[0]]
for i, col in enumerate(old):
for j, e in enumerate(col):
new[j].append(e)
return tv(_llmatrix & anon, new)
@coppertop(style=unary1)
def shape(A: _lmatrix) -> pytuple:
return tuple(A[0:2])
@coppertop(style=unary1)
def shape(A: _llmatrix) -> pytuple:
lol = A._v
return (len(lol[0]) if len(lol) > 0 else 0, len(lol))
def main():
A = _llmatrix([[1, 2, 3], [3, 4, 5]]) | +named
A >> shape >> PP
A >> T >> PP >> shape >> check >> equal >> (2, 3)
# facts
# B: A causes both A and B to change type to aliased
# A's type is now aliased+named+anon as before it was named or anon
# AST Node results can be anon but A (upon assignment can only be named)
# any value addressable in a container is "named", e.g. A: (1,2,3) - each element is "named"
# can tranpose as a type be static? no - consider rand(1,6) timesRepeat: [A: A T]?
if __name__ == '__main__':
main()
print('pass') | 0.380068 | 0.538983 |
import collections
import csv
import datetime
import hashlib
import logging
import os
import random
import sys

import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler

import config
from DNN import flags
FLAGS, unparsed = flags.parse_args()
def en_dummy(data, cate_vn_list):
    """One-hot encode the categorical columns listed in *cate_vn_list*.

    Each column is label-encoded first; the integer codes are then mirrored
    (max - code) before one-hot expansion, matching the original scheme.
    Returns a plain DataFrame of 0/1 indicator columns.
    """
    for column in cate_vn_list:
        encoder = LabelEncoder()
        data[column] = encoder.fit_transform(data[column])
        data[column] = data[column].max() - data[column]
    categorical = data.loc[:, cate_vn_list]
    onehot = preprocessing.OneHotEncoder()
    onehot.fit(categorical)
    encoded = onehot.transform(categorical).toarray()
    return pd.DataFrame(encoded)
def one_hot_representation(sample, fields_dict, array_length):
    """Build the one-hot vector for a single sample.

    :param sample: sample record (mapping-like, e.g. pd.Series)
    :param fields_dict: per-field mapping from field value to vector index
    :param array_length: total length of the one-hot vector
    :return: np.ndarray with a 1 at each active field's index
    """
    vector = np.zeros([array_length])
    for field, value_to_index in fields_dict.items():
        if field == 'hour':
            # Only the hour-of-day (last two digits of YYMMDDHH) is used.
            value = int(str(sample[field])[-2:])
        else:
            value = sample[field]
        vector[value_to_index[value]] = 1
    return vector
NR_BINS = 1000000


def hashstr(input):
    """Hash a feature string into the range [1, NR_BINS - 1], as a string.

    Bug fix: ``hashlib`` was used here without ever being imported in this
    module (the call would raise NameError); the import is added to the
    module's import block.
    """
    digest = hashlib.md5(input.encode('utf8')).hexdigest()
    return str(int(digest, 16) % (NR_BINS - 1) + 1)
def gen_folder(dnsp_data_dir, fe_gen_dir, model_data_dir):
    """Create the three working directories if they do not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` (already used elsewhere in
    this module) instead of the racy exists()-then-makedirs check.
    """
    for directory in (dnsp_data_dir, fe_gen_dir, model_data_dir):
        os.makedirs(directory, exist_ok=True)
def def_user(row):
    """Derive a user identifier from a log row.

    The device id 'a99f214a' appears to be a null placeholder -- TODO
    confirm; for such rows the ip + model pair identifies the user instead.
    """
    if row['device_id'] != 'a99f214a':
        return 'id-' + row['device_id']
    return 'ip-' + row['device_ip'] + '-' + row['device_model']
def gen_userid(data):
    """Vectorised counterpart of ``def_user`` for a whole DataFrame."""
    by_ip = 'ip-' + data['device_ip'] + '-' + data['device_model']
    by_id = 'id-' + data['device_id']
    return np.where(data['device_id'] == 'a99f214a', by_ip, by_id)
def is_app(site_id):
    """Return True when the visit came through the app (sentinel site id)."""
    return site_id == '85f751fd'
def to_weekday(date):
    """Map an integer timestamp of the form YYMMDDHH to a weekday name."""
    day_part = str(date // 100)  # drop the trailing hour component
    return datetime.datetime.strptime(day_part, '%y%m%d').strftime('%a')
def gen_continous_clip(tr_path, filename):
    """Return the 95th percentile of columns 1..6 of the training csv.

    These values are later used to clip continuous features.
    """
    frame = pd.read_csv(os.path.join(tr_path, filename))
    clips = [np.percentile(frame.iloc[:, col], 95) for col in range(1, 7)]
    del frame
    return clips
def down_sampling(tr_path, label, outpath):
    """Down-sample the majority (negative) class of train.csv.

    Keeps every positive (*label* == 1) row plus an equally sized random
    subset of negative rows, shuffles the result, and writes it to
    *outpath*/train.csv.
    """
    tr_data = pd.read_csv(os.path.join(tr_path, 'train.csv'))
    print('train data is loaded,down_sampling start')
    temp_0 = tr_data[label] == 0
    data_0 = tr_data[temp_0]
    temp_1 = tr_data[label] == 1
    data_1 = tr_data[temp_1]
    # One random negative row per positive row.
    sampler = np.random.permutation(data_0.shape[0])[:data_1.shape[0]]
    data_0_ed = data_0.iloc[sampler, :]
    data_downsampled = pd.concat([data_1, data_0_ed], ignore_index=True)
    del tr_data
    del data_0
    del data_1
    del data_0_ed
    # Shuffle so positives and negatives are interleaved in the output.
    sampler = np.random.permutation(len(data_downsampled))
    data_downsampled = data_downsampled.take(sampler)
    data_downsampled.to_csv(os.path.join(
        outpath, 'train.csv'), index=None)
    print('train data is loaded,down_sampling end')
    del data_downsampled
def up_sampling(tr_path, label, outpath):
    '''
    Up-sample the training data so both classes are balanced.

    Only rows from the last three days (hour >= 14102800) are kept, then
    positives are drawn with replacement until they match the negative
    count, and the result is written to <outpath>/train.csv.
    '''
    tr_data = pd.read_csv(os.path.join(tr_path, 'train.csv'))
    print('train data is loaded,up_sampling start')
    tr_data = tr_data.loc[tr_data['hour'] >= 14102800, :]
    temp_0 = tr_data[label] == 0
    data_0 = tr_data[temp_0]
    temp_1 = tr_data[label] == 1
    data_1 = tr_data[temp_1]
    del tr_data
    # Sample positives with replacement up to the number of negatives.
    sampler = np.random.randint(data_1.shape[0], size=data_0.shape[0])
    data_1_ed = data_1.iloc[sampler, :]
    data_upsampled = pd.concat([data_1_ed, data_0], ignore_index=True)
    del data_0
    del data_1
    del data_1_ed
    data_upsampled.to_csv(os.path.join(
        outpath, 'train.csv'), index=None)
    print('train data is loaded,up_sampling end')
    del data_upsampled
def scan(path, is_trian):
    '''
    Scan a click-log CSV and collect frequency statistics.

    Returns six dicts: device_id counts, device_ip counts, user counts,
    user-hour counts, and (filled only when is_trian is truthy)
    per-device-id and per-device-ip click sums.
    '''
    id_cnt = collections.defaultdict(int)
    id_cnt_1 = collections.defaultdict(int)
    ip_cnt = collections.defaultdict(int)
    ip_cnt_1 = collections.defaultdict(int)
    user_cnt = collections.defaultdict(int)
    user_hour_cnt = collections.defaultdict(int)
    with open(path) as handle:
        for row in csv.DictReader(handle):
            user = def_user(row)
            # Per-device and per-ip impression counts.
            id_cnt[row['device_id']] += 1
            ip_cnt[row['device_ip']] += 1
            if is_trian:
                # Click sums are only available on training data.
                id_cnt_1[row['device_id']] += int(row['click'])
                ip_cnt_1[row['device_ip']] += int(row['click'])
            # Per-user activity, overall and per hour.
            user_cnt[user] += 1
            user_hour_cnt[user + '-' + row['hour']] += 1
    return id_cnt, ip_cnt, user_cnt, user_hour_cnt, id_cnt_1, ip_cnt_1
# Standardise features to zero mean / unit variance.
def standard(data):
    '''
    Fit a StandardScaler on *data* and return the transformed array.
    '''
    return StandardScaler().fit_transform(data)
# Split a matrix into features and (optionally) labels; the last column is the label.
def splitfealabdata(data, flag='train'):
    '''
    Split *data* into feature columns and, for train/valid data, a label
    column (the last one). Exits the process on an unknown flag.
    '''
    feature_data = data[:, 0:-1]
    if flag in ('train', 'valid'):
        return feature_data, data[:, -1]
    if flag == 'test':
        return feature_data
    logging.error('arguments of function splitfealabdata must be train,test or valid')
    sys.exit()
# Split a matrix into continuous columns, categorical columns and labels.
def splitdata(data, flag='train', index_begin=4, index_end=30):
    '''
    Slice *data* into continuous columns [0, index_begin), categorical
    columns [index_begin, index_end) and, for train/valid data, the
    label (last column). Exits the process on an unknown flag.
    '''
    continous_data = data[:, :index_begin]
    categorial_data = data[:, index_begin:index_end]
    if flag in ('train', 'valid'):
        return continous_data, categorial_data, data[:, -1]
    if flag == 'test':
        return continous_data, categorial_data
    logging.error('arguments of function splitdata must be train,test or valid')
    sys.exit()
# Yield mini-batches of the given size.
def genbatch(feature_data, label_data=None, batch_size=200):
    '''
    Iterate over *feature_data* (and *label_data*, when given) in
    consecutive slices of at most *batch_size* rows.
    '''
    total = len(feature_data)
    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        batch = feature_data[start:end]
        if label_data is None:
            yield batch
        else:
            yield batch, label_data[start:end]
# Load feature-engineered data from disk for one split.
def gendata(flag='train',train_path='output/model_data/train.txt',vaild_path='output/model_data/valid.txt',test_path='output/model_data/test.txt'):
    '''
    Load the requested split ('train', 'valid' or 'test') from its text
    file, standardise the continuous columns and concatenate them with
    the categorical columns.

    Returns (features, labels) for train/valid, features only for test;
    exits the process on an unknown flag. Column boundaries come from
    the module-level FLAGS.
    '''
    encod_train_path = train_path
    encod_vaild_path = vaild_path
    encod_test_path = test_path
    if flag == 'train':
        train_data = np.loadtxt(encod_train_path,delimiter=',')
        # Split into continuous / categorical / label parts.
        train_continous_data, train_categorial_data, train_data_label = splitdata(train_data,index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        train_continous_standard_data = standard(train_continous_data)
        train_feature_data = np.concatenate([train_continous_standard_data,train_categorial_data],axis=1)
        return train_feature_data, train_data_label
    elif flag == 'valid':
        valid_data = np.loadtxt(encod_vaild_path,delimiter=',')
        valid_continous_data, valid_categorial_data, valid_data_label = splitdata(valid_data, flag='valid',index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        valid_continous_standard_data = standard(valid_continous_data)
        valid_feature_data = np.concatenate([valid_continous_standard_data, valid_categorial_data], axis=1)
        return valid_feature_data, valid_data_label
    elif flag == 'test':
        test_data = np.loadtxt(encod_test_path,delimiter=',')
        test_continous_data, test_categorial_data = splitdata(test_data, flag='test',index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        test_continous_standard_data = standard(test_continous_data)
        test_feature_data = np.concatenate([test_continous_standard_data, test_categorial_data], axis=1)
        return test_feature_data
    else:
        logging.error('arguments of function gendata must be train,test or valid')
        sys.exit()
# Load prediction data for the FFM model.
def genffm(flag='train',train_path='output/model_data/train_pred.txt',vaild_path='output/model_data/vaild_pred.txt',test_path='output/model_data/test_pred.txt'):
    '''
    Load FFM prediction scores for the requested split ('train', 'valid'
    or 'test') as a column vector; exits the process on an unknown flag.
    '''
    if flag == 'train':
        ffm_path = train_path
    elif flag == 'valid':
        ffm_path = vaild_path
    elif flag == 'test':
        ffm_path = test_path
    else:
        # NOTE(review): message says 'gendata' but this is genffm — likely copy-paste.
        logging.error('arguments of function gendata must be train,test or valid')
        sys.exit()
    #filename_queue = tf.train.string_input_producer([ffm_path])
    #reader = tf.FixedLengthRecordReader(record_bytes=1)
    #ffm_data = reader.read(filename_queue)
    #ffm_data = np.fromfile(ffm_path, dtype=np.int32)
    ffm_data = np.loadtxt(ffm_path)
    return ffm_data[:,np.newaxis]
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
import numpy as np
import collections
import datetime
import os
import sys
import random
import csv
import config
import logging
from DNN import flags
# Command-line flags parsed once at import time (supplies encod_cat_index_begin/end).
FLAGS, unparsed = flags.parse_args()
def en_dummy(data, cate_vn_list):
    # One-hot encode the categorical columns listed in cate_vn_list.
    # Each column is first label-encoded, then mirrored around its max
    # (value -> max - value) before OneHotEncoder is applied.
    for feature in cate_vn_list:
        le = LabelEncoder()
        data[feature] = le.fit_transform(data[feature])
        max_ = data[feature].max()
        # Flip the code order; presumably to control dummy-column ordering — TODO confirm.
        data[feature] = (data[feature] - max_) * (-1)
    data = data.loc[:, cate_vn_list]
    en_d = preprocessing.OneHotEncoder()
    en_d.fit(data)
    data = en_d.transform(data).toarray()
    result = pd.DataFrame(data)
    return result
def one_hot_representation(sample, fields_dict, array_length):
    """
    Build the one-hot representation for a single sample.

    :param sample: sample data (pd.Series or mapping), indexed by field name
    :param fields_dict: maps each field's values to indices in the array
    :param array_length: total length of the one-hot vector
    :return: np.array of zeros with a 1 at each active field index
    """
    encoded = np.zeros([array_length])
    for field, value_to_index in fields_dict.items():
        if field == 'hour':
            # Only the hour-of-day (last two digits of YYMMDDHH) is used.
            value = int(str(sample[field])[-2:])
        else:
            value = sample[field]
        encoded[value_to_index[value]] = 1
    return encoded
NR_BINS = 1000000  # number of buckets used for feature hashing
def hashstr(input):
    '''
    Hash a feature string into one of NR_BINS - 1 buckets.

    Returns the bucket id as a string in the range [1, NR_BINS - 1].
    '''
    # BUG FIX: hashlib is not imported at module level in this file,
    # so calling this function raised NameError. Import it locally.
    import hashlib
    return str(int(hashlib.md5(input.encode('utf8')).hexdigest(), 16) % (NR_BINS - 1) + 1)
def gen_folder(dnsp_data_dir, fe_gen_dir, model_data_dir):
    '''
    Create the three output directories if they do not already exist.

    Args:
        dnsp_data_dir: directory for down-sampled data.
        fe_gen_dir: directory for generated features.
        model_data_dir: directory for model-ready data.
    '''
    # exist_ok=True avoids the check-then-create race of os.path.exists().
    for directory in (dnsp_data_dir, fe_gen_dir, model_data_dir):
        os.makedirs(directory, exist_ok=True)
def def_user(row):
    '''
    Build a user id for one log row.

    The device_id value 'a99f214a' is a shared placeholder, so such rows
    are identified by ip + device model instead of by device id.
    '''
    if row['device_id'] != 'a99f214a':
        return 'id-' + row['device_id']
    return 'ip-' + row['device_ip'] + '-' + row['device_model']
def gen_userid(data):
    '''
    Vectorised version of def_user: build one user id per row of *data*.

    Rows whose device_id is the shared placeholder 'a99f214a' are keyed
    by ip + device model, all others by device id.
    '''
    ip_based = 'ip-' + data['device_ip'] + '-' + data['device_model']
    id_based = 'id-' + data['device_id']
    return np.where(data['device_id'] == 'a99f214a', ip_based, id_based)
def is_app(site_id):
    '''
    Return True when the visit came through an app rather than a site.

    '85f751fd' is the placeholder site_id used for app traffic.
    '''
    # The comparison already yields a bool; no ternary needed.
    return site_id == '85f751fd'
def to_weekday(date):
    '''
    Map an hour stamp of the form YYMMDDHH (int) to its abbreviated
    weekday name (e.g. 'Tue').
    '''
    day = str(date // 100)  # drop the hour part -> 'YYMMDD'
    return datetime.datetime.strptime(day, '%y%m%d').strftime('%a')
def gen_continous_clip(tr_path, filename):
    '''
    Compute the 95th percentile of columns 1..6 of a CSV file; these
    values are later used to clip continuous features.

    Returns a list of six percentile values.
    '''
    frame = pd.read_csv(os.path.join(tr_path, filename))
    return [np.percentile(frame.iloc[:, col], 95) for col in range(1, 7)]
def down_sampling(tr_path, label, outpath):
    '''
    Down-sample the training data so both classes are balanced.

    Reads <tr_path>/train.csv, keeps every positive (label == 1) row and
    a random subset of negatives of equal size, shuffles the result and
    writes it to <outpath>/train.csv.
    '''
    tr_data = pd.read_csv(os.path.join(tr_path, 'train.csv'))
    print('train data is loaded,down_sampling start')
    temp_0 = tr_data[label] == 0
    data_0 = tr_data[temp_0]
    temp_1 = tr_data[label] == 1
    data_1 = tr_data[temp_1]
    # Pick as many random negative rows as there are positive rows.
    sampler = np.random.permutation(data_0.shape[0])[:data_1.shape[0]]
    data_0_ed = data_0.iloc[sampler, :]
    data_downsampled = pd.concat([data_1, data_0_ed], ignore_index=True)
    # Free the large intermediates before shuffling.
    del tr_data
    del data_0
    del data_1
    del data_0_ed
    # Shuffle the combined frame so classes are interleaved.
    sampler = np.random.permutation(len(data_downsampled))
    data_downsampled=data_downsampled.take(sampler)
    data_downsampled.to_csv(os.path.join(
        outpath, 'train.csv'), index=None)
    print('train data is loaded,down_sampling end')
    del data_downsampled
def up_sampling(tr_path, label, outpath):
    '''
    Up-sample the training data so both classes are balanced.

    Only rows from the last three days (hour >= 14102800) are kept, then
    positives are drawn with replacement until they match the negative
    count, and the result is written to <outpath>/train.csv.
    '''
    tr_data = pd.read_csv(os.path.join(tr_path, 'train.csv'))
    print('train data is loaded,up_sampling start')
    tr_data = tr_data.loc[tr_data['hour'] >= 14102800, :]
    temp_0 = tr_data[label] == 0
    data_0 = tr_data[temp_0]
    temp_1 = tr_data[label] == 1
    data_1 = tr_data[temp_1]
    del tr_data
    # Sample positives with replacement up to the number of negatives.
    sampler = np.random.randint(data_1.shape[0], size=data_0.shape[0])
    data_1_ed = data_1.iloc[sampler, :]
    data_upsampled = pd.concat([data_1_ed, data_0], ignore_index=True)
    del data_0
    del data_1
    del data_1_ed
    data_upsampled.to_csv(os.path.join(
        outpath, 'train.csv'), index=None)
    print('train data is loaded,up_sampling end')
    del data_upsampled
def scan(path, is_trian):
    '''
    Scan a click-log CSV and collect frequency statistics.

    Returns six dicts: device_id counts, device_ip counts, user counts,
    user-hour counts, and (filled only when is_trian is truthy)
    per-device-id and per-device-ip click sums.
    '''
    id_cnt = collections.defaultdict(int)
    id_cnt_1 = collections.defaultdict(int)
    ip_cnt = collections.defaultdict(int)
    ip_cnt_1 = collections.defaultdict(int)
    user_cnt = collections.defaultdict(int)
    user_hour_cnt = collections.defaultdict(int)
    with open(path) as handle:
        for row in csv.DictReader(handle):
            user = def_user(row)
            # Per-device and per-ip impression counts.
            id_cnt[row['device_id']] += 1
            ip_cnt[row['device_ip']] += 1
            if is_trian:
                # Click sums are only available on training data.
                id_cnt_1[row['device_id']] += int(row['click'])
                ip_cnt_1[row['device_ip']] += int(row['click'])
            # Per-user activity, overall and per hour.
            user_cnt[user] += 1
            user_hour_cnt[user + '-' + row['hour']] += 1
    return id_cnt, ip_cnt, user_cnt, user_hour_cnt, id_cnt_1, ip_cnt_1
# Standardise features to zero mean / unit variance.
def standard(data):
    '''
    Fit a StandardScaler on *data* and return the transformed array.
    '''
    return StandardScaler().fit_transform(data)
# Split a matrix into features and (optionally) labels; the last column is the label.
def splitfealabdata(data, flag='train'):
    '''
    Split *data* into feature columns and, for train/valid data, a label
    column (the last one). Exits the process on an unknown flag.
    '''
    feature_data = data[:, 0:-1]
    if flag in ('train', 'valid'):
        return feature_data, data[:, -1]
    if flag == 'test':
        return feature_data
    logging.error('arguments of function splitfealabdata must be train,test or valid')
    sys.exit()
# Split a matrix into continuous columns, categorical columns and labels.
def splitdata(data, flag='train', index_begin=4, index_end=30):
    '''
    Slice *data* into continuous columns [0, index_begin), categorical
    columns [index_begin, index_end) and, for train/valid data, the
    label (last column). Exits the process on an unknown flag.
    '''
    continous_data = data[:, :index_begin]
    categorial_data = data[:, index_begin:index_end]
    if flag in ('train', 'valid'):
        return continous_data, categorial_data, data[:, -1]
    if flag == 'test':
        return continous_data, categorial_data
    logging.error('arguments of function splitdata must be train,test or valid')
    sys.exit()
# Yield mini-batches of the given size.
def genbatch(feature_data, label_data=None, batch_size=200):
    '''
    Iterate over *feature_data* (and *label_data*, when given) in
    consecutive slices of at most *batch_size* rows.
    '''
    total = len(feature_data)
    for start in range(0, total, batch_size):
        end = min(start + batch_size, total)
        batch = feature_data[start:end]
        if label_data is None:
            yield batch
        else:
            yield batch, label_data[start:end]
# Load feature-engineered data from disk for one split.
def gendata(flag='train',train_path='output/model_data/train.txt',vaild_path='output/model_data/valid.txt',test_path='output/model_data/test.txt'):
    '''
    Load the requested split ('train', 'valid' or 'test') from its text
    file, standardise the continuous columns and concatenate them with
    the categorical columns.

    Returns (features, labels) for train/valid, features only for test;
    exits the process on an unknown flag. Column boundaries come from
    the module-level FLAGS.
    '''
    encod_train_path = train_path
    encod_vaild_path = vaild_path
    encod_test_path = test_path
    if flag == 'train':
        train_data = np.loadtxt(encod_train_path,delimiter=',')
        # Split into continuous / categorical / label parts.
        train_continous_data, train_categorial_data, train_data_label = splitdata(train_data,index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        train_continous_standard_data = standard(train_continous_data)
        train_feature_data = np.concatenate([train_continous_standard_data,train_categorial_data],axis=1)
        return train_feature_data, train_data_label
    elif flag == 'valid':
        valid_data = np.loadtxt(encod_vaild_path,delimiter=',')
        valid_continous_data, valid_categorial_data, valid_data_label = splitdata(valid_data, flag='valid',index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        valid_continous_standard_data = standard(valid_continous_data)
        valid_feature_data = np.concatenate([valid_continous_standard_data, valid_categorial_data], axis=1)
        return valid_feature_data, valid_data_label
    elif flag == 'test':
        test_data = np.loadtxt(encod_test_path,delimiter=',')
        test_continous_data, test_categorial_data = splitdata(test_data, flag='test',index_begin=FLAGS.encod_cat_index_begin,index_end=FLAGS.encod_cat_index_end)
        test_continous_standard_data = standard(test_continous_data)
        test_feature_data = np.concatenate([test_continous_standard_data, test_categorial_data], axis=1)
        return test_feature_data
    else:
        logging.error('arguments of function gendata must be train,test or valid')
        sys.exit()
# Load prediction data for the FFM model.
def genffm(flag='train',train_path='output/model_data/train_pred.txt',vaild_path='output/model_data/vaild_pred.txt',test_path='output/model_data/test_pred.txt'):
    '''
    Load FFM prediction scores for the requested split ('train', 'valid'
    or 'test') as a column vector; exits the process on an unknown flag.
    '''
    if flag == 'train':
        ffm_path = train_path
    elif flag == 'valid':
        ffm_path = vaild_path
    elif flag == 'test':
        ffm_path = test_path
    else:
        # NOTE(review): message says 'gendata' but this is genffm — likely copy-paste.
        logging.error('arguments of function gendata must be train,test or valid')
        sys.exit()
    #filename_queue = tf.train.string_input_producer([ffm_path])
    #reader = tf.FixedLengthRecordReader(record_bytes=1)
    #ffm_data = reader.read(filename_queue)
    #ffm_data = np.fromfile(ffm_path, dtype=np.int32)
    ffm_data = np.loadtxt(ffm_path)
    return ffm_data[:,np.newaxis]
from pathlib import Path
import logging
import os
import subprocess
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import requests
log = logging.getLogger()  # root logger
esp32 = "http://board_ip"  # base URL of the ESP32 status board (placeholder host)
# Original Source: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
# Pulled function to handle connection errors/timeouts
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
):
    """
    Return a requests.Session configured to retry failed requests.

    Retries up to *retries* times on connection/read errors and on the
    HTTP statuses in *status_forcelist*, with exponential backoff.
    An existing *session* may be passed in to be configured in place.
    """
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    # Install the retrying adapter for both schemes.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
# Get the matching process: ps | grep zoom.us | grep for a 10-digit -key arg.
p1 = subprocess.Popen(['ps', 'x'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "-i", "zoom.us"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(
    ["grep", "-E", "\-key [0-9]{10,10}"], stdin=p2.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
p2.stdout.close()
output = p3.communicate()[0]
if output:
    args = output.decode().split()
    # BUG FIX: pid was undefined (NameError) when "-pid" was absent.
    pid = None
    for item in args:
        if item == "-pid":
            # The PID is the argument right after "-pid".
            pid = args[args.index(item) + 1]
    # Attempt to open the crashlog corresponding to the PID of the process
    logfile = Path.joinpath(Path.home(), "Library",
                            "Logs", "zoom.us", "crashlog", f"{pid}.log")
    if os.path.exists(logfile):
        meeting_id = "unknown"
        # Close the log file deterministically instead of leaking the handle.
        with open(logfile, 'r') as fh:
            logdata = fh.readlines()
        logdata.reverse()
        # Parse through the log and find the most recent meeting-id
        for line in logdata:
            try:
                key, value = line.split(":", 1)
                if key == "meeting-id":
                    meeting_id = value
                    break
            except ValueError:
                pass
        # BUG FIX: print the meeting id that was found, not the leftover
        # `value` from the last parsed line.
        print(f"Zoom Meeting # {meeting_id}")
    else:
        # If the log doesn't exist, just use the key
        code = output.split()[-1].decode()
        print("Zoom Meeting # ", str(code))
    try:
        requests_retry_session().get(f"{esp32}/meeting/on")
    except Exception as e:
        log.error(f"Unable to set meeting status as on. Reason was {e}")
else:
    try:
        requests_retry_session().get(f"{esp32}/meeting/off")
    except Exception as e:
        log.error(f"Unable to set meeting status as off. Reason was {e}")
    print("Avail.")
from pathlib import Path
import logging
import os
import subprocess
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import requests
log = logging.getLogger()  # root logger
esp32 = "http://board_ip"  # base URL of the ESP32 status board (placeholder host)
# Original Source: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
# Pulled function to handle connection errors/timeouts
def requests_retry_session(
    retries=3,
    backoff_factor=0.3,
    status_forcelist=(500, 502, 504),
    session=None,
):
    """
    Return a requests.Session configured to retry failed requests.

    Retries up to *retries* times on connection/read errors and on the
    HTTP statuses in *status_forcelist*, with exponential backoff.
    An existing *session* may be passed in to be configured in place.
    """
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    # Install the retrying adapter for both schemes.
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
# Get the matching process: ps | grep zoom.us | grep for a 10-digit -key arg.
p1 = subprocess.Popen(['ps', 'x'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "-i", "zoom.us"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(
    ["grep", "-E", "\-key [0-9]{10,10}"], stdin=p2.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
p2.stdout.close()
output = p3.communicate()[0]
if output:
    args = output.decode().split()
    # BUG FIX: pid was undefined (NameError) when "-pid" was absent.
    pid = None
    for item in args:
        if item == "-pid":
            # The PID is the argument right after "-pid".
            pid = args[args.index(item) + 1]
    # Attempt to open the crashlog corresponding to the PID of the process
    logfile = Path.joinpath(Path.home(), "Library",
                            "Logs", "zoom.us", "crashlog", f"{pid}.log")
    if os.path.exists(logfile):
        meeting_id = "unknown"
        # Close the log file deterministically instead of leaking the handle.
        with open(logfile, 'r') as fh:
            logdata = fh.readlines()
        logdata.reverse()
        # Parse through the log and find the most recent meeting-id
        for line in logdata:
            try:
                key, value = line.split(":", 1)
                if key == "meeting-id":
                    meeting_id = value
                    break
            except ValueError:
                pass
        # BUG FIX: print the meeting id that was found, not the leftover
        # `value` from the last parsed line.
        print(f"Zoom Meeting # {meeting_id}")
    else:
        # If the log doesn't exist, just use the key
        code = output.split()[-1].decode()
        print("Zoom Meeting # ", str(code))
    try:
        requests_retry_session().get(f"{esp32}/meeting/on")
    except Exception as e:
        log.error(f"Unable to set meeting status as on. Reason was {e}")
else:
    try:
        requests_retry_session().get(f"{esp32}/meeting/off")
    except Exception as e:
        log.error(f"Unable to set meeting status as off. Reason was {e}")
    print("Avail.")
import numpy
import random
from yoolkit.constant import Constant
constant = Constant()
# Tags for the four manipulation types and the alignment padding token.
constant.PADDING = '<PAD>'
constant.MATCHING = '<MAT>'
constant.DELETION = '<DEL>'
constant.INSERTION = '<INS>'
constant.SUBSTITUTION = '<SUB>'
"""LevenshteinTool can Calculating Levenshtein-Distance, Printing Manipulation-Sequence, Aligned-Sequences and so on
Levenshtein-Distance(L-Distance, LD) is a measure of the similarity between two sequences, which we will refer to as the reference sequence (ref) and the hypothesis sequence (hyp).
L-Distance is the number of three type manipulations which is required to transform source to target. If the elements of two sequences is the same, we also call this as a kind of manipulation -- matching.
These manipulations is:
insertions - <INS> ;
substitutions - <SUB> ;
deletions - <DEL> ;
matching - <MAT> .
This tool can also give the aligned source and target with manipulation-Sequence, we call these as Aligned-Sequences.
Example:
Input:
source: ['t', 'e', 'n', 't']
target: ['t', 'e', 's', 't', '!']
Output:
Levenshtein-Distance: 1
Manipulation-Sequence: ['<MAT>', '<MAT>', '<SUB>', '<MAT>', '<INS>']
Aligned-Sequences:
aligned-source: ['t', 'e', 'n', 't', '<PAD>']
aligned-target: ['t', 'e', 's', 't', '!']
Note:
Levenshtein-Distance is also sometimes called Edit-Distance.
"""
class Levenshtein(object):
def __init__(self,
deletion_weight=1, insertion_weight=1, substitution_weight=1,
mat_prob=0.25, del_prob=0.25, ins_prob=0.25, sub_prob=0.25
):
self.deletion_weight = deletion_weight
self.insertion_weight = insertion_weight
self.substitution_weight = substitution_weight
self.manipulation_choice_probabilities = {
constant.MATCHING : mat_prob,
constant.DELETION : del_prob,
constant.INSERTION : ins_prob,
constant.SUBSTITUTION : sub_prob
}
def calculate_stage_matrix(self, source, target):
stage_matrix_size = (len(source) + 1, len(target) + 1)
stage_matrix = numpy.zeros(stage_matrix_size, dtype=numpy.uint)
stage_matrix[:,0] = numpy.arange(stage_matrix_size[0])
stage_matrix[0,:] = numpy.arange(stage_matrix_size[1])
for source_stage_index in range(1, stage_matrix_size[0]):
for target_stage_index in range(1, stage_matrix_size[1]):
source_index = source_stage_index - 1
target_index = target_stage_index - 1
if source[source_index] == target[target_index]:
#Matching
stage_matrix[source_stage_index, target_stage_index] = stage_matrix[source_stage_index-1, target_stage_index-1]
else:
deletion = stage_matrix[source_stage_index-1, target_stage_index] #Deletion
insertion = stage_matrix[source_stage_index, target_stage_index-1] #Insertion
substitution = stage_matrix[source_stage_index-1, target_stage_index-1] #Substitution
stage_matrix[source_stage_index, target_stage_index] = min(
deletion + self.deletion_weight,
insertion + self.insertion_weight,
substitution + self.substitution_weight
)
return stage_matrix
def backtrack_manipulation_sequence(self, source, target, stage_matrix):
manipulation_sequence = []
x, y = len(source), len(target)
while x != 0 or y != 0:
candidate_actions = []
choice_probs = []
if x>0 and y>0:
if stage_matrix[x, y] == stage_matrix[x-1, y-1]:
if source[x-1] == target[y-1]:
candidate_actions.append(constant.MATCHING) #Matching
choice_probs.append(self.manipulation_choice_probabilities[constant.MATCHING])
elif stage_matrix[x, y] == (stage_matrix[x-1, y-1] + self.substitution_weight):
candidate_actions.append(constant.SUBSTITUTION) #Substitution
choice_probs.append(self.manipulation_choice_probabilities[constant.SUBSTITUTION])
if x>0:
if stage_matrix[x, y] == (stage_matrix[x-1, y] + self.deletion_weight):
candidate_actions.append(constant.DELETION) #Deletion
choice_probs.append(self.manipulation_choice_probabilities[constant.DELETION])
if y>0:
if stage_matrix[x, y] == (stage_matrix[x, y-1] + self.insertion_weight):
candidate_actions.append(constant.INSERTION) #INSERTION
choice_probs.append(self.manipulation_choice_probabilities[constant.INSERTION])
choice_prob_sum = sum(choice_probs)
choice_probs = [choice_prob/choice_prob_sum for choice_prob in choice_probs]
action_index = numpy.random.choice(len(candidate_actions), 1, choice_probs)[0]
action = candidate_actions[action_index]
if action == constant.MATCHING:
manipulation_sequence.append(constant.MATCHING)
x, y = x - 1, y - 1
elif action == constant.SUBSTITUTION:
manipulation_sequence.append(constant.SUBSTITUTION)
x, y = x - 1, y - 1
elif action == constant.DELETION:
manipulation_sequence.append(constant.DELETION)
x = x - 1
elif action == constant.INSERTION:
manipulation_sequence.append(constant.INSERTION)
y = y - 1
manipulation_sequence = manipulation_sequence[::-1]
return manipulation_sequence
def align_source_and_target(self, source, target, manipulation_sequence):
source_index = 0
target_index = 0
aligned_source = []
aligned_target = []
for action in manipulation_sequence:
if action == constant.MATCHING or action == constant.SUBSTITUTION:
aligned_source.append(source[source_index])
aligned_target.append(target[target_index])
source_index, target_index = source_index + 1, target_index + 1
elif action == constant.DELETION:
aligned_source.append(source[source_index])
aligned_target.append(constant.PADDING)
source_index = source_index + 1
elif action == constant.INSERTION:
aligned_source.append(constant.PADDING)
aligned_target.append(target[target_index])
target_index = target_index + 1
return aligned_source, aligned_target
def get_levenshtein_distance(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
return stage_matrix[-1, -1]
def get_manipulation_sequence(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
manipulation_sequence = self.backtrack_manipulation_sequence(source, target, stage_matrix)
return manipulation_sequence
def get_aligned_sequences(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
manipulation_sequence = self.backtrack_manipulation_sequence(source, target, stage_matrix)
aligned_source, aligned_target = self.align_source_and_target(source, target, manipulation_sequence)
return dict(
aligned_source=aligned_source,
aligned_target=aligned_target,
manipulation=manipulation_sequence
        )
import numpy
import random
from yoolkit.constant import Constant
constant = Constant()
# Tags for the four manipulation types and the alignment padding token.
constant.PADDING = '<PAD>'
constant.MATCHING = '<MAT>'
constant.DELETION = '<DEL>'
constant.INSERTION = '<INS>'
constant.SUBSTITUTION = '<SUB>'
"""LevenshteinTool can Calculating Levenshtein-Distance, Printing Manipulation-Sequence, Aligned-Sequences and so on
Levenshtein-Distance(L-Distance, LD) is a measure of the similarity between two sequences, which we will refer to as the reference sequence (ref) and the hypothesis sequence (hyp).
L-Distance is the number of three type manipulations which is required to transform source to target. If the elements of two sequences is the same, we also call this as a kind of manipulation -- matching.
These manipulations is:
insertions - <INS> ;
substitutions - <SUB> ;
deletions - <DEL> ;
matching - <MAT> .
This tool can also give the aligned source and target with manipulation-Sequence, we call these as Aligned-Sequences.
Example:
Input:
source: ['t', 'e', 'n', 't']
target: ['t', 'e', 's', 't', '!']
Output:
Levenshtein-Distance: 1
Manipulation-Sequence: ['<MAT>', '<MAT>', '<SUB>', '<MAT>', '<INS>']
Aligned-Sequences:
aligned-source: ['t', 'e', 'n', 't', '<PAD>']
aligned-target: ['t', 'e', 's', 't', '!']
Note:
Levenshtein-Distance is also sometimes called Edit-Distance.
"""
class Levenshtein(object):
def __init__(self,
deletion_weight=1, insertion_weight=1, substitution_weight=1,
mat_prob=0.25, del_prob=0.25, ins_prob=0.25, sub_prob=0.25
):
self.deletion_weight = deletion_weight
self.insertion_weight = insertion_weight
self.substitution_weight = substitution_weight
self.manipulation_choice_probabilities = {
constant.MATCHING : mat_prob,
constant.DELETION : del_prob,
constant.INSERTION : ins_prob,
constant.SUBSTITUTION : sub_prob
}
def calculate_stage_matrix(self, source, target):
stage_matrix_size = (len(source) + 1, len(target) + 1)
stage_matrix = numpy.zeros(stage_matrix_size, dtype=numpy.uint)
stage_matrix[:,0] = numpy.arange(stage_matrix_size[0])
stage_matrix[0,:] = numpy.arange(stage_matrix_size[1])
for source_stage_index in range(1, stage_matrix_size[0]):
for target_stage_index in range(1, stage_matrix_size[1]):
source_index = source_stage_index - 1
target_index = target_stage_index - 1
if source[source_index] == target[target_index]:
#Matching
stage_matrix[source_stage_index, target_stage_index] = stage_matrix[source_stage_index-1, target_stage_index-1]
else:
deletion = stage_matrix[source_stage_index-1, target_stage_index] #Deletion
insertion = stage_matrix[source_stage_index, target_stage_index-1] #Insertion
substitution = stage_matrix[source_stage_index-1, target_stage_index-1] #Substitution
stage_matrix[source_stage_index, target_stage_index] = min(
deletion + self.deletion_weight,
insertion + self.insertion_weight,
substitution + self.substitution_weight
)
return stage_matrix
def backtrack_manipulation_sequence(self, source, target, stage_matrix):
manipulation_sequence = []
x, y = len(source), len(target)
while x != 0 or y != 0:
candidate_actions = []
choice_probs = []
if x>0 and y>0:
if stage_matrix[x, y] == stage_matrix[x-1, y-1]:
if source[x-1] == target[y-1]:
candidate_actions.append(constant.MATCHING) #Matching
choice_probs.append(self.manipulation_choice_probabilities[constant.MATCHING])
elif stage_matrix[x, y] == (stage_matrix[x-1, y-1] + self.substitution_weight):
candidate_actions.append(constant.SUBSTITUTION) #Substitution
choice_probs.append(self.manipulation_choice_probabilities[constant.SUBSTITUTION])
if x>0:
if stage_matrix[x, y] == (stage_matrix[x-1, y] + self.deletion_weight):
candidate_actions.append(constant.DELETION) #Deletion
choice_probs.append(self.manipulation_choice_probabilities[constant.DELETION])
if y>0:
if stage_matrix[x, y] == (stage_matrix[x, y-1] + self.insertion_weight):
candidate_actions.append(constant.INSERTION) #INSERTION
choice_probs.append(self.manipulation_choice_probabilities[constant.INSERTION])
choice_prob_sum = sum(choice_probs)
choice_probs = [choice_prob/choice_prob_sum for choice_prob in choice_probs]
action_index = numpy.random.choice(len(candidate_actions), 1, choice_probs)[0]
action = candidate_actions[action_index]
if action == constant.MATCHING:
manipulation_sequence.append(constant.MATCHING)
x, y = x - 1, y - 1
elif action == constant.SUBSTITUTION:
manipulation_sequence.append(constant.SUBSTITUTION)
x, y = x - 1, y - 1
elif action == constant.DELETION:
manipulation_sequence.append(constant.DELETION)
x = x - 1
elif action == constant.INSERTION:
manipulation_sequence.append(constant.INSERTION)
y = y - 1
manipulation_sequence = manipulation_sequence[::-1]
return manipulation_sequence
def align_source_and_target(self, source, target, manipulation_sequence):
source_index = 0
target_index = 0
aligned_source = []
aligned_target = []
for action in manipulation_sequence:
if action == constant.MATCHING or action == constant.SUBSTITUTION:
aligned_source.append(source[source_index])
aligned_target.append(target[target_index])
source_index, target_index = source_index + 1, target_index + 1
elif action == constant.DELETION:
aligned_source.append(source[source_index])
aligned_target.append(constant.PADDING)
source_index = source_index + 1
elif action == constant.INSERTION:
aligned_source.append(constant.PADDING)
aligned_target.append(target[target_index])
target_index = target_index + 1
return aligned_source, aligned_target
def get_levenshtein_distance(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
return stage_matrix[-1, -1]
def get_manipulation_sequence(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
manipulation_sequence = self.backtrack_manipulation_sequence(source, target, stage_matrix)
return manipulation_sequence
def get_aligned_sequences(self, source, target):
stage_matrix = self.calculate_stage_matrix(source, target)
manipulation_sequence = self.backtrack_manipulation_sequence(source, target, stage_matrix)
aligned_source, aligned_target = self.align_source_and_target(source, target, manipulation_sequence)
return dict(
aligned_source=aligned_source,
aligned_target=aligned_target,
manipulation=manipulation_sequence
        )
import time
from .. import DB
from . import STD_STRING_SIZE
from .attributeDefinition import AttributeDefinition
from . import item
# Public names exported by this module.
__all__ = ['ItemType', 'ItemTypeToItemType', 'ItemTypeToAttributeDefinition']
class ItemType (DB.Model):
    """
    A type of lendable item.

    Soft deletion is implemented via deleted_time; the ``deleted``
    property exposes it as a boolean.
    """

    __tablename__ = 'ItemType'

    id = DB.Column(DB.Integer, primary_key=True)
    name = DB.Column(DB.String(STD_STRING_SIZE), unique=True)
    name_schema = DB.Column(DB.String(STD_STRING_SIZE))
    lendable = DB.Column(DB.Boolean, default=True)
    lending_duration = DB.Column(DB.Integer, nullable=True)
    deleted_time = DB.Column(DB.Integer, default=None)
    visible_for = DB.Column(DB.String(STD_STRING_SIZE), nullable=True)
    how_to = DB.Column(DB.Text, nullable=True)

    def __init__(self, name: str, name_schema: str, lendable: bool, lending_duration: int, visible_for: str = '', how_to: str = ''):
        self.name = name
        self.name_schema = name_schema
        # BUG FIX: lendable was accepted but never stored, so the column
        # default always won. Persist the caller's value.
        self.lendable = lendable
        self.lending_duration = lending_duration
        # Only store non-empty values; otherwise the nullable columns stay NULL.
        if visible_for != '' and visible_for is not None:
            self.visible_for = visible_for
        if how_to != '' and how_to is not None:
            self.how_to = how_to

    def update(self, name: str, name_schema: str, lendable: bool, lending_duration: int, visible_for: str, how_to: str):
        """Overwrite all editable fields of this item type."""
        self.name = name
        self.name_schema = name_schema
        self.lendable = lendable
        self.lending_duration = lending_duration
        self.visible_for = visible_for
        self.how_to = how_to

    @property
    def deleted(self):
        """Whether this item type has been soft-deleted."""
        return self.deleted_time is not None

    @deleted.setter
    def deleted(self, value: bool):
        # Record the deletion timestamp, or clear it on undelete.
        if value:
            self.deleted_time = int(time.time())
        else:
            self.deleted_time = None

    def unassociate_attr_def(self, attribute_definition_id):
        """
        Does all necessary changes to the database for unassociating a attribute definition from this type.

        Does not commit the changes. Returns a (status_code, message,
        changed) tuple.
        """
        # NOTE: '== None' is intentional — it builds a SQL IS NULL expression.
        if AttributeDefinition.query.filter(AttributeDefinition.id == attribute_definition_id).filter(AttributeDefinition.deleted_time == None).first() is None:
            return(400, 'Requested attribute definition not found!', False)
        association = (ItemTypeToAttributeDefinition
                       .query
                       .filter(ItemTypeToAttributeDefinition.item_type_id == self.id)
                       .filter(ItemTypeToAttributeDefinition.attribute_definition_id == attribute_definition_id)
                       .first())
        if association is None:
            # Nothing to do — the definition is not associated with this type.
            return(204, '', False)
        itads = item.ItemToAttributeDefinition.query.filter(
            item.ItemToAttributeDefinition.attribute_definition_id == attribute_definition_id).all()
        items = [itad.item for itad in itads]
        DB.session.delete(association)
        # Soft-delete the now-orphaned attribute values on affected items.
        for i in items:
            _, attributes_to_delete, _ = i.get_attribute_changes([attribute_definition_id], True)
            for attr in attributes_to_delete:
                attr.deleted = True
        return(204, '', True)
class ItemTypeToItemType (DB.Model):
__tablename__ = 'ItemTypeToItemType'
parent_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id', ondelete='CASCADE'), primary_key=True)
item_type_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id'), primary_key=True)
parent = DB.relationship('ItemType', foreign_keys=[parent_id], lazy='select',
backref=DB.backref('_contained_item_types', lazy='select',
single_parent=True, cascade="all, delete-orphan"))
item_type = DB.relationship('ItemType', foreign_keys=[item_type_id], lazy='select',
backref=DB.backref('_possible_parent_item_types', lazy='select',
single_parent=True, cascade="all, delete-orphan"))
def __init__(self, parent_id: int, item_type_id: int):
self.parent_id = parent_id
self.item_type_id = item_type_id
class ItemTypeToAttributeDefinition (DB.Model):
__tablename__ = 'ItemTypeToAttributeDefinition'
item_type_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id'), primary_key=True)
attribute_definition_id = DB.Column(DB.Integer, DB.ForeignKey('AttributeDefinition.id'), primary_key=True)
item_type = DB.relationship('ItemType', lazy='select',
backref=DB.backref('_item_type_to_attribute_definitions', lazy='select'))
attribute_definition = DB.relationship('AttributeDefinition', lazy='joined')
def __init__(self, item_type_id: int, attribute_definition_id: int):
self.item_type_id = item_type_id
self.attribute_definition_id = attribute_definition_id | total_tolles_ferleihsystem/db_models/itemType.py | import time
from .. import DB
from . import STD_STRING_SIZE
from .attributeDefinition import AttributeDefinition
from . import item
__all__ = ['ItemType', 'ItemTypeToItemType', 'ItemTypeToAttributeDefinition']
class ItemType (DB.Model):
__tablename__ = 'ItemType'
id = DB.Column(DB.Integer, primary_key=True)
name = DB.Column(DB.String(STD_STRING_SIZE), unique=True)
name_schema = DB.Column(DB.String(STD_STRING_SIZE))
lendable = DB.Column(DB.Boolean, default=True)
lending_duration = DB.Column(DB.Integer, nullable=True)
deleted_time = DB.Column(DB.Integer, default=None)
visible_for = DB.Column(DB.String(STD_STRING_SIZE), nullable=True)
how_to = DB.Column(DB.Text, nullable=True)
def __init__(self, name: str, name_schema: str, lendable: bool, lending_duration: int, visible_for: str = '', how_to: str = ''):
self.name = name
self.name_schema = name_schema
self.lending_duration = lending_duration
if visible_for != '' and visible_for != None:
self.visible_for = visible_for
if how_to != '' and how_to != None:
self.how_to = how_to
def update(self, name: str, name_schema: str, lendable: bool, lending_duration: int, visible_for: str, how_to: str):
self.name = name
self.name_schema = name_schema
self.lendable = lendable
self.lending_duration = lending_duration
self.visible_for = visible_for
self.how_to = how_to
@property
def deleted(self):
return self.deleted_time is not None
@deleted.setter
def deleted(self, value: bool):
if value:
self.deleted_time = int(time.time())
else:
self.deleted_time = None
def unassociate_attr_def(self, attribute_definition_id):
"""
Does all necessary changes to the database for unassociating a attribute definition from this type.
Does not commit the changes.
"""
if AttributeDefinition.query.filter(AttributeDefinition.id == attribute_definition_id).filter(AttributeDefinition.deleted_time == None).first() is None:
return(400, 'Requested attribute definition not found!', False)
association = (ItemTypeToAttributeDefinition
.query
.filter(ItemTypeToAttributeDefinition.item_type_id == self.id)
.filter(ItemTypeToAttributeDefinition.attribute_definition_id == attribute_definition_id)
.first())
if association is None:
return(204, '', False)
itads = item.ItemToAttributeDefinition.query.filter(
item.ItemToAttributeDefinition.attribute_definition_id == attribute_definition_id).all()
items = [itad.item for itad in itads]
DB.session.delete(association)
for i in items:
_, attributes_to_delete, _ = i.get_attribute_changes([attribute_definition_id], True)
for attr in attributes_to_delete:
attr.deleted = True
return(204, '', True)
class ItemTypeToItemType (DB.Model):
__tablename__ = 'ItemTypeToItemType'
parent_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id', ondelete='CASCADE'), primary_key=True)
item_type_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id'), primary_key=True)
parent = DB.relationship('ItemType', foreign_keys=[parent_id], lazy='select',
backref=DB.backref('_contained_item_types', lazy='select',
single_parent=True, cascade="all, delete-orphan"))
item_type = DB.relationship('ItemType', foreign_keys=[item_type_id], lazy='select',
backref=DB.backref('_possible_parent_item_types', lazy='select',
single_parent=True, cascade="all, delete-orphan"))
def __init__(self, parent_id: int, item_type_id: int):
self.parent_id = parent_id
self.item_type_id = item_type_id
class ItemTypeToAttributeDefinition (DB.Model):
__tablename__ = 'ItemTypeToAttributeDefinition'
item_type_id = DB.Column(DB.Integer, DB.ForeignKey('ItemType.id'), primary_key=True)
attribute_definition_id = DB.Column(DB.Integer, DB.ForeignKey('AttributeDefinition.id'), primary_key=True)
item_type = DB.relationship('ItemType', lazy='select',
backref=DB.backref('_item_type_to_attribute_definitions', lazy='select'))
attribute_definition = DB.relationship('AttributeDefinition', lazy='joined')
def __init__(self, item_type_id: int, attribute_definition_id: int):
self.item_type_id = item_type_id
self.attribute_definition_id = attribute_definition_id | 0.548915 | 0.161452 |
import password_generator as pg
import user_login as ul
import user_document_folder as udl
import encrypt_cred_file as ecf
import time
import os
from os import path
class UserCred:
def __init__(self):
self.USER_PATH = udl.DOC_LOC
self.star = "*" * 50
self.line = "-" * 50
# Checks the User's Document's folder for the 'Login Credential'
# folder
self.FILE = (udl.DOC_LOC +
r"\Login Credentials\Login_Credential_Gen"
r"\database\key\login_credentials.txt")
self.KEY_LOC = (udl.DOC_LOC +
r"\Login Credentials\Login_Credential_Gen"
r"\database\key\key.key")
def cred_file(self) -> None:
"""
Generates a .txt file that stores the users generated username and
password. Also displays the users randomly generated username and
password.
"""
username = ul.UsernameGen().gen_user_name()
time.sleep(1)
password = pg.GenPass().gen_password()
time.sleep(1)
print()
print("Your username is:")
print("*" * len(username))
print(username)
print("*" * len(username))
print()
print("Your password is:")
print("*" * len(username))
print(password)
print("*" * len(username))
time.sleep(1)
print()
user_input = input("Would you like to save your login "
"credentials?\n=> ").casefold()
if user_input == "yes" or user_input == "y":
# Opens the existing file
if os.path.exists(self.FILE):
print("Opening 'login_credentials' file...")
key = ecf.load_key()
ecf.decrypt(self.FILE, key)
time.sleep(1)
print("File successfully opened.")
time.sleep(1)
print()
user_input = input("What application are the "
"credentials for?\n=> ").upper()
while True:
with open(os.path.join(self.FILE), "r") as f:
application = f.read()
if user_input in application.split():
print("This application name already has a "
"login credential.\nPlease modify the "
"name.")
time.sleep(1)
user_input = input("What application are the "
"credentials for?\n=> ").upper()
else:
with open(os.path.join(self.FILE), "a") as f:
f.write(self.star + "\n")
f.write(user_input + "\n")
f.write(self.line + "\n")
f.write("Username: " + username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
print("Saving File...")
time.sleep(1)
saved = "Your login credentials has been " \
"saved into your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
if os.path.exists(self.USER_PATH +
r'\Login Credentials\Login_Credential_Gen '
r'\database\key\key.key'):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
break
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
break
else:
print("Creating new login credential file...")
time.sleep(1)
"""
Creates a directory called 'Login Credentials' in the users
document folder.
"""
# Directory name
directory = r'Login Credentials\Login_Credential_Gen\database\key'
# Parent Directory path to users default Document folder
# location
parent_dir = self.USER_PATH
# Path
dir_path = os.path.join(parent_dir, directory)
# Create the directory
# 'Login Credential' in users Document folder
os.mkdir(dir_path)
print("Login credential file created.")
time.sleep(1)
"""
Creates a new file that stores users login credentials.
"""
user_input = input("What service are the credentials for?"
"\n=> ").upper()
with open(os.path.join(self.FILE), "w") as f:
f.write(self.star + "\n")
f.write(user_input + "\n")
f.write(self.line + "\n")
f.write("Username: " + username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
print("Saving File...")
time.sleep(1)
saved = "Your login credentials has been " \
"saved into your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
if os.path.exists(self.USER_PATH +
r'\Login Credentials\Login_Credential_Gen '
r'\database\key\key.key'):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
elif user_input == "no" or user_input == "n":
print("Login credentials was not saved.")
print()
time.sleep(1)
else:
print()
print("*" * 30)
print("Please type 'yes' or 'no'")
print("*" * 30)
time.sleep(1)
print()
def gen_pas(self) -> None:
"""
saves the randomly generated password to a .txt file in the users
Documents folder. The user generates their own username and saves the
users username and a randomly generated password into a .txt file.
"""
while True:
if path.exists(self.FILE):
password = pg.GenPass().gen_password()
time.sleep(1)
print("Your password is:")
print("*" * len(password))
print(password)
print("*" * len(password))
time.sleep(1)
user_input = input("Would you like to save your login "
"credentials?\n=>").casefold()
if user_input == "yes" or user_input == "y":
print()
print("Decrypting file...")
# Checks path for existing key.key file
if os.path.exists(self.KEY_LOC):
key = ecf.load_key()
ecf.decrypt(self.FILE, key)
# Creates key.key file
else:
ecf.write_key()
time.sleep(1)
print("File successfully unencrypted.")
print()
user_app_name = input("Enter application name\n=> ").upper()
while True:
with open(os.path.join(self.FILE), "r") as f:
application = f.read()
if user_app_name in application.split():
print("This application name already has a "
"login credential.\nPlease modify the "
"name.")
time.sleep(1)
user_app_name = input(
"What application are the credentials for?"
"\n=> ").upper()
user_username = input("Enter in your username\n=> ")
print("Saving login information...")
time.sleep(1)
# Appends the users self generated username and
# randomly generated password
with open(self.FILE, 'a') as f:
f.write(self.star + "\n")
f.write(user_app_name + "\n")
f.write(self.line + "\n")
f.write("Username: " + user_username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
time.sleep(1)
saved = "Your login credentials has been saved into " \
"your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
break
break
elif user_input == "no" or user_input == "n":
time.sleep(1)
print("Login credentials were not saved")
break
else:
"""
Creates a directory called 'Login Credentials' in the users
document folder.
"""
# Directory name
directory = r"Login Credentials\Login_Credential_Gen"
r"\database\key"
# Parent Directory path to users default Document folder
# location
parent_dir = self.USER_PATH
# Path
dir_path = os.path.join(parent_dir, directory)
# Create the directory
# 'Login Credential' in users Document folder
os.mkdir(dir_path)
with open(os.path.join(self.FILE), "a") as f:
f.write('')
print("Login credential file created.")
if os.path.exists(self.KEY_LOC):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
# Creates key.key file
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
time.sleep(1)
if __name__ == "__main__":
UserCred().cred_file()
UserCred().gen_pas() | Login Credentials Generator/M_Func.py | import password_generator as pg
import user_login as ul
import user_document_folder as udl
import encrypt_cred_file as ecf
import time
import os
from os import path
class UserCred:
def __init__(self):
self.USER_PATH = udl.DOC_LOC
self.star = "*" * 50
self.line = "-" * 50
# Checks the User's Document's folder for the 'Login Credential'
# folder
self.FILE = (udl.DOC_LOC +
r"\Login Credentials\Login_Credential_Gen"
r"\database\key\login_credentials.txt")
self.KEY_LOC = (udl.DOC_LOC +
r"\Login Credentials\Login_Credential_Gen"
r"\database\key\key.key")
def cred_file(self) -> None:
"""
Generates a .txt file that stores the users generated username and
password. Also displays the users randomly generated username and
password.
"""
username = ul.UsernameGen().gen_user_name()
time.sleep(1)
password = pg.GenPass().gen_password()
time.sleep(1)
print()
print("Your username is:")
print("*" * len(username))
print(username)
print("*" * len(username))
print()
print("Your password is:")
print("*" * len(username))
print(password)
print("*" * len(username))
time.sleep(1)
print()
user_input = input("Would you like to save your login "
"credentials?\n=> ").casefold()
if user_input == "yes" or user_input == "y":
# Opens the existing file
if os.path.exists(self.FILE):
print("Opening 'login_credentials' file...")
key = ecf.load_key()
ecf.decrypt(self.FILE, key)
time.sleep(1)
print("File successfully opened.")
time.sleep(1)
print()
user_input = input("What application are the "
"credentials for?\n=> ").upper()
while True:
with open(os.path.join(self.FILE), "r") as f:
application = f.read()
if user_input in application.split():
print("This application name already has a "
"login credential.\nPlease modify the "
"name.")
time.sleep(1)
user_input = input("What application are the "
"credentials for?\n=> ").upper()
else:
with open(os.path.join(self.FILE), "a") as f:
f.write(self.star + "\n")
f.write(user_input + "\n")
f.write(self.line + "\n")
f.write("Username: " + username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
print("Saving File...")
time.sleep(1)
saved = "Your login credentials has been " \
"saved into your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
if os.path.exists(self.USER_PATH +
r'\Login Credentials\Login_Credential_Gen '
r'\database\key\key.key'):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
break
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
break
else:
print("Creating new login credential file...")
time.sleep(1)
"""
Creates a directory called 'Login Credentials' in the users
document folder.
"""
# Directory name
directory = r'Login Credentials\Login_Credential_Gen\database\key'
# Parent Directory path to users default Document folder
# location
parent_dir = self.USER_PATH
# Path
dir_path = os.path.join(parent_dir, directory)
# Create the directory
# 'Login Credential' in users Document folder
os.mkdir(dir_path)
print("Login credential file created.")
time.sleep(1)
"""
Creates a new file that stores users login credentials.
"""
user_input = input("What service are the credentials for?"
"\n=> ").upper()
with open(os.path.join(self.FILE), "w") as f:
f.write(self.star + "\n")
f.write(user_input + "\n")
f.write(self.line + "\n")
f.write("Username: " + username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
print("Saving File...")
time.sleep(1)
saved = "Your login credentials has been " \
"saved into your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
if os.path.exists(self.USER_PATH +
r'\Login Credentials\Login_Credential_Gen '
r'\database\key\key.key'):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
elif user_input == "no" or user_input == "n":
print("Login credentials was not saved.")
print()
time.sleep(1)
else:
print()
print("*" * 30)
print("Please type 'yes' or 'no'")
print("*" * 30)
time.sleep(1)
print()
def gen_pas(self) -> None:
"""
saves the randomly generated password to a .txt file in the users
Documents folder. The user generates their own username and saves the
users username and a randomly generated password into a .txt file.
"""
while True:
if path.exists(self.FILE):
password = pg.GenPass().gen_password()
time.sleep(1)
print("Your password is:")
print("*" * len(password))
print(password)
print("*" * len(password))
time.sleep(1)
user_input = input("Would you like to save your login "
"credentials?\n=>").casefold()
if user_input == "yes" or user_input == "y":
print()
print("Decrypting file...")
# Checks path for existing key.key file
if os.path.exists(self.KEY_LOC):
key = ecf.load_key()
ecf.decrypt(self.FILE, key)
# Creates key.key file
else:
ecf.write_key()
time.sleep(1)
print("File successfully unencrypted.")
print()
user_app_name = input("Enter application name\n=> ").upper()
while True:
with open(os.path.join(self.FILE), "r") as f:
application = f.read()
if user_app_name in application.split():
print("This application name already has a "
"login credential.\nPlease modify the "
"name.")
time.sleep(1)
user_app_name = input(
"What application are the credentials for?"
"\n=> ").upper()
user_username = input("Enter in your username\n=> ")
print("Saving login information...")
time.sleep(1)
# Appends the users self generated username and
# randomly generated password
with open(self.FILE, 'a') as f:
f.write(self.star + "\n")
f.write(user_app_name + "\n")
f.write(self.line + "\n")
f.write("Username: " + user_username + "\n")
f.write("Password: " + password + "\n")
f.write(self.star + "\n")
f.write("\n")
f.close()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
time.sleep(1)
saved = "Your login credentials has been saved into " \
"your Document's folder"
print("*" * len(saved))
print(saved)
print("*" * len(saved))
break
break
elif user_input == "no" or user_input == "n":
time.sleep(1)
print("Login credentials were not saved")
break
else:
"""
Creates a directory called 'Login Credentials' in the users
document folder.
"""
# Directory name
directory = r"Login Credentials\Login_Credential_Gen"
r"\database\key"
# Parent Directory path to users default Document folder
# location
parent_dir = self.USER_PATH
# Path
dir_path = os.path.join(parent_dir, directory)
# Create the directory
# 'Login Credential' in users Document folder
os.mkdir(dir_path)
with open(os.path.join(self.FILE), "a") as f:
f.write('')
print("Login credential file created.")
if os.path.exists(self.KEY_LOC):
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
# Creates key.key file
else:
ecf.write_key()
key = ecf.load_key()
ecf.encrypt(self.FILE, key)
time.sleep(1)
if __name__ == "__main__":
UserCred().cred_file()
UserCred().gen_pas() | 0.267695 | 0.068725 |
### Load libraries
import argparse, os, sys
import numpy as np
import nibabel as nib
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import sardunet
if __name__ == "__main__":
### Print help and parse arguments
parser = argparse.ArgumentParser(description='This program uses a trained SARDU-net to upsample a multi-contrast MRI scan on a voxel-by-voxel basis. It requires as input a SARDU-net, a 4D NIFTI file with an MRI scan to upsample, a mask (optional) and outputs the upsampled MRI scan as a 4D NIFTI. Author: <NAME>, University College London (<<EMAIL>><<EMAIL>>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('nifti_in', help='path to a 4D NIFTI file storing a multi-contrast MRI scan to upsample')
parser.add_argument('nifti_out', help='path of the output 4D NIFTI file that will store the upsampled MRI scan')
parser.add_argument('sardunet_file', help='path to a pickle binary file containing a trained SARDU-net')
parser.add_argument('sigmax_file', help='path to a pickle binary file containing the value of the maximum signal smax used for normalisation (smax = 1 in normalised signal space)')
parser.add_argument('sigmin_file', help='path to a pickle binary file containing the value of the minimum signal smin used for normalisation (smin = 0 in normalised signal space)')
parser.add_argument('--mask', metavar='<nifti>', help='path to a mask flagging with 1 voxels to analysise and with 0 voxels to ignore (the output NIFTI will contain 0 in voxels to ignore)')
parser.add_argument('--bits', metavar='<value>', default='32', help='number of bits per voxel in the output file (default 32 bits; choose either 32 or 64 bits)')
args = parser.parse_args()
### Print some information
print('')
print('********************************************************************')
print(' UPSAMPLE WITH S.A.R.D.U.-NET ')
print('********************************************************************')
print('')
print('** Called on 4D Nifti file: {}'.format(args.nifti_in))
print('** SARDU-net file: {}'.format(args.sardunet_file))
print('** 4D output Nifti file: {}'.format(args.nifti_out))
print('** File storing maximum signal for linear scaling: {}'.format(args.sigmax_file))
print('** File storing minimum signal for linear scaling: {}'.format(args.sigmin_file))
print('')
### Load input NIFTI
print('')
print(' ... loading data ...')
sin_obj = nib.load(args.nifti_in)
sin_header = sin_obj.header
sin_affine = sin_header.get_best_affine()
sin_data = sin_obj.get_fdata()
sin_dims = sin_data.shape
imgsize = sin_data.shape
imgsize = np.array(imgsize)
sin_data = np.array(sin_data,'float64')
if imgsize.size!=4:
print('')
print('ERROR: the input 4D NIFTI file {} is not actually not 4D. Exiting with 1...'.format(args.nifti_in))
print('')
sys.exit(1)
### Deal with optional arguments: mask
if isinstance(args.mask, str)==1:
# A mask for SARDU-net has been provided
mask_obj = nib.load(args.mask)
mask_dims = mask_obj.get_shape()
mask_header = mask_obj.header
mask_affine = mask_header.get_best_affine()
# Make sure the mask is a 3D file
mask_data = mask_obj.get_fdata()
masksize = mask_data.shape
masksize = np.array(masksize)
if masksize.size!=3:
print('')
print('WARNING: the mask file {} is not a 3D Nifti file. Ignoring mask...'.format(mask_nifti))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
elif ( (np.sum(sin_affine==mask_affine)!=16) or (sin_dims[0]!=mask_dims[0]) or (sin_dims[1]!=mask_dims[1]) or (sin_dims[2]!=mask_dims[2]) ):
print('')
print('WARNING: the geometry of the mask file {} does not match that of the input data. Ignoring mask...'.format(args.mask))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
else:
mask_data = np.array(mask_data,'float64')
# Make sure mask data is a numpy array
mask_data[mask_data>0] = 1
mask_data[mask_data<=0] = 0
else:
# A mask for fitting has not been provided
mask_data = np.ones(imgsize[0:3],'float64')
### Bits per pixel
nbit = int(args.bits)
if( (nbit!=32) and (nbit!=64) ):
print('')
print('ERROR: the voxel bit depth must be either 32 or 64. You set {}. Exiting with 1...'.format(args.bits))
print('')
sys.exit(1)
#### Load SARDU-net in evaluation mode
h = open(args.sardunet_file,'rb')
net = pk.load(h)
h.close()
net.eval()
#### Load normalisation factors
h = open(args.sigmax_file,'rb')
smax = pk.load(h)
h.close()
h = open(args.sigmin_file,'rb')
smin = pk.load(h)
h.close()
### Allocate memory for network inputs and outputs
if(imgsize[3]!=net.selector_downsampsize):
print('')
print('ERROR: the input 4D NIFTI file {} has a number of measurements that does not match that of SARDU-net. Exiting with 1...'.format(args.nifti_in))
print('')
sys.exit(1)
idx = Tensor.numpy(net.selector_indices) # Indices of measurements
idx_sort = np.sort(idx) # Indices to guide zero-filling
nin = net.predictor_nneurons[0] # Number of neurons in first layer
nout = net.predictor_nneurons[-1] # Number of output measurements
sout_data = np.zeros((imgsize[0],imgsize[1],imgsize[2],nout))
### Loop to predict signals
print('')
print(' ... predicting MRI signals with SARDU-net ...')
for xx in range(0, imgsize[0]):
for yy in range(0, imgsize[1]):
for zz in range(0, imgsize[2]):
# Get voxel within mask
if(mask_data[xx,yy,zz]==1):
# Get qMRI signal
myvoxel = sin_data[xx,yy,zz,:]
# Normalise qMRI signal
myvoxel[myvoxel<smin] = smin
myvoxel = np.float32( (myvoxel - smin) / (smax - smin) )
# Zero-fill missing measurements
myvoxel_zeroed = np.zeros(nin)
myvoxel_zeroed = np.float32(myvoxel_zeroed)
for qq in range(0,imgsize[3]):
myvoxel_zeroed[idx_sort[qq]] = myvoxel[qq]
# Pass voxel through SARDU-net
myvoxel_up = net(Tensor(myvoxel_zeroed))
myvoxel_up = myvoxel_up.detach().numpy()
# Bring voxel back to original signal space
myvoxel_up = myvoxel_up*(smax - smin) + smin
# Store voxel
sout_data[xx,yy,zz,:] = myvoxel_up
# Save the predicted NIFTI file as output
print('')
print(' ... saving output file as 4D NIFTI ...')
buffer_header = sin_obj.header
if(nbit==64):
buffer_header.set_data_dtype('float64') # Make sure we save output files float64, even if input is not
elif(nbit==32):
buffer_header.set_data_dtype('float32') # Make sure we save output files float64, even if input is not
sout_obj = nib.Nifti1Image(sout_data,sin_obj.affine,buffer_header)
nib.save(sout_obj, args.nifti_out)
print('')
print(' Done!')
print('')
sys.exit(0) | ainas/upsample_sardunet_v1.py |
### Load libraries
import argparse, os, sys
import numpy as np
import nibabel as nib
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import sardunet
if __name__ == "__main__":
### Print help and parse arguments
parser = argparse.ArgumentParser(description='This program uses a trained SARDU-net to upsample a multi-contrast MRI scan on a voxel-by-voxel basis. It requires as input a SARDU-net, a 4D NIFTI file with an MRI scan to upsample, a mask (optional) and outputs the upsampled MRI scan as a 4D NIFTI. Author: <NAME>, University College London (<<EMAIL>><<EMAIL>>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('nifti_in', help='path to a 4D NIFTI file storing a multi-contrast MRI scan to upsample')
parser.add_argument('nifti_out', help='path of the output 4D NIFTI file that will store the upsampled MRI scan')
parser.add_argument('sardunet_file', help='path to a pickle binary file containing a trained SARDU-net')
parser.add_argument('sigmax_file', help='path to a pickle binary file containing the value of the maximum signal smax used for normalisation (smax = 1 in normalised signal space)')
parser.add_argument('sigmin_file', help='path to a pickle binary file containing the value of the minimum signal smin used for normalisation (smin = 0 in normalised signal space)')
parser.add_argument('--mask', metavar='<nifti>', help='path to a mask flagging with 1 voxels to analysise and with 0 voxels to ignore (the output NIFTI will contain 0 in voxels to ignore)')
parser.add_argument('--bits', metavar='<value>', default='32', help='number of bits per voxel in the output file (default 32 bits; choose either 32 or 64 bits)')
args = parser.parse_args()
### Print some information
print('')
print('********************************************************************')
print(' UPSAMPLE WITH S.A.R.D.U.-NET ')
print('********************************************************************')
print('')
print('** Called on 4D Nifti file: {}'.format(args.nifti_in))
print('** SARDU-net file: {}'.format(args.sardunet_file))
print('** 4D output Nifti file: {}'.format(args.nifti_out))
print('** File storing maximum signal for linear scaling: {}'.format(args.sigmax_file))
print('** File storing minimum signal for linear scaling: {}'.format(args.sigmin_file))
print('')
### Load input NIFTI
print('')
print(' ... loading data ...')
sin_obj = nib.load(args.nifti_in)
sin_header = sin_obj.header
sin_affine = sin_header.get_best_affine()
sin_data = sin_obj.get_fdata()
sin_dims = sin_data.shape
imgsize = sin_data.shape
imgsize = np.array(imgsize)
sin_data = np.array(sin_data,'float64')
if imgsize.size!=4:
print('')
print('ERROR: the input 4D NIFTI file {} is not actually not 4D. Exiting with 1...'.format(args.nifti_in))
print('')
sys.exit(1)
### Deal with optional arguments: mask
if isinstance(args.mask, str)==1:
# A mask for SARDU-net has been provided
mask_obj = nib.load(args.mask)
mask_dims = mask_obj.get_shape()
mask_header = mask_obj.header
mask_affine = mask_header.get_best_affine()
# Make sure the mask is a 3D file
mask_data = mask_obj.get_fdata()
masksize = mask_data.shape
masksize = np.array(masksize)
if masksize.size!=3:
print('')
print('WARNING: the mask file {} is not a 3D Nifti file. Ignoring mask...'.format(mask_nifti))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
elif ( (np.sum(sin_affine==mask_affine)!=16) or (sin_dims[0]!=mask_dims[0]) or (sin_dims[1]!=mask_dims[1]) or (sin_dims[2]!=mask_dims[2]) ):
print('')
print('WARNING: the geometry of the mask file {} does not match that of the input data. Ignoring mask...'.format(args.mask))
print('')
mask_data = np.ones(imgsize[0:3],'float64')
else:
mask_data = np.array(mask_data,'float64')
# Make sure mask data is a numpy array
mask_data[mask_data>0] = 1
mask_data[mask_data<=0] = 0
else:
# A mask for fitting has not been provided
mask_data = np.ones(imgsize[0:3],'float64')
### Bits per pixel
nbit = int(args.bits)
if( (nbit!=32) and (nbit!=64) ):
print('')
print('ERROR: the voxel bit depth must be either 32 or 64. You set {}. Exiting with 1...'.format(args.bits))
print('')
sys.exit(1)
#### Load SARDU-net in evaluation mode
h = open(args.sardunet_file,'rb')
net = pk.load(h)
h.close()
net.eval()
#### Load normalisation factors
h = open(args.sigmax_file,'rb')
smax = pk.load(h)
h.close()
h = open(args.sigmin_file,'rb')
smin = pk.load(h)
h.close()
### Allocate memory for network inputs and outputs
if(imgsize[3]!=net.selector_downsampsize):
print('')
print('ERROR: the input 4D NIFTI file {} has a number of measurements that does not match that of SARDU-net. Exiting with 1...'.format(args.nifti_in))
print('')
sys.exit(1)
idx = Tensor.numpy(net.selector_indices) # Indices of measurements
idx_sort = np.sort(idx) # Indices to guide zero-filling
nin = net.predictor_nneurons[0] # Number of neurons in first layer
nout = net.predictor_nneurons[-1] # Number of output measurements
sout_data = np.zeros((imgsize[0],imgsize[1],imgsize[2],nout))
### Loop to predict signals
print('')
print(' ... predicting MRI signals with SARDU-net ...')
for xx in range(0, imgsize[0]):
for yy in range(0, imgsize[1]):
for zz in range(0, imgsize[2]):
# Get voxel within mask
if(mask_data[xx,yy,zz]==1):
# Get qMRI signal
myvoxel = sin_data[xx,yy,zz,:]
# Normalise qMRI signal
myvoxel[myvoxel<smin] = smin
myvoxel = np.float32( (myvoxel - smin) / (smax - smin) )
# Zero-fill missing measurements
myvoxel_zeroed = np.zeros(nin)
myvoxel_zeroed = np.float32(myvoxel_zeroed)
for qq in range(0,imgsize[3]):
myvoxel_zeroed[idx_sort[qq]] = myvoxel[qq]
# Pass voxel through SARDU-net
myvoxel_up = net(Tensor(myvoxel_zeroed))
myvoxel_up = myvoxel_up.detach().numpy()
# Bring voxel back to original signal space
myvoxel_up = myvoxel_up*(smax - smin) + smin
# Store voxel
sout_data[xx,yy,zz,:] = myvoxel_up
# Save the predicted NIFTI file as output
print('')
print(' ... saving output file as 4D NIFTI ...')
buffer_header = sin_obj.header
if(nbit==64):
buffer_header.set_data_dtype('float64') # Make sure we save output files float64, even if input is not
elif(nbit==32):
buffer_header.set_data_dtype('float32') # Make sure we save output files float64, even if input is not
sout_obj = nib.Nifti1Image(sout_data,sin_obj.affine,buffer_header)
nib.save(sout_obj, args.nifti_out)
print('')
print(' Done!')
print('')
sys.exit(0) | 0.419529 | 0.323941 |
from datetime import datetime, timedelta
from uuid import uuid4
import pytest
from django.utils import timezone
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from ee.clickhouse.models.session_recording_event import create_session_recording_event
from ee.clickhouse.queries.funnels.funnel_unordered_persons import ClickhouseFunnelUnorderedActors
from ee.clickhouse.test.test_journeys import journeys_for
from ee.clickhouse.util import ClickhouseTestMixin, snapshot_clickhouse_queries
from posthog.constants import INSIGHT_FUNNELS
from posthog.models.filters import Filter
from posthog.models.person import Person
from posthog.test.base import APIBaseTest
FORMAT_TIME = "%Y-%m-%d 00:00:00"
def _create_person(**kwargs):
    """Persist a Person and return a lightweight stand-in keyed by its UUID."""
    stored = Person.objects.create(**kwargs)
    uid = stored.uuid
    return Person(id=uid, uuid=uid)
def _create_event(**kwargs):
    """Create a ClickHouse event, minting an event_uuid when none is supplied."""
    kwargs.setdefault("event_uuid", uuid4())
    create_event(**kwargs)
def _create_session_recording_event(team_id, distinct_id, session_id, timestamp, window_id="", has_full_snapshot=True):
    """Insert one session-recording event with a minimal snapshot_data payload.

    snapshot_data carries the event's epoch timestamp and the full-snapshot
    flag; window_id defaults to the empty string.
    """
    create_session_recording_event(
        uuid=uuid4(),
        team_id=team_id,
        distinct_id=distinct_id,
        timestamp=timestamp,
        session_id=session_id,
        window_id=window_id,
        snapshot_data={"timestamp": timestamp.timestamp(), "has_full_snapshot": has_full_snapshot,},
    )
class TestFunnelUnorderedStepsPersons(ClickhouseTestMixin, APIBaseTest):
def _create_sample_data_multiple_dropoffs(self):
events_by_person = {}
for i in range(5):
events_by_person[f"user_{i}"] = [
{"event": "step one", "timestamp": datetime(2021, 5, 1)},
{"event": "step three", "timestamp": datetime(2021, 5, 3)},
{"event": "step two", "timestamp": datetime(2021, 5, 5)},
]
for i in range(5, 15):
events_by_person[f"user_{i}"] = [
{"event": "step two", "timestamp": datetime(2021, 5, 1)},
{"event": "step one", "timestamp": datetime(2021, 5, 3)},
]
for i in range(15, 35):
events_by_person[f"user_{i}"] = [{"event": "step one", "timestamp": datetime(2021, 5, 1)}]
journeys_for(events_by_person, self.team)
def test_invalid_steps(self):
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": "blah",
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
with self.assertRaises(ValueError):
ClickhouseFunnelUnorderedActors(filter, self.team).run()
filter = filter.with_data({"funnel_step": -1})
with pytest.raises(ValueError):
_, _ = ClickhouseFunnelUnorderedActors(filter, self.team).run()
    def test_first_step(self):
        """All 35 seeded users performed "step one", so step 1 matches everyone."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": 1,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        _, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
        self.assertEqual(35, len(serialized_results))
    def test_last_step(self):
        """Only the 5 users who emitted all three steps (in any order) reach step 3."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": 3,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        _, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
        self.assertEqual(5, len(serialized_results))
    def test_second_step_dropoff(self):
        """funnel_step=-2 selects users who dropped before step 2: the 20 one-step users."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": -2,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        _, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
        self.assertEqual(20, len(serialized_results))
    def test_last_step_dropoff(self):
        """funnel_step=-3 selects users who reached two steps but not three: 10 users."""
        self._create_sample_data_multiple_dropoffs()
        data = {
            "insight": INSIGHT_FUNNELS,
            "interval": "day",
            "date_from": "2021-05-01 00:00:00",
            "date_to": "2021-05-07 00:00:00",
            "funnel_window_days": 7,
            "funnel_step": -3,
            "events": [
                {"id": "step one", "order": 0},
                {"id": "step two", "order": 1},
                {"id": "step three", "order": 2},
            ],
        }
        filter = Filter(data=data)
        _, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
        self.assertEqual(10, len(serialized_results))
@snapshot_clickhouse_queries
@freeze_time("2021-01-02 00:00:00.000Z")
def test_unordered_funnel_does_not_return_recordings(self):
p1 = _create_person(distinct_ids=[f"user_1"], team=self.team)
_create_event(
event="step two",
distinct_id="user_1",
team=self.team,
timestamp=timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
properties={"$session_id": "s1", "$window_id": "w1"},
event_uuid="21111111-1111-1111-1111-111111111111",
)
_create_event(
event="step one",
distinct_id="user_1",
team=self.team,
timestamp=(timezone.now() + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S.%f"),
properties={"$session_id": "s1", "$window_id": "w1"},
event_uuid="11111111-1111-1111-1111-111111111111",
)
_create_session_recording_event(self.team.pk, "user_1", "s1", timezone.now() + timedelta(days=1))
filter = Filter(
data={
"insight": INSIGHT_FUNNELS,
"date_from": "2021-01-01",
"date_to": "2021-01-08",
"interval": "day",
"funnel_window_days": 7,
"funnel_step": 1,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
"include_recordings": "true", # <- The important line
}
)
_, results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(results[0]["id"], p1.uuid)
self.assertEqual(
results[0]["matched_recordings"], [],
) | ee/clickhouse/queries/funnels/test/test_funnel_unordered_persons.py | from datetime import datetime, timedelta
from uuid import uuid4
import pytest
from django.utils import timezone
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from ee.clickhouse.models.session_recording_event import create_session_recording_event
from ee.clickhouse.queries.funnels.funnel_unordered_persons import ClickhouseFunnelUnorderedActors
from ee.clickhouse.test.test_journeys import journeys_for
from ee.clickhouse.util import ClickhouseTestMixin, snapshot_clickhouse_queries
from posthog.constants import INSIGHT_FUNNELS
from posthog.models.filters import Filter
from posthog.models.person import Person
from posthog.test.base import APIBaseTest
FORMAT_TIME = "%Y-%m-%d 00:00:00"
def _create_person(**kwargs):
person = Person.objects.create(**kwargs)
return Person(id=person.uuid, uuid=person.uuid)
def _create_event(**kwargs):
if "event_uuid" not in kwargs:
kwargs.update({"event_uuid": uuid4()})
create_event(**kwargs)
def _create_session_recording_event(team_id, distinct_id, session_id, timestamp, window_id="", has_full_snapshot=True):
create_session_recording_event(
uuid=uuid4(),
team_id=team_id,
distinct_id=distinct_id,
timestamp=timestamp,
session_id=session_id,
window_id=window_id,
snapshot_data={"timestamp": timestamp.timestamp(), "has_full_snapshot": has_full_snapshot,},
)
class TestFunnelUnorderedStepsPersons(ClickhouseTestMixin, APIBaseTest):
def _create_sample_data_multiple_dropoffs(self):
events_by_person = {}
for i in range(5):
events_by_person[f"user_{i}"] = [
{"event": "step one", "timestamp": datetime(2021, 5, 1)},
{"event": "step three", "timestamp": datetime(2021, 5, 3)},
{"event": "step two", "timestamp": datetime(2021, 5, 5)},
]
for i in range(5, 15):
events_by_person[f"user_{i}"] = [
{"event": "step two", "timestamp": datetime(2021, 5, 1)},
{"event": "step one", "timestamp": datetime(2021, 5, 3)},
]
for i in range(15, 35):
events_by_person[f"user_{i}"] = [{"event": "step one", "timestamp": datetime(2021, 5, 1)}]
journeys_for(events_by_person, self.team)
def test_invalid_steps(self):
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": "blah",
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
with self.assertRaises(ValueError):
ClickhouseFunnelUnorderedActors(filter, self.team).run()
filter = filter.with_data({"funnel_step": -1})
with pytest.raises(ValueError):
_, _ = ClickhouseFunnelUnorderedActors(filter, self.team).run()
def test_first_step(self):
self._create_sample_data_multiple_dropoffs()
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": 1,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
_, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(35, len(serialized_results))
def test_last_step(self):
self._create_sample_data_multiple_dropoffs()
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": 3,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
_, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(5, len(serialized_results))
def test_second_step_dropoff(self):
self._create_sample_data_multiple_dropoffs()
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": -2,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
_, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(20, len(serialized_results))
def test_last_step_dropoff(self):
self._create_sample_data_multiple_dropoffs()
data = {
"insight": INSIGHT_FUNNELS,
"interval": "day",
"date_from": "2021-05-01 00:00:00",
"date_to": "2021-05-07 00:00:00",
"funnel_window_days": 7,
"funnel_step": -3,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
}
filter = Filter(data=data)
_, serialized_results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(10, len(serialized_results))
@snapshot_clickhouse_queries
@freeze_time("2021-01-02 00:00:00.000Z")
def test_unordered_funnel_does_not_return_recordings(self):
p1 = _create_person(distinct_ids=[f"user_1"], team=self.team)
_create_event(
event="step two",
distinct_id="user_1",
team=self.team,
timestamp=timezone.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
properties={"$session_id": "s1", "$window_id": "w1"},
event_uuid="21111111-1111-1111-1111-111111111111",
)
_create_event(
event="step one",
distinct_id="user_1",
team=self.team,
timestamp=(timezone.now() + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S.%f"),
properties={"$session_id": "s1", "$window_id": "w1"},
event_uuid="11111111-1111-1111-1111-111111111111",
)
_create_session_recording_event(self.team.pk, "user_1", "s1", timezone.now() + timedelta(days=1))
filter = Filter(
data={
"insight": INSIGHT_FUNNELS,
"date_from": "2021-01-01",
"date_to": "2021-01-08",
"interval": "day",
"funnel_window_days": 7,
"funnel_step": 1,
"events": [
{"id": "step one", "order": 0},
{"id": "step two", "order": 1},
{"id": "step three", "order": 2},
],
"include_recordings": "true", # <- The important line
}
)
_, results = ClickhouseFunnelUnorderedActors(filter, self.team).get_actors()
self.assertEqual(results[0]["id"], p1.uuid)
self.assertEqual(
results[0]["matched_recordings"], [],
) | 0.486819 | 0.337859 |
"""Estimate the coefficient of variation (CV) of bead intensities.

Acquires TRIALS image sets, detects bead-like blobs from a thresholded
gradient magnitude, tracks centroids across trials, and keeps running
mean / mean-of-squares of each bead's local intensity sum to build a
CV histogram.
"""
from Imaging import Imaging, Camera
import numpy as np
# NOTE(review): skimage.draw.circle was deprecated and later removed in
# favour of skimage.draw.disk -- confirm the pinned scikit-image version.
from skimage.draw import circle
from skimage.measure import regionprops,label
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cam = Camera()
cam.configureGPIO()
cam.configureProperties()
imaging = Imaging(cam)
exposures = np.array([7000,3000])  # exposure times passed to acquisition
TRIALS = 20
for trial in range(TRIALS):
    images = imaging.acquireImages(exposures=exposures)
    print('trial #',trial)
    # Work on channel 1 only -- presumably the second (3000) exposure; confirm.
    image = images[:,:,1]
    # Edge map: gradient magnitude thresholded at 5x its median.
    dx,dy = np.gradient(image)
    m = np.hypot(dx,dy)
    thres = m>(5*np.median(m))
    # NOTE(review): `neighbors=` is deprecated in newer scikit-image
    # (replaced by `connectivity=`); verify against the installed version.
    cclbl,nlbl = label(thres,neighbors=8,return_num=True)
    regions = regionprops(cclbl, image, True)
    data = np.empty((len(regions),3))  # rows: (centroid row, centroid col, bbox area)
    mean=[]
    sqmean=[]
    cent_next=[]
    for i,r in enumerate(regions):
        # grab centroids with appropriate bbox
        bbox_area = (r.bbox[2]-r.bbox[0])*(r.bbox[3]-r.bbox[1])
        data[i,:2] = r.centroid
        data[i,2] = bbox_area
    # Keep only plausibly bead-sized blobs (bbox area in (4, 100)).
    data = data[(data[:,2] > 4) & (data[:,2] < 100)]
    if trial > 0:
        for j in range(data.shape[0]):
            #check if centroid is already located
            if np.min(np.hypot(data[j,0]-cent_last[:,0],data[j,1]-cent_last[:,1])) < 2:
                #find closest centroid in last image
                idx = (np.hypot(data[j,0]-cent_last[:,0],data[j,1]-cent_last[:,1])).argmin()
                # Running average of the matched centroid position.
                cent_next.append((trial*cent_last[idx] + data[j,:2])/(trial+1))
                #calculate centroid-sum for current centroid in current image
                csi = circle(data[j,0], data[j,1], radius=3, shape=image.shape)
                csum = image[csi].sum()
                #calculate mean and sqmean
                # NOTE(review): idx indexes rows of cent_last, but
                # mean_last/sqmean_last only hold entries for centroids that
                # were matched in the previous trial; if any centroid drops
                # out these lists become shorter than cent_last and the
                # pairing (or the index itself) can go wrong -- verify.
                mean.append((trial*mean_last[idx] + csum)/(trial+1))
                sqmean.append((trial*sqmean_last[idx] + csum**2)/(trial+1))
    # initialization
    else:
        cent_next = data
        for j in range(data.shape[0]):
            csi = circle(data[j,0], data[j,1], radius=3, shape=image.shape)
            csum = image[csi].sum()
            mean.append(csum)
            sqmean.append(csum**2)
    mean_last = mean
    sqmean_last = sqmean
    cent_last = np.array(cent_next)[:,:2]
# Per-bead statistics from the running sums: std via E[x^2] - E[x]^2.
# Slight negative variances from round-off become NaN and are filtered below.
mean = np.array(mean_last)
sqmean = np.array(sqmean_last)
std = np.sqrt(sqmean-mean**2)
CV = 100*std/mean
good_ind = np.isfinite(CV)
plt.hist(CV[good_ind],50)
plt.title('CV measurement on beads')
plt.xlabel('CV')
plt.ylabel('number of bead centroids')
cam.cleanUp() | BeadCV.py | from Imaging import Imaging, Camera
import numpy as np
from skimage.draw import circle
from skimage.measure import regionprops,label
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
cam = Camera()
cam.configureGPIO()
cam.configureProperties()
imaging = Imaging(cam)
exposures = np.array([7000,3000])
TRIALS = 20
for trial in range(TRIALS):
images = imaging.acquireImages(exposures=exposures)
print('trial #',trial)
image = images[:,:,1]
dx,dy = np.gradient(image)
m = np.hypot(dx,dy)
thres = m>(5*np.median(m))
cclbl,nlbl = label(thres,neighbors=8,return_num=True)
regions = regionprops(cclbl, image, True)
data = np.empty((len(regions),3))
mean=[]
sqmean=[]
cent_next=[]
for i,r in enumerate(regions):
# grab centroids with appropriate bbox
bbox_area = (r.bbox[2]-r.bbox[0])*(r.bbox[3]-r.bbox[1])
data[i,:2] = r.centroid
data[i,2] = bbox_area
data = data[(data[:,2] > 4) & (data[:,2] < 100)]
if trial > 0:
for j in range(data.shape[0]):
#check if centroid is already located
if np.min(np.hypot(data[j,0]-cent_last[:,0],data[j,1]-cent_last[:,1])) < 2:
#find closest centroid in last image
idx = (np.hypot(data[j,0]-cent_last[:,0],data[j,1]-cent_last[:,1])).argmin()
cent_next.append((trial*cent_last[idx] + data[j,:2])/(trial+1))
#calculate centroid-sum for current centroid in current image
csi = circle(data[j,0], data[j,1], radius=3, shape=image.shape)
csum = image[csi].sum()
#calculate mean and sqmean
mean.append((trial*mean_last[idx] + csum)/(trial+1))
sqmean.append((trial*sqmean_last[idx] + csum**2)/(trial+1))
# initialization
else:
cent_next = data
for j in range(data.shape[0]):
csi = circle(data[j,0], data[j,1], radius=3, shape=image.shape)
csum = image[csi].sum()
mean.append(csum)
sqmean.append(csum**2)
mean_last = mean
sqmean_last = sqmean
cent_last = np.array(cent_next)[:,:2]
mean = np.array(mean_last)
sqmean = np.array(sqmean_last)
std = np.sqrt(sqmean-mean**2)
CV = 100*std/mean
good_ind = np.isfinite(CV)
plt.hist(CV[good_ind],50)
plt.title('CV measurement on beads')
plt.xlabel('CV')
plt.ylabel('number of bead centroids')
cam.cleanUp() | 0.319015 | 0.538377 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareSecureFPGAConsts:
    """Allowed string values for FirmwareSecureFPGA properties."""
    FPGAIMAGE_STATUS_ACTIVATION_FAILED = "Activation Failed"
    FPGAIMAGE_STATUS_ACTIVATION_IN_PROGRESS = "Activation In Progress"
    FPGAIMAGE_STATUS_READY = "Ready"
    SECURED_FALSE = "false"
    SECURED_NO = "no"
    SECURED_TRUE = "true"
    SECURED_YES = "yes"
class FirmwareSecureFPGA(ManagedObject):
    """Managed object for firmwareSecureFPGA (rn: fw-secure-fpga).

    NOTE(review): this module follows the shape of SDK-generated UCS
    managed-object code; changes are normally produced by the generator
    rather than edited by hand.
    """
    consts = FirmwareSecureFPGAConsts()
    naming_props = set([])
    # Class-level metadata describing this MO and where it may appear
    # in the MO tree (parent: networkElement; child: faultInst).
    mo_meta = MoMeta("FirmwareSecureFPGA", "firmwareSecureFPGA", "fw-secure-fpga", VersionMeta.Version413a, "InputOutput", 0x7f, [], ["admin"], ['networkElement'], ['faultInst'], [None])
    # Per-property metadata: type, version, access, mask and allowed values.
    prop_meta = {
        "fpga_image_status": MoPropertyMeta("fpga_image_status", "FPGAImageStatus", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Activation Failed", "Activation In Progress", "Ready"], []),
        "secured": MoPropertyMeta("secured", "Secured", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["false", "no", "true", "yes"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version413a, MoPropertyMeta.INTERNAL, 0x8, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # XML attribute name -> python attribute name.
    prop_map = {
        "FPGAImageStatus": "fpga_image_status",
        "Secured": "secured",
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        # Property values; populated from kwargs by ManagedObject.__init__.
        self.fpga_image_status = None
        self.secured = None
        self.child_action = None
        self.sacl = None
        self.status = None
ManagedObject.__init__(self, "FirmwareSecureFPGA", parent_mo_or_dn, **kwargs) | ucsmsdk/mometa/firmware/FirmwareSecureFPGA.py |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareSecureFPGAConsts:
FPGAIMAGE_STATUS_ACTIVATION_FAILED = "Activation Failed"
FPGAIMAGE_STATUS_ACTIVATION_IN_PROGRESS = "Activation In Progress"
FPGAIMAGE_STATUS_READY = "Ready"
SECURED_FALSE = "false"
SECURED_NO = "no"
SECURED_TRUE = "true"
SECURED_YES = "yes"
class FirmwareSecureFPGA(ManagedObject):
"""This is FirmwareSecureFPGA class."""
consts = FirmwareSecureFPGAConsts()
naming_props = set([])
mo_meta = MoMeta("FirmwareSecureFPGA", "firmwareSecureFPGA", "fw-secure-fpga", VersionMeta.Version413a, "InputOutput", 0x7f, [], ["admin"], ['networkElement'], ['faultInst'], [None])
prop_meta = {
"fpga_image_status": MoPropertyMeta("fpga_image_status", "FPGAImageStatus", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Activation Failed", "Activation In Progress", "Ready"], []),
"secured": MoPropertyMeta("secured", "Secured", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["false", "no", "true", "yes"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version413a, MoPropertyMeta.INTERNAL, 0x8, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version413a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version413a, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"FPGAImageStatus": "fpga_image_status",
"Secured": "secured",
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.fpga_image_status = None
self.secured = None
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "FirmwareSecureFPGA", parent_mo_or_dn, **kwargs) | 0.446495 | 0.169028 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Grpc.Tls' : {
'meta_info' : _MetaInfoClass('Grpc.Tls',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable TLS
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('trustpoint', ATTRIBUTE, 'str' , None, None,
[], [],
''' Trustpoint Name
''',
'trustpoint',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'tls',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
'Grpc' : {
'meta_info' : _MetaInfoClass('Grpc',
False,
[
_MetaInfoClassMember('address-family', ATTRIBUTE, 'str' , None, None,
[], [],
''' Address family identifier type
''',
'address_family',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable GRPC
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-per-user', ATTRIBUTE, 'int' , None, None,
[('1', '32')], [],
''' Maximum concurrent requests per user
''',
'max_request_per_user',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-total', ATTRIBUTE, 'int' , None, None,
[('1', '256')], [],
''' Maximum concurrent requests in total
''',
'max_request_total',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('10000', '57999')], [],
''' Server listening port
''',
'port',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('tls', REFERENCE_CLASS, 'Tls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg', 'Grpc.Tls',
[], [],
''' Transport Layer Security (TLS)
''',
'tls',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'grpc',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
}
_meta_table['Grpc.Tls']['meta_info'].parent =_meta_table['Grpc']['meta_info'] | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_man_ems_cfg.py | import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Grpc.Tls' : {
'meta_info' : _MetaInfoClass('Grpc.Tls',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable TLS
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('trustpoint', ATTRIBUTE, 'str' , None, None,
[], [],
''' Trustpoint Name
''',
'trustpoint',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'tls',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
'Grpc' : {
'meta_info' : _MetaInfoClass('Grpc',
False,
[
_MetaInfoClassMember('address-family', ATTRIBUTE, 'str' , None, None,
[], [],
''' Address family identifier type
''',
'address_family',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable GRPC
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-per-user', ATTRIBUTE, 'int' , None, None,
[('1', '32')], [],
''' Maximum concurrent requests per user
''',
'max_request_per_user',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-total', ATTRIBUTE, 'int' , None, None,
[('1', '256')], [],
''' Maximum concurrent requests in total
''',
'max_request_total',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('10000', '57999')], [],
''' Server listening port
''',
'port',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('tls', REFERENCE_CLASS, 'Tls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg', 'Grpc.Tls',
[], [],
''' Transport Layer Security (TLS)
''',
'tls',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'grpc',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
}
_meta_table['Grpc.Tls']['meta_info'].parent =_meta_table['Grpc']['meta_info'] | 0.398875 | 0.033876 |
import os
import unittest
from spotinst_sdk2 import SpotinstSession
from spotinst_sdk2.models.managed_instance.aws import *
class SimpleNamespace:
    """Minimal attribute bag: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class AwsManagedInstanceTestCase(unittest.TestCase):
    """Shared fixture: builds a Spotinst session/client and loads the expected
    request payload from the aws_managed_instance.json fixture."""
    def setUp(self):
        # NOTE(review): the auth token is a redacted placeholder; these tests
        # only exercise request serialisation, no API call is made.
        self.session = SpotinstSession(
            auth_token='<PASSWORD>',
            account_id='dummy-account')
        self.client = self.session.client("managed_instance_aws")
        self.mock_mi_json = self.load_json()
    def create_formatted_mi_request(self, managed_instance):
        # Serialise the model, drop missing fields and camelCase the keys so
        # the result matches the wire format captured in the fixture JSON.
        group_request = ManagedInstanceCreationRequest(managed_instance)
        excluded_group_dict = self.client.exclude_missing(
            json.loads(group_request.toJSON()))
        formatted_group_dict = self.client.convert_json(
            excluded_group_dict, self.client.underscore_to_camel)
        return formatted_group_dict
    @staticmethod
    def load_json():
        # Fixture containing the expected "managedInstance" request payload,
        # resolved relative to this test module's location.
        file_path = '../../test_lib/input/managed_instance/aws_managed_instance.json'
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), file_path)) as ma_json:
            return json.load(ma_json)
class TestAwsManagedInstancePersistRootDevice(AwsManagedInstanceTestCase):
    """persistRootDevice=True must survive formatting and match the fixture."""
    def runTest(self):
        persistence_obj = Persistence(True, False, "onLaunch", False)
        mi_obj = ManagedInstance(persistence=persistence_obj)
        formatted_mi_request = self.create_formatted_mi_request(mi_obj)
        actual_request_persist_root = formatted_mi_request['managedInstance']['persistence']['persistRootDevice']
        expected_request_persist_root = self.mock_mi_json['managedInstance']['persistence']['persistRootDevice']
        self.assertEqual(actual_request_persist_root, True)
        self.assertEqual(actual_request_persist_root, expected_request_persist_root)
class TestAwsManagedInstancePersistence(AwsManagedInstanceTestCase):
    """The whole persistence sub-document must serialise to the fixture payload."""
    def runTest(self):
        persistence_obj = Persistence(True, True, "onLaunch")
        mi_obj = ManagedInstance(persistence=persistence_obj)
        formatted_mi_request = self.create_formatted_mi_request(mi_obj)
        actual_request_json = formatted_mi_request['managedInstance']['persistence']
        expected_request_json = self.mock_mi_json['managedInstance']['persistence']
        self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceIntegrationsRoute53DomainsConfiguration(AwsManagedInstanceTestCase):
    """Route53 domain/record-set config must match the fixture's first domain entry."""
    def runTest(self):
        recordset_config = Route53RecordSetConfiguration(name="someName", use_public_ip=True, use_public_dns=True)
        route_53_domains_config = Route53DomainConfiguration(hosted_zone_id="123", spotinst_account_id="foo",
                                                             record_set_type="bar", record_sets=[recordset_config])
        route_53_config = Route53Configuration(domains=[route_53_domains_config])
        integrations_config = IntegrationsConfig(route53=route_53_config)
        mi_obj = ManagedInstance(integrations=integrations_config)
        formatted_mi_request = self.create_formatted_mi_request(mi_obj)
        actual_request_json = formatted_mi_request['managedInstance']['integrations']["route53"]["domains"][0]
        expected_request_json = self.mock_mi_json['managedInstance']['integrations']["route53"]["domains"][0]
        self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceIntegrationsLoadBalancersConfiguration(AwsManagedInstanceTestCase):
    """Load-balancer integration config must serialise to the fixture payload."""
    def runTest(self):
        lb = LoadBalancer(name="name", arn="arn", type="MULTAI_TARGET_SET", balancer_id="lb-1ee2e3q",
                          target_set_id="ts-3eq", az_awareness=True, auto_weight=True)
        lbs_config = LoadBalancersConfiguration(load_balancers=[lb])
        integrations_config = IntegrationsConfig(load_balancers_config=lbs_config)
        mi_obj = ManagedInstance(integrations=integrations_config)
        formatted_mi_request = self.create_formatted_mi_request(mi_obj)
        actual_request_json = formatted_mi_request['managedInstance']['integrations']["loadBalancersConfig"]
        expected_request_json = self.mock_mi_json['managedInstance']['integrations']["loadBalancersConfig"]
        self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceComputeLaunchSpecification(AwsManagedInstanceTestCase):
    """A fully-populated LaunchSpecification must serialise to the fixture's
    compute.launchSpecification payload (final assertion follows below)."""
    def runTest(self):
        # block_device_mappings
        ebs = EBS(delete_on_termination=True, iops=0, throughput=125, volume_type="gp2", volume_size=12)
        block_device_mappings = [BlockDeviceMapping(device_name="/dev/xvdcz", ebs=ebs)]
        # network_interfaces
        network_interfaces = [
            NetworkInterface(device_index=0, associate_ipv6_address=True, associate_public_ip_address=True)]
        # credit_specification
        credit_specification = CreditSpecification(cpu_credits="unlimited")
        # shutdown_script & user_data (base64-encoded placeholder scripts)
        shutdown_script = "dXNlcmJhc2g2NGVuY29kZWQ="
        user_data = "dXNlcmJhc2g2NGVuY29kZWQ="
        # resource_tag_specification
        resource_tag_spec = ResourceTagSpecification(volumes=TagSpecification(should_tag=False),
                                                     snapshots=TagSpecification(should_tag=True),
                                                     enis=TagSpecification(should_tag=False),
                                                     amis=TagSpecification(should_tag=True))
        # tags
        tags = [Tag(tag_key="Creator", tag_value="<EMAIL>")]
        # key_pair & image_id
        key_pair = "labs-oregon"
        image_id = "ami-01e24be29428c15b2"
        # security_group_ids
        security_group_ids = ["<KEY>"]
        # iam_role
        iam_role = IamRole(name="name", arn="arn")
        # tenancy
        tenancy = "default"
        # monitoring
        monitoring = False
        # ebs_optimized
        ebs_optimized = False
        # instance_types
        instance_types = InstanceTypes(preferred_type="t2.micro", types=["t2.micro"])
        launch_spec = LaunchSpecification(block_device_mappings=block_device_mappings,
                                          network_interfaces=network_interfaces,
                                          credit_specification=credit_specification, shutdown_script=shutdown_script,
                                          user_data=user_data, resource_tag_specification=resource_tag_spec, tags=tags,
                                          key_pair=key_pair, image_id=image_id, security_group_ids=security_group_ids,
                                          iam_role=iam_role, tenancy=tenancy, monitoring=monitoring,
                                          ebs_optimized=ebs_optimized, instance_types=instance_types
                                          )
        mi_obj = ManagedInstance(compute=Compute(launch_specification=launch_spec))
        formatted_mi_request = self.create_formatted_mi_request(mi_obj)
        actual_request_json = formatted_mi_request['managedInstance']['compute']["launchSpecification"]
        expected_request_json = self.mock_mi_json['managedInstance']['compute']["launchSpecification"]
self.assertDictEqual(actual_request_json, expected_request_json) | spotinst_sdk2/test/managed_instance/aws/test_aws_ma_model.py | import os
import unittest
from spotinst_sdk2 import SpotinstSession
from spotinst_sdk2.models.managed_instance.aws import *
class SimpleNamespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class AwsManagedInstanceTestCase(unittest.TestCase):
def setUp(self):
self.session = SpotinstSession(
auth_token='<PASSWORD>',
account_id='dummy-account')
self.client = self.session.client("managed_instance_aws")
self.mock_mi_json = self.load_json()
def create_formatted_mi_request(self, managed_instance):
group_request = ManagedInstanceCreationRequest(managed_instance)
excluded_group_dict = self.client.exclude_missing(
json.loads(group_request.toJSON()))
formatted_group_dict = self.client.convert_json(
excluded_group_dict, self.client.underscore_to_camel)
return formatted_group_dict
@staticmethod
def load_json():
file_path = '../../test_lib/input/managed_instance/aws_managed_instance.json'
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), file_path)) as ma_json:
return json.load(ma_json)
class TestAwsManagedInstancePersistRootDevice(AwsManagedInstanceTestCase):
def runTest(self):
persistence_obj = Persistence(True, False, "onLaunch", False)
mi_obj = ManagedInstance(persistence=persistence_obj)
formatted_mi_request = self.create_formatted_mi_request(mi_obj)
actual_request_persist_root = formatted_mi_request['managedInstance']['persistence']['persistRootDevice']
expected_request_persist_root = self.mock_mi_json['managedInstance']['persistence']['persistRootDevice']
self.assertEqual(actual_request_persist_root, True)
self.assertEqual(actual_request_persist_root, expected_request_persist_root)
class TestAwsManagedInstancePersistence(AwsManagedInstanceTestCase):
def runTest(self):
persistence_obj = Persistence(True, True, "onLaunch")
mi_obj = ManagedInstance(persistence=persistence_obj)
formatted_mi_request = self.create_formatted_mi_request(mi_obj)
actual_request_json = formatted_mi_request['managedInstance']['persistence']
expected_request_json = self.mock_mi_json['managedInstance']['persistence']
self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceIntegrationsRoute53DomainsConfiguration(AwsManagedInstanceTestCase):
def runTest(self):
recordset_config = Route53RecordSetConfiguration(name="someName", use_public_ip=True, use_public_dns=True)
route_53_domains_config = Route53DomainConfiguration(hosted_zone_id="123", spotinst_account_id="foo",
record_set_type="bar", record_sets=[recordset_config])
route_53_config = Route53Configuration(domains=[route_53_domains_config])
integrations_config = IntegrationsConfig(route53=route_53_config)
mi_obj = ManagedInstance(integrations=integrations_config)
formatted_mi_request = self.create_formatted_mi_request(mi_obj)
actual_request_json = formatted_mi_request['managedInstance']['integrations']["route53"]["domains"][0]
expected_request_json = self.mock_mi_json['managedInstance']['integrations']["route53"]["domains"][0]
self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceIntegrationsLoadBalancersConfiguration(AwsManagedInstanceTestCase):
def runTest(self):
lb = LoadBalancer(name="name", arn="arn", type="MULTAI_TARGET_SET", balancer_id="lb-1ee2e3q",
target_set_id="ts-3eq", az_awareness=True, auto_weight=True)
lbs_config = LoadBalancersConfiguration(load_balancers=[lb])
integrations_config = IntegrationsConfig(load_balancers_config=lbs_config)
mi_obj = ManagedInstance(integrations=integrations_config)
formatted_mi_request = self.create_formatted_mi_request(mi_obj)
actual_request_json = formatted_mi_request['managedInstance']['integrations']["loadBalancersConfig"]
expected_request_json = self.mock_mi_json['managedInstance']['integrations']["loadBalancersConfig"]
self.assertDictEqual(actual_request_json, expected_request_json)
class TestAwsManagedInstanceComputeLaunchSpecification(AwsManagedInstanceTestCase):
def runTest(self):
# block_device_mappings
ebs = EBS(delete_on_termination=True, iops=0, throughput=125, volume_type="gp2", volume_size=12)
block_device_mappings = [BlockDeviceMapping(device_name="/dev/xvdcz", ebs=ebs)]
# network_interfaces
network_interfaces = [
NetworkInterface(device_index=0, associate_ipv6_address=True, associate_public_ip_address=True)]
# credit_specification
credit_specification = CreditSpecification(cpu_credits="unlimited")
# shutdown_script & user_data
shutdown_script = "dXNlcmJhc2g2NGVuY29kZWQ="
user_data = "dXNlcmJhc2g2NGVuY29kZWQ="
# resource_tag_specification
resource_tag_spec = ResourceTagSpecification(volumes=TagSpecification(should_tag=False),
snapshots=TagSpecification(should_tag=True),
enis=TagSpecification(should_tag=False),
amis=TagSpecification(should_tag=True))
# tags
tags = [Tag(tag_key="Creator", tag_value="<EMAIL>")]
# key_pair & image_id
key_pair = "labs-oregon"
image_id = "ami-01e24be29428c15b2"
# security_group_ids
security_group_ids = ["<KEY>"]
# iam_role
iam_role = IamRole(name="name", arn="arn")
# tenancy
tenancy = "default"
# monitoring
monitoring = False
# ebs_optimized
ebs_optimized = False
# instance_types
instance_types = InstanceTypes(preferred_type="t2.micro", types=["t2.micro"])
launch_spec = LaunchSpecification(block_device_mappings=block_device_mappings,
network_interfaces=network_interfaces,
credit_specification=credit_specification, shutdown_script=shutdown_script,
user_data=user_data, resource_tag_specification=resource_tag_spec, tags=tags,
key_pair=key_pair, image_id=image_id, security_group_ids=security_group_ids,
iam_role=iam_role, tenancy=tenancy, monitoring=monitoring,
ebs_optimized=ebs_optimized, instance_types=instance_types
)
mi_obj = ManagedInstance(compute=Compute(launch_specification=launch_spec))
formatted_mi_request = self.create_formatted_mi_request(mi_obj)
actual_request_json = formatted_mi_request['managedInstance']['compute']["launchSpecification"]
expected_request_json = self.mock_mi_json['managedInstance']['compute']["launchSpecification"]
self.assertDictEqual(actual_request_json, expected_request_json) | 0.486332 | 0.176441 |
from django.test import TestCase
from django_orm.core.sql import RawExpression, SqlExpression, SqlFunction, AND, OR
from django_orm.postgresql.geometric.objects import Point, Circle, Box
from django_orm.postgresql.geometric.expressions import GeoExpression
from .models import SomeObject, CircleObjectModel, BoxObjectModel
class GeometricSearches(TestCase):
def setUp(self):
SomeObject.objects.all().delete()
BoxObjectModel.objects.all().delete()
CircleObjectModel.objects.all().delete()
def test_simple_contains(self):
SomeObject.objects.bulk_create([
SomeObject(pos=Point(1,1)),
SomeObject(pos=Point(2,2)),
SomeObject(pos=Point(1,5)),
SomeObject(pos=Point(3,4)),
])
# simple sql expresion.
qs = SomeObject.objects.where(
SqlExpression("pos", "<@", Box(1,1,4,4))
)
self.assertEqual(qs.count(), 3)
# builtin helper
qs = SomeObject.objects.where(
GeoExpression("pos").contained_on(Box(1,1,4,4))
)
self.assertEqual(qs.count(), 3)
def test_simple_overlap(self):
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(1,1,3,2)),
BoxObjectModel(barea=Box(2,2,4,7)),
BoxObjectModel(barea=Box(10,10,20,20)),
BoxObjectModel(barea=Box(-1,-4, -5, -2)),
])
# simple sql expression
qs = BoxObjectModel.objects.where(
SqlExpression("barea", "&&", Box(2,0,5,3))
)
self.assertEqual(qs.count(), 2)
# builtin helper
qs = BoxObjectModel.objects.where(
GeoExpression("barea").overlaps(Box(2,0,5,3))
)
self.assertEqual(qs.count(), 2)
def test_join_overlap_circle(self):
c_instance_0 = CircleObjectModel.objects.create(carea=Circle(1,1,5))
c_instance_1 = CircleObjectModel.objects.create(carea=Circle(-2, -2, 1))
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(1,1,3,2), other=c_instance_0),
BoxObjectModel(barea=Box(2,2,4,7), other=c_instance_0),
BoxObjectModel(barea=Box(10,10,20,20), other=c_instance_1)
])
qs = BoxObjectModel.objects.where(
SqlExpression("other__carea", "&&", Circle(2,2,2))
)
self.assertEqual(qs.count(), 2)
"""
-- this automaticaly creates this query with join
SELECT COUNT(*) FROM "pg_geometric_boxobjectmodel" LEFT OUTER JOIN "pg_geometric_circleobjectmodel" ON
("pg_geometric_boxobjectmodel"."other_id" = "pg_geometric_circleobjectmodel"."id") WHERE
("pg_geometric_circleobjectmodel"."carea" && circle '<(2,2),2>')
"""
def test_strict_left_and_right_of(self):
CircleObjectModel.objects.bulk_create([
CircleObjectModel(carea=Circle(-2,-2,1)),
CircleObjectModel(carea=Circle(0,5,1)),
CircleObjectModel(carea=Circle(10,0,1)),
])
qs = CircleObjectModel.objects.where(
GeoExpression("carea").is_strictly_left_of(Circle(5,0,1))
)
self.assertEqual(qs.count(), 2)
qs = CircleObjectModel.objects.where(
GeoExpression("carea").is_strictly_right_of(Circle(0,0,1))
)
self.assertEqual(qs.count(), 1)
def test_does_not_extend(self):
# box '((0,0),(1,1))' &< box '((0,0),(2,2))'
# box '((0,0),(3,3))' &> box '((0,0),(2,2))'
# TODO: improve this test
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(0,0,1,1)),
BoxObjectModel(barea=Box(0,0,1,1)),
])
qs = BoxObjectModel.objects.where(
GeoExpression("barea").does_not_extend_right(Box(0,0,2,2))
)
self.assertEqual(qs.count(), 2)
qs = BoxObjectModel.objects.where(
GeoExpression("barea").does_not_extend_left(Box(0,0,2,2))
)
self.assertEqual(qs.count(), 2)
class PointTests(TestCase):
def setUp(self):
self.obj0 = SomeObject.objects.create(pos=Point([1,1]))
self.obj1 = SomeObject.objects.create(pos=Point([2,1]))
self.obj2 = SomeObject.objects.create(pos=Point([5,6]))
self.obj3 = SomeObject.objects.create(pos=Point([4,4]))
def tearDown(self):
SomeObject.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.pos, Point)
self.assertEqual(self.obj0.pos, Point([1,1]))
def test_custom_instance(self):
self.assertEqual(Point([1,1]), Point(1,1))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Point([1,2,3])
with self.assertRaises(ValueError):
x = Point(1,2,3)
with self.assertRaises(ValueError):
x = Point(1)
class CircleTest(TestCase):
def setUp(self):
self.obj0 = CircleObjectModel.objects\
.create(carea=Circle(0,0,5))
def tearDown(self):
CircleObjectModel.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.carea, Circle)
self.assertEqual(self.obj0.carea, Circle(0,0,5))
def test_custom_instance(self):
self.assertEqual(Circle(1,1,1), Circle([1,1,1]))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Circle([1,2,3,3])
with self.assertRaises(ValueError):
x = Circle(1,2,3,2)
with self.assertRaises(ValueError):
x = Circle(1,2)
class BoxTest(TestCase):
def setUp(self):
self.obj0 = BoxObjectModel.objects\
.create(barea=Box(0,0,5,5))
def tearDown(self):
BoxObjectModel.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.barea, Box)
self.assertEqual(self.obj0.barea, Box(0,0,5,5))
def test_custom_instance(self):
self.assertEqual(Box(1,1,1,1), Box([1,1,1,1]))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Box([1,2,3,5,5])
with self.assertRaises(ValueError):
x = Box(1,2,3,2,5)
with self.assertRaises(ValueError):
x = Box(1,2,5) | tests/modeltests/pg_geometric/tests.py |
from django.test import TestCase
from django_orm.core.sql import RawExpression, SqlExpression, SqlFunction, AND, OR
from django_orm.postgresql.geometric.objects import Point, Circle, Box
from django_orm.postgresql.geometric.expressions import GeoExpression
from .models import SomeObject, CircleObjectModel, BoxObjectModel
class GeometricSearches(TestCase):
def setUp(self):
SomeObject.objects.all().delete()
BoxObjectModel.objects.all().delete()
CircleObjectModel.objects.all().delete()
def test_simple_contains(self):
SomeObject.objects.bulk_create([
SomeObject(pos=Point(1,1)),
SomeObject(pos=Point(2,2)),
SomeObject(pos=Point(1,5)),
SomeObject(pos=Point(3,4)),
])
# simple sql expresion.
qs = SomeObject.objects.where(
SqlExpression("pos", "<@", Box(1,1,4,4))
)
self.assertEqual(qs.count(), 3)
# builtin helper
qs = SomeObject.objects.where(
GeoExpression("pos").contained_on(Box(1,1,4,4))
)
self.assertEqual(qs.count(), 3)
def test_simple_overlap(self):
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(1,1,3,2)),
BoxObjectModel(barea=Box(2,2,4,7)),
BoxObjectModel(barea=Box(10,10,20,20)),
BoxObjectModel(barea=Box(-1,-4, -5, -2)),
])
# simple sql expression
qs = BoxObjectModel.objects.where(
SqlExpression("barea", "&&", Box(2,0,5,3))
)
self.assertEqual(qs.count(), 2)
# builtin helper
qs = BoxObjectModel.objects.where(
GeoExpression("barea").overlaps(Box(2,0,5,3))
)
self.assertEqual(qs.count(), 2)
def test_join_overlap_circle(self):
c_instance_0 = CircleObjectModel.objects.create(carea=Circle(1,1,5))
c_instance_1 = CircleObjectModel.objects.create(carea=Circle(-2, -2, 1))
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(1,1,3,2), other=c_instance_0),
BoxObjectModel(barea=Box(2,2,4,7), other=c_instance_0),
BoxObjectModel(barea=Box(10,10,20,20), other=c_instance_1)
])
qs = BoxObjectModel.objects.where(
SqlExpression("other__carea", "&&", Circle(2,2,2))
)
self.assertEqual(qs.count(), 2)
"""
-- this automaticaly creates this query with join
SELECT COUNT(*) FROM "pg_geometric_boxobjectmodel" LEFT OUTER JOIN "pg_geometric_circleobjectmodel" ON
("pg_geometric_boxobjectmodel"."other_id" = "pg_geometric_circleobjectmodel"."id") WHERE
("pg_geometric_circleobjectmodel"."carea" && circle '<(2,2),2>')
"""
def test_strict_left_and_right_of(self):
CircleObjectModel.objects.bulk_create([
CircleObjectModel(carea=Circle(-2,-2,1)),
CircleObjectModel(carea=Circle(0,5,1)),
CircleObjectModel(carea=Circle(10,0,1)),
])
qs = CircleObjectModel.objects.where(
GeoExpression("carea").is_strictly_left_of(Circle(5,0,1))
)
self.assertEqual(qs.count(), 2)
qs = CircleObjectModel.objects.where(
GeoExpression("carea").is_strictly_right_of(Circle(0,0,1))
)
self.assertEqual(qs.count(), 1)
def test_does_not_extend(self):
# box '((0,0),(1,1))' &< box '((0,0),(2,2))'
# box '((0,0),(3,3))' &> box '((0,0),(2,2))'
# TODO: improve this test
BoxObjectModel.objects.bulk_create([
BoxObjectModel(barea=Box(0,0,1,1)),
BoxObjectModel(barea=Box(0,0,1,1)),
])
qs = BoxObjectModel.objects.where(
GeoExpression("barea").does_not_extend_right(Box(0,0,2,2))
)
self.assertEqual(qs.count(), 2)
qs = BoxObjectModel.objects.where(
GeoExpression("barea").does_not_extend_left(Box(0,0,2,2))
)
self.assertEqual(qs.count(), 2)
class PointTests(TestCase):
def setUp(self):
self.obj0 = SomeObject.objects.create(pos=Point([1,1]))
self.obj1 = SomeObject.objects.create(pos=Point([2,1]))
self.obj2 = SomeObject.objects.create(pos=Point([5,6]))
self.obj3 = SomeObject.objects.create(pos=Point([4,4]))
def tearDown(self):
SomeObject.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.pos, Point)
self.assertEqual(self.obj0.pos, Point([1,1]))
def test_custom_instance(self):
self.assertEqual(Point([1,1]), Point(1,1))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Point([1,2,3])
with self.assertRaises(ValueError):
x = Point(1,2,3)
with self.assertRaises(ValueError):
x = Point(1)
class CircleTest(TestCase):
def setUp(self):
self.obj0 = CircleObjectModel.objects\
.create(carea=Circle(0,0,5))
def tearDown(self):
CircleObjectModel.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.carea, Circle)
self.assertEqual(self.obj0.carea, Circle(0,0,5))
def test_custom_instance(self):
self.assertEqual(Circle(1,1,1), Circle([1,1,1]))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Circle([1,2,3,3])
with self.assertRaises(ValueError):
x = Circle(1,2,3,2)
with self.assertRaises(ValueError):
x = Circle(1,2)
class BoxTest(TestCase):
def setUp(self):
self.obj0 = BoxObjectModel.objects\
.create(barea=Box(0,0,5,5))
def tearDown(self):
BoxObjectModel.objects.all().delete()
def test_casting(self):
self.assertIsInstance(self.obj0.barea, Box)
self.assertEqual(self.obj0.barea, Box(0,0,5,5))
def test_custom_instance(self):
self.assertEqual(Box(1,1,1,1), Box([1,1,1,1]))
def test_incorrect_constructor(self):
with self.assertRaises(ValueError):
x = Box([1,2,3,5,5])
with self.assertRaises(ValueError):
x = Box(1,2,3,2,5)
with self.assertRaises(ValueError):
x = Box(1,2,5) | 0.511473 | 0.404655 |
from .... pyaz_utils import _call_az
def list_sku(name, resource_group, workspace_name):
    """Return the SKUs available for the given Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
    """
    # Snapshot the arguments before binding any other local — every entry of
    # this mapping becomes a CLI flag for the generated command.
    params = locals()
    return _call_az("az synapse kusto pool list-sku", params)
def list(resource_group, workspace_name):
    """List all Kusto pools in the given workspace.

    Args:
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
    """
    # NOTE: the name shadows the builtin `list`, but it is part of the
    # generated public API and cannot be renamed without breaking callers.
    # Snapshot the arguments before any helper local is bound.
    params = locals()
    return _call_az("az synapse kusto pool list", params)
def show(name, resource_group, workspace_name):
    """Get the details of a single Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool show", params)
def delete(name, resource_group, workspace_name, no_wait=None, yes=None):
    """Delete a Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
        yes: Do not prompt for confirmation.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool delete", params)
def list_follower_database(name, resource_group, workspace_name):
    """List databases owned by this Kusto pool that are followed by another pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool list-follower-database", params)
def list_language_extension(name, resource_group, workspace_name):
    """List the language extensions that can run within KQL queries.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool list-language-extension", params)
def start(name, resource_group, workspace_name, no_wait=None):
    """Start a Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool start", params)
def stop(name, resource_group, workspace_name, no_wait=None):
    """Stop a Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool stop", params)
def wait(name, resource_group, workspace_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    """Block the CLI until a condition of the synapse Kusto pool is met.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        created: Wait until created with 'provisioningState' at 'Succeeded'.
        custom: Wait until the condition satisfies a custom JMESPath query,
            e.g. provisioningState!='InProgress'.
        deleted: Wait until deleted.
        exists: Wait until the resource exists.
        interval: Polling interval in seconds.
        timeout: Maximum wait in seconds.
        updated: Wait until updated with provisioningState at 'Succeeded'.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool wait", params)
def create(name, resource_group, sku, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, if_none_match=None, location=None, no_wait=None, optimized_autoscale=None, tags=None, workspace_uid=None):
    """Create a Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        sku: The SKU of the kusto pool.
        workspace_name: The name of the workspace.
        enable_purge: Boolean indicating whether purge operations are enabled.
        enable_streaming_ingest: Boolean indicating whether streaming ingest
            is enabled.
        if_match: ETag of the Kusto Pool; omit to always overwrite, or pass
            the last-seen ETag to prevent overwriting concurrent changes.
        if_none_match: Set to '*' to allow creation but prevent updating an
            existing pool; other values yield a 412 Pre-condition Failed.
        location: Location (values from ``az account list-locations``).
        no_wait: Do not wait for the long-running operation to finish.
        optimized_autoscale: Optimized auto scale definition.
        tags: Space-separated tags: key[=value] [...]; '' clears existing tags.
        workspace_uid: The workspace unique identifier.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool create", params)
def update(name, resource_group, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, no_wait=None, optimized_autoscale=None, sku=None, tags=None, workspace_uid=None):
    """Update a Kusto pool.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        enable_purge: Boolean indicating whether purge operations are enabled.
        enable_streaming_ingest: Boolean indicating whether streaming ingest
            is enabled.
        if_match: ETag of the Kusto Pool; omit to always overwrite, or pass
            the last-seen ETag to prevent overwriting concurrent changes.
        no_wait: Do not wait for the long-running operation to finish.
        optimized_autoscale: Optimized auto scale definition.
        sku: The SKU of the kusto pool.
        tags: Space-separated tags: key[=value] [...]; '' clears existing tags.
        workspace_uid: The workspace unique identifier.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool update", params)
def add_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    """Add language extensions that can run within KQL queries.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
        value: The list of language extensions.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool add-language-extension", params)
def detach_follower_database(attached_database_configuration_name, kusto_pool_resource_id, name, resource_group, workspace_name, no_wait=None):
    """Detach all followers of a database owned by this Kusto pool.

    Args:
        attached_database_configuration_name: Resource name of the attached
            database configuration in the follower cluster.
        kusto_pool_resource_id: Resource id of the cluster that follows a
            database owned by this cluster.
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
    """
    # Arguments are snapshotted first so no extra local leaks into the call.
    params = locals()
    return _call_az("az synapse kusto pool detach-follower-database", params)
def remove_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    """Remove language extensions that can run within KQL queries.

    Args:
        name: The name of the Kusto pool.
        resource_group: Name of the resource group (default configurable via
            ``az configure --defaults group=<name>``).
        workspace_name: The name of the workspace.
        no_wait: Do not wait for the long-running operation to finish.
        value: The list of language extensions.
    """
    # `locals()` is the payload: every parameter becomes a CLI flag, so no
    # other local may be bound before this call.
    # FIX: the original return line was fused with dataset-dump residue
    # ("| pyaz/synapse/kusto/pool/__init__.py | ..."), which is a syntax
    # error; the command string and the following import are restored below.
    return _call_az("az synapse kusto pool remove-language-extension", locals())

# NOTE(review): everything from here down is a second, duplicated copy of this
# module (extraction artifact); this import belonged to that copy.
from .... pyaz_utils import _call_az
def list_sku(name, resource_group, workspace_name):
    '''
    Returns the SKUs available for the provided resource.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE(review): byte-for-byte duplicate of the earlier `list_sku`
    # (extraction artifact) — deduplicate. `locals()` is the CLI payload.
    return _call_az("az synapse kusto pool list-sku", locals())
def list(resource_group, workspace_name):
    '''
    List all Kusto pools.
    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE(review): duplicate of the earlier `list` (extraction artifact);
    # also shadows the builtin `list`, but the name is generated public API.
    return _call_az("az synapse kusto pool list", locals())
def show(name, resource_group, workspace_name):
    '''
    Gets a Kusto pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE(review): duplicate of the earlier `show` (extraction artifact).
    return _call_az("az synapse kusto pool show", locals())
def delete(name, resource_group, workspace_name, no_wait=None, yes=None):
    '''
    Deletes a Kusto pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - yes -- Do not prompt for confirmation.
    '''
    # NOTE(review): duplicate of the earlier `delete` (extraction artifact).
    return _call_az("az synapse kusto pool delete", locals())
def list_follower_database(name, resource_group, workspace_name):
    '''
    Returns a list of databases that are owned by this Kusto Pool and were followed by another Kusto Pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE(review): duplicate of the earlier definition (extraction artifact).
    return _call_az("az synapse kusto pool list-follower-database", locals())
def list_language_extension(name, resource_group, workspace_name):
    '''
    Returns a list of language extensions that can run within KQL queries.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE(review): duplicate of the earlier definition (extraction artifact).
    return _call_az("az synapse kusto pool list-language-extension", locals())
def start(name, resource_group, workspace_name, no_wait=None):
    '''
    Starts a Kusto pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # NOTE(review): duplicate of the earlier `start` (extraction artifact).
    return _call_az("az synapse kusto pool start", locals())
def stop(name, resource_group, workspace_name, no_wait=None):
    '''
    Stops a Kusto pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # NOTE(review): duplicate of the earlier `stop` (extraction artifact).
    return _call_az("az synapse kusto pool stop", locals())
def wait(name, resource_group, workspace_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Place the CLI in a waiting state until a condition of the synapse kusto pool is met.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'
    - custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
    - deleted -- wait until deleted
    - exists -- wait until the resource exists
    - interval -- polling interval in seconds
    - timeout -- maximum wait in seconds
    - updated -- wait until updated with provisioningState at 'Succeeded'
    '''
    # NOTE(review): duplicate of the earlier `wait` (extraction artifact).
    return _call_az("az synapse kusto pool wait", locals())
def create(name, resource_group, sku, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, if_none_match=None, location=None, no_wait=None, optimized_autoscale=None, tags=None, workspace_uid=None):
    '''
    Create a Kusto pool.
    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - sku -- The SKU of the kusto pool.
    - workspace_name -- The name of the workspace
    Optional Parameters:
    - enable_purge -- A boolean value that indicates if the purge operations are enabled.
    - enable_streaming_ingest -- A boolean value that indicates if the streaming ingest is enabled.
    - if_match -- The ETag of the Kusto Pool. Omit this value to always overwrite the current Kusto Pool. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
    - if_none_match -- Set to '*' to allow a new Kusto Pool to be created, but to prevent updating an existing Kusto Pool. Other values will result in a 412 Pre-condition Failed response.
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - no_wait -- Do not wait for the long-running operation to finish.
    - optimized_autoscale -- Optimized auto scale definition.
    - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
    - workspace_uid -- The workspace unique identifier.
    '''
    # NOTE(review): duplicate of the earlier `create` (extraction artifact).
    return _call_az("az synapse kusto pool create", locals())
def update(name, resource_group, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, no_wait=None, optimized_autoscale=None, sku=None, tags=None, workspace_uid=None):
    '''
    Update a Kusto pool.

    Required:
    - name -- name of the Kusto pool
    - resource_group -- resource group name (default configurable via `az configure --defaults group=<name>`)
    - workspace_name -- name of the workspace

    Optional:
    - enable_purge -- boolean; whether purge operations are enabled
    - enable_streaming_ingest -- boolean; whether streaming ingest is enabled
    - if_match -- ETag of the Kusto pool; omit to always overwrite, or pass the last-seen ETag to avoid clobbering concurrent changes
    - no_wait -- do not wait for the long-running operation to finish
    - optimized_autoscale -- optimized auto scale definition
    - sku -- SKU of the Kusto pool
    - tags -- space-separated tags: key[=value] [key[=value] ...]; use '' to clear existing tags
    - workspace_uid -- workspace unique identifier
    '''
    # Snapshot the arguments before creating any helper locals, so the payload
    # handed to the CLI shim contains exactly the CLI parameters.
    arguments = locals()
    return _call_az("az synapse kusto pool update", arguments)
def add_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    '''
    Add a list of language extensions that can run within KQL queries.

    Required:
    - name -- name of the Kusto pool
    - resource_group -- resource group name (default configurable via `az configure --defaults group=<name>`)
    - workspace_name -- name of the workspace

    Optional:
    - no_wait -- do not wait for the long-running operation to finish
    - value -- the list of language extensions
    '''
    # Snapshot the arguments before creating any helper locals, so the payload
    # handed to the CLI shim contains exactly the CLI parameters.
    arguments = locals()
    return _call_az("az synapse kusto pool add-language-extension", arguments)
def detach_follower_database(attached_database_configuration_name, kusto_pool_resource_id, name, resource_group, workspace_name, no_wait=None):
    '''
    Detaches all followers of a database owned by this Kusto pool.

    Required:
    - attached_database_configuration_name -- resource name of the attached database configuration in the follower cluster
    - kusto_pool_resource_id -- resource id of the cluster that follows a database owned by this cluster
    - name -- name of the Kusto pool
    - resource_group -- resource group name (default configurable via `az configure --defaults group=<name>`)
    - workspace_name -- name of the workspace

    Optional:
    - no_wait -- do not wait for the long-running operation to finish
    '''
    # Snapshot the arguments before creating any helper locals, so the payload
    # handed to the CLI shim contains exactly the CLI parameters.
    arguments = locals()
    return _call_az("az synapse kusto pool detach-follower-database", arguments)
def remove_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    '''
    Remove a list of language extensions that can run within KQL queries.

    Required:
    - name -- name of the Kusto pool
    - resource_group -- resource group name (default configurable via `az configure --defaults group=<name>`)
    - workspace_name -- name of the workspace

    Optional:
    - no_wait -- do not wait for the long-running operation to finish
    - value -- the list of language extensions
    '''
    # Snapshot the arguments before creating any helper locals, so the payload
    # handed to the CLI shim contains exactly the CLI parameters.
    arguments = locals()
    return _call_az("az synapse kusto pool remove-language-extension", arguments)
import os
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
# GPU setup: grow GPU memory on demand instead of reserving it all up front.
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    # Guard added: unconditional [0] indexing raises IndexError on CPU-only hosts.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.enable_mlir_bridge()
# Load CIFAR-10 as (image, label) pairs; with_info keeps split metadata
# (used below for the shuffle-buffer size).
(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
def normalize_img(image, label):
    """Scale integer pixel values to floats in [0, 1]; pass the label through."""
    scaled = tf.cast(image, tf.float32) / 255.0
    return scaled, label
def augment(image, label):
    """Randomly augment a training image: grayscale ~10% of the time, then
    brightness jitter and a random horizontal flip. Label is unchanged."""
    grayscale_roll = tf.random.uniform((), minval=0, maxval=1)
    if grayscale_roll < 0.1:
        # rgb_to_grayscale yields 1 channel; tile back to 3 so shapes match.
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])
    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32
# Train pipeline: normalize -> cache -> shuffle -> augment -> batch -> prefetch.
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
# Test pipeline. BUG FIX: the original chained every step off `ds_train`,
# so the "test" set was the augmented, already-batched training data being
# normalized and batched a second time. Chain off `ds_test` instead; no
# augmentation or shuffling for evaluation.
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)
# Human-readable CIFAR-10 label names, indexed by integer class id.
class_names = [
    "Airplane",
    "Automobile",  # fixed typo: was "Autmobile"
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]
def get_model():
    """Build a small CNN for 32x32 RGB inputs that outputs 10 raw logits."""
    net = keras.Sequential()
    net.add(layers.Input((32, 32, 3)))
    net.add(layers.Conv2D(8, 3, padding="same", activation="relu"))
    net.add(layers.Conv2D(16, 3, padding="same", activation="relu"))
    net.add(layers.MaxPooling2D((2, 2)))
    net.add(layers.Flatten())
    net.add(layers.Dense(64, activation="relu"))
    net.add(layers.Dropout(0.1))
    # No softmax here: the loss is configured with from_logits=True.
    net.add(layers.Dense(10))
    return net
model = get_model()
model.compile(
    # `lr` is deprecated (and removed in newer Keras releases);
    # `learning_rate` is the supported keyword.
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    # Labels are sparse integer class ids and the model emits raw logits.
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
# Log metrics and weight histograms for TensorBoard after every epoch.
tensorboard_callback = keras.callbacks.TensorBoard(
    log_dir="tb_callback_dir", histogram_freq=1,
)
model.fit(
    ds_train,
    epochs=5,
    validation_data=ds_test,
    callbacks=[tensorboard_callback],
    verbose=2,
)
import io
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
# GPU setup: grow GPU memory on demand instead of reserving it all up front.
physical_devices = tf.config.list_physical_devices("GPU")
if physical_devices:
    # Guard added: unconditional [0] indexing raises IndexError on CPU-only hosts.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.enable_mlir_bridge()
# Load CIFAR-10 as (image, label) pairs; with_info keeps split metadata
# (used below for the shuffle-buffer size).
(ds_train, ds_test), ds_info = tfds.load(
    "cifar10",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,
    with_info=True,
)
def normalize_img(image, label):
    """Scale integer pixel values to floats in [0, 1]; pass the label through."""
    scaled = tf.cast(image, tf.float32) / 255.0
    return scaled, label
def augment(image, label):
    """Randomly augment a training image: grayscale ~10% of the time, then
    brightness jitter and a random horizontal flip. Label is unchanged."""
    grayscale_roll = tf.random.uniform((), minval=0, maxval=1)
    if grayscale_roll < 0.1:
        # rgb_to_grayscale yields 1 channel; tile back to 3 so shapes match.
        image = tf.tile(tf.image.rgb_to_grayscale(image), [1, 1, 3])
    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.image.random_flip_left_right(image)
    return image, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32
# Train pipeline: normalize -> cache -> shuffle -> augment -> batch -> prefetch.
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.map(augment)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
# Test pipeline. BUG FIX: the original chained every step off `ds_train`,
# so the "test" set was the augmented, already-batched training data being
# normalized and batched a second time. Chain off `ds_test` instead; no
# augmentation or shuffling for evaluation.
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)
# Human-readable CIFAR-10 label names, indexed by integer class id.
class_names = [
    "Airplane",
    "Automobile",  # fixed typo: was "Autmobile"
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]
def get_model():
    """Build a small CNN for 32x32 RGB inputs that outputs 10 raw logits."""
    net = keras.Sequential()
    net.add(layers.Input((32, 32, 3)))
    net.add(layers.Conv2D(8, 3, padding="same", activation="relu"))
    net.add(layers.Conv2D(16, 3, padding="same", activation="relu"))
    net.add(layers.MaxPooling2D((2, 2)))
    net.add(layers.Flatten())
    net.add(layers.Dense(64, activation="relu"))
    net.add(layers.Dropout(0.1))
    # No softmax here: the loss is configured with from_logits=True.
    net.add(layers.Dense(10))
    return net
model = get_model()
model.compile(
    # `lr` is deprecated (and removed in newer Keras releases);
    # `learning_rate` is the supported keyword.
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    # Labels are sparse integer class ids and the model emits raw logits.
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
# Log metrics and weight histograms for TensorBoard after every epoch.
tensorboard_callback = keras.callbacks.TensorBoard(
    log_dir="tb_callback_dir", histogram_freq=1,
)
model.fit(
    ds_train,
    epochs=5,
    validation_data=ds_test,
    callbacks=[tensorboard_callback],
    verbose=2,
)